From 9931cdc1e7db4fbb4b79ac6f893872ddd3ad8553 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 26 Apr 2021 18:33:30 +0200 Subject: [PATCH 0001/2828] stable-3 has been created, prepare for next 3.x.y release. --- changelogs/changelog.yaml | 2 +- changelogs/fragments/1475-xfconf-facts.yml | 4 -- ...lesystem-fix-1457-resizefs-idempotency.yml | 5 -- ...n_projects_not_initialized_has_changed.yml | 3 - .../1661-gitlab-deploy-key-update-pubkey.yml | 5 -- ...d-id-props-to-redfish-inventory-output.yml | 2 - .../fragments/1695-parted-updatedregex.yaml | 4 -- changelogs/fragments/1702_homebrew_tap.yml | 2 - .../1703-sensu_silence-fix_json_parsing.yml | 2 - .../1714-gitlab_runner-required-reg-token.yml | 2 - .../1715-proxmox_kvm-add-vmid-to-returns.yml | 2 - ...721-fix-nomad_job_info-no-jobs-failure.yml | 2 - changelogs/fragments/1722_timezone.yml | 2 - ...adog_monitor-add-missing-monitor-types.yml | 2 - ...ixes-for-updating-existing-gitlab-user.yml | 2 - changelogs/fragments/1735-imc-sessions.yml | 2 - .../fragments/1740-aerospike_migration.yml | 2 - .../fragments/1741-use-path-argspec.yml | 4 -- ...ase-insensitive-hostname-fqdn-matching.yml | 2 - ...document-fstypes-supported-by-resizefs.yml | 3 - .../1761-redfish-tidy-up-validation.yml | 2 - changelogs/fragments/1765-proxmox-params.yml | 2 - .../fragments/1766-zfs-fixed-sanity.yml | 2 - .../1771-centurylink-validation-elements.yml | 2 - .../fragments/1776-git_config-tilde_value.yml | 2 - .../1783-proxmox-kvm-fix-args-500-error.yaml | 3 - ...ease-nios_host_record-dns-bypass-check.yml | 3 - .../fragments/1795-list-elements-batch1.yml | 27 -------- .../1813-lxd_profile-merge-profiles.yml | 2 - ...4-dnsimple-add-support-for-caa-records.yml | 2 - .../1819-tidyup-pylint-blacklistnames.yml | 17 ----- .../1830-valmod_docmissingtype_batch1.yml | 7 -- .../1833-zfs-creation-only-properties.yaml | 2 - .../1838-runit-deprecate-param-dist.yml | 2 - .../fragments/1847-proxmox-kvm-fix-status.yml | 2 - 
...er-fix-state-is-clean-without-release.yaml | 2 - changelogs/fragments/1861-python3-keys.yml | 22 ------- .../1867-modhelper-cmdmixin-dict-params.yml | 2 - .../fragments/1871-infoblox-inventory.yml | 2 - .../fragments/1880-fix_cobbler_system_ssl.yml | 2 - ...nmcli-ensure-slave-type-for-bond-slave.yml | 2 - .../1885-sanity-check-fixes-batch3.yml | 18 ----- ...894-feat-nmcli-add-method4-and-method6.yml | 2 - .../1895-proxmox-kvm-fix-issue-1875.yml | 3 - ...m_versionlock-lock_unlock_concurrently.yml | 3 - .../1914-add-sanitization-to-url.yml | 3 - .../1916-add-version-sort-filter.yml | 3 - .../1927-removed-parameter-invalid.yml | 12 ---- .../fragments/1928-bigpanda-message.yml | 2 - changelogs/fragments/1929-grove-message.yml | 4 -- .../fragments/1949-proxmox-inventory-tags.yml | 5 -- changelogs/fragments/1970-valmod-batch7.yml | 18 ----- .../1972-ini_file-empty-str-value.yml | 2 - .../1977-jenkinsjob-validate-certs.yml | 2 - .../fragments/1978-jira-transition-logic.yml | 4 -- ...proxmox-inventory-fix-template-in-pool.yml | 3 - .../fragments/1993-haproxy-fix-draining.yml | 3 - .../fragments/1999-proxmox-fix-issue-1955.yml | 3 - .../2000-proxmox_kvm-tag-support.yml | 3 - changelogs/fragments/2001-no_log-false.yml | 2 - changelogs/fragments/2006-valmod-batch8.yml | 4 -- ...te-java-cert-replace-cert-when-changed.yml | 7 -- .../2013-proxmox-purge-parameter.yml | 3 - .../2014-allow-root-for-kibana-plugin.yaml | 2 - .../2020-remove-unused-param-in-rax.yml | 2 - .../fragments/2024-module-helper-fixes.yml | 4 -- ...ish-session-create-delete-authenticate.yml | 2 - .../2031-ipa_sudorule_add_runasextusers.yml | 3 - changelogs/fragments/2032-one_image-pyone.yml | 2 - .../fragments/2036-scaleway-inventory.yml | 3 - .../fragments/2037-add-from-csv-filter.yml | 7 -- ...index-error-in-redfish-set-manager-nic.yml | 2 - changelogs/fragments/2057-nios-devel.yml | 2 - .../fragments/2061-archive-refactor1.yml | 2 - .../fragments/2065-snmp-facts-timeout.yml | 2 - 
.../2072-stacki-host-params-fallback.yml | 2 - ...t-PATH-env-variable-in-zypper-modules.yaml | 2 - .../fragments/2110-vdo-add_force_option.yaml | 3 - .../2116-add-fields-to-ipa-config-module.yml | 2 - .../fragments/2125-git-config-scope-file.yml | 2 - .../2135-vmadm-resolvers-type-fix.yml | 2 - .../2139-dimensiondata_network-str-format.yml | 2 - .../2142-apache2_mod_proxy-cleanup.yml | 2 - ...143-kibana_plugin-fixed-function-calls.yml | 2 - .../fragments/2144-atomic_get_bin_path.yml | 4 -- .../2146-npm-add_no_bin_links_option.yaml | 3 - ...148-proxmox-inventory-agent-interfaces.yml | 3 - .../fragments/2157-unreachable-code.yml | 4 -- ...pa-user-sshpubkey-multi-word-comments.yaml | 2 - changelogs/fragments/2160-list-literals.yml | 11 ---- .../fragments/2161-pkgutil-list-extend.yml | 2 - .../fragments/2162-modhelper-variables.yml | 2 - .../fragments/2162-proxmox-constructable.yml | 3 - ...ystore_1667_improve_temp_files_storage.yml | 5 -- ...2174-ipa-user-userauthtype-multiselect.yml | 2 - ...re_1668_dont_expose_secrets_on_cmdline.yml | 4 -- ...3-java_keystore_improve_error_handling.yml | 6 -- .../2185-xfconf-absent-check-mode.yml | 2 - .../2188-xfconf-modhelper-variables.yml | 3 - changelogs/fragments/2192-add-jira-attach.yml | 2 - .../2203-modhelper-cause-changes-deco.yml | 2 - .../2204-github_repo-fix-baseurl_port.yml | 2 - changelogs/fragments/2208-jira-revamp.yml | 2 - changelogs/fragments/2218-cpanm-revamp.yml | 5 -- .../fragments/2220_nmcli_wifi_support.yaml | 3 - .../2223_nmcli_no_IP_config_on_slave.yaml | 3 - .../2224_nmcli_allow_MAC_overwrite.yaml | 3 - ..._keystore-1669-ssl-input-files-by-path.yml | 6 -- changelogs/fragments/2236-jira-isinstance.yml | 2 - changelogs/fragments/2244-hashids-filters.yml | 6 -- ...2245-proxmox_fix_agent_string_handling.yml | 3 - changelogs/fragments/2246-terraform.yaml | 4 -- ...9-linode_v4-support-private_ip-option.yaml | 2 - ...eycloak-modules-to-take-token-as-param.yml | 5 -- .../fragments/2257-ldap_entry-params.yml | 2 - 
...2259-proxmox-multi-nic-and-unsupported.yml | 5 -- .../2262-java_keystore-passphrase.yml | 8 --- ...vol_size_addition-subtraction_support.yaml | 5 -- .../fragments/2268-validation-univetion.yml | 4 -- .../2280-pids-new-pattern-option.yml | 3 - .../fragments/2282-nmap-fix-cache-support.yml | 2 - ...-influxdb_retention_policy-idempotence.yml | 4 -- ...-terraform-add-plugin_paths-parameter.yaml | 3 - .../2329-hiera-lookup-plugin-return-type.yaml | 2 - .../fragments/2340-jenkins_plugin-py2.yml | 2 - .../fragments/2349-jira-bugfix-b64decode.yml | 2 - ...620-consul_io-env-variables-conf-based.yml | 5 -- .../fragments/719-manageiq-resource_id.yml | 2 - .../fragments/720-cloudforms_inventory.yml | 2 - ...-invocate-feature-when-variable-is-set.yml | 2 - .../948-dellemc-migration-removal.yml | 13 ---- .../fragments/CVE-2021-20191_no_log.yml | 4 -- changelogs/fragments/allow_funcd_to_load.yml | 2 - changelogs/fragments/dict-filter.yml | 3 - .../fragments/meta-runtime-deprecations.yml | 2 - changelogs/fragments/no_log-fixes.yml | 25 ------- .../fragments/path_join-shim-filter.yml | 3 - .../fragments/remove-deprecated-features.yml | 16 ----- .../fragments/remove-deprecated-modules.yml | 66 ------------------- changelogs/fragments/selective-core-2.11.yml | 2 - galaxy.yml | 2 +- 141 files changed, 2 insertions(+), 619 deletions(-) delete mode 100644 changelogs/fragments/1475-xfconf-facts.yml delete mode 100644 changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml delete mode 100644 changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml delete mode 100644 changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml delete mode 100644 changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml delete mode 100644 changelogs/fragments/1695-parted-updatedregex.yaml delete mode 100644 changelogs/fragments/1702_homebrew_tap.yml delete mode 100644 changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml delete 
mode 100644 changelogs/fragments/1714-gitlab_runner-required-reg-token.yml delete mode 100644 changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml delete mode 100644 changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml delete mode 100644 changelogs/fragments/1722_timezone.yml delete mode 100644 changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml delete mode 100644 changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml delete mode 100644 changelogs/fragments/1735-imc-sessions.yml delete mode 100644 changelogs/fragments/1740-aerospike_migration.yml delete mode 100644 changelogs/fragments/1741-use-path-argspec.yml delete mode 100644 changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml delete mode 100644 changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml delete mode 100644 changelogs/fragments/1761-redfish-tidy-up-validation.yml delete mode 100644 changelogs/fragments/1765-proxmox-params.yml delete mode 100644 changelogs/fragments/1766-zfs-fixed-sanity.yml delete mode 100644 changelogs/fragments/1771-centurylink-validation-elements.yml delete mode 100644 changelogs/fragments/1776-git_config-tilde_value.yml delete mode 100644 changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml delete mode 100644 changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml delete mode 100644 changelogs/fragments/1795-list-elements-batch1.yml delete mode 100644 changelogs/fragments/1813-lxd_profile-merge-profiles.yml delete mode 100644 changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml delete mode 100644 changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml delete mode 100644 changelogs/fragments/1830-valmod_docmissingtype_batch1.yml delete mode 100644 changelogs/fragments/1833-zfs-creation-only-properties.yaml delete mode 100644 changelogs/fragments/1838-runit-deprecate-param-dist.yml delete mode 100644 
changelogs/fragments/1847-proxmox-kvm-fix-status.yml delete mode 100644 changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml delete mode 100644 changelogs/fragments/1861-python3-keys.yml delete mode 100644 changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml delete mode 100644 changelogs/fragments/1871-infoblox-inventory.yml delete mode 100644 changelogs/fragments/1880-fix_cobbler_system_ssl.yml delete mode 100644 changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml delete mode 100644 changelogs/fragments/1885-sanity-check-fixes-batch3.yml delete mode 100644 changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml delete mode 100644 changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml delete mode 100644 changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml delete mode 100644 changelogs/fragments/1914-add-sanitization-to-url.yml delete mode 100644 changelogs/fragments/1916-add-version-sort-filter.yml delete mode 100644 changelogs/fragments/1927-removed-parameter-invalid.yml delete mode 100644 changelogs/fragments/1928-bigpanda-message.yml delete mode 100644 changelogs/fragments/1929-grove-message.yml delete mode 100644 changelogs/fragments/1949-proxmox-inventory-tags.yml delete mode 100644 changelogs/fragments/1970-valmod-batch7.yml delete mode 100644 changelogs/fragments/1972-ini_file-empty-str-value.yml delete mode 100644 changelogs/fragments/1977-jenkinsjob-validate-certs.yml delete mode 100644 changelogs/fragments/1978-jira-transition-logic.yml delete mode 100644 changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml delete mode 100644 changelogs/fragments/1993-haproxy-fix-draining.yml delete mode 100644 changelogs/fragments/1999-proxmox-fix-issue-1955.yml delete mode 100644 changelogs/fragments/2000-proxmox_kvm-tag-support.yml delete mode 100644 changelogs/fragments/2001-no_log-false.yml delete mode 100644 changelogs/fragments/2006-valmod-batch8.yml delete mode 
100644 changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml delete mode 100644 changelogs/fragments/2013-proxmox-purge-parameter.yml delete mode 100644 changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml delete mode 100644 changelogs/fragments/2020-remove-unused-param-in-rax.yml delete mode 100644 changelogs/fragments/2024-module-helper-fixes.yml delete mode 100644 changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml delete mode 100644 changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml delete mode 100644 changelogs/fragments/2032-one_image-pyone.yml delete mode 100644 changelogs/fragments/2036-scaleway-inventory.yml delete mode 100644 changelogs/fragments/2037-add-from-csv-filter.yml delete mode 100644 changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml delete mode 100644 changelogs/fragments/2057-nios-devel.yml delete mode 100644 changelogs/fragments/2061-archive-refactor1.yml delete mode 100644 changelogs/fragments/2065-snmp-facts-timeout.yml delete mode 100644 changelogs/fragments/2072-stacki-host-params-fallback.yml delete mode 100644 changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml delete mode 100644 changelogs/fragments/2110-vdo-add_force_option.yaml delete mode 100644 changelogs/fragments/2116-add-fields-to-ipa-config-module.yml delete mode 100644 changelogs/fragments/2125-git-config-scope-file.yml delete mode 100644 changelogs/fragments/2135-vmadm-resolvers-type-fix.yml delete mode 100644 changelogs/fragments/2139-dimensiondata_network-str-format.yml delete mode 100644 changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml delete mode 100644 changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml delete mode 100644 changelogs/fragments/2144-atomic_get_bin_path.yml delete mode 100644 changelogs/fragments/2146-npm-add_no_bin_links_option.yaml delete mode 100644 changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml delete 
mode 100644 changelogs/fragments/2157-unreachable-code.yml delete mode 100644 changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml delete mode 100644 changelogs/fragments/2160-list-literals.yml delete mode 100644 changelogs/fragments/2161-pkgutil-list-extend.yml delete mode 100644 changelogs/fragments/2162-modhelper-variables.yml delete mode 100644 changelogs/fragments/2162-proxmox-constructable.yml delete mode 100644 changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml delete mode 100644 changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml delete mode 100644 changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml delete mode 100644 changelogs/fragments/2183-java_keystore_improve_error_handling.yml delete mode 100644 changelogs/fragments/2185-xfconf-absent-check-mode.yml delete mode 100644 changelogs/fragments/2188-xfconf-modhelper-variables.yml delete mode 100644 changelogs/fragments/2192-add-jira-attach.yml delete mode 100644 changelogs/fragments/2203-modhelper-cause-changes-deco.yml delete mode 100644 changelogs/fragments/2204-github_repo-fix-baseurl_port.yml delete mode 100644 changelogs/fragments/2208-jira-revamp.yml delete mode 100644 changelogs/fragments/2218-cpanm-revamp.yml delete mode 100644 changelogs/fragments/2220_nmcli_wifi_support.yaml delete mode 100644 changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml delete mode 100644 changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml delete mode 100644 changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml delete mode 100644 changelogs/fragments/2236-jira-isinstance.yml delete mode 100644 changelogs/fragments/2244-hashids-filters.yml delete mode 100644 changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml delete mode 100644 changelogs/fragments/2246-terraform.yaml delete mode 100644 changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml delete mode 100644 
changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml delete mode 100644 changelogs/fragments/2257-ldap_entry-params.yml delete mode 100644 changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml delete mode 100644 changelogs/fragments/2262-java_keystore-passphrase.yml delete mode 100644 changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml delete mode 100644 changelogs/fragments/2268-validation-univetion.yml delete mode 100644 changelogs/fragments/2280-pids-new-pattern-option.yml delete mode 100644 changelogs/fragments/2282-nmap-fix-cache-support.yml delete mode 100644 changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml delete mode 100644 changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml delete mode 100644 changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml delete mode 100644 changelogs/fragments/2340-jenkins_plugin-py2.yml delete mode 100644 changelogs/fragments/2349-jira-bugfix-b64decode.yml delete mode 100644 changelogs/fragments/620-consul_io-env-variables-conf-based.yml delete mode 100644 changelogs/fragments/719-manageiq-resource_id.yml delete mode 100644 changelogs/fragments/720-cloudforms_inventory.yml delete mode 100644 changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml delete mode 100644 changelogs/fragments/948-dellemc-migration-removal.yml delete mode 100644 changelogs/fragments/CVE-2021-20191_no_log.yml delete mode 100644 changelogs/fragments/allow_funcd_to_load.yml delete mode 100644 changelogs/fragments/dict-filter.yml delete mode 100644 changelogs/fragments/meta-runtime-deprecations.yml delete mode 100644 changelogs/fragments/no_log-fixes.yml delete mode 100644 changelogs/fragments/path_join-shim-filter.yml delete mode 100644 changelogs/fragments/remove-deprecated-features.yml delete mode 100644 changelogs/fragments/remove-deprecated-modules.yml delete mode 100644 changelogs/fragments/selective-core-2.11.yml diff --git 
a/changelogs/changelog.yaml b/changelogs/changelog.yaml index e78468a3ca..114b6d6b29 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,2 +1,2 @@ -ancestor: 2.0.0 +ancestor: 3.0.0 releases: {} diff --git a/changelogs/fragments/1475-xfconf-facts.yml b/changelogs/fragments/1475-xfconf-facts.yml deleted file mode 100644 index cffc6f023e..0000000000 --- a/changelogs/fragments/1475-xfconf-facts.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - xfconf - added option ``disable_facts`` to disable facts and its associated deprecation warning (https://github.com/ansible-collections/community.general/issues/1475). -deprecated_features: - - xfconf - returning output as facts is deprecated, this will be removed in community.general 4.0.0. Please register the task output in a variable and use it instead. You can already switch to the new behavior now by using the new ``disable_facts`` option (https://github.com/ansible-collections/community.general/pull/1747). diff --git a/changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml b/changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml deleted file mode 100644 index a90444308e..0000000000 --- a/changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: - - filesystem - do not fail when ``resizefs=yes`` and ``fstype=xfs`` if there is nothing to do, even if - the filesystem is not mounted. This only covers systems supporting access to unmounted XFS filesystems. - Others will still fail (https://github.com/ansible-collections/community.general/issues/1457, https://github.com/ansible-collections/community.general/pull/1478). 
diff --git a/changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml b/changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml deleted file mode 100644 index ba75a86a62..0000000000 --- a/changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - xfs_quota - the feedback for initializing project quota using xfs_quota binary from ``xfsprogs`` has changed since the version it was written for (https://github.com/ansible-collections/community.general/pull/1596). diff --git a/changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml b/changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml deleted file mode 100644 index f6edfc6f53..0000000000 --- a/changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -minor_changes: - - gitlab_deploy_key - when the given key title already exists but has a different public key, the public key will now be updated to given value (https://github.com/ansible-collections/community.general/pull/1661). -breaking_changes: - - gitlab_deploy_key - if for an already existing key title a different public key was given as parameter nothing happened, now this changed so that the public key is updated to the new value (https://github.com/ansible-collections/community.general/pull/1661). diff --git a/changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml b/changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml deleted file mode 100644 index 1cf8897018..0000000000 --- a/changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_info module, redfish_utils module utils - add ``Name`` and ``Id`` properties to output of Redfish inventory commands (https://github.com/ansible-collections/community.general/issues/1650). 
diff --git a/changelogs/fragments/1695-parted-updatedregex.yaml b/changelogs/fragments/1695-parted-updatedregex.yaml deleted file mode 100644 index fb3a5a5eaa..0000000000 --- a/changelogs/fragments/1695-parted-updatedregex.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - parted - change the regex that decodes the partition size to better support different formats that parted uses. - Change the regex that validates parted's version string - (https://github.com/ansible-collections/community.general/pull/1695). diff --git a/changelogs/fragments/1702_homebrew_tap.yml b/changelogs/fragments/1702_homebrew_tap.yml deleted file mode 100644 index 7eabc45a9b..0000000000 --- a/changelogs/fragments/1702_homebrew_tap.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- homebrew_tap - add support to specify search path for ``brew`` executable (https://github.com/ansible-collections/community.general/issues/1702). diff --git a/changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml b/changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml deleted file mode 100644 index 18d39b5674..0000000000 --- a/changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - sensu-silence module - fix json parsing of sensu API responses on Python 3.5 (https://github.com/ansible-collections/community.general/pull/1703). diff --git a/changelogs/fragments/1714-gitlab_runner-required-reg-token.yml b/changelogs/fragments/1714-gitlab_runner-required-reg-token.yml deleted file mode 100644 index ec73bf422c..0000000000 --- a/changelogs/fragments/1714-gitlab_runner-required-reg-token.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - gitlab_runner - parameter ``registration_token`` was required but is used only when ``state`` is ``present`` (https://github.com/ansible-collections/community.general/issues/1714). 
diff --git a/changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml b/changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml deleted file mode 100644 index b4561f5145..0000000000 --- a/changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_kvm module - actually implemented ``vmid`` and ``status`` return values. Updated documentation to reflect current situation (https://github.com/ansible-collections/community.general/issues/1410, https://github.com/ansible-collections/community.general/pull/1715). diff --git a/changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml b/changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml deleted file mode 100644 index c3c3d804e3..0000000000 --- a/changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nomad_job_info - fix module failure when nomad client returns no jobs (https://github.com/ansible-collections/community.general/pull/1721). diff --git a/changelogs/fragments/1722_timezone.yml b/changelogs/fragments/1722_timezone.yml deleted file mode 100644 index cae337effd..0000000000 --- a/changelogs/fragments/1722_timezone.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- timezone - add Gentoo and Alpine Linux support (https://github.com/ansible-collections/community.general/issues/781). diff --git a/changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml b/changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml deleted file mode 100644 index 8b01717897..0000000000 --- a/changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - datadog_monitor - add missing monitor types ``query alert``, ``trace-analytics alert``, ``rum alert`` (https://github.com/ansible-collections/community.general/pull/1723). 
diff --git a/changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml b/changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml deleted file mode 100644 index eab67e0f47..0000000000 --- a/changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - gitlab_user - make updates to the ``isadmin``, ``password`` and ``confirm`` options of an already existing GitLab user work (https://github.com/ansible-collections/community.general/pull/1724). diff --git a/changelogs/fragments/1735-imc-sessions.yml b/changelogs/fragments/1735-imc-sessions.yml deleted file mode 100644 index 057393d06c..0000000000 --- a/changelogs/fragments/1735-imc-sessions.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - imc_rest - explicitly logging out instead of registering the call in ```atexit``` (https://github.com/ansible-collections/community.general/issues/1735). diff --git a/changelogs/fragments/1740-aerospike_migration.yml b/changelogs/fragments/1740-aerospike_migration.yml deleted file mode 100644 index e66963aae7..0000000000 --- a/changelogs/fragments/1740-aerospike_migration.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "aerospike_migration - fix typo that caused ``migrate_tx_key`` instead of ``migrate_rx_key`` being used (https://github.com/ansible-collections/community.general/pull/1739)." diff --git a/changelogs/fragments/1741-use-path-argspec.yml b/changelogs/fragments/1741-use-path-argspec.yml deleted file mode 100644 index ed05fee16a..0000000000 --- a/changelogs/fragments/1741-use-path-argspec.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: -- "oci_vcn - ``api_user_key_file`` is now of type ``path`` and no longer ``str``. A side effect is that certain expansions are made, like ``~`` is replaced by the user's home directory, and environment variables like ``$HOME`` or ``$TEMP`` are evaluated (https://github.com/ansible-collections/community.general/pull/1741)." 
-- "lxd_container - ``client_key`` and ``client_cert`` are now of type ``path`` and no longer ``str``. A side effect is that certain expansions are made, like ``~`` is replaced by the user's home directory, and environment variables like ``$HOME`` or ``$TEMP`` are evaluated (https://github.com/ansible-collections/community.general/pull/1741)." -- "lxd_profile - ``client_key`` and ``client_cert`` are now of type ``path`` and no longer ``str``. A side effect is that certain expansions are made, like ``~`` is replaced by the user's home directory, and environment variables like ``$HOME`` or ``$TEMP`` are evaluated (https://github.com/ansible-collections/community.general/pull/1741)." diff --git a/changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml b/changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml deleted file mode 100644 index 0e9c086b96..0000000000 --- a/changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_config - case insensitive search for situations where the hostname/FQDN case on iLO doesn't match variable's case (https://github.com/ansible-collections/community.general/pull/1744). diff --git a/changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml b/changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml deleted file mode 100644 index 9b1329412c..0000000000 --- a/changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - filesystem - remove ``swap`` from list of FS supported by ``resizefs=yes`` (https://github.com/ansible-collections/community.general/issues/790). 
diff --git a/changelogs/fragments/1761-redfish-tidy-up-validation.yml b/changelogs/fragments/1761-redfish-tidy-up-validation.yml deleted file mode 100644 index 751c7ca30d..0000000000 --- a/changelogs/fragments/1761-redfish-tidy-up-validation.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish modules - explicitly setting lists' elements to ``str`` (https://github.com/ansible-collections/community.general/pull/1761). diff --git a/changelogs/fragments/1765-proxmox-params.yml b/changelogs/fragments/1765-proxmox-params.yml deleted file mode 100644 index fd6d63c788..0000000000 --- a/changelogs/fragments/1765-proxmox-params.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox* modules - refactored some parameter validation code into use of ``env_fallback``, ``required_if``, ``required_together``, ``required_one_of`` (https://github.com/ansible-collections/community.general/pull/1765). diff --git a/changelogs/fragments/1766-zfs-fixed-sanity.yml b/changelogs/fragments/1766-zfs-fixed-sanity.yml deleted file mode 100644 index ac31084e2c..0000000000 --- a/changelogs/fragments/1766-zfs-fixed-sanity.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - zfs_delegate_admin - the elements of ``users``, ``groups`` and ``permissions`` are now enforced to be strings (https://github.com/ansible-collections/community.general/pull/1766). diff --git a/changelogs/fragments/1771-centurylink-validation-elements.yml b/changelogs/fragments/1771-centurylink-validation-elements.yml deleted file mode 100644 index 4c7a9bbbe4..0000000000 --- a/changelogs/fragments/1771-centurylink-validation-elements.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - clc_* modules - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1771). 
diff --git a/changelogs/fragments/1776-git_config-tilde_value.yml b/changelogs/fragments/1776-git_config-tilde_value.yml deleted file mode 100644 index c98912a24d..0000000000 --- a/changelogs/fragments/1776-git_config-tilde_value.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - git_config - prevent ``run_command`` from expanding values (https://github.com/ansible-collections/community.general/issues/1776). diff --git a/changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml b/changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml deleted file mode 100644 index 5e46b066a8..0000000000 --- a/changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - proxmox_kvm - do not add ``args`` if ``proxmox_default_behavior`` is set to no_defaults (https://github.com/ansible-collections/community.general/issues/1641). - - proxmox_kvm - stop implicitly adding ``force`` equal to ``false``. Proxmox API requires not implemented parameters otherwise, and assumes ``force`` to be ``false`` by default anyways (https://github.com/ansible-collections/community.general/pull/1783). diff --git a/changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml b/changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml deleted file mode 100644 index 6b1a43cc25..0000000000 --- a/changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - nios_host_record - allow DNS Bypass for views other than default (https://github.com/ansible-collections/community.general/issues/1786). 
diff --git a/changelogs/fragments/1795-list-elements-batch1.yml b/changelogs/fragments/1795-list-elements-batch1.yml deleted file mode 100644 index 9b057c7712..0000000000 --- a/changelogs/fragments/1795-list-elements-batch1.yml +++ /dev/null @@ -1,27 +0,0 @@ -minor_changes: - - plugins/module_utils/oracle/oci_utils.py - elements of list parameter ``key_by`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - lxd_container - elements of list parameter ``profiles`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - packet_device - elements of list parameters ``device_ids``, ``hostnames`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - pubnub_blocks - elements of list parameters ``event_handlers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - vmadm - elements of list parameters ``disks``, ``nics``, ``resolvers``, ``filesystems`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - sl_vm - elements of list parameters ``disks``, ``ssh_keys`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - xml - elements of list parameters ``add_children``, ``set_children`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - keycloak_client - elements of list parameters ``default_roles``, ``redirect_uris``, ``web_origins`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - onepassword_info - elements of list parameters ``search_terms`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - librato_annotation - elements of list parameters ``links`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). 
- - pagerduty - elements of list parameters ``service`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - statusio_maintenance - elements of list parameters ``components``, ``containers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - dnsimple - elements of list parameters ``record_ids`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - nsupdate - elements of list parameters ``value`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - omapi_host - elements of list parameters ``statements`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - mail - elements of list parameters ``to``, ``cc``, ``bcc``, ``attach``, ``headers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - nexmo - elements of list parameters ``dest`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - rocketchat - elements of list parameters ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - sendgrid - elements of list parameters ``to_addresses``, ``cc``, ``bcc``, ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - slack - elements of list parameters ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - twilio - elements of list parameters ``to_numbers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - redhat_subscription - elements of list parameters ``pool_ids``, ``addons`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). 
- - gitlab_runner - elements of list parameters ``tag_list`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - na_ontap_gather_facts - elements of list parameters ``gather_subset`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). -bugfixes: - - redhat_subscription - ``mutually_exclusive`` was referring to parameter alias instead of name (https://github.com/ansible-collections/community.general/pull/1795). diff --git a/changelogs/fragments/1813-lxd_profile-merge-profiles.yml b/changelogs/fragments/1813-lxd_profile-merge-profiles.yml deleted file mode 100644 index d374347a5e..0000000000 --- a/changelogs/fragments/1813-lxd_profile-merge-profiles.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- lxd_profile - added ``merge_profile`` parameter to merge configurations from the play to an existing profile (https://github.com/ansible-collections/community.general/pull/1813). diff --git a/changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml b/changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml deleted file mode 100644 index bc4915b7b9..0000000000 --- a/changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - dnsimple - add CAA records to the whitelist of valid record types (https://github.com/ansible-collections/community.general/pull/1814). diff --git a/changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml b/changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml deleted file mode 100644 index fdbc850528..0000000000 --- a/changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml +++ /dev/null @@ -1,17 +0,0 @@ -bugfixes: - - "alternatives - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." 
- - "beadm - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "cronvar - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "dconf - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "filesystem - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "hipchat - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "interfaces_file - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "java_cert - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "lvg - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "lvol - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "lxc - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "lxc_container - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "parted - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "rundeck_acl_policy - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." 
- - "statusio_maintenance - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "timezone - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." diff --git a/changelogs/fragments/1830-valmod_docmissingtype_batch1.yml b/changelogs/fragments/1830-valmod_docmissingtype_batch1.yml deleted file mode 100644 index 83a27f7e77..0000000000 --- a/changelogs/fragments/1830-valmod_docmissingtype_batch1.yml +++ /dev/null @@ -1,7 +0,0 @@ -bugfixes: - - kibana_plugin - ``state`` parameter choices must use ``list()`` in python3 (https://github.com/ansible-collections/community.general/pull/1830). - - elasticsearch_plugin - ``state`` parameter choices must use ``list()`` in python3 (https://github.com/ansible-collections/community.general/pull/1830). - - riak - parameters ``wait_for_handoffs`` and ``wait_for_ring`` are ``int`` but the default value was ``false`` (https://github.com/ansible-collections/community.general/pull/1830). - - logstash_plugin - wrapped ``dict.keys()`` with ``list`` for use in ``choices`` setting (https://github.com/ansible-collections/community.general/pull/1830). - - iso_extract - use proper alias deprecation mechanism for ``thirsty`` alias of ``force`` (https://github.com/ansible-collections/community.general/pull/1830). - - runit - removed unused code, and passing command as ``list`` instead of ``str`` to ``run_command()`` (https://github.com/ansible-collections/community.general/pull/1830). 
diff --git a/changelogs/fragments/1833-zfs-creation-only-properties.yaml b/changelogs/fragments/1833-zfs-creation-only-properties.yaml deleted file mode 100644 index deb972a6d2..0000000000 --- a/changelogs/fragments/1833-zfs-creation-only-properties.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - zfs - some ZFS properties could be passed when the dataset/volume did not exist, but would fail if the dataset already existed, even if the property matched what was specified in the ansible task (https://github.com/ansible-collections/community.general/issues/868, https://github.com/ansible-collections/community.general/pull/1833). diff --git a/changelogs/fragments/1838-runit-deprecate-param-dist.yml b/changelogs/fragments/1838-runit-deprecate-param-dist.yml deleted file mode 100644 index 5d133c074e..0000000000 --- a/changelogs/fragments/1838-runit-deprecate-param-dist.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: - - runit - unused parameter ``dist`` marked for deprecation (https://github.com/ansible-collections/community.general/pull/1830). diff --git a/changelogs/fragments/1847-proxmox-kvm-fix-status.yml b/changelogs/fragments/1847-proxmox-kvm-fix-status.yml deleted file mode 100644 index 0863f1bed2..0000000000 --- a/changelogs/fragments/1847-proxmox-kvm-fix-status.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox_kvm - fix undefined local variable ``status`` when the parameter ``state`` is either ``stopped``, ``started``, ``restarted`` or ``absent`` (https://github.com/ansible-collections/community.general/pull/1847). 
diff --git a/changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml b/changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml deleted file mode 100644 index 0946a4f38f..0000000000 --- a/changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - deploy_helper - allow ``state=clean`` to be used without defining a ``release`` (https://github.com/ansible-collections/community.general/issues/1852). \ No newline at end of file diff --git a/changelogs/fragments/1861-python3-keys.yml b/changelogs/fragments/1861-python3-keys.yml deleted file mode 100644 index 029ed93575..0000000000 --- a/changelogs/fragments/1861-python3-keys.yml +++ /dev/null @@ -1,22 +0,0 @@ -bugfixes: - - redis cache plugin - wrapped usages of ``keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - memcached cache plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - diy callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - selective callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - chef_databag lookup plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - net_tools.nios.api module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). 
- - utm_utils module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - lxc_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - lxd_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - oneandone_monitoring_policy - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - oci_vcn - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - spotinst_aws_elastigroup - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - sensu_check - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - redhat_subscription - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - idrac_redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - idrac_redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - idrac_redfish_info - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). 
- - redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - vdo - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - nsot inventory script - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). diff --git a/changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml b/changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml deleted file mode 100644 index 3f757b233a..0000000000 --- a/changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - module_helper module utils - ``CmdMixin.run_command()`` now accepts ``dict`` command arguments, providing the parameter and its value (https://github.com/ansible-collections/community.general/pull/1867). diff --git a/changelogs/fragments/1871-infoblox-inventory.yml b/changelogs/fragments/1871-infoblox-inventory.yml deleted file mode 100644 index d49d176f1b..0000000000 --- a/changelogs/fragments/1871-infoblox-inventory.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "infoblox inventory script - make sure that the script also works with Ansible 2.9, and returns a more helpful error when community.general is not installed as part of Ansible 2.10/3 (https://github.com/ansible-collections/community.general/pull/1871)." 
diff --git a/changelogs/fragments/1880-fix_cobbler_system_ssl.yml b/changelogs/fragments/1880-fix_cobbler_system_ssl.yml deleted file mode 100644 index 849f703130..0000000000 --- a/changelogs/fragments/1880-fix_cobbler_system_ssl.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - cobbler_sync, cobbler_system - fix SSL/TLS certificate check when ``validate_certs`` set to ``false`` (https://github.com/ansible-collections/community.general/pull/1880). diff --git a/changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml b/changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml deleted file mode 100644 index 47569b6a24..0000000000 --- a/changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmcli - ensure the ``slave-type`` option is passed to ``nmcli`` for type ``bond-slave`` (https://github.com/ansible-collections/community.general/pull/1882). diff --git a/changelogs/fragments/1885-sanity-check-fixes-batch3.yml b/changelogs/fragments/1885-sanity-check-fixes-batch3.yml deleted file mode 100644 index bf819a6e21..0000000000 --- a/changelogs/fragments/1885-sanity-check-fixes-batch3.yml +++ /dev/null @@ -1,18 +0,0 @@ -minor_changes: - - oneandone_firewall_policy - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - oneandone_load_balancer - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - oneandone_monitoring_policy - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - oneandone_private_network - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - oneandone_server - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). 
- - profitbricks - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - profitbricks_volume - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - webfaction_domain - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - webfaction_site - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - consul - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - consul_acl - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - consul_session - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - datadog_monitor - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - sensu_check - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - sensu_client - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - sensu_handler - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - bundler - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). 
diff --git a/changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml b/changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml deleted file mode 100644 index 05daac483c..0000000000 --- a/changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmcli - add ``method4`` and ``method6`` options (https://github.com/ansible-collections/community.general/pull/1894). diff --git a/changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml b/changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml deleted file mode 100644 index 73d908cfa8..0000000000 --- a/changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox_kvm - fix parameter ``vmid`` passed twice to ``exit_json`` while creating a virtual machine without cloning (https://github.com/ansible-collections/community.general/issues/1875, https://github.com/ansible-collections/community.general/pull/1895). diff --git a/changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml b/changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml deleted file mode 100644 index 36f40da0fe..0000000000 --- a/changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - yum_versionlock - Do the lock/unlock concurrently to speed up (https://github.com/ansible-collections/community.general/pull/1912). diff --git a/changelogs/fragments/1914-add-sanitization-to-url.yml b/changelogs/fragments/1914-add-sanitization-to-url.yml deleted file mode 100644 index 3b41bcb7af..0000000000 --- a/changelogs/fragments/1914-add-sanitization-to-url.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox inventory - added handling of extra trailing slashes in the URL (https://github.com/ansible-collections/community.general/pull/1914). 
diff --git a/changelogs/fragments/1916-add-version-sort-filter.yml b/changelogs/fragments/1916-add-version-sort-filter.yml deleted file mode 100644 index a06b464e55..0000000000 --- a/changelogs/fragments/1916-add-version-sort-filter.yml +++ /dev/null @@ -1,3 +0,0 @@ -add plugin.filter: - - name: version_sort - description: Sort a list according to version order instead of pure alphabetical one diff --git a/changelogs/fragments/1927-removed-parameter-invalid.yml b/changelogs/fragments/1927-removed-parameter-invalid.yml deleted file mode 100644 index 6dbc2e187b..0000000000 --- a/changelogs/fragments/1927-removed-parameter-invalid.yml +++ /dev/null @@ -1,12 +0,0 @@ -deprecated_features: - - composer - deprecated invalid parameter aliases ``working-dir``, ``global-command``, ``prefer-source``, ``prefer-dist``, ``no-dev``, ``no-scripts``, ``no-plugins``, ``optimize-autoloader``, ``classmap-authoritative``, ``apcu-autoloader``, ``ignore-platform-reqs``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - apt_rpm - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - homebrew - deprecated invalid parameter alias ``update-brew``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - homebrew_cask - deprecated invalid parameter alias ``update-brew``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - opkg - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - pacman - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). 
- - slackpkg - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - urmpi - deprecated invalid parameter aliases ``update-cache`` and ``no-recommends``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - xbps - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - github_deploy_key - deprecated invalid parameter alias ``2fa_token``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - puppet - deprecated undocumented parameter ``show_diff``, will be removed in 7.0.0. (https://github.com/ansible-collections/community.general/pull/1927). diff --git a/changelogs/fragments/1928-bigpanda-message.yml b/changelogs/fragments/1928-bigpanda-message.yml deleted file mode 100644 index 081b51cc0f..0000000000 --- a/changelogs/fragments/1928-bigpanda-message.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "bigpanda - actually use the ``deployment_message`` option (https://github.com/ansible-collections/community.general/pull/1928)." diff --git a/changelogs/fragments/1929-grove-message.yml b/changelogs/fragments/1929-grove-message.yml deleted file mode 100644 index 402aa24639..0000000000 --- a/changelogs/fragments/1929-grove-message.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: -- "grove - the option ``message`` has been renamed to ``message_content``. The old name ``message`` is kept as an alias and will be removed for community.general 4.0.0. This was done because ``message`` is used internally by Ansible (https://github.com/ansible-collections/community.general/pull/1929)." -deprecated_features: -- "grove - the option ``message`` will be removed in community.general 4.0.0. Use the new option ``message_content`` instead (https://github.com/ansible-collections/community.general/pull/1929)." 
diff --git a/changelogs/fragments/1949-proxmox-inventory-tags.yml b/changelogs/fragments/1949-proxmox-inventory-tags.yml deleted file mode 100644 index 073428c2e6..0000000000 --- a/changelogs/fragments/1949-proxmox-inventory-tags.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: -- proxmox inventory plugin - allowed proxomox tag string to contain commas when returned as fact (https://github.com/ansible-collections/community.general/pull/1949). -minor_changes: -- proxmox inventory plugin - added ``tags_parsed`` fact containing tags parsed as a list (https://github.com/ansible-collections/community.general/pull/1949). diff --git a/changelogs/fragments/1970-valmod-batch7.yml b/changelogs/fragments/1970-valmod-batch7.yml deleted file mode 100644 index cd577d4578..0000000000 --- a/changelogs/fragments/1970-valmod-batch7.yml +++ /dev/null @@ -1,18 +0,0 @@ -minor_changes: - - heroku_collaborator - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - linode_v4 - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - one_host - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - one_image_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - one_vm - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - scaleway_compute - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - scaleway_lb - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - manageiq_alert_profiles - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). 
- - manageiq_policies - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - manageiq_tags - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - oneview_datacenter_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - oneview_enclosure_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - oneview_ethernet_network_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - oneview_network_set_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). -bugfixes: - - manageiq_provider - wrapped ``dict.keys()`` with ``list`` for use in ``choices`` setting (https://github.com/ansible-collections/community.general/pull/1970). - - packet_volume_attachment - removed extraneous ``print`` call - old debug? (https://github.com/ansible-collections/community.general/pull/1970). diff --git a/changelogs/fragments/1972-ini_file-empty-str-value.yml b/changelogs/fragments/1972-ini_file-empty-str-value.yml deleted file mode 100644 index 7beba5ac4c..0000000000 --- a/changelogs/fragments/1972-ini_file-empty-str-value.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ini_file - allows an empty string as a value for an option (https://github.com/ansible-collections/community.general/pull/1972). 
diff --git a/changelogs/fragments/1977-jenkinsjob-validate-certs.yml b/changelogs/fragments/1977-jenkinsjob-validate-certs.yml deleted file mode 100644 index b4f7b2f938..0000000000 --- a/changelogs/fragments/1977-jenkinsjob-validate-certs.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - jenkins_job - add a ``validate_certs`` parameter that allows disabling TLS/SSL certificate validation (https://github.com/ansible-collections/community.general/issues/255). diff --git a/changelogs/fragments/1978-jira-transition-logic.yml b/changelogs/fragments/1978-jira-transition-logic.yml deleted file mode 100644 index 12b4adc56d..0000000000 --- a/changelogs/fragments/1978-jira-transition-logic.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - jira - fixed fields' update in ticket transitions (https://github.com/ansible-collections/community.general/issues/818). -minor_changes: - - jira - added parameter ``account_id`` for compatibility with recent versions of JIRA (https://github.com/ansible-collections/community.general/issues/818, https://github.com/ansible-collections/community.general/pull/1978). diff --git a/changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml b/changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml deleted file mode 100644 index 90a438dddf..0000000000 --- a/changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox inventory - exclude qemu templates from inclusion to the inventory via pools (https://github.com/ansible-collections/community.general/issues/1986, https://github.com/ansible-collections/community.general/pull/1991). 
diff --git a/changelogs/fragments/1993-haproxy-fix-draining.yml b/changelogs/fragments/1993-haproxy-fix-draining.yml deleted file mode 100644 index fd5c77f573..0000000000 --- a/changelogs/fragments/1993-haproxy-fix-draining.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - haproxy - fix a bug preventing haproxy from properly entering ``DRAIN`` mode (https://github.com/ansible-collections/community.general/issues/1913). diff --git a/changelogs/fragments/1999-proxmox-fix-issue-1955.yml b/changelogs/fragments/1999-proxmox-fix-issue-1955.yml deleted file mode 100644 index 274e70fb0f..0000000000 --- a/changelogs/fragments/1999-proxmox-fix-issue-1955.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: -- proxmox - removed requirement that root password is provided when containter state is ``present`` (https://github.com/ansible-collections/community.general/pull/1999). diff --git a/changelogs/fragments/2000-proxmox_kvm-tag-support.yml b/changelogs/fragments/2000-proxmox_kvm-tag-support.yml deleted file mode 100644 index d4084ecd67..0000000000 --- a/changelogs/fragments/2000-proxmox_kvm-tag-support.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- proxmox_kvm - added new module parameter ``tags`` for use with PVE 6+ (https://github.com/ansible-collections/community.general/pull/2000). diff --git a/changelogs/fragments/2001-no_log-false.yml b/changelogs/fragments/2001-no_log-false.yml deleted file mode 100644 index 82d9ba0bb0..0000000000 --- a/changelogs/fragments/2001-no_log-false.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "Mark various module options with ``no_log=False`` which have a name that potentially could leak secrets, but which do not (https://github.com/ansible-collections/community.general/pull/2001)." 
diff --git a/changelogs/fragments/2006-valmod-batch8.yml b/changelogs/fragments/2006-valmod-batch8.yml deleted file mode 100644 index 30be5e16b2..0000000000 --- a/changelogs/fragments/2006-valmod-batch8.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - rax - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006). - - rax_cdb_user - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006). - - rax_scaling_group - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006). diff --git a/changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml b/changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml deleted file mode 100644 index 8cfda91016..0000000000 --- a/changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml +++ /dev/null @@ -1,7 +0,0 @@ -minor_changes: - - "java_cert - change ``state: present`` to check certificates by hash, not just alias name (https://github.com/ansible/ansible/issues/43249)." -bugfixes: - - "java_cert - allow setting ``state: absent`` by providing just the ``cert_alias`` (https://github.com/ansible/ansible/issues/27982)." - - "java_cert - properly handle proxy arguments when the scheme is provided (https://github.com/ansible/ansible/issues/54481)." -security_fixes: - - "java_cert - remove password from ``run_command`` arguments (https://github.com/ansible-collections/community.general/pull/2008)." 
diff --git a/changelogs/fragments/2013-proxmox-purge-parameter.yml b/changelogs/fragments/2013-proxmox-purge-parameter.yml deleted file mode 100644 index 6c681e5a19..0000000000 --- a/changelogs/fragments/2013-proxmox-purge-parameter.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- proxmox - added ``purge`` module parameter for use when deleting lxc's with HA options (https://github.com/ansible-collections/community.general/pull/2013). diff --git a/changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml b/changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml deleted file mode 100644 index 6420203888..0000000000 --- a/changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - kibana_plugin - add parameter for passing ``--allow-root`` flag to kibana and kibana-plugin commands (https://github.com/ansible-collections/community.general/pull/2014). diff --git a/changelogs/fragments/2020-remove-unused-param-in-rax.yml b/changelogs/fragments/2020-remove-unused-param-in-rax.yml deleted file mode 100644 index 333548f0b9..0000000000 --- a/changelogs/fragments/2020-remove-unused-param-in-rax.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - rax - unused parameter ``service`` removed (https://github.com/ansible-collections/community.general/pull/2020). diff --git a/changelogs/fragments/2024-module-helper-fixes.yml b/changelogs/fragments/2024-module-helper-fixes.yml deleted file mode 100644 index 3ce3cc71dc..0000000000 --- a/changelogs/fragments/2024-module-helper-fixes.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - module_helper module utils - actually ignoring formatting of parameters with value ``None`` (https://github.com/ansible-collections/community.general/pull/2024). - - module_helper module utils - handling ``ModuleHelperException`` now properly calls ``fail_json()`` (https://github.com/ansible-collections/community.general/pull/2024). 
- - module_helper module utils - use the command name as-is in ``CmdMixin`` if it fails ``get_bin_path()`` - allowing full path names to be passed (https://github.com/ansible-collections/community.general/pull/2024). diff --git a/changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml b/changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml deleted file mode 100644 index b5c22b9502..0000000000 --- a/changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_* modules, redfish_utils module utils - add support for Redfish session create, delete, and authenticate (https://github.com/ansible-collections/community.general/issues/1975). diff --git a/changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml b/changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml deleted file mode 100644 index 9e70a16d80..0000000000 --- a/changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- ipa_sudorule - add support for setting sudo runasuser (https://github.com/ansible-collections/community.general/pull/2031). diff --git a/changelogs/fragments/2032-one_image-pyone.yml b/changelogs/fragments/2032-one_image-pyone.yml deleted file mode 100644 index 4975cb73ad..0000000000 --- a/changelogs/fragments/2032-one_image-pyone.yml +++ /dev/null @@ -1,2 +0,0 @@ -breaking_changes: - - one_image - use pyone instead of python-oca (https://github.com/ansible-collections/community.general/pull/2032). diff --git a/changelogs/fragments/2036-scaleway-inventory.yml b/changelogs/fragments/2036-scaleway-inventory.yml deleted file mode 100644 index 44161306ac..0000000000 --- a/changelogs/fragments/2036-scaleway-inventory.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - scaleway inventory plugin - fix pagination on scaleway inventory plugin (https://github.com/ansible-collections/community.general/pull/2036). 
diff --git a/changelogs/fragments/2037-add-from-csv-filter.yml b/changelogs/fragments/2037-add-from-csv-filter.yml deleted file mode 100644 index d99c4cd0a8..0000000000 --- a/changelogs/fragments/2037-add-from-csv-filter.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -add plugin.filter: - - name: from_csv - description: Converts CSV text input into list of dicts -minor_changes: - - csv module utils - new module_utils for shared functions between ``from_csv`` filter and ``read_csv`` module (https://github.com/ansible-collections/community.general/pull/2037). - - read_csv - refactored read_csv module to use shared csv functions from csv module_utils (https://github.com/ansible-collections/community.general/pull/2037). diff --git a/changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml b/changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml deleted file mode 100644 index 04d9a11101..0000000000 --- a/changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_config module, redfish_utils module utils - fix IndexError in ``SetManagerNic`` command (https://github.com/ansible-collections/community.general/issues/1692). diff --git a/changelogs/fragments/2057-nios-devel.yml b/changelogs/fragments/2057-nios-devel.yml deleted file mode 100644 index be9f8a970f..0000000000 --- a/changelogs/fragments/2057-nios-devel.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "nios* modules - fix modules to work with ansible-core 2.11 (https://github.com/ansible-collections/community.general/pull/2057)." 
diff --git a/changelogs/fragments/2061-archive-refactor1.yml b/changelogs/fragments/2061-archive-refactor1.yml deleted file mode 100644 index a7189a2f59..0000000000 --- a/changelogs/fragments/2061-archive-refactor1.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - archive - refactored some reused code out into a couple of functions (https://github.com/ansible-collections/community.general/pull/2061). diff --git a/changelogs/fragments/2065-snmp-facts-timeout.yml b/changelogs/fragments/2065-snmp-facts-timeout.yml deleted file mode 100644 index 0e6a4e54fa..0000000000 --- a/changelogs/fragments/2065-snmp-facts-timeout.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - snmp_facts - added parameters ``timeout`` and ``retries`` to module (https://github.com/ansible-collections/community.general/issues/980). diff --git a/changelogs/fragments/2072-stacki-host-params-fallback.yml b/changelogs/fragments/2072-stacki-host-params-fallback.yml deleted file mode 100644 index f586a6eb0c..0000000000 --- a/changelogs/fragments/2072-stacki-host-params-fallback.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - stacki_host - replaced ``default`` to environment variables with ``fallback`` to them (https://github.com/ansible-collections/community.general/pull/2072). diff --git a/changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml b/changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml deleted file mode 100644 index e0addce2fc..0000000000 --- a/changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - zypper, zypper_repository - respect ``PATH`` environment variable when resolving zypper executable path (https://github.com/ansible-collections/community.general/pull/2094). 
diff --git a/changelogs/fragments/2110-vdo-add_force_option.yaml b/changelogs/fragments/2110-vdo-add_force_option.yaml deleted file mode 100644 index 9e93a919a2..0000000000 --- a/changelogs/fragments/2110-vdo-add_force_option.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - vdo - add ``force`` option (https://github.com/ansible-collections/community.general/issues/2101). diff --git a/changelogs/fragments/2116-add-fields-to-ipa-config-module.yml b/changelogs/fragments/2116-add-fields-to-ipa-config-module.yml deleted file mode 100644 index d1e1dc3180..0000000000 --- a/changelogs/fragments/2116-add-fields-to-ipa-config-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_config - add new options ``ipaconfigstring``, ``ipadefaultprimarygroup``, ``ipagroupsearchfields``, ``ipahomesrootdir``, ``ipabrkauthzdata``, ``ipamaxusernamelength``, ``ipapwdexpadvnotify``, ``ipasearchrecordslimit``, ``ipasearchtimelimit``, ``ipauserauthtype``, and ``ipausersearchfields`` (https://github.com/ansible-collections/community.general/pull/2116). diff --git a/changelogs/fragments/2125-git-config-scope-file.yml b/changelogs/fragments/2125-git-config-scope-file.yml deleted file mode 100644 index 75862e0333..0000000000 --- a/changelogs/fragments/2125-git-config-scope-file.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - git_config - fixed scope ``file`` behaviour and added integraton test for it (https://github.com/ansible-collections/community.general/issues/2117). diff --git a/changelogs/fragments/2135-vmadm-resolvers-type-fix.yml b/changelogs/fragments/2135-vmadm-resolvers-type-fix.yml deleted file mode 100644 index fcce6e12e1..0000000000 --- a/changelogs/fragments/2135-vmadm-resolvers-type-fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - vmadm - correct type of list elements in ``resolvers`` parameter (https://github.com/ansible-collections/community.general/issues/2135). 
diff --git a/changelogs/fragments/2139-dimensiondata_network-str-format.yml b/changelogs/fragments/2139-dimensiondata_network-str-format.yml deleted file mode 100644 index 115b04f045..0000000000 --- a/changelogs/fragments/2139-dimensiondata_network-str-format.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - dimensiondata_network - bug when formatting message, instead of % a simple comma was used (https://github.com/ansible-collections/community.general/pull/2139). diff --git a/changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml b/changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml deleted file mode 100644 index 6a24f1afc3..0000000000 --- a/changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - apache2_mod_proxy - refactored/cleaned-up part of the code (https://github.com/ansible-collections/community.general/pull/2142). diff --git a/changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml b/changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml deleted file mode 100644 index 54a41cd237..0000000000 --- a/changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - kibana_plugin - added missing parameters to ``remove_plugin`` when using ``state=present force=true``, and fix potential quoting errors when invoking ``kibana`` (https://github.com/ansible-collections/community.general/pull/2143). diff --git a/changelogs/fragments/2144-atomic_get_bin_path.yml b/changelogs/fragments/2144-atomic_get_bin_path.yml deleted file mode 100644 index eeb55114d2..0000000000 --- a/changelogs/fragments/2144-atomic_get_bin_path.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - atomic_container - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144). 
- - atomic_host - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144). - - atomic_image - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144). diff --git a/changelogs/fragments/2146-npm-add_no_bin_links_option.yaml b/changelogs/fragments/2146-npm-add_no_bin_links_option.yaml deleted file mode 100644 index 651af80186..0000000000 --- a/changelogs/fragments/2146-npm-add_no_bin_links_option.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - npm - add ``no_bin_links`` option (https://github.com/ansible-collections/community.general/issues/2128). diff --git a/changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml b/changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml deleted file mode 100644 index 0ef97f20ed..0000000000 --- a/changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- proxmox inventory plugin - added ``proxmox_agent_interfaces`` fact describing network interfaces returned from a QEMU guest agent (https://github.com/ansible-collections/community.general/pull/2148). diff --git a/changelogs/fragments/2157-unreachable-code.yml b/changelogs/fragments/2157-unreachable-code.yml deleted file mode 100644 index 7cb84b4db9..0000000000 --- a/changelogs/fragments/2157-unreachable-code.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - rhevm - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157). - - ovh_ip_failover - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157). - - bitbucket_pipeline_variable - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157). 
diff --git a/changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml b/changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml deleted file mode 100644 index 10547bb71b..0000000000 --- a/changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ipa_user - allow ``sshpubkey`` to permit multiple word comments (https://github.com/ansible-collections/community.general/pull/2159). diff --git a/changelogs/fragments/2160-list-literals.yml b/changelogs/fragments/2160-list-literals.yml deleted file mode 100644 index 661b1e322e..0000000000 --- a/changelogs/fragments/2160-list-literals.yml +++ /dev/null @@ -1,11 +0,0 @@ -minor_changes: - - hiera lookup - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - known_hosts module utils - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - nictagadm - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - smartos_image_info - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - xattr - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - ipwcli_dns - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - svr4pkg - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - zfs_facts - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). 
- - zpool_facts - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - beadm - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). diff --git a/changelogs/fragments/2161-pkgutil-list-extend.yml b/changelogs/fragments/2161-pkgutil-list-extend.yml deleted file mode 100644 index 9af970afd8..0000000000 --- a/changelogs/fragments/2161-pkgutil-list-extend.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - pkgutil - fixed calls to ``list.extend()`` (https://github.com/ansible-collections/community.general/pull/2161). diff --git a/changelogs/fragments/2162-modhelper-variables.yml b/changelogs/fragments/2162-modhelper-variables.yml deleted file mode 100644 index 68b0edc37e..0000000000 --- a/changelogs/fragments/2162-modhelper-variables.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - module_helper module utils - added mechanism to manage variables, providing automatic output of variables, change status and diff information (https://github.com/ansible-collections/community.general/pull/2162). diff --git a/changelogs/fragments/2162-proxmox-constructable.yml b/changelogs/fragments/2162-proxmox-constructable.yml deleted file mode 100644 index dfcb1e3495..0000000000 --- a/changelogs/fragments/2162-proxmox-constructable.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- proxmox inventory plugin - added ``Constructable`` class to the inventory to provide options ``strict``, ``keyed_groups``, ``groups``, and ``compose`` (https://github.com/ansible-collections/community.general/pull/2180). 
diff --git a/changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml b/changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml deleted file mode 100644 index 43d183707c..0000000000 --- a/changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: - - "java_keystore - use tempfile lib to create temporary files with randomized - names, and remove the temporary PKCS#12 keystore as well as other materials - (https://github.com/ansible-collections/community.general/issues/1667)." diff --git a/changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml b/changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml deleted file mode 100644 index d162f19b7a..0000000000 --- a/changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_user - fix ``userauthtype`` option to take in list of strings for the multi-select field instead of single string (https://github.com/ansible-collections/community.general/pull/2174). diff --git a/changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml b/changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml deleted file mode 100644 index 0d961a53ac..0000000000 --- a/changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -security_fixes: - - "java_keystore - pass secret to keytool through an environment variable to not expose it as a - commandline argument (https://github.com/ansible-collections/community.general/issues/1668)." 
diff --git a/changelogs/fragments/2183-java_keystore_improve_error_handling.yml b/changelogs/fragments/2183-java_keystore_improve_error_handling.yml deleted file mode 100644 index 5d6ceef511..0000000000 --- a/changelogs/fragments/2183-java_keystore_improve_error_handling.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -bugfixes: - - "java_keystore - improve error handling and return ``cmd`` as documented. - Force ``LANG``, ``LC_ALL`` and ``LC_MESSAGES`` environment variables to ``C`` to rely - on ``keytool`` output parsing. Fix pylint's ``unused-variable`` and ``no-else-return`` - hints (https://github.com/ansible-collections/community.general/pull/2183)." diff --git a/changelogs/fragments/2185-xfconf-absent-check-mode.yml b/changelogs/fragments/2185-xfconf-absent-check-mode.yml deleted file mode 100644 index 059f4acd9a..0000000000 --- a/changelogs/fragments/2185-xfconf-absent-check-mode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - xfconf - module was not honoring check mode when ``state`` was ``absent`` (https://github.com/ansible-collections/community.general/pull/2185). diff --git a/changelogs/fragments/2188-xfconf-modhelper-variables.yml b/changelogs/fragments/2188-xfconf-modhelper-variables.yml deleted file mode 100644 index 19e94254bd..0000000000 --- a/changelogs/fragments/2188-xfconf-modhelper-variables.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - module_helper module utils - added management of facts and adhoc setting of the initial value for variables (https://github.com/ansible-collections/community.general/pull/2188). - - xfconf - changed implementation to use ``ModuleHelper`` new features (https://github.com/ansible-collections/community.general/pull/2188). 
diff --git a/changelogs/fragments/2192-add-jira-attach.yml b/changelogs/fragments/2192-add-jira-attach.yml deleted file mode 100644 index 5877250541..0000000000 --- a/changelogs/fragments/2192-add-jira-attach.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - jira - added ``attach`` operation, which allows a user to attach a file to an issue (https://github.com/ansible-collections/community.general/pull/2192). diff --git a/changelogs/fragments/2203-modhelper-cause-changes-deco.yml b/changelogs/fragments/2203-modhelper-cause-changes-deco.yml deleted file mode 100644 index b61f97d6b8..0000000000 --- a/changelogs/fragments/2203-modhelper-cause-changes-deco.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - module_helper module utils - fixed decorator ``cause_changes`` (https://github.com/ansible-collections/community.general/pull/2203). diff --git a/changelogs/fragments/2204-github_repo-fix-baseurl_port.yml b/changelogs/fragments/2204-github_repo-fix-baseurl_port.yml deleted file mode 100644 index 0df3bd8ece..0000000000 --- a/changelogs/fragments/2204-github_repo-fix-baseurl_port.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - github_repo - PyGithub bug does not allow explicit port in ``base_url``. Specifying port is not required (https://github.com/PyGithub/PyGithub/issues/1913). diff --git a/changelogs/fragments/2208-jira-revamp.yml b/changelogs/fragments/2208-jira-revamp.yml deleted file mode 100644 index 32f1650aa0..0000000000 --- a/changelogs/fragments/2208-jira-revamp.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - jira - revamped the module as a class using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/2208). 
diff --git a/changelogs/fragments/2218-cpanm-revamp.yml b/changelogs/fragments/2218-cpanm-revamp.yml deleted file mode 100644 index 668a84f06b..0000000000 --- a/changelogs/fragments/2218-cpanm-revamp.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - cpanm - rewritten using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/2218). - - cpanm - honor and install specified version when running in ``new`` mode; that feature is not available in ``compatibility`` mode (https://github.com/ansible-collections/community.general/issues/208). -deprecated_features: - - cpanm - parameter ``system_lib`` deprecated in favor of using ``become`` (https://github.com/ansible-collections/community.general/pull/2218). diff --git a/changelogs/fragments/2220_nmcli_wifi_support.yaml b/changelogs/fragments/2220_nmcli_wifi_support.yaml deleted file mode 100644 index 224c4dc526..0000000000 --- a/changelogs/fragments/2220_nmcli_wifi_support.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "nmcli - add ability to connect to a Wifi network and also to attach it to a master (bond) (https://github.com/ansible-collections/community.general/pull/2220)." diff --git a/changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml b/changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml deleted file mode 100644 index 4d98b62922..0000000000 --- a/changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "nmcli - do not set IP configuration on slave connection (https://github.com/ansible-collections/community.general/pull/2223)." 
diff --git a/changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml b/changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml deleted file mode 100644 index 98852463d8..0000000000 --- a/changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "nmcli - don't restrict the ability to manually set the MAC address to the bridge (https://github.com/ansible-collections/community.general/pull/2224)." diff --git a/changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml b/changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml deleted file mode 100644 index 0622e93c31..0000000000 --- a/changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -minor_changes: - - "java_keystore - add options ``certificate_path`` and ``private_key_path``, - mutually exclusive with ``certificate`` and ``private_key`` respectively, and - targetting files on remote hosts rather than their contents on the controller. - (https://github.com/ansible-collections/community.general/issues/1669)." diff --git a/changelogs/fragments/2236-jira-isinstance.yml b/changelogs/fragments/2236-jira-isinstance.yml deleted file mode 100644 index e80cbacdf9..0000000000 --- a/changelogs/fragments/2236-jira-isinstance.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - jira - fixed calling of ``isinstance`` (https://github.com/ansible-collections/community.general/issues/2234). 
diff --git a/changelogs/fragments/2244-hashids-filters.yml b/changelogs/fragments/2244-hashids-filters.yml deleted file mode 100644 index 568119e890..0000000000 --- a/changelogs/fragments/2244-hashids-filters.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -add plugin.filter: - - name: hashids_encode - description: Encodes YouTube-like hashes from a sequence of integers - - name: hashids_decode - description: Decodes a sequence of numbers from a YouTube-like hash diff --git a/changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml b/changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml deleted file mode 100644 index 3eae94f4ea..0000000000 --- a/changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox inventory - added handling of commas in KVM agent configuration string (https://github.com/ansible-collections/community.general/pull/2245). diff --git a/changelogs/fragments/2246-terraform.yaml b/changelogs/fragments/2246-terraform.yaml deleted file mode 100644 index d2dd93e22e..0000000000 --- a/changelogs/fragments/2246-terraform.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - terraform - fix issue that cause the execution fail because from Terraform 0.15 on, the ``-var`` and ``-var-file`` options are no longer available on ``terraform validate`` (https://github.com/ansible-collections/community.general/pull/2246). - - terraform - fix issue that cause the destroy to fail because from Terraform 0.15 on, the ``terraform destroy -force`` option is replaced with ``terraform destroy -auto-approve`` (https://github.com/ansible-collections/community.general/issues/2247). - - terraform - remove uses of ``use_unsafe_shell=True`` (https://github.com/ansible-collections/community.general/pull/2246). 
diff --git a/changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml b/changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml deleted file mode 100644 index e5d6ca02d7..0000000000 --- a/changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - linode_v4 - add support for ``private_ip`` option (https://github.com/ansible-collections/community.general/pull/2249). diff --git a/changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml b/changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml deleted file mode 100644 index 5b8deb2a03..0000000000 --- a/changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -minor_changes: - - keycloak_* modules - allow the keycloak modules to use a token for the - authentication, the modules can take either a token or the credentials - (https://github.com/ansible-collections/community.general/pull/2250). diff --git a/changelogs/fragments/2257-ldap_entry-params.yml b/changelogs/fragments/2257-ldap_entry-params.yml deleted file mode 100644 index f5c92d0b9c..0000000000 --- a/changelogs/fragments/2257-ldap_entry-params.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: -- "ldap_entry - the ``params`` parameter is now completely removed. Using it already triggered an error since community.general 0.1.2 (https://github.com/ansible-collections/community.general/pull/2257)." 
diff --git a/changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml b/changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml deleted file mode 100644 index d8f6f80385..0000000000 --- a/changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: - - proxmox inventory plugin - support network interfaces without IP addresses, multiple network interfaces and unsupported/commanddisabled guest error (https://github.com/ansible-collections/community.general/pull/2263). -minor_changes: - - proxmox inventory plugin - allow to select whether ``ansible_host`` should be set for the proxmox nodes (https://github.com/ansible-collections/community.general/pull/2263). diff --git a/changelogs/fragments/2262-java_keystore-passphrase.yml b/changelogs/fragments/2262-java_keystore-passphrase.yml deleted file mode 100644 index 882ada97c3..0000000000 --- a/changelogs/fragments/2262-java_keystore-passphrase.yml +++ /dev/null @@ -1,8 +0,0 @@ -breaking_changes: -- "java_keystore - instead of failing, now overwrites keystore if the alias (name) is changed. - This was originally the intended behavior, but did not work due to a logic error. Make sure - that your playbooks and roles do not depend on the old behavior of failing instead of - overwriting (https://github.com/ansible-collections/community.general/issues/1671)." -- "java_keystore - instead of failing, now overwrites keystore if the passphrase is changed. - Make sure that your playbooks and roles do not depend on the old behavior of failing instead - of overwriting (https://github.com/ansible-collections/community.general/issues/1671)." 
diff --git a/changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml b/changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml deleted file mode 100644 index 25b79f4528..0000000000 --- a/changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -minor_changes: - - lvol - added proper support for ``+-`` options when extending or reducing the logical volume (https://github.com/ansible-collections/community.general/issues/1988). -bugfixes: - - lvol - fixed sizing calculation rounding to match the underlying tools (https://github.com/ansible-collections/community.general/issues/1988). diff --git a/changelogs/fragments/2268-validation-univetion.yml b/changelogs/fragments/2268-validation-univetion.yml deleted file mode 100644 index f245380441..0000000000 --- a/changelogs/fragments/2268-validation-univetion.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - udm_dns_record - fixed default value of parameter ``data`` to match its type (https://github.com/ansible-collections/community.general/pull/2268). -minor_changes: - - udm_dns_zone - elements of list parameters ``nameserver``, ``interfaces``, and ``mx`` are now validated (https://github.com/ansible-collections/community.general/pull/2268). diff --git a/changelogs/fragments/2280-pids-new-pattern-option.yml b/changelogs/fragments/2280-pids-new-pattern-option.yml deleted file mode 100644 index fb9f07e744..0000000000 --- a/changelogs/fragments/2280-pids-new-pattern-option.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- pids - new options ``pattern`` and `ignore_case`` for retrieving PIDs of processes matching a supplied pattern (https://github.com/ansible-collections/community.general/pull/2280). 
diff --git a/changelogs/fragments/2282-nmap-fix-cache-support.yml b/changelogs/fragments/2282-nmap-fix-cache-support.yml deleted file mode 100644 index 62b026eb25..0000000000 --- a/changelogs/fragments/2282-nmap-fix-cache-support.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmap inventory plugin - fix cache and constructed group support (https://github.com/ansible-collections/community.general/issues/2242). diff --git a/changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml b/changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml deleted file mode 100644 index 0df25ca462..0000000000 --- a/changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - influxdb_retention_policy - ensure idempotent module execution with different - duration and shard duration parameter values - (https://github.com/ansible-collections/community.general/issues/2281). diff --git a/changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml b/changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml deleted file mode 100644 index ec389b270c..0000000000 --- a/changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - terraform - add ``plugin_paths`` parameter which allows disabling Terraform from performing plugin discovery and auto-download (https://github.com/ansible-collections/community.general/pull/2308). diff --git a/changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml b/changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml deleted file mode 100644 index 4cced727a2..0000000000 --- a/changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - hiera lookup plugin - converts the return type of plugin to unicode string (https://github.com/ansible-collections/community.general/pull/2329). 
diff --git a/changelogs/fragments/2340-jenkins_plugin-py2.yml b/changelogs/fragments/2340-jenkins_plugin-py2.yml deleted file mode 100644 index f3bcdbd361..0000000000 --- a/changelogs/fragments/2340-jenkins_plugin-py2.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "jenkins_plugin - fixes Python 2 compatibility issue (https://github.com/ansible-collections/community.general/pull/2340)." \ No newline at end of file diff --git a/changelogs/fragments/2349-jira-bugfix-b64decode.yml b/changelogs/fragments/2349-jira-bugfix-b64decode.yml deleted file mode 100644 index 41a1dabb94..0000000000 --- a/changelogs/fragments/2349-jira-bugfix-b64decode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - jira - fixed error when loading base64-encoded content as attachment (https://github.com/ansible-collections/community.general/pull/2349). diff --git a/changelogs/fragments/620-consul_io-env-variables-conf-based.yml b/changelogs/fragments/620-consul_io-env-variables-conf-based.yml deleted file mode 100644 index e3378428c5..0000000000 --- a/changelogs/fragments/620-consul_io-env-variables-conf-based.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: - - consul_io inventory script - kv_groups - fix byte chain decoding for Python 3 (https://github.com/ansible-collections/community.general/pull/620). -minor_changes: - - consul_io inventory script - conf options - allow custom configuration options via env variables (https://github.com/ansible-collections/community.general/pull/620). diff --git a/changelogs/fragments/719-manageiq-resource_id.yml b/changelogs/fragments/719-manageiq-resource_id.yml deleted file mode 100644 index bbeef5ff82..0000000000 --- a/changelogs/fragments/719-manageiq-resource_id.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - manageiq_tags and manageiq_policies - added new parameter ``resource_id``. This parameter can be used instead of parameter ``resource_name`` (https://github.com/ansible-collections/community.general/pull/719). 
\ No newline at end of file diff --git a/changelogs/fragments/720-cloudforms_inventory.yml b/changelogs/fragments/720-cloudforms_inventory.yml deleted file mode 100644 index f5675205d1..0000000000 --- a/changelogs/fragments/720-cloudforms_inventory.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - cloudforms inventory - fixed issue that non-existing (archived) VMs were synced (https://github.com/ansible-collections/community.general/pull/720). diff --git a/changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml b/changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml deleted file mode 100644 index 7d48c77298..0000000000 --- a/changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox lxc - only add the features flag when module parameter ``features`` is set. Before an empty string was send to proxmox in case the parameter was not used, which required to use ``root@pam`` for module execution (https://github.com/ansible-collections/community.general/pull/1763). diff --git a/changelogs/fragments/948-dellemc-migration-removal.yml b/changelogs/fragments/948-dellemc-migration-removal.yml deleted file mode 100644 index c4f64a815f..0000000000 --- a/changelogs/fragments/948-dellemc-migration-removal.yml +++ /dev/null @@ -1,13 +0,0 @@ -removed_features: - - | - The ``ome_device_info``, ``idrac_firmware`` and ``idrac_server_config_profile`` modules have now been migrated from community.general to the `dellemc.openmanage `_ Ansible collection. - If you use ansible-base 2.10 or newer, redirections have been provided. - - If you use Ansible 2.9 and installed this collection, you need to adjust the FQCNs (``community.general.idrac_firmware`` → ``dellemc.openmanage.idrac_firmware``) and make sure to install the dellemc.openmanage collection. 
-breaking_changes: - - | - If you use Ansible 2.9 and these plugins or modules from this collection, community.general 3.0.0 results in errors when trying to use the DellEMC content by FQCN, like ``community.general.idrac_firmware``. - Since Ansible 2.9 is not able to use redirections, you will have to adjust your playbooks and roles manually to use the new FQCNs (``dellemc.openmanage.idrac_firmware`` for the previous example) and to make sure that you have ``dellemc.openmanage`` installed. - - If you use ansible-base 2.10 or newer and did not install Ansible 4.0.0, but installed (and/or upgraded) community.general manually, you need to make sure to also install the ``dellemc.openmanage`` collection if you are using any of these plugins or modules. - While ansible-base 2.10 or newer can use the redirects that community.general 3.0.0 adds, the collection they point to (such as dellemc.openmanage) must be installed for them to work. diff --git a/changelogs/fragments/CVE-2021-20191_no_log.yml b/changelogs/fragments/CVE-2021-20191_no_log.yml deleted file mode 100644 index a2c8740598..0000000000 --- a/changelogs/fragments/CVE-2021-20191_no_log.yml +++ /dev/null @@ -1,4 +0,0 @@ -security_fixes: - - module_utils/_netapp, na_ontap_gather_facts - enabled ``no_log`` for the options ``api_key`` and ``secret_key`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725). - - module_utils/identity/keycloak, keycloak_client, keycloak_clienttemplate, keycloak_group - enabled ``no_log`` for the option ``auth_client_secret`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725). - - utm_proxy_auth_profile - enabled ``no_log`` for the option ``frontend_cookie_secret`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725). 
diff --git a/changelogs/fragments/allow_funcd_to_load.yml b/changelogs/fragments/allow_funcd_to_load.yml deleted file mode 100644 index 3336b0aaf4..0000000000 --- a/changelogs/fragments/allow_funcd_to_load.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - funcd connection plugin - can now load (https://github.com/ansible-collections/community.general/pull/2235). diff --git a/changelogs/fragments/dict-filter.yml b/changelogs/fragments/dict-filter.yml deleted file mode 100644 index 1e9923e796..0000000000 --- a/changelogs/fragments/dict-filter.yml +++ /dev/null @@ -1,3 +0,0 @@ -add plugin.filter: - - name: dict - description: "The ``dict`` function as a filter: converts a list of tuples to a dictionary" diff --git a/changelogs/fragments/meta-runtime-deprecations.yml b/changelogs/fragments/meta-runtime-deprecations.yml deleted file mode 100644 index 8863f346af..0000000000 --- a/changelogs/fragments/meta-runtime-deprecations.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "meta/runtime.yml - improve deprecation messages (https://github.com/ansible-collections/community.general/pull/1918)." diff --git a/changelogs/fragments/no_log-fixes.yml b/changelogs/fragments/no_log-fixes.yml deleted file mode 100644 index 70afd3229d..0000000000 --- a/changelogs/fragments/no_log-fixes.yml +++ /dev/null @@ -1,25 +0,0 @@ -security_fixes: - - "ovirt - mark the ``instance_rootpw`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "oneandone_firewall_policy, oneandone_load_balancer, oneandone_monitoring_policy, oneandone_private_network, oneandone_public_ip - mark the ``auth_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "rax_clb_ssl - mark the ``private_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." 
- - "spotinst_aws_elastigroup - mark the ``multai_token`` and ``token`` parameters as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "keycloak_client - mark the ``registration_access_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "librato_annotation - mark the ``api_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "pagerduty_alert - mark the ``api_key``, ``service_key`` and ``integration_key`` parameters as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "nios_nsgroup - mark the ``tsig_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "pulp_repo - mark the ``feed_client_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "gitlab_runner - mark the ``registration_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "ibm_sa_host - mark the ``iscsi_chap_secret`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "keycloak_* modules - mark the ``auth_client_secret`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "hwc_ecs_instance - mark the ``admin_pass`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "ovirt - mark the ``instance_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." 
- - "pagerduty_change - mark the ``integration_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "pingdom - mark the ``key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "rollbar_deployment - mark the ``token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "stackdriver - mark the ``key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "dnsmadeeasy - mark the ``account_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "logentries_msg - mark the ``token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "redfish_command - mark the ``update_creds.password`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "utm_proxy_auth_profile - mark the ``frontend_cookie_secret`` parameter as ``no_log`` to avoid leakage of secrets. This causes the ``utm_proxy_auth_profile`` return value to no longer containing the correct value, but a placeholder (https://github.com/ansible-collections/community.general/pull/1736)." -breaking_changes: - - "utm_proxy_auth_profile - the ``frontend_cookie_secret`` return value now contains a placeholder string instead of the module's ``frontend_cookie_secret`` parameter (https://github.com/ansible-collections/community.general/pull/1736)." 
diff --git a/changelogs/fragments/path_join-shim-filter.yml b/changelogs/fragments/path_join-shim-filter.yml deleted file mode 100644 index f96922203f..0000000000 --- a/changelogs/fragments/path_join-shim-filter.yml +++ /dev/null @@ -1,3 +0,0 @@ -add plugin.filter: - - name: path_join - description: Redirects to ansible.builtin.path_join for ansible-base 2.10 or newer, and provides a compatible implementation for Ansible 2.9 diff --git a/changelogs/fragments/remove-deprecated-features.yml b/changelogs/fragments/remove-deprecated-features.yml deleted file mode 100644 index e728ce62d3..0000000000 --- a/changelogs/fragments/remove-deprecated-features.yml +++ /dev/null @@ -1,16 +0,0 @@ -removed_features: -- "airbrake_deployment - removed deprecated ``token`` parameter. Use ``project_id`` and ``project_key`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "bigpanda - the alias ``message`` has been removed. Use ``deployment_message`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "cisco_spark, cisco_webex - the alias ``message`` has been removed. Use ``msg`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "clc_aa_policy - the ``wait`` parameter has been removed. It did not have any effect (https://github.com/ansible-collections/community.general/pull/1926)." -- "datadog_monitor - the alias ``message`` has been removed. Use ``notification_message`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "django_manage - the parameter ``liveserver`` has been removed (https://github.com/ansible-collections/community.general/pull/1926)." -- "idrac_redfish_config - the parameters ``manager_attribute_name`` and ``manager_attribute_value`` have been removed. Use ``manager_attributes`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "iso_extract - the alias ``thirsty`` has been removed. 
Use ``force`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "redfish_config - the parameters ``bios_attribute_name`` and ``bios_attribute_value`` have been removed. Use ``bios_attributes`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "syspatch - the ``apply`` parameter has been removed. This is the default mode, so simply removing it will not change the behavior (https://github.com/ansible-collections/community.general/pull/1926)." -- "xbps - the ``force`` parameter has been removed. It did not have any effect (https://github.com/ansible-collections/community.general/pull/1926)." -- "redfish modules - issuing a data modification command without specifying the ID of the target System, Chassis or Manager resource when there is more than one is no longer allowed. Use the ``resource_id`` option to specify the target ID (https://github.com/ansible-collections/community.general/pull/1926)." -- "pulp_repo - the alias ``ca_cert`` has been removed. Use ``feed_ca_cert`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "pulp_repo - the ``feed_client_cert`` parameter no longer defaults to the value of the ``client_cert`` parameter (https://github.com/ansible-collections/community.general/pull/1926)." -- "pulp_repo - the ``feed_client_key`` parameter no longer defaults to the value of the ``client_key`` parameter (https://github.com/ansible-collections/community.general/pull/1926)." diff --git a/changelogs/fragments/remove-deprecated-modules.yml b/changelogs/fragments/remove-deprecated-modules.yml deleted file mode 100644 index fa9d9c9eb7..0000000000 --- a/changelogs/fragments/remove-deprecated-modules.yml +++ /dev/null @@ -1,66 +0,0 @@ -removed_features: -- "The deprecated ali_instance_facts module has been removed. Use ali_instance_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated hpilo_facts module has been removed. 
Use hpilo_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated idrac_redfish_facts module has been removed. Use idrac_redfish_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated jenkins_job_facts module has been removed. Use jenkins_job_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated memset_memstore_facts module has been removed. Use memset_memstore_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated memset_server_facts module has been removed. Use memset_server_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated na_ontap_gather_facts module has been removed. Use netapp.ontap.na_ontap_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated nginx_status_facts module has been removed. Use nginx_status_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated one_image_facts module has been removed. Use one_image_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated onepassword_facts module has been removed. Use onepassword_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_datacenter_facts module has been removed. Use oneview_datacenter_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_enclosure_facts module has been removed. Use oneview_enclosure_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_ethernet_network_facts module has been removed. Use oneview_ethernet_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." 
-- "The deprecated oneview_fc_network_facts module has been removed. Use oneview_fc_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_fcoe_network_facts module has been removed. Use oneview_fcoe_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_logical_interconnect_group_facts module has been removed. Use oneview_logical_interconnect_group_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_network_set_facts module has been removed. Use oneview_network_set_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_san_manager_facts module has been removed. Use oneview_san_manager_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated online_server_facts module has been removed. Use online_server_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated online_user_facts module has been removed. Use online_user_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated purefa_facts module has been removed. Use purestorage.flasharray.purefa_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated purefb_facts module has been removed. Use purestorage.flasharray.purefb_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated python_requirements_facts module has been removed. Use python_requirements_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated redfish_facts module has been removed. Use redfish_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_image_facts module has been removed. 
Use scaleway_image_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_ip_facts module has been removed. Use scaleway_ip_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_organization_facts module has been removed. Use scaleway_organization_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_security_group_facts module has been removed. Use scaleway_security_group_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_server_facts module has been removed. Use scaleway_server_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_snapshot_facts module has been removed. Use scaleway_snapshot_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_volume_facts module has been removed. Use scaleway_volume_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated smartos_image_facts module has been removed. Use smartos_image_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated vertica_facts module has been removed. Use vertica_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated xenserver_guest_facts module has been removed. Use xenserver_guest_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt module has been removed. Use ovirt.ovirt.ovirt_vm instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_affinity_label_facts module has been removed. Use ovirt.ovirt.ovirt_affinity_label_info instead (https://github.com/ansible-collections/community.general/pull/1924)." 
-- "The deprecated ovirt_api_facts module has been removed. Use ovirt.ovirt.ovirt_api_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_cluster_facts module has been removed. Use ovirt.ovirt.ovirt_cluster_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_datacenter_facts module has been removed. Use ovirt.ovirt.ovirt_datacenter_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_disk_facts module has been removed. Use ovirt.ovirt.ovirt_disk_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_event_facts module has been removed. Use ovirt.ovirt.ovirt_event_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_external_provider_facts module has been removed. Use ovirt.ovirt.ovirt_external_provider_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_group_facts module has been removed. Use ovirt.ovirt.ovirt_group_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_host_facts module has been removed. Use ovirt.ovirt.ovirt_host_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_host_storage_facts module has been removed. Use ovirt.ovirt.ovirt_host_storage_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_network_facts module has been removed. Use ovirt.ovirt.ovirt_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_nic_facts module has been removed. Use ovirt.ovirt.ovirt_nic_info instead (https://github.com/ansible-collections/community.general/pull/1924)." 
-- "The deprecated ovirt_permission_facts module has been removed. Use ovirt.ovirt.ovirt_permission_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_quota_facts module has been removed. Use ovirt.ovirt.ovirt_quota_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_scheduling_policy_facts module has been removed. Use ovirt.ovirt.ovirt_scheduling_policy_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_snapshot_facts module has been removed. Use ovirt.ovirt.ovirt_snapshot_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_storage_domain_facts module has been removed. Use ovirt.ovirt.ovirt_storage_domain_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_storage_template_facts module has been removed. Use ovirt.ovirt.ovirt_storage_template_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_storage_vm_facts module has been removed. Use ovirt.ovirt.ovirt_storage_vm_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_tag_facts module has been removed. Use ovirt.ovirt.ovirt_tag_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_template_facts module has been removed. Use ovirt.ovirt.ovirt_template_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_user_facts module has been removed. Use ovirt.ovirt.ovirt_user_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_vm_facts module has been removed. Use ovirt.ovirt.ovirt_vm_info instead (https://github.com/ansible-collections/community.general/pull/1924)." 
-- "The deprecated ovirt_vmpool_facts module has been removed. Use ovirt.ovirt.ovirt_vmpool_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The ovirt_facts docs fragment has been removed (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated gluster_heal_info module has been removed. Use gluster.gluster.gluster_heal_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated gluster_peer module has been removed. Use gluster.gluster.gluster_peer instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated gluster_volume module has been removed. Use gluster.gluster.gluster_volume instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated helm module has been removed. Use community.kubernetes.helm instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ldap_attr module has been removed. Use ldap_attrs instead (https://github.com/ansible-collections/community.general/pull/1924)." diff --git a/changelogs/fragments/selective-core-2.11.yml b/changelogs/fragments/selective-core-2.11.yml deleted file mode 100644 index 994e555c7c..0000000000 --- a/changelogs/fragments/selective-core-2.11.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "selective callback plugin - adjust import so that the plugin also works with ansible-core 2.11 (https://github.com/ansible-collections/community.general/pull/1807)." 
diff --git a/galaxy.yml b/galaxy.yml index 3676516625..a4b4cad7e0 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.0.0 +version: 3.1.0 readme: README.md authors: - Ansible (https://github.com/ansible) From cd116120ad5c735a79f8609778b8a6c2e84d69cb Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 26 Apr 2021 18:43:14 +0200 Subject: [PATCH 0002/2828] Run CI for old branches only once per week. --- .azure-pipelines/azure-pipelines.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 8c0804ab31..a479a33ba8 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -19,7 +19,14 @@ schedules: branches: include: - main - - stable-* + - stable-2 + - stable-3 + - cron: 0 8 * * 0 + displayName: Weekly (old branches) + always: true + branches: + include: + - stable-1 variables: - name: checkoutPath From 2ad004b97b750d31dc2900868598fafd31dc8d90 Mon Sep 17 00:00:00 2001 From: Alan Rominger Date: Mon, 26 Apr 2021 15:24:26 -0400 Subject: [PATCH 0003/2828] Make inventory scripts executable (#2337) * Make inventory scripts executable * Mark inventory scripts in vault folder as executable * Add changelog entry for making inventory scripts exectuable * Update changelogs/fragments/2337-mark-inventory-scripts-executable.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2337-mark-inventory-scripts-executable.yml | 3 +++ scripts/inventory/abiquo.py | 0 scripts/inventory/apache-libcloud.py | 0 scripts/inventory/apstra_aos.py | 0 scripts/inventory/azure_rm.py | 0 scripts/inventory/brook.py | 0 scripts/inventory/cloudforms.py | 0 scripts/inventory/cobbler.py | 0 scripts/inventory/collins.py | 0 scripts/inventory/consul_io.py | 0 scripts/inventory/docker.py | 0 scripts/inventory/fleet.py | 0 scripts/inventory/foreman.py | 0 scripts/inventory/freeipa.py | 0 
scripts/inventory/infoblox.py | 0 scripts/inventory/jail.py | 0 scripts/inventory/landscape.py | 0 scripts/inventory/linode.py | 0 scripts/inventory/lxc_inventory.py | 0 scripts/inventory/lxd.py | 0 scripts/inventory/mdt_dynamic_inventory.py | 0 scripts/inventory/nagios_livestatus.py | 0 scripts/inventory/nagios_ndo.py | 0 scripts/inventory/nsot.py | 0 scripts/inventory/openshift.py | 0 scripts/inventory/openvz.py | 0 scripts/inventory/ovirt.py | 0 scripts/inventory/ovirt4.py | 0 scripts/inventory/packet_net.py | 0 scripts/inventory/proxmox.py | 0 scripts/inventory/rackhd.py | 0 scripts/inventory/rax.py | 0 scripts/inventory/rudder.py | 0 scripts/inventory/scaleway.py | 0 scripts/inventory/serf.py | 0 scripts/inventory/softlayer.py | 0 scripts/inventory/spacewalk.py | 0 scripts/inventory/ssh_config.py | 0 scripts/inventory/stacki.py | 0 scripts/inventory/vagrant.py | 0 scripts/inventory/vbox.py | 0 scripts/inventory/zone.py | 0 scripts/vault/azure_vault.py | 0 scripts/vault/vault-keyring-client.py | 0 scripts/vault/vault-keyring.py | 0 45 files changed, 3 insertions(+) create mode 100644 changelogs/fragments/2337-mark-inventory-scripts-executable.yml mode change 100644 => 100755 scripts/inventory/abiquo.py mode change 100644 => 100755 scripts/inventory/apache-libcloud.py mode change 100644 => 100755 scripts/inventory/apstra_aos.py mode change 100644 => 100755 scripts/inventory/azure_rm.py mode change 100644 => 100755 scripts/inventory/brook.py mode change 100644 => 100755 scripts/inventory/cloudforms.py mode change 100644 => 100755 scripts/inventory/cobbler.py mode change 100644 => 100755 scripts/inventory/collins.py mode change 100644 => 100755 scripts/inventory/consul_io.py mode change 100644 => 100755 scripts/inventory/docker.py mode change 100644 => 100755 scripts/inventory/fleet.py mode change 100644 => 100755 scripts/inventory/foreman.py mode change 100644 => 100755 scripts/inventory/freeipa.py mode change 100644 => 100755 scripts/inventory/infoblox.py mode 
change 100644 => 100755 scripts/inventory/jail.py mode change 100644 => 100755 scripts/inventory/landscape.py mode change 100644 => 100755 scripts/inventory/linode.py mode change 100644 => 100755 scripts/inventory/lxc_inventory.py mode change 100644 => 100755 scripts/inventory/lxd.py mode change 100644 => 100755 scripts/inventory/mdt_dynamic_inventory.py mode change 100644 => 100755 scripts/inventory/nagios_livestatus.py mode change 100644 => 100755 scripts/inventory/nagios_ndo.py mode change 100644 => 100755 scripts/inventory/nsot.py mode change 100644 => 100755 scripts/inventory/openshift.py mode change 100644 => 100755 scripts/inventory/openvz.py mode change 100644 => 100755 scripts/inventory/ovirt.py mode change 100644 => 100755 scripts/inventory/ovirt4.py mode change 100644 => 100755 scripts/inventory/packet_net.py mode change 100644 => 100755 scripts/inventory/proxmox.py mode change 100644 => 100755 scripts/inventory/rackhd.py mode change 100644 => 100755 scripts/inventory/rax.py mode change 100644 => 100755 scripts/inventory/rudder.py mode change 100644 => 100755 scripts/inventory/scaleway.py mode change 100644 => 100755 scripts/inventory/serf.py mode change 100644 => 100755 scripts/inventory/softlayer.py mode change 100644 => 100755 scripts/inventory/spacewalk.py mode change 100644 => 100755 scripts/inventory/ssh_config.py mode change 100644 => 100755 scripts/inventory/stacki.py mode change 100644 => 100755 scripts/inventory/vagrant.py mode change 100644 => 100755 scripts/inventory/vbox.py mode change 100644 => 100755 scripts/inventory/zone.py mode change 100644 => 100755 scripts/vault/azure_vault.py mode change 100644 => 100755 scripts/vault/vault-keyring-client.py mode change 100644 => 100755 scripts/vault/vault-keyring.py diff --git a/changelogs/fragments/2337-mark-inventory-scripts-executable.yml b/changelogs/fragments/2337-mark-inventory-scripts-executable.yml new file mode 100644 index 0000000000..69aa3fff62 --- /dev/null +++ 
b/changelogs/fragments/2337-mark-inventory-scripts-executable.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - inventory and vault scripts - change file permissions to make vendored inventory and vault scripts exectuable (https://github.com/ansible-collections/community.general/pull/2337). diff --git a/scripts/inventory/abiquo.py b/scripts/inventory/abiquo.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/apache-libcloud.py b/scripts/inventory/apache-libcloud.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/apstra_aos.py b/scripts/inventory/apstra_aos.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/azure_rm.py b/scripts/inventory/azure_rm.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/brook.py b/scripts/inventory/brook.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/cloudforms.py b/scripts/inventory/cloudforms.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/cobbler.py b/scripts/inventory/cobbler.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/collins.py b/scripts/inventory/collins.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/consul_io.py b/scripts/inventory/consul_io.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/docker.py b/scripts/inventory/docker.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/fleet.py b/scripts/inventory/fleet.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/foreman.py b/scripts/inventory/foreman.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/freeipa.py b/scripts/inventory/freeipa.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/infoblox.py b/scripts/inventory/infoblox.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/jail.py b/scripts/inventory/jail.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/landscape.py b/scripts/inventory/landscape.py old mode 100644 new mode 100755 diff --git 
a/scripts/inventory/linode.py b/scripts/inventory/linode.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/lxc_inventory.py b/scripts/inventory/lxc_inventory.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/lxd.py b/scripts/inventory/lxd.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/mdt_dynamic_inventory.py b/scripts/inventory/mdt_dynamic_inventory.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/nagios_livestatus.py b/scripts/inventory/nagios_livestatus.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/nagios_ndo.py b/scripts/inventory/nagios_ndo.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/nsot.py b/scripts/inventory/nsot.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/openshift.py b/scripts/inventory/openshift.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/openvz.py b/scripts/inventory/openvz.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/ovirt.py b/scripts/inventory/ovirt.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/ovirt4.py b/scripts/inventory/ovirt4.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/packet_net.py b/scripts/inventory/packet_net.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/proxmox.py b/scripts/inventory/proxmox.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/rackhd.py b/scripts/inventory/rackhd.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/rax.py b/scripts/inventory/rax.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/rudder.py b/scripts/inventory/rudder.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/scaleway.py b/scripts/inventory/scaleway.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/serf.py b/scripts/inventory/serf.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/softlayer.py b/scripts/inventory/softlayer.py old 
mode 100644 new mode 100755 diff --git a/scripts/inventory/spacewalk.py b/scripts/inventory/spacewalk.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/ssh_config.py b/scripts/inventory/ssh_config.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/stacki.py b/scripts/inventory/stacki.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/vagrant.py b/scripts/inventory/vagrant.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/vbox.py b/scripts/inventory/vbox.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/zone.py b/scripts/inventory/zone.py old mode 100644 new mode 100755 diff --git a/scripts/vault/azure_vault.py b/scripts/vault/azure_vault.py old mode 100644 new mode 100755 diff --git a/scripts/vault/vault-keyring-client.py b/scripts/vault/vault-keyring-client.py old mode 100644 new mode 100755 diff --git a/scripts/vault/vault-keyring.py b/scripts/vault/vault-keyring.py old mode 100644 new mode 100755 From 9d13acd68e3b9104e48ddddedc811603330d6b7b Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 27 Apr 2021 14:16:24 +0300 Subject: [PATCH 0004/2828] BOTMETA.yml: team_suse - add a maintainer (#2354) --- .github/BOTMETA.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 06501fc2aa..c14fb0d0e1 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -707,7 +707,8 @@ files: labels: zypper ignore: dirtyharrycallahan robinro $modules/packaging/os/zypper_repository.py: - maintainers: matze + maintainers: $team_suse matze + labels: zypper $modules/remote_management/cobbler/: maintainers: dagwieers $modules/remote_management/hpilo/: @@ -1025,5 +1026,5 @@ macros: team_rhn: FlossWare alikins barnabycourt vritant team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l - team_suse: commel dcermak evrardjp lrupp 
toabctl AnderEnder alxgu andytom + team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso From 48ef05def340588393075341c3b0b4e44f5fdab8 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 27 Apr 2021 23:18:29 +1200 Subject: [PATCH 0005/2828] spotinst_aws_elastigroup - fixed elements for many lists (#2355) * fixed elements for many lists * added changelog fragment * Removed verbose types in description - still missing formatting and properly documenting dicts --- ...spotinst_aws_elastigroup-list-elements.yml | 2 + .../spotinst/spotinst_aws_elastigroup.py | 165 ++++++++++-------- 2 files changed, 92 insertions(+), 75 deletions(-) create mode 100644 changelogs/fragments/2355-spotinst_aws_elastigroup-list-elements.yml diff --git a/changelogs/fragments/2355-spotinst_aws_elastigroup-list-elements.yml b/changelogs/fragments/2355-spotinst_aws_elastigroup-list-elements.yml new file mode 100644 index 0000000000..876b212690 --- /dev/null +++ b/changelogs/fragments/2355-spotinst_aws_elastigroup-list-elements.yml @@ -0,0 +1,2 @@ +minor_changes: + - spotinst_aws_elastigroup - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2355). diff --git a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py index 1a0ddb9fef..5ed8028e37 100644 --- a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py +++ b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py @@ -23,26 +23,26 @@ options: credentials_path: description: - - (Path) Optional parameter that allows to set a non-default credentials path. + - Optional parameter that allows to set a non-default credentials path. 
default: ~/.spotinst/credentials type: path account_id: description: - - (String) Optional parameter that allows to set an account-id inside the module configuration - By default this is retrieved from the credentials path + - Optional parameter that allows to set an account-id inside the module configuration. + By default this is retrieved from the credentials path. type: str availability_vs_cost: description: - - (String) The strategy orientation. + - The strategy orientation. - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)." required: true type: str availability_zones: description: - - (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup; + - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are name (String), @@ -50,10 +50,11 @@ options: placement_group_name (String), required: true type: list + elements: dict block_device_mappings: description: - - (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances; + - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; You can specify virtual devices and EBS volumes.; '[{"key":"value", "key":"value"}]'; keys allowed are @@ -68,10 +69,11 @@ options: volume_type(String), volume_size(Integer)) type: list + elements: dict chef: description: - - (Object) The Chef integration configuration.; + - The Chef integration configuration.; Expects the following keys - chef_server (String), organization (String), user (String), @@ -81,92 +83,94 @@ options: draining_timeout: description: - - (Integer) Time for instance to be drained from incoming requests and deregistered from ELB before termination. + - Time for instance to be drained from incoming requests and deregistered from ELB before termination. 
type: int ebs_optimized: description: - - (Boolean) Enable EBS optimization for supported instances which are not enabled by default.; + - Enable EBS optimization for supported instances which are not enabled by default.; Note - additional charges will be applied. type: bool ebs_volume_pool: description: - - (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; + - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; '[{"key":"value", "key":"value"}]'; keys allowed are - volume_ids (List of Strings), device_name (String) type: list + elements: dict ecs: description: - - (Object) The ECS integration configuration.; + - The ECS integration configuration.; Expects the following key - cluster_name (String) type: dict elastic_ips: description: - - (List of Strings) List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances + - List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances type: list + elements: str fallback_to_od: description: - - (Boolean) In case of no spots available, Elastigroup will launch an On-demand instance instead + - In case of no spots available, Elastigroup will launch an On-demand instance instead type: bool health_check_grace_period: description: - - (Integer) The amount of time, in seconds, after the instance has launched to start and check its health. + - The amount of time, in seconds, after the instance has launched to start and check its health. - If not specified, it defaults to C(300). type: int health_check_unhealthy_duration_before_replacement: description: - - (Integer) Minimal mount of time instance should be unhealthy for us to consider it unhealthy. + - Minimal mount of time instance should be unhealthy for us to consider it unhealthy. type: int health_check_type: description: - - (String) The service to use for the health check. 
+ - The service to use for the health check. - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)." type: str iam_role_name: description: - - (String) The instance profile iamRole name + - The instance profile iamRole name - Only use iam_role_arn, or iam_role_name type: str iam_role_arn: description: - - (String) The instance profile iamRole arn + - The instance profile iamRole arn - Only use iam_role_arn, or iam_role_name type: str id: description: - - (String) The group id if it already exists and you want to update, or delete it. + - The group id if it already exists and you want to update, or delete it. This will not work unless the uniqueness_by field is set to id. When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created. type: str image_id: description: - - (String) The image Id used to launch the instance.; + - The image Id used to launch the instance.; In case of conflict between Instance type and image type, an error will be returned required: true type: str key_pair: description: - - (String) Specify a Key Pair to attach to the instances + - Specify a Key Pair to attach to the instances type: str kubernetes: description: - - (Object) The Kubernetes integration configuration. + - The Kubernetes integration configuration. Expects the following keys - api_server (String), token (String) @@ -174,47 +178,48 @@ options: lifetime_period: description: - - (Integer) lifetime period + - Lifetime period type: int load_balancers: description: - - (List of Strings) List of classic ELB names + - List of classic ELB names type: list + elements: str max_size: description: - - (Integer) The upper limit number of instances that you can scale up to + - The upper limit number of instances that you can scale up to required: true type: int mesosphere: description: - - (Object) The Mesosphere integration configuration. + - The Mesosphere integration configuration. 
Expects the following key - api_server (String) type: dict min_size: description: - - (Integer) The lower limit number of instances that you can scale down to + - The lower limit number of instances that you can scale down to required: true type: int monitoring: description: - - (String) Describes whether instance Enhanced Monitoring is enabled + - Describes whether instance Enhanced Monitoring is enabled type: str name: description: - - (String) Unique name for elastigroup to be created, updated or deleted + - Unique name for elastigroup to be created, updated or deleted required: true type: str network_interfaces: description: - - (List of Objects) a list of hash/dictionaries of network interfaces to add to the elastigroup; + - A list of hash/dictionaries of network interfaces to add to the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - description (String), @@ -229,29 +234,30 @@ options: associate_ipv6_address (Boolean), private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)) type: list + elements: dict on_demand_count: description: - - (Integer) Required if risk is not set + - Required if risk is not set - Number of on demand instances to launch. 
All other instances will be spot instances.; Either set this parameter or the risk parameter type: int on_demand_instance_type: description: - - (String) On-demand instance type that will be provisioned + - On-demand instance type that will be provisioned type: str opsworks: description: - - (Object) The elastigroup OpsWorks integration configration.; + - The elastigroup OpsWorks integration configration.; Expects the following key - layer_id (String) type: dict persistence: description: - - (Object) The Stateful elastigroup configration.; + - The Stateful elastigroup configration.; Accepts the following keys - should_persist_root_device (Boolean), should_persist_block_devices (Boolean), @@ -260,14 +266,14 @@ options: product: description: - - (String) Operation system type. + - Operation system type. - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))." required: true type: str rancher: description: - - (Object) The Rancher integration configuration.; + - The Rancher integration configuration.; Expects the following keys - version (String), access_key (String), @@ -277,7 +283,7 @@ options: right_scale: description: - - (Object) The Rightscale integration configuration.; + - The Rightscale integration configuration.; Expects the following keys - account_id (String), refresh_token (String) @@ -285,12 +291,12 @@ options: risk: description: - - (Integer) required if on demand is not set. The percentage of Spot instances to launch (0 - 100). + - Required if on demand is not set. The percentage of Spot instances to launch (0 - 100). type: int roll_config: description: - - (Object) Roll configuration.; + - Roll configuration.; If you would like the group to roll after updating, please use this feature. 
Accepts the following keys - batch_size_percentage(Integer, Required), @@ -300,7 +306,7 @@ options: scheduled_tasks: description: - - (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup; + - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - adjustment (Integer), @@ -315,84 +321,90 @@ options: task_type (String, required), is_enabled (Boolean) type: list + elements: dict security_group_ids: description: - - (List of Strings) One or more security group IDs. ; + - One or more security group IDs. ; In case of update it will override the existing Security Group with the new given array required: true type: list + elements: str shutdown_script: description: - - (String) The Base64-encoded shutdown script that executes prior to instance termination. + - The Base64-encoded shutdown script that executes prior to instance termination. Encode before setting. type: str signals: description: - - (List of Objects) a list of hash/dictionaries of signals to configure in the elastigroup; + - A list of hash/dictionaries of signals to configure in the elastigroup; keys allowed are - name (String, required), timeout (Integer) type: list + elements: dict spin_up_time: description: - - (Integer) spin up time, in seconds, for the instance + - Spin up time, in seconds, for the instance type: int spot_instance_types: description: - - (List of Strings) Spot instance type that will be provisioned. + - Spot instance type that will be provisioned. required: true type: list + elements: str state: choices: - present - absent description: - - (String) create or delete the elastigroup + - Create or delete the elastigroup default: present type: str tags: description: - - (List of tagKey:tagValue pairs) a list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value); + - A list of tags to configure in the elastigroup. 
Please specify list of keys and values (key colon value); type: list + elements: dict target: description: - - (Integer) The number of instances to launch + - The number of instances to launch required: true type: int target_group_arns: description: - - (List of Strings) List of target group arns instances should be registered to + - List of target group arns instances should be registered to type: list + elements: str tenancy: description: - - (String) dedicated vs shared tenancy. + - Dedicated vs shared tenancy. - "The available choices are: C(default), C(dedicated)." type: str terminate_at_end_of_billing_hour: description: - - (Boolean) terminate at the end of billing hour + - Terminate at the end of billing hour type: bool unit: description: - - (String) The capacity unit to launch instances by. + - The capacity unit to launch instances by. - "The available choices are: C(instance), C(weight)." type: str up_scaling_policies: description: - - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup; + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - policy_name (String, required), @@ -413,10 +425,11 @@ options: maximum (String), minimum (String) type: list + elements: dict down_scaling_policies: description: - - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup; + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - policy_name (String, required), @@ -437,10 +450,11 @@ options: maximum (String), minimum (String) type: list + elements: dict target_tracking_policies: description: - - (List of Objects) a list of hash/dictionaries of target tracking policies to configure in the elastigroup; + - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; '[{"key":"value", 
"key":"value"}]'; keys allowed are - policy_name (String, required), @@ -452,37 +466,38 @@ options: cooldown (String, required), target (String, required) type: list + elements: dict uniqueness_by: choices: - id - name description: - - (String) If your group names are not unique, you may use this feature to update or delete a specific group. + - If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created. default: name type: str user_data: description: - - (String) Base64-encoded MIME user data. Encode before setting the value. + - Base64-encoded MIME user data. Encode before setting the value. type: str utilize_reserved_instances: description: - - (Boolean) In case of any available Reserved Instances, + - In case of any available Reserved Instances, Elastigroup will utilize your reservations before purchasing Spot instances. type: bool wait_for_instances: description: - - (Boolean) Whether or not the elastigroup creation / update actions should wait for the instances to spin + - Whether or not the elastigroup creation / update actions should wait for the instances to spin type: bool default: false wait_timeout: description: - - (Integer) How long the module should wait for instances before failing the action.; + - How long the module should wait for instances before failing the action.; Only works if wait_for_instances is True. 
type: int @@ -1428,18 +1443,18 @@ def main(): fields = dict( account_id=dict(type='str'), availability_vs_cost=dict(type='str', required=True), - availability_zones=dict(type='list', required=True), - block_device_mappings=dict(type='list'), + availability_zones=dict(type='list', elements='dict', required=True), + block_device_mappings=dict(type='list', elements='dict'), chef=dict(type='dict'), credentials_path=dict(type='path', default="~/.spotinst/credentials"), do_not_update=dict(default=[], type='list'), - down_scaling_policies=dict(type='list'), + down_scaling_policies=dict(type='list', elements='dict'), draining_timeout=dict(type='int'), ebs_optimized=dict(type='bool'), - ebs_volume_pool=dict(type='list'), + ebs_volume_pool=dict(type='list', elements='dict'), ecs=dict(type='dict'), elastic_beanstalk=dict(type='dict'), - elastic_ips=dict(type='list'), + elastic_ips=dict(type='list', elements='str'), fallback_to_od=dict(type='bool'), id=dict(type='str'), health_check_grace_period=dict(type='int'), @@ -1451,7 +1466,7 @@ def main(): key_pair=dict(type='str', no_log=False), kubernetes=dict(type='dict'), lifetime_period=dict(type='int'), - load_balancers=dict(type='list'), + load_balancers=dict(type='list', elements='str'), max_size=dict(type='int', required=True), mesosphere=dict(type='dict'), min_size=dict(type='int', required=True), @@ -1459,7 +1474,7 @@ def main(): multai_load_balancers=dict(type='list'), multai_token=dict(type='str', no_log=True), name=dict(type='str', required=True), - network_interfaces=dict(type='list'), + network_interfaces=dict(type='list', elements='dict'), on_demand_count=dict(type='int'), on_demand_instance_type=dict(type='str'), opsworks=dict(type='dict'), @@ -1469,16 +1484,16 @@ def main(): right_scale=dict(type='dict'), risk=dict(type='int'), roll_config=dict(type='dict'), - scheduled_tasks=dict(type='list'), - security_group_ids=dict(type='list', required=True), + scheduled_tasks=dict(type='list', elements='dict'), + 
security_group_ids=dict(type='list', elements='str', required=True), shutdown_script=dict(type='str'), - signals=dict(type='list'), + signals=dict(type='list', elements='dict'), spin_up_time=dict(type='int'), - spot_instance_types=dict(type='list', required=True), + spot_instance_types=dict(type='list', elements='str', required=True), state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='list'), + tags=dict(type='list', elements='dict'), target=dict(type='int', required=True), - target_group_arns=dict(type='list'), + target_group_arns=dict(type='list', elements='str'), tenancy=dict(type='str'), terminate_at_end_of_billing_hour=dict(type='bool'), token=dict(type='str', no_log=True), @@ -1486,8 +1501,8 @@ def main(): user_data=dict(type='str'), utilize_reserved_instances=dict(type='bool'), uniqueness_by=dict(default='name', choices=['name', 'id']), - up_scaling_policies=dict(type='list'), - target_tracking_policies=dict(type='list'), + up_scaling_policies=dict(type='list', elements='dict'), + target_tracking_policies=dict(type='list', elements='dict'), wait_for_instances=dict(type='bool', default=False), wait_timeout=dict(type='int') ) From 77d4bc29421a78b479c6e826ca7d68f00fa82f57 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 27 Apr 2021 22:13:40 +0200 Subject: [PATCH 0006/2828] No longer required for devel's ansible-test. 
(#2365) ci_complete --- tests/sanity/ignore-2.12.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 80975cf389..68684f000d 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -69,7 +69,5 @@ plugins/modules/system/ssh_config.py use-argspec-type-path # Required since modu plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path -tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code tests/utils/shippable/check_matrix.py replace-urlopen tests/utils/shippable/timing.py shebang From b3f436aa6325c34e1824ccc39e8446969b70ab95 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 30 Apr 2021 04:28:43 +0200 Subject: [PATCH 0007/2828] Use Ansible's codecov uploader. 
(#2377) --- .azure-pipelines/scripts/publish-codecov.sh | 2 +- tests/utils/shippable/shippable.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/scripts/publish-codecov.sh b/.azure-pipelines/scripts/publish-codecov.sh index 7aeabda0c0..6d184f0b8d 100755 --- a/.azure-pipelines/scripts/publish-codecov.sh +++ b/.azure-pipelines/scripts/publish-codecov.sh @@ -7,7 +7,7 @@ set -o pipefail -eu output_path="$1" -curl --silent --show-error https://codecov.io/bash > codecov.sh +curl --silent --show-error https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh > codecov.sh for file in "${output_path}"/reports/coverage*.xml; do name="${file}" diff --git a/tests/utils/shippable/shippable.sh b/tests/utils/shippable/shippable.sh index f239e86975..f70aa11380 100755 --- a/tests/utils/shippable/shippable.sh +++ b/tests/utils/shippable/shippable.sh @@ -181,7 +181,7 @@ function cleanup flags="${flags//=/,}" flags="${flags//[^a-zA-Z0-9_,]/_}" - bash <(curl -s https://codecov.io/bash) \ + bash <(curl -s https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh) \ -f "${file}" \ -F "${flags}" \ -n "${test}" \ From 5fbe946c3a68557e15ac4f0acf4508ced250abde Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 30 Apr 2021 22:13:46 +0200 Subject: [PATCH 0008/2828] Spread nightly runs out. 
(#2387) --- .azure-pipelines/azure-pipelines.yml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index a479a33ba8..d4153d9796 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -14,15 +14,20 @@ pr: schedules: - cron: 0 8 * * * - displayName: Nightly + displayName: Nightly (main) always: true branches: include: - main + - cron: 0 10 * * * + displayName: Nightly (active stable branches) + always: true + branches: + include: - stable-2 - stable-3 - - cron: 0 8 * * 0 - displayName: Weekly (old branches) + - cron: 0 11 * * 0 + displayName: Weekly (old stable branches) always: true branches: include: From ae21af882075948f9e427919776e5c155b263ea2 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Sat, 1 May 2021 02:19:33 +0430 Subject: [PATCH 0009/2828] Add Fedora 34 to CI (#2384) * Add fedora 34 and fix typo * Remove Fedora 32 from devel testing * Use one newer version of Fedora for fixed ansible versions * Revert "Use one newer version of Fedora for fixed ansible versions" This reverts commit cbd006bd385865905c18b87655bd98b0610d4abc. * Try to skip task. * Revert "Try to skip task." This reverts commit ff0c899a8650e78967a1933b93fd8015695a6a61. 
* Temporary disable Fedora 34 on setup_postgresql_db Co-authored-by: Felix Fontein --- .azure-pipelines/azure-pipelines.yml | 6 +++--- .../integration/targets/setup_postgresql_db/tasks/main.yml | 4 ++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index d4153d9796..8d1b81865e 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -268,10 +268,10 @@ stages: test: centos7 - name: CentOS 8 test: centos8 - - name: Fedora 32 - test: fedora32 - name: Fedora 33 test: fedora33 + - name: Fedora 34 + test: fedora34 - name: openSUSE 15 py2 test: opensuse15py2 - name: openSUSE 15 py3 @@ -294,7 +294,7 @@ stages: targets: - name: CentOS 8 test: centos8 - - name: Fedora 32 + - name: Fedora 33 test: fedora33 - name: openSUSE 15 py3 test: opensuse15 diff --git a/tests/integration/targets/setup_postgresql_db/tasks/main.yml b/tests/integration/targets/setup_postgresql_db/tasks/main.yml index 2322ee2cbf..f535ecdcf9 100644 --- a/tests/integration/targets/setup_postgresql_db/tasks/main.yml +++ b/tests/integration/targets/setup_postgresql_db/tasks/main.yml @@ -11,6 +11,10 @@ - meta: end_play when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8' +# Temporary disable Fedora 34 +- meta: end_play + when: ansible_facts.distribution == 'Fedora' and ansible_facts.distribution_major_version == '34' + - name: python 2 set_fact: python_suffix: '' From 276880aac1a4df7cd9cfbea983c3e743de0a1bbf Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 1 May 2021 09:51:35 +0200 Subject: [PATCH 0010/2828] Remove resmo as composer maintainer. 
(#2392) --- .github/BOTMETA.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index c14fb0d0e1..6fcfdff4c1 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -560,7 +560,8 @@ files: $modules/packaging/language/bundler.py: maintainers: thoiberg $modules/packaging/language/composer.py: - maintainers: dmtrs resmo + maintainers: dmtrs + ignore: resmo $modules/packaging/language/cpanm.py: maintainers: fcuny russoz $modules/packaging/language/easy_install.py: From 26c3bd25f676b67a89a625e13991ab41394f5098 Mon Sep 17 00:00:00 2001 From: Xabier Napal Date: Sat, 1 May 2021 14:19:05 +0200 Subject: [PATCH 0011/2828] influxdb_retention_policy: fix duration parsing to support INF values (#2396) * influxdb_retention_policy: fix duration parsing to support INF values * add changelog --- .../2284-influxdb_retention_policy-fix_duration_parsing.yml | 3 +++ .../modules/database/influxdb/influxdb_retention_policy.py | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2284-influxdb_retention_policy-fix_duration_parsing.yml diff --git a/changelogs/fragments/2284-influxdb_retention_policy-fix_duration_parsing.yml b/changelogs/fragments/2284-influxdb_retention_policy-fix_duration_parsing.yml new file mode 100644 index 0000000000..04c82480c1 --- /dev/null +++ b/changelogs/fragments/2284-influxdb_retention_policy-fix_duration_parsing.yml @@ -0,0 +1,3 @@ +bugfixes: + - influxdb_retention_policy - fix bug where ``INF`` duration values failed parsing + (https://github.com/ansible-collections/community.general/pull/2385). 
diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py b/plugins/modules/database/influxdb/influxdb_retention_policy.py index 2c2f9674b7..883adaffa6 100644 --- a/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ b/plugins/modules/database/influxdb/influxdb_retention_policy.py @@ -129,7 +129,7 @@ from ansible_collections.community.general.plugins.module_utils.influxdb import from ansible.module_utils._text import to_native -VALID_DURATION_REGEX = re.compile(r'^(\d+(ns|u|µ|ms|s|m|h|d|w))+$') +VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$') DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)') EXTENDED_DURATION_REGEX = re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))') @@ -217,7 +217,7 @@ def create_retention_policy(module, client): influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) if influxdb_shard_group_duration_format < 3600000000000: - module.fail_json(msg="shard_group_duration value must be at least 1h") + module.fail_json(msg="shard_group_duration value must be finite and at least 1h") if not module.check_mode: try: @@ -256,7 +256,7 @@ def alter_retention_policy(module, client, retention_policy): influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) if influxdb_shard_group_duration_format < 3600000000000: - module.fail_json(msg="shard_group_duration value must be at least 1h") + module.fail_json(msg="shard_group_duration value must be finite and at least 1h") if (retention_policy['duration'] != influxdb_duration_format or retention_policy['shardGroupDuration'] != influxdb_shard_group_duration_format or From eb455c69a2c7f7ec28f6162e0c5a34f0bc7932e3 Mon Sep 17 00:00:00 2001 From: George Angelopoulos Date: Sat, 1 May 2021 19:23:14 +0300 Subject: [PATCH 0012/2828] composer: --no-interaction when discovering available options (#2348) The composer module always uses the no-interaction option if it discovers it _after_ calling 
"composer help ..." but not on the help call itself. The lack of this option caused composer to not exit when called through the ansible module. The same example command when ran interactively does not prompt for user interaction and exits immediately. It is therefore currently unknown why the same command hangs when called through the ansible composer module or even directly with the command module. Example command which hangs: php /usr/local/bin/composer help install --format=json --- ...-composer-no-interaction-option-discovery-to-avoid-hang.yaml | 2 ++ plugins/modules/packaging/language/composer.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml diff --git a/changelogs/fragments/2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml b/changelogs/fragments/2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml new file mode 100644 index 0000000000..0728aeb28b --- /dev/null +++ b/changelogs/fragments/2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml @@ -0,0 +1,2 @@ +bugfixes: + - composer - use ``no-interaction`` option when discovering available options to avoid an issue where composer hangs (https://github.com/ansible-collections/community.general/pull/2348). 
diff --git a/plugins/modules/packaging/language/composer.py b/plugins/modules/packaging/language/composer.py index c792098b04..64157cb685 100644 --- a/plugins/modules/packaging/language/composer.py +++ b/plugins/modules/packaging/language/composer.py @@ -169,7 +169,7 @@ def has_changed(string): def get_available_options(module, command='install'): # get all available options from a composer command using composer help to json - rc, out, err = composer_command(module, "help %s --format=json" % command) + rc, out, err = composer_command(module, "help %s" % command, arguments="--no-interaction --format=json") if rc != 0: output = parse_out(err) module.fail_json(msg=output) From 4e90ee752ed8d9586bb0df69511b0d040a0946ff Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 1 May 2021 22:21:17 +0200 Subject: [PATCH 0013/2828] Add ansible-test config file. (#2404) --- tests/config.yml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 tests/config.yml diff --git a/tests/config.yml b/tests/config.yml new file mode 100644 index 0000000000..ba0238e305 --- /dev/null +++ b/tests/config.yml @@ -0,0 +1,5 @@ +--- +# See template for more information: +# https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/config/config.yml +modules: + python_requires: default From c0221b75afffda7b55852d6510e04bb7f13f292d Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Sun, 2 May 2021 12:28:27 +0300 Subject: [PATCH 0014/2828] BOTMETA.yml: terraform - add a new maintainer (#2290) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 6fcfdff4c1..fd23d0c9e4 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -225,7 +225,7 @@ files: $modules/cloud/misc/: ignore: ryansb $modules/cloud/misc/terraform.py: - maintainers: m-yosefpor + maintainers: m-yosefpor rainerleber $modules/cloud/misc/xenserver_facts.py: maintainers: caphrim007 cheese labels: xenserver_facts From 
cd957fae4cbee051c16e899c2d06a54e227c14fc Mon Sep 17 00:00:00 2001 From: Daniel Werner Date: Mon, 3 May 2021 07:25:08 +0200 Subject: [PATCH 0015/2828] Fix #2373 - TypeError: a bytes-like object is required, not 'str' (#2375) * Fix #2373 * Changelog fragment for #2373 * Update changelogs/fragments/2373-svr4pkg-fix-typeerror.yml Co-authored-by: Amin Vakil * Update changelogs/fragments/2373-svr4pkg-fix-typeerror.yml Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein --- changelogs/fragments/2373-svr4pkg-fix-typeerror.yml | 3 +++ plugins/modules/packaging/os/svr4pkg.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2373-svr4pkg-fix-typeerror.yml diff --git a/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml b/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml new file mode 100644 index 0000000000..d0b3580889 --- /dev/null +++ b/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - svr4pkg - convert string to a bytes-like object to avoid ``TypeError`` with Python 3 (https://github.com/ansible-collections/community.general/issues/2373). 
diff --git a/plugins/modules/packaging/os/svr4pkg.py b/plugins/modules/packaging/os/svr4pkg.py index ea3cd7d468..aa7a5c2e52 100644 --- a/plugins/modules/packaging/os/svr4pkg.py +++ b/plugins/modules/packaging/os/svr4pkg.py @@ -121,7 +121,7 @@ def package_installed(module, name, category): def create_admin_file(): (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True) - fullauto = ''' + fullauto = b''' mail= instance=unique partial=nocheck From 26aba8e76687fb36688540194bec08a82c8b3070 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Mon, 3 May 2021 09:56:47 +0430 Subject: [PATCH 0016/2828] puppet - replace stdout with console in logdest option (#2407) * Change stdout to console * readd stdout, resulting in console * add changelog * readd stdout to docs and add a warning when it is used * version of what??? Co-authored-by: Felix Fontein * postpone deprecation in another PR * remove console option, so it can be backported * change changelog respectively * Fix changelog formatting Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2407-puppet-change_stdout_to_console.yaml | 3 +++ plugins/modules/system/puppet.py | 7 ++++--- 2 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2407-puppet-change_stdout_to_console.yaml diff --git a/changelogs/fragments/2407-puppet-change_stdout_to_console.yaml b/changelogs/fragments/2407-puppet-change_stdout_to_console.yaml new file mode 100644 index 0000000000..697b8e78d7 --- /dev/null +++ b/changelogs/fragments/2407-puppet-change_stdout_to_console.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - puppet - replace ``stdout`` with ``console`` in ``logdest`` option when ``all`` has been chosen (https://github.com/ansible-collections/community.general/issues/1190).
diff --git a/plugins/modules/system/puppet.py b/plugins/modules/system/puppet.py index 309da290d0..b83ef89aa5 100644 --- a/plugins/modules/system/puppet.py +++ b/plugins/modules/system/puppet.py @@ -54,7 +54,8 @@ options: logdest: description: - Where the puppet logs should go, if puppet apply is being used. - - C(all) will go to both C(stdout) and C(syslog). + - C(all) will go to both C(console) and C(syslog). + - C(stdout) will be deprecated and replaced by C(console). type: str choices: [ all, stdout, syslog ] default: stdout @@ -127,7 +128,7 @@ EXAMPLES = r''' community.general.puppet: noop: yes -- name: Run a manifest with debug, log to both syslog and stdout, specify module path +- name: Run a manifest with debug, log to both syslog and console, specify module path community.general.puppet: modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules logdest: all @@ -269,7 +270,7 @@ def main(): if p['logdest'] == 'syslog': cmd += "--logdest syslog " if p['logdest'] == 'all': - cmd += " --logdest syslog --logdest stdout" + cmd += " --logdest syslog --logdest console" if p['modulepath']: cmd += "--modulepath='%s'" % p['modulepath'] if p['environment']: From b5f8ae43204748c7f9e1719296ef81931c56c296 Mon Sep 17 00:00:00 2001 From: spike77453 Date: Mon, 3 May 2021 07:27:56 +0200 Subject: [PATCH 0017/2828] nmcli: Add 'slave-type bridge' to nmcli command if type is bridge-slave (#2409) --- ...ave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml | 2 ++ plugins/modules/net_tools/nmcli.py | 1 + tests/unit/plugins/modules/net_tools/test_nmcli.py | 1 + 3 files changed, 4 insertions(+) create mode 100644 changelogs/fragments/2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml diff --git a/changelogs/fragments/2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml b/changelogs/fragments/2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml new file mode 100644 
index 0000000000..8d0b4c1617 --- /dev/null +++ b/changelogs/fragments/2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml @@ -0,0 +1,2 @@ +bugfixes: + - nmcli - if type is ``bridge-slave`` add ``slave-type bridge`` to ``nmcli`` command (https://github.com/ansible-collections/community.general/issues/2408). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 4ae5a1dac9..02fbbd038b 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -780,6 +780,7 @@ class Nmcli(object): }) elif self.type == 'bridge-slave': options.update({ + 'connection.slave-type': 'bridge', 'bridge-port.path-cost': self.path_cost, 'bridge-port.hairpin-mode': self.hairpin, 'bridge-port.priority': self.slavepriority, diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 8d830bcf19..a05c8ccbf8 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -223,6 +223,7 @@ TESTCASE_BRIDGE_SLAVE_SHOW_OUTPUT = """\ connection.id: non_existent_nw_device connection.interface-name: br0_non_existant connection.autoconnect: yes +connection.slave-type: bridge ipv4.never-default: no bridge-port.path-cost: 100 bridge-port.hairpin-mode: yes From 7359b1fbe57f75619d09d759ceba7cf124c5f0b5 Mon Sep 17 00:00:00 2001 From: spike77453 Date: Mon, 3 May 2021 07:28:53 +0200 Subject: [PATCH 0018/2828] nmcli: Compare MAC addresses case insensitively (#2416) * nmcli: Compare MAC addresses case insensitively * Update changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Add mac to TESTCASE_BRIDGE so test_bridge_connection_unchanged covers case sensitive mac address comparison * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Convert 
current_value to uppercase as well in case nmcli changes behaviour Co-authored-by: Felix Fontein --- .../2416-nmcli_compare_mac_addresses_case_insensitively.yml | 2 ++ plugins/modules/net_tools/nmcli.py | 6 +++++- tests/unit/plugins/modules/net_tools/test_nmcli.py | 2 ++ 3 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml diff --git a/changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml b/changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml new file mode 100644 index 0000000000..6694638964 --- /dev/null +++ b/changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml @@ -0,0 +1,2 @@ +bugfixes: + - nmcli - compare MAC addresses case insensitively to fix idempotency issue (https://github.com/ansible-collections/community.general/issues/2409). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 02fbbd038b..e2ed4ad572 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -1042,7 +1042,6 @@ class Nmcli(object): 'con-name': 'connection.id', 'autoconnect': 'connection.autoconnect', 'ifname': 'connection.interface-name', - 'mac': self.mac_setting, 'master': 'connection.master', 'slave-type': 'connection.slave-type', 'zone': 'connection.zone', @@ -1066,6 +1065,11 @@ class Nmcli(object): current_value = [re.sub(r'^{\s*ip\s*=\s*([^, ]+),\s*nh\s*=\s*([^} ]+),\s*mt\s*=\s*([^} ]+)\s*}', r'\1 \2 \3', route) for route in current_value] current_value = [re.sub(r'^{\s*ip\s*=\s*([^, ]+),\s*nh\s*=\s*([^} ]+)\s*}', r'\1 \2', route) for route in current_value] + if key == self.mac_setting: + # MAC addresses are case insensitive, nmcli always reports them in uppercase + value = value.upper() + # ensure current_value is also converted to uppercase in case nmcli changes behaviour + current_value = current_value.upper() elif key in param_alias: real_key = 
param_alias[key] if real_key in conn_info: diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index a05c8ccbf8..dceb5e5f3f 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -184,6 +184,7 @@ TESTCASE_BRIDGE = [ 'ifname': 'br0_non_existant', 'ip4': '10.10.10.10/24', 'gw4': '10.10.10.1', + 'mac': '52:54:00:ab:cd:ef', 'maxage': 100, 'stp': True, 'state': 'present', @@ -200,6 +201,7 @@ ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 ipv4.never-default: no ipv6.method: auto +bridge.mac-address: 52:54:00:AB:CD:EF bridge.stp: yes bridge.max-age: 100 bridge.ageing-time: 300 From 4b0d2dcfe04591d67c5cdf82599ac33156c221ce Mon Sep 17 00:00:00 2001 From: Daniel-Sanchez-Fabregas <33929811+Daniel-Sanchez-Fabregas@users.noreply.github.com> Date: Mon, 3 May 2021 07:42:58 +0200 Subject: [PATCH 0019/2828] =?UTF-8?q?=F0=9F=93=9D=20Document=20nested=20no?= =?UTF-8?q?de=20addition=20with=20"=5F"=20in=20xml=20module=20(#2371)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 📝 Document nested node addition with "_" in xml module Nested node addition using "_" to indicate sub nodes, and attributes are only documented in tests and issues, where is hard to find. * 🚨 Fix trailing space * Apply suggestions from code review Add missing collection prefix for modules. 
Co-authored-by: Felix Fontein * Add missing comments * Update xml.py * Fix linter warnings Co-authored-by: Felix Fontein --- plugins/modules/files/xml.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/modules/files/xml.py b/plugins/modules/files/xml.py index df3562df8c..f93c8e4dc4 100644 --- a/plugins/modules/files/xml.py +++ b/plugins/modules/files/xml.py @@ -285,6 +285,22 @@ EXAMPLES = r''' z: http://z.test attribute: z:my_namespaced_attribute value: 'false' + +- name: Adding building nodes with floor subnodes from a YAML variable + community.general.xml: + path: /foo/bar.xml + xpath: /business + add_children: + - building: + # Attributes + name: Scumm bar + location: Monkey island + # Subnodes + _: + - floor: Pirate hall + - floor: Grog storage + - construction_date: "1990" # Only strings are valid + - building: Grog factory ''' RETURN = r''' From 6a72c3b3385a739d049d23a24d5a5f186962d606 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 3 May 2021 13:22:11 +0200 Subject: [PATCH 0020/2828] Make plugins pass validation. (#2414) --- plugins/become/sudosu.py | 2 +- plugins/callback/loganalytics.py | 2 +- plugins/inventory/lxd.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py index e9668e6522..410b881b96 100644 --- a/plugins/become/sudosu.py +++ b/plugins/become/sudosu.py @@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ - become: sudosu + name: sudosu short_description: Run tasks using sudo su - description: - This become plugins allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined. 
diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index 507d6fccd9..ef1ea02f87 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' - callback: loganalytics + name: loganalytics type: aggregate short_description: Posts task results to Azure Log Analytics author: "Cyrus Li (@zhcli) " diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index c48818d595..d1e47b0505 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = r''' - name: community.general.lxd + name: lxd short_description: Returns Ansible inventory from lxd host description: - Get inventory from the lxd. @@ -68,7 +68,7 @@ DOCUMENTATION = r''' description: - Create groups by the following keywords C(location), C(pattern), C(network_range), C(os), C(release), C(profile), C(vlanid). - See example for syntax. 
- type: json + type: dict ''' EXAMPLES = ''' From 5064aa8ec6d967e9c6867af9fdeb4496377a2e4d Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 3 May 2021 23:27:16 +1200 Subject: [PATCH 0021/2828] linode_v4 - fixed error message (#2430) * fixed error message * added changelog fragment --- changelogs/fragments/2430-linodev4-error-message.yml | 2 ++ plugins/modules/cloud/linode/linode_v4.py | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2430-linodev4-error-message.yml diff --git a/changelogs/fragments/2430-linodev4-error-message.yml b/changelogs/fragments/2430-linodev4-error-message.yml new file mode 100644 index 0000000000..3dbfda1b9c --- /dev/null +++ b/changelogs/fragments/2430-linodev4-error-message.yml @@ -0,0 +1,2 @@ +bugfixes: + - linode_v4 - changed the error message to point to the correct bugtracker URL (https://github.com/ansible-collections/community.general/pull/2430). diff --git a/plugins/modules/cloud/linode/linode_v4.py b/plugins/modules/cloud/linode/linode_v4.py index 0f1133bac0..fcf3725bfc 100644 --- a/plugins/modules/cloud/linode/linode_v4.py +++ b/plugins/modules/cloud/linode/linode_v4.py @@ -208,9 +208,8 @@ def create_linode(module, client, **kwargs): else: return response._raw_json except TypeError: - module.fail_json(msg='Unable to parse Linode instance creation' - ' response. Please raise a bug against this' - ' module on https://github.com/ansible/ansible/issues' + module.fail_json(msg='Unable to parse Linode instance creation response. 
Please raise a bug against this' + ' module on https://github.com/ansible-collections/community.general/issues' ) From 7007c68ab786e6d51ecab5d97d2e3b891bd476c9 Mon Sep 17 00:00:00 2001 From: David Lundgren Date: Mon, 3 May 2021 14:05:07 -0500 Subject: [PATCH 0022/2828] Clean up test entries from sysrc tests (#2330) * Clean up test entries from sysrc tests * sysrc: enable tests * sysrc: cache the files to be changed and restore them * Update the ezjail archive host and remove obsolete file * sysrc: set ezjail to use archives for 12.0 or less * sysrc: Detect the version to use ftp vs ftp-archive using http * sysrc: Skip ezjail test on FreeBSD 12.0 --- tests/integration/targets/sysrc/aliases | 1 - .../integration/targets/sysrc/tasks/main.yml | 21 ++++++++++++++++++- .../targets/sysrc/tasks/setup-testjail.yml | 9 +++++++- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/tests/integration/targets/sysrc/aliases b/tests/integration/targets/sysrc/aliases index c7d183fb65..360849e61b 100644 --- a/tests/integration/targets/sysrc/aliases +++ b/tests/integration/targets/sysrc/aliases @@ -3,4 +3,3 @@ needs/root skip/docker skip/osx skip/rhel -disabled # FIXME diff --git a/tests/integration/targets/sysrc/tasks/main.yml b/tests/integration/targets/sysrc/tasks/main.yml index c8b7de4160..b8292f785b 100644 --- a/tests/integration/targets/sysrc/tasks/main.yml +++ b/tests/integration/targets/sysrc/tasks/main.yml @@ -6,7 +6,11 @@ block: - name: Cache original contents of /etc/rc.conf shell: "cat /etc/rc.conf" - register: sysrc_original_content + register: cached_etc_rcconf_content + + - name: Cache original contents of /boot/loader.conf + shell: "cat /boot/loader.conf" + register: cached_boot_loaderconf_content ## ## sysrc - example - set mysqlpidfile @@ -130,6 +134,11 @@ ## sysrc - example - Enable nginx in testjail ## - name: Test within jail + # + # NOTE: FreeBSD 12.0 test runner receives a "connection reset by peer" after ~20% downloaded so we are + # only running 
this on 12.1 or higher + # + when: ansible_distribution_version is version('12.01', '>=') block: - name: Setup testjail include: setup-testjail.yml @@ -316,3 +325,13 @@ - not sysrc_value_absent_idempotent.changed - "'sysrc_delim=\"t1,t2\"' in sysrc_delim_content.stdout_lines" - "'sysrc_delim_delete' not in sysrc_delim_content.stdout_lines" + always: + - name: Restore /etc/rc.conf + copy: + content: "{{ cached_etc_rcconf_content }}" + dest: /etc/rc.conf + + - name: Restore /boot/loader.conf + copy: + content: "{{ cached_boot_loaderconf_content }}" + dest: /boot/loader.conf \ No newline at end of file diff --git a/tests/integration/targets/sysrc/tasks/setup-testjail.yml b/tests/integration/targets/sysrc/tasks/setup-testjail.yml index 9bd15320ae..e75957d19f 100644 --- a/tests/integration/targets/sysrc/tasks/setup-testjail.yml +++ b/tests/integration/targets/sysrc/tasks/setup-testjail.yml @@ -17,12 +17,19 @@ pkgng: name: ezjail +- name: Configure ezjail to use http + when: ansible_distribution_version is version('11.01', '>') + lineinfile: + dest: /usr/local/etc/ezjail.conf + regexp: ^ezjail_ftphost + line: ezjail_ftphost=http://ftp.freebsd.org + - name: Configure ezjail to use archive for old freebsd releases when: ansible_distribution_version is version('11.01', '<=') lineinfile: dest: /usr/local/etc/ezjail.conf regexp: ^ezjail_ftphost - line: ezjail_ftphost=ftp-archive.freebsd.org + line: ezjail_ftphost=http://ftp-archive.freebsd.org - name: Start ezjail ignore_errors: yes From 06bdabcad93846f37f70fd868fdce421ef366f8c Mon Sep 17 00:00:00 2001 From: zigaSRC <65527456+zigaSRC@users.noreply.github.com> Date: Mon, 3 May 2021 21:25:52 +0200 Subject: [PATCH 0023/2828] lvol - bug fix - Convert units to lowercase when using LVS or VGS command (#2369) * Added lower call for units when checking lvs/vgs size * Changelog * Size roudning correction * Rounding * Changelog * Remove whitespace --- changelogs/fragments/2369-lvol_size_bug_fixes.yml | 3 +++ 
plugins/modules/system/lvol.py | 13 +++++-------- 2 files changed, 8 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2369-lvol_size_bug_fixes.yml diff --git a/changelogs/fragments/2369-lvol_size_bug_fixes.yml b/changelogs/fragments/2369-lvol_size_bug_fixes.yml new file mode 100644 index 0000000000..fcd2f17b11 --- /dev/null +++ b/changelogs/fragments/2369-lvol_size_bug_fixes.yml @@ -0,0 +1,3 @@ +bugfixes: + - lvol - fixed size unit capitalization to match units used between different tools for comparison (https://github.com/ansible-collections/community.general/issues/2360). + - lvol - fixed rounding errors (https://github.com/ansible-collections/community.general/issues/2370). \ No newline at end of file diff --git a/plugins/modules/system/lvol.py b/plugins/modules/system/lvol.py index 8dc3fac7f5..fafa7db38a 100644 --- a/plugins/modules/system/lvol.py +++ b/plugins/modules/system/lvol.py @@ -389,7 +389,7 @@ def main(): # Get information on volume group requested vgs_cmd = module.get_bin_path("vgs", required=True) rc, current_vgs, err = module.run_command( - "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg)) + "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit.lower(), vg)) if rc != 0: if state == 'absent': @@ -403,7 +403,7 @@ def main(): # Get information on logical volume requested lvs_cmd = module.get_bin_path("lvs", required=True) rc, current_lvs, err = module.run_command( - "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg)) + "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit.lower(), vg)) if rc != 0: if state == 'absent': @@ -505,16 +505,13 @@ def main(): else: # size_whole == 'FREE': size_requested = size_percent * this_vg['free'] / 100 - # from LVEXTEND(8) - The resulting value is rounded upward. 
- # from LVREDUCE(8) - The resulting value for the substraction is rounded downward, for the absolute size it is rounded upward. if size_operator == '+': size_requested += this_lv['size'] - size_requested += this_vg['ext_size'] - (size_requested % this_vg['ext_size']) elif size_operator == '-': size_requested = this_lv['size'] - size_requested - size_requested -= (size_requested % this_vg['ext_size']) - else: - size_requested += this_vg['ext_size'] - (size_requested % this_vg['ext_size']) + + # According to latest documentation (LVM2-2.03.11) all tools round down + size_requested -= (size_requested % this_vg['ext_size']) if this_lv['size'] < size_requested: if (size_free > 0) and (size_free >= (size_requested - this_lv['size'])): From 1f41e66f098647af4c393a27ee5648e3371b2ed2 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 3 May 2021 22:24:33 +0200 Subject: [PATCH 0024/2828] Remove shippable config. (#2440) --- shippable.yml | 48 ------------------------------------------------ 1 file changed, 48 deletions(-) delete mode 100644 shippable.yml diff --git a/shippable.yml b/shippable.yml deleted file mode 100644 index 7cbbdc24e7..0000000000 --- a/shippable.yml +++ /dev/null @@ -1,48 +0,0 @@ -language: python - -env: - matrix: - - T=none - -matrix: - exclude: - - env: T=none - include: - - env: T=devel/sanity/1 - - env: T=devel/sanity/2 - - env: T=devel/sanity/3 - - env: T=devel/sanity/4 - - - env: T=2.11/sanity/1 - - env: T=2.11/sanity/2 - - env: T=2.11/sanity/3 - - env: T=2.11/sanity/4 - - - env: T=2.10/sanity/1 - - env: T=2.10/sanity/2 - - env: T=2.10/sanity/3 - - env: T=2.10/sanity/4 - - - env: T=2.9/sanity/1 - - env: T=2.9/sanity/2 - - env: T=2.9/sanity/3 - - env: T=2.9/sanity/4 - -branches: - except: - - "*-patch-*" - - "revert-*-*" - - "patchback/backports/*" - -build: - ci: - - tests/utils/shippable/timing.sh tests/utils/shippable/shippable.sh $T - -integrations: - notifications: - - integrationName: email - type: email - on_success: never - on_failure: 
never - on_start: never - on_pull_request: never From aaa561163b705dbf7e1f06b58382e3a072550e31 Mon Sep 17 00:00:00 2001 From: Jan Orel Date: Tue, 4 May 2021 12:21:55 +0200 Subject: [PATCH 0025/2828] OpenNebula one_vm.py: Fix missing keys (#2435) * OpenNebula one_vm.py: Fix missing keys * fixup OpenNebula one_vm.py: Fix missing keys --- .../fragments/2435-one_vm-fix_missing_keys.yml | 2 ++ plugins/modules/cloud/opennebula/one_vm.py | 17 +++++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2435-one_vm-fix_missing_keys.yml diff --git a/changelogs/fragments/2435-one_vm-fix_missing_keys.yml b/changelogs/fragments/2435-one_vm-fix_missing_keys.yml new file mode 100644 index 0000000000..395c024b26 --- /dev/null +++ b/changelogs/fragments/2435-one_vm-fix_missing_keys.yml @@ -0,0 +1,2 @@ +bugfixes: + - one_vm - Allow missing NIC keys (https://github.com/ansible-collections/community.general/pull/2435). diff --git a/plugins/modules/cloud/opennebula/one_vm.py b/plugins/modules/cloud/opennebula/one_vm.py index 425a1c464a..fa3d4abaab 100644 --- a/plugins/modules/cloud/opennebula/one_vm.py +++ b/plugins/modules/cloud/opennebula/one_vm.py @@ -752,11 +752,20 @@ def get_vm_info(client, vm): if 'NIC' in vm.TEMPLATE: if isinstance(vm.TEMPLATE['NIC'], list): for nic in vm.TEMPLATE['NIC']: - networks_info.append({'ip': nic['IP'], 'mac': nic['MAC'], 'name': nic['NETWORK'], 'security_groups': nic['SECURITY_GROUPS']}) + networks_info.append({ + 'ip': nic.get('IP', ''), + 'mac': nic.get('MAC', ''), + 'name': nic.get('NETWORK', ''), + 'security_groups': nic.get('SECURITY_GROUPS', '') + }) else: - networks_info.append( - {'ip': vm.TEMPLATE['NIC']['IP'], 'mac': vm.TEMPLATE['NIC']['MAC'], - 'name': vm.TEMPLATE['NIC']['NETWORK'], 'security_groups': vm.TEMPLATE['NIC']['SECURITY_GROUPS']}) + networks_info.append({ + 'ip': vm.TEMPLATE['NIC'].get('IP', ''), + 'mac': vm.TEMPLATE['NIC'].get('MAC', ''), + 'name': vm.TEMPLATE['NIC'].get('NETWORK', 
''), + 'security_groups': + vm.TEMPLATE['NIC'].get('SECURITY_GROUPS', '') + }) import time current_time = time.localtime() From 188a4eeb0c9c7b89de952d8bb76d962ee17dc490 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 5 May 2021 07:32:53 +0200 Subject: [PATCH 0026/2828] Add more plugin authors to BOTMETA. (#2451) --- .github/BOTMETA.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index fd23d0c9e4..f27c96e049 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -88,6 +88,8 @@ files: maintainers: $team_linode labels: cloud linode keywords: linode dynamic inventory script + $inventories/lxd.py: + maintainers: conloos $inventories/proxmox.py: maintainers: $team_virt ilijamt $inventories/scaleway.py: @@ -373,6 +375,8 @@ files: maintainers: $team_keycloak $modules/identity/keycloak/keycloak_group.py: maintainers: adamgoossens + $modules/identity/keycloak/keycloak_realm.py: + maintainers: kris2kris $modules/identity/onepassword_info.py: maintainers: Rylon $modules/identity/opendj/opendj_backendprop.py: From 9906b9dbc75c45288acdcc5eb597957a0a1df376 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 5 May 2021 12:31:01 +0200 Subject: [PATCH 0027/2828] Remove vendored ipaddress module. 
(#2441) --- changelogs/fragments/ipaddress.yml | 5 + plugins/inventory/lxd.py | 15 +- plugins/module_utils/compat/__init__.py | 0 plugins/module_utils/compat/ipaddress.py | 2580 ----------------- .../scaleway/scaleway_security_group_rule.py | 24 +- tests/sanity/ignore-2.10.txt | 2 - tests/sanity/ignore-2.11.txt | 2 - tests/sanity/ignore-2.12.txt | 2 - tests/sanity/ignore-2.9.txt | 2 - 9 files changed, 37 insertions(+), 2595 deletions(-) create mode 100644 changelogs/fragments/ipaddress.yml delete mode 100644 plugins/module_utils/compat/__init__.py delete mode 100644 plugins/module_utils/compat/ipaddress.py diff --git a/changelogs/fragments/ipaddress.yml b/changelogs/fragments/ipaddress.yml new file mode 100644 index 0000000000..7f6eeb70bb --- /dev/null +++ b/changelogs/fragments/ipaddress.yml @@ -0,0 +1,5 @@ +removed_features: +- "The vendored copy of ``ipaddress`` has been removed. Please use ``ipaddress`` from the Python 3 standard library, or `from pypi `_. (https://github.com/ansible-collections/community.general/pull/2441)." +breaking_changes: +- "scaleway_security_group_rule - when used with Python 2, the module now needs ``ipaddress`` installed `from pypi `_ (https://github.com/ansible-collections/community.general/pull/2441)." +- "lxd inventory plugin - when used with Python 2, the plugin now needs ``ipaddress`` installed `from pypi `_ (https://github.com/ansible-collections/community.general/pull/2441)." diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index d1e47b0505..06c620ac60 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -13,6 +13,8 @@ DOCUMENTATION = r''' - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. version_added: "3.0.0" author: "Frank Dornheim (@conloos)" + requirements: + - ipaddress options: plugin: description: Token that ensures this is a source file for the 'lxd' plugin. 
@@ -124,10 +126,17 @@ import socket from ansible.plugins.inventory import BaseInventoryPlugin from ansible.module_utils._text import to_native, to_text from ansible.module_utils.common.dict_transformations import dict_merge +from ansible.module_utils.six import raise_from from ansible.errors import AnsibleError, AnsibleParserError -from ansible_collections.community.general.plugins.module_utils.compat import ipaddress from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException +try: + import ipaddress +except ImportError as exc: + IPADDRESS_IMPORT_ERROR = exc +else: + IPADDRESS_IMPORT_ERROR = None + class InventoryModule(BaseInventoryPlugin): DEBUG = 4 @@ -924,6 +933,10 @@ class InventoryModule(BaseInventoryPlugin): AnsibleParserError Returns: None""" + if IPADDRESS_IMPORT_ERROR: + raise_from( + AnsibleError('another_library must be installed to use this plugin'), + IPADDRESS_IMPORT_ERROR) super(InventoryModule, self).parse(inventory, loader, path, cache=False) # Read the inventory YAML file diff --git a/plugins/module_utils/compat/__init__.py b/plugins/module_utils/compat/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/compat/ipaddress.py b/plugins/module_utils/compat/ipaddress.py deleted file mode 100644 index db4e91b784..0000000000 --- a/plugins/module_utils/compat/ipaddress.py +++ /dev/null @@ -1,2580 +0,0 @@ -# -*- coding: utf-8 -*- - -# This code is part of Ansible, but is an independent component. -# This particular file, and this file only, is based on -# Lib/ipaddress.py of cpython -# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -# -# 1. This LICENSE AGREEMENT is between the Python Software Foundation -# ("PSF"), and the Individual or Organization ("Licensee") accessing and -# otherwise using this software ("Python") in source or binary form and -# its associated documentation. -# -# 2. 
Subject to the terms and conditions of this License Agreement, PSF hereby -# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, -# analyze, test, perform and/or display publicly, prepare derivative works, -# distribute, and otherwise use Python alone or in any derivative version, -# provided, however, that PSF's License Agreement and PSF's notice of copyright, -# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" -# are retained in Python alone or in any derivative version prepared by Licensee. -# -# 3. In the event Licensee prepares a derivative work that is based on -# or incorporates Python or any part thereof, and wants to make -# the derivative work available to others as provided herein, then -# Licensee hereby agrees to include in any such work a brief summary of -# the changes made to Python. -# -# 4. PSF is making Python available to Licensee on an "AS IS" -# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND -# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT -# INFRINGE ANY THIRD PARTY RIGHTS. -# -# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, -# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. -# -# 6. This License Agreement will automatically terminate upon a material -# breach of its terms and conditions. -# -# 7. Nothing in this License Agreement shall be deemed to create any -# relationship of agency, partnership, or joint venture between PSF and -# Licensee. 
This License Agreement does not grant permission to use PSF -# trademarks or trade name in a trademark sense to endorse or promote -# products or services of Licensee, or any third party. -# -# 8. By copying, installing or otherwise using Python, Licensee -# agrees to be bound by the terms and conditions of this License -# Agreement. - -# Copyright 2007 Google Inc. -# Licensed to PSF under a Contributor Agreement. - -"""A fast, lightweight IPv4/IPv6 manipulation library in Python. - -This library is used to create/poke/manipulate IPv4 and IPv6 addresses -and networks. - -""" - -from __future__ import (absolute_import, division, print_function) -from __future__ import unicode_literals -__metaclass__ = type - - -import itertools -import struct - - -# The following makes it easier for us to script updates of the bundled code and is not part of -# upstream -_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"} - -__version__ = "1.0.22" - -# Compatibility functions -_compat_int_types = (int,) -try: - _compat_int_types = (int, long) -except NameError: - pass -try: - _compat_str = unicode -except NameError: - _compat_str = str - assert bytes != str -if b"\0"[0] == 0: # Python 3 semantics - - def _compat_bytes_to_byte_vals(byt): - return byt - - -else: - - def _compat_bytes_to_byte_vals(byt): - return [struct.unpack(b"!B", b)[0] for b in byt] - - -try: - _compat_int_from_byte_vals = int.from_bytes -except AttributeError: - - def _compat_int_from_byte_vals(bytvals, endianess): - assert endianess == "big" - res = 0 - for bv in bytvals: - assert isinstance(bv, _compat_int_types) - res = (res << 8) + bv - return res - - -def _compat_to_bytes(intval, length, endianess): - assert isinstance(intval, _compat_int_types) - assert endianess == "big" - if length == 4: - if intval < 0 or intval >= 2 ** 32: - raise struct.error("integer out of range for 'I' format code") - return struct.pack(b"!I", intval) - elif length == 16: - if intval < 0 or intval >= 2 ** 128: - raise 
struct.error("integer out of range for 'QQ' format code") - return struct.pack(b"!QQ", intval >> 64, intval & 0xFFFFFFFFFFFFFFFF) - else: - raise NotImplementedError() - - -if hasattr(int, "bit_length"): - # Not int.bit_length , since that won't work in 2.7 where long exists - def _compat_bit_length(i): - return i.bit_length() - - -else: - - def _compat_bit_length(i): - for res in itertools.count(): - if i >> res == 0: - return res - - -def _compat_range(start, end, step=1): - assert step > 0 - i = start - while i < end: - yield i - i += step - - -class _TotalOrderingMixin(object): - __slots__ = () - - # Helper that derives the other comparison operations from - # __lt__ and __eq__ - # We avoid functools.total_ordering because it doesn't handle - # NotImplemented correctly yet (http://bugs.python.org/issue10042) - def __eq__(self, other): - raise NotImplementedError - - def __ne__(self, other): - equal = self.__eq__(other) - if equal is NotImplemented: - return NotImplemented - return not equal - - def __lt__(self, other): - raise NotImplementedError - - def __le__(self, other): - less = self.__lt__(other) - if less is NotImplemented or not less: - return self.__eq__(other) - return less - - def __gt__(self, other): - less = self.__lt__(other) - if less is NotImplemented: - return NotImplemented - equal = self.__eq__(other) - if equal is NotImplemented: - return NotImplemented - return not (less or equal) - - def __ge__(self, other): - less = self.__lt__(other) - if less is NotImplemented: - return NotImplemented - return not less - - -IPV4LENGTH = 32 -IPV6LENGTH = 128 - - -class AddressValueError(ValueError): - """A Value Error related to the address.""" - - -class NetmaskValueError(ValueError): - """A Value Error related to the netmask.""" - - -def ip_address(address): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. 
Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Address or IPv6Address object. - - Raises: - ValueError: if the *address* passed isn't either a v4 or a v6 - address - - """ - try: - return IPv4Address(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Address(address) - except (AddressValueError, NetmaskValueError): - pass - - if isinstance(address, bytes): - raise AddressValueError( - "%r does not appear to be an IPv4 or IPv6 address. " - "Did you pass in a bytes (str in Python 2) instead of" - " a unicode object?" % address - ) - - raise ValueError( - "%r does not appear to be an IPv4 or IPv6 address" % address - ) - - -def ip_network(address, strict=True): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP network. Either IPv4 or - IPv6 networks may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Network or IPv6Network object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. Or if the network has host bits set. - - """ - try: - return IPv4Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - if isinstance(address, bytes): - raise AddressValueError( - "%r does not appear to be an IPv4 or IPv6 network. " - "Did you pass in a bytes (str in Python 2) instead of" - " a unicode object?" % address - ) - - raise ValueError( - "%r does not appear to be an IPv4 or IPv6 network" % address - ) - - -def ip_interface(address): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. 
Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Interface or IPv6Interface object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. - - Notes: - The IPv?Interface classes describe an Address on a particular - Network, so they're basically a combination of both the Address - and Network classes. - - """ - try: - return IPv4Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError( - "%r does not appear to be an IPv4 or IPv6 interface" % address - ) - - -def v4_int_to_packed(address): - """Represent an address as 4 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv4 IP address. - - Returns: - The integer address packed as 4 bytes in network (big-endian) order. - - Raises: - ValueError: If the integer is negative or too large to be an - IPv4 IP address. - - """ - try: - return _compat_to_bytes(address, 4, "big") - except (struct.error, OverflowError): - raise ValueError("Address negative or too large for IPv4") - - -def v6_int_to_packed(address): - """Represent an address as 16 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv6 IP address. - - Returns: - The integer address packed as 16 bytes in network (big-endian) order. 
- - """ - try: - return _compat_to_bytes(address, 16, "big") - except (struct.error, OverflowError): - raise ValueError("Address negative or too large for IPv6") - - -def _split_optional_netmask(address): - """Helper to split the netmask and raise AddressValueError if needed""" - addr = _compat_str(address).split("/") - if len(addr) > 2: - raise AddressValueError("Only one '/' permitted in %r" % address) - return addr - - -def _find_address_range(addresses): - """Find a sequence of sorted deduplicated IPv#Address. - - Args: - addresses: a list of IPv#Address objects. - - Yields: - A tuple containing the first and last IP addresses in the sequence. - - """ - it = iter(addresses) - first = last = next(it) # pylint: disable=stop-iteration-return - for ip in it: - if ip._ip != last._ip + 1: - yield first, last - first = ip - last = ip - yield first, last - - -def _count_righthand_zero_bits(number, bits): - """Count the number of zero bits on the right hand side. - - Args: - number: an integer. - bits: maximum number of bits to count. - - Returns: - The number of zero bits on the right hand side of the number. - - """ - if number == 0: - return bits - return min(bits, _compat_bit_length(~number & (number - 1))) - - -def summarize_address_range(first, last): - """Summarize a network range given the first and last IP addresses. - - Example: - >>> list(summarize_address_range(IPv4Address('192.0.2.0'), - ... IPv4Address('192.0.2.130'))) - ... #doctest: +NORMALIZE_WHITESPACE - [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), - IPv4Network('192.0.2.130/32')] - - Args: - first: the first IPv4Address or IPv6Address in the range. - last: the last IPv4Address or IPv6Address in the range. - - Returns: - An iterator of the summarized IPv(4|6) network objects. - - Raise: - TypeError: - If the first and last objects are not IP addresses. - If the first and last objects are not the same version. - ValueError: - If the last object is not greater than the first. 
- If the version of the first address is not 4 or 6. - - """ - if not ( - isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress) - ): - raise TypeError("first and last must be IP addresses, not networks") - if first.version != last.version: - raise TypeError( - "%s and %s are not of the same version" % (first, last) - ) - if first > last: - raise ValueError("last IP address must be greater than first") - - if first.version == 4: - ip = IPv4Network - elif first.version == 6: - ip = IPv6Network - else: - raise ValueError("unknown IP version") - - ip_bits = first._max_prefixlen - first_int = first._ip - last_int = last._ip - while first_int <= last_int: - nbits = min( - _count_righthand_zero_bits(first_int, ip_bits), - _compat_bit_length(last_int - first_int + 1) - 1, - ) - net = ip((first_int, ip_bits - nbits)) - yield net - first_int += 1 << nbits - if first_int - 1 == ip._ALL_ONES: - break - - -def _collapse_addresses_internal(addresses): - """Loops through the addresses, collapsing concurrent netblocks. - - Example: - - ip1 = IPv4Network('192.0.2.0/26') - ip2 = IPv4Network('192.0.2.64/26') - ip3 = IPv4Network('192.0.2.128/26') - ip4 = IPv4Network('192.0.2.192/26') - - _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> - [IPv4Network('192.0.2.0/24')] - - This shouldn't be called directly; it is called via - collapse_addresses([]). - - Args: - addresses: A list of IPv4Network's or IPv6Network's - - Returns: - A list of IPv4Network's or IPv6Network's depending on what we were - passed. 
- - """ - # First merge - to_merge = list(addresses) - subnets = {} - while to_merge: - net = to_merge.pop() - supernet = net.supernet() - existing = subnets.get(supernet) - if existing is None: - subnets[supernet] = net - elif existing != net: - # Merge consecutive subnets - del subnets[supernet] - to_merge.append(supernet) - # Then iterate over resulting networks, skipping subsumed subnets - last = None - for net in sorted(subnets.values()): - if last is not None: - # Since they are sorted, - # last.network_address <= net.network_address is a given. - if last.broadcast_address >= net.broadcast_address: - continue - yield net - last = net - - -def collapse_addresses(addresses): - """Collapse a list of IP objects. - - Example: - collapse_addresses([IPv4Network('192.0.2.0/25'), - IPv4Network('192.0.2.128/25')]) -> - [IPv4Network('192.0.2.0/24')] - - Args: - addresses: An iterator of IPv4Network or IPv6Network objects. - - Returns: - An iterator of the collapsed IPv(4|6)Network objects. - - Raises: - TypeError: If passed a list of mixed version objects. 
- - """ - addrs = [] - ips = [] - nets = [] - - # split IP addresses and networks - for ip in addresses: - if isinstance(ip, _BaseAddress): - if ips and ips[-1]._version != ip._version: - raise TypeError( - "%s and %s are not of the same version" % (ip, ips[-1]) - ) - ips.append(ip) - elif ip._prefixlen == ip._max_prefixlen: - if ips and ips[-1]._version != ip._version: - raise TypeError( - "%s and %s are not of the same version" % (ip, ips[-1]) - ) - try: - ips.append(ip.ip) - except AttributeError: - ips.append(ip.network_address) - else: - if nets and nets[-1]._version != ip._version: - raise TypeError( - "%s and %s are not of the same version" % (ip, nets[-1]) - ) - nets.append(ip) - - # sort and dedup - ips = sorted(set(ips)) - - # find consecutive address ranges in the sorted sequence and summarize them - if ips: - for first, last in _find_address_range(ips): - addrs.extend(summarize_address_range(first, last)) - - return _collapse_addresses_internal(addrs + nets) - - -def get_mixed_type_key(obj): - """Return a key suitable for sorting between networks and addresses. - - Address and Network objects are not sortable by default; they're - fundamentally different so the expression - - IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') - - doesn't make any sense. There are some times however, where you may wish - to have ipaddress sort these for you anyway. If you need to do this, you - can use this function as the key= argument to sorted(). - - Args: - obj: either a Network or Address object. - Returns: - appropriate key. 
- - """ - if isinstance(obj, _BaseNetwork): - return obj._get_networks_key() - elif isinstance(obj, _BaseAddress): - return obj._get_address_key() - return NotImplemented - - -class _IPAddressBase(_TotalOrderingMixin): - - """The mother class.""" - - __slots__ = () - - @property - def exploded(self): - """Return the longhand version of the IP address as a string.""" - return self._explode_shorthand_ip_string() - - @property - def compressed(self): - """Return the shorthand version of the IP address as a string.""" - return _compat_str(self) - - @property - def reverse_pointer(self): - """The name of the reverse DNS pointer for the IP address, e.g.: - >>> ipaddress.ip_address("127.0.0.1").reverse_pointer - '1.0.0.127.in-addr.arpa' - >>> ipaddress.ip_address("2001:db8::1").reverse_pointer - '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' - - """ - return self._reverse_pointer() - - @property - def version(self): - msg = "%200s has no version specified" % (type(self),) - raise NotImplementedError(msg) - - def _check_int_address(self, address): - if address < 0: - msg = "%d (< 0) is not permitted as an IPv%d address" - raise AddressValueError(msg % (address, self._version)) - if address > self._ALL_ONES: - msg = "%d (>= 2**%d) is not permitted as an IPv%d address" - raise AddressValueError( - msg % (address, self._max_prefixlen, self._version) - ) - - def _check_packed_address(self, address, expected_len): - address_len = len(address) - if address_len != expected_len: - msg = ( - "%r (len %d != %d) is not permitted as an IPv%d address. " - "Did you pass in a bytes (str in Python 2) instead of" - " a unicode object?" - ) - raise AddressValueError( - msg % (address, address_len, expected_len, self._version) - ) - - @classmethod - def _ip_int_from_prefix(cls, prefixlen): - """Turn the prefix length into a bitwise netmask - - Args: - prefixlen: An integer, the prefix length. - - Returns: - An integer. 
- - """ - return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) - - @classmethod - def _prefix_from_ip_int(cls, ip_int): - """Return prefix length from the bitwise netmask. - - Args: - ip_int: An integer, the netmask in expanded bitwise format - - Returns: - An integer, the prefix length. - - Raises: - ValueError: If the input intermingles zeroes & ones - """ - trailing_zeroes = _count_righthand_zero_bits( - ip_int, cls._max_prefixlen - ) - prefixlen = cls._max_prefixlen - trailing_zeroes - leading_ones = ip_int >> trailing_zeroes - all_ones = (1 << prefixlen) - 1 - if leading_ones != all_ones: - byteslen = cls._max_prefixlen // 8 - details = _compat_to_bytes(ip_int, byteslen, "big") - msg = "Netmask pattern %r mixes zeroes & ones" - raise ValueError(msg % details) - return prefixlen - - @classmethod - def _report_invalid_netmask(cls, netmask_str): - msg = "%r is not a valid netmask" % netmask_str - raise NetmaskValueError(msg) - - @classmethod - def _prefix_from_prefix_string(cls, prefixlen_str): - """Return prefix length from a numeric string - - Args: - prefixlen_str: The string to be converted - - Returns: - An integer, the prefix length. - - Raises: - NetmaskValueError: If the input is not a valid netmask - """ - # int allows a leading +/- as well as surrounding whitespace, - # so we ensure that isn't the case - if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str): - cls._report_invalid_netmask(prefixlen_str) - try: - prefixlen = int(prefixlen_str) - except ValueError: - cls._report_invalid_netmask(prefixlen_str) - if not (0 <= prefixlen <= cls._max_prefixlen): - cls._report_invalid_netmask(prefixlen_str) - return prefixlen - - @classmethod - def _prefix_from_ip_string(cls, ip_str): - """Turn a netmask/hostmask string into a prefix length - - Args: - ip_str: The netmask/hostmask to be converted - - Returns: - An integer, the prefix length. 
- - Raises: - NetmaskValueError: If the input is not a valid netmask/hostmask - """ - # Parse the netmask/hostmask like an IP address. - try: - ip_int = cls._ip_int_from_string(ip_str) - except AddressValueError: - cls._report_invalid_netmask(ip_str) - - # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). - # Note that the two ambiguous cases (all-ones and all-zeroes) are - # treated as netmasks. - try: - return cls._prefix_from_ip_int(ip_int) - except ValueError: - pass - - # Invert the bits, and try matching a /0+1+/ hostmask instead. - ip_int ^= cls._ALL_ONES - try: - return cls._prefix_from_ip_int(ip_int) - except ValueError: - cls._report_invalid_netmask(ip_str) - - def __reduce__(self): - return self.__class__, (_compat_str(self),) - - -class _BaseAddress(_IPAddressBase): - - """A generic IP object. - - This IP class contains the version independent methods which are - used by single IP addresses. - """ - - __slots__ = () - - def __int__(self): - return self._ip - - def __eq__(self, other): - try: - return self._ip == other._ip and self._version == other._version - except AttributeError: - return NotImplemented - - def __lt__(self, other): - if not isinstance(other, _IPAddressBase): - return NotImplemented - if not isinstance(other, _BaseAddress): - raise TypeError( - "%s and %s are not of the same type" % (self, other) - ) - if self._version != other._version: - raise TypeError( - "%s and %s are not of the same version" % (self, other) - ) - if self._ip != other._ip: - return self._ip < other._ip - return False - - # Shorthand for Integer addition and subtraction. This is not - # meant to ever support addition/subtraction of addresses. 
- def __add__(self, other): - if not isinstance(other, _compat_int_types): - return NotImplemented - return self.__class__(int(self) + other) - - def __sub__(self, other): - if not isinstance(other, _compat_int_types): - return NotImplemented - return self.__class__(int(self) - other) - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, _compat_str(self)) - - def __str__(self): - return _compat_str(self._string_from_ip_int(self._ip)) - - def __hash__(self): - return hash(hex(int(self._ip))) - - def _get_address_key(self): - return (self._version, self) - - def __reduce__(self): - return self.__class__, (self._ip,) - - -class _BaseNetwork(_IPAddressBase): - - """A generic IP network object. - - This IP class contains the version independent methods which are - used by networks. - - """ - - def __init__(self, address): - self._cache = {} - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, _compat_str(self)) - - def __str__(self): - return "%s/%d" % (self.network_address, self.prefixlen) - - def hosts(self): - """Generate Iterator over usable hosts in a network. - - This is like __iter__ except it doesn't return the network - or broadcast addresses. 
- - """ - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in _compat_range(network + 1, broadcast): - yield self._address_class(x) - - def __iter__(self): - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in _compat_range(network, broadcast + 1): - yield self._address_class(x) - - def __getitem__(self, n): - network = int(self.network_address) - broadcast = int(self.broadcast_address) - if n >= 0: - if network + n > broadcast: - raise IndexError("address out of range") - return self._address_class(network + n) - else: - n += 1 - if broadcast + n < network: - raise IndexError("address out of range") - return self._address_class(broadcast + n) - - def __lt__(self, other): - if not isinstance(other, _IPAddressBase): - return NotImplemented - if not isinstance(other, _BaseNetwork): - raise TypeError( - "%s and %s are not of the same type" % (self, other) - ) - if self._version != other._version: - raise TypeError( - "%s and %s are not of the same version" % (self, other) - ) - if self.network_address != other.network_address: - return self.network_address < other.network_address - if self.netmask != other.netmask: - return self.netmask < other.netmask - return False - - def __eq__(self, other): - try: - return ( - self._version == other._version - and self.network_address == other.network_address - and int(self.netmask) == int(other.netmask) - ) - except AttributeError: - return NotImplemented - - def __hash__(self): - return hash(int(self.network_address) ^ int(self.netmask)) - - def __contains__(self, other): - # always false if one is v4 and the other is v6. - if self._version != other._version: - return False - # dealing with another network. 
- if isinstance(other, _BaseNetwork): - return False - # dealing with another address - else: - # address - return ( - int(self.network_address) - <= int(other._ip) - <= int(self.broadcast_address) - ) - - def overlaps(self, other): - """Tell if self is partly contained in other.""" - return self.network_address in other or ( - self.broadcast_address in other - or ( - other.network_address in self - or (other.broadcast_address in self) - ) - ) - - @property - def broadcast_address(self): - x = self._cache.get("broadcast_address") - if x is None: - x = self._address_class( - int(self.network_address) | int(self.hostmask) - ) - self._cache["broadcast_address"] = x - return x - - @property - def hostmask(self): - x = self._cache.get("hostmask") - if x is None: - x = self._address_class(int(self.netmask) ^ self._ALL_ONES) - self._cache["hostmask"] = x - return x - - @property - def with_prefixlen(self): - return "%s/%d" % (self.network_address, self._prefixlen) - - @property - def with_netmask(self): - return "%s/%s" % (self.network_address, self.netmask) - - @property - def with_hostmask(self): - return "%s/%s" % (self.network_address, self.hostmask) - - @property - def num_addresses(self): - """Number of hosts in the current subnet.""" - return int(self.broadcast_address) - int(self.network_address) + 1 - - @property - def _address_class(self): - # Returning bare address objects (rather than interfaces) allows for - # more consistent behaviour across the network address, broadcast - # address and individual host addresses. - msg = "%200s has no associated address class" % (type(self),) - raise NotImplementedError(msg) - - @property - def prefixlen(self): - return self._prefixlen - - def address_exclude(self, other): - """Remove an address from a larger block. 
- - For example: - - addr1 = ip_network('192.0.2.0/28') - addr2 = ip_network('192.0.2.1/32') - list(addr1.address_exclude(addr2)) = - [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), - IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] - - or IPv6: - - addr1 = ip_network('2001:db8::1/32') - addr2 = ip_network('2001:db8::1/128') - list(addr1.address_exclude(addr2)) = - [ip_network('2001:db8::1/128'), - ip_network('2001:db8::2/127'), - ip_network('2001:db8::4/126'), - ip_network('2001:db8::8/125'), - ... - ip_network('2001:db8:8000::/33')] - - Args: - other: An IPv4Network or IPv6Network object of the same type. - - Returns: - An iterator of the IPv(4|6)Network objects which is self - minus other. - - Raises: - TypeError: If self and other are of differing address - versions, or if other is not a network object. - ValueError: If other is not completely contained by self. - - """ - if not self._version == other._version: - raise TypeError( - "%s and %s are not of the same version" % (self, other) - ) - - if not isinstance(other, _BaseNetwork): - raise TypeError("%s is not a network object" % other) - - if not other.subnet_of(self): - raise ValueError("%s not contained in %s" % (other, self)) - if other == self: - return - - # Make sure we're comparing the network of other. - other = other.__class__( - "%s/%s" % (other.network_address, other.prefixlen) - ) - - s1, s2 = self.subnets() - while s1 != other and s2 != other: - if other.subnet_of(s1): - yield s2 - s1, s2 = s1.subnets() - elif other.subnet_of(s2): - yield s1 - s1, s2 = s2.subnets() - else: - # If we got here, there's a bug somewhere. - raise AssertionError( - "Error performing exclusion: " - "s1: %s s2: %s other: %s" % (s1, s2, other) - ) - if s1 == other: - yield s2 - elif s2 == other: - yield s1 - else: - # If we got here, there's a bug somewhere. 
- raise AssertionError( - "Error performing exclusion: " - "s1: %s s2: %s other: %s" % (s1, s2, other) - ) - - def compare_networks(self, other): - """Compare two IP objects. - - This is only concerned about the comparison of the integer - representation of the network addresses. This means that the - host bits aren't considered at all in this method. If you want - to compare host bits, you can easily enough do a - 'HostA._ip < HostB._ip' - - Args: - other: An IP object. - - Returns: - If the IP versions of self and other are the same, returns: - - -1 if self < other: - eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') - IPv6Network('2001:db8::1000/124') < - IPv6Network('2001:db8::2000/124') - 0 if self == other - eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') - IPv6Network('2001:db8::1000/124') == - IPv6Network('2001:db8::1000/124') - 1 if self > other - eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') - IPv6Network('2001:db8::2000/124') > - IPv6Network('2001:db8::1000/124') - - Raises: - TypeError if the IP versions are different. - - """ - # does this need to raise a ValueError? - if self._version != other._version: - raise TypeError( - "%s and %s are not of the same type" % (self, other) - ) - # self._version == other._version below here: - if self.network_address < other.network_address: - return -1 - if self.network_address > other.network_address: - return 1 - # self.network_address == other.network_address below here: - if self.netmask < other.netmask: - return -1 - if self.netmask > other.netmask: - return 1 - return 0 - - def _get_networks_key(self): - """Network-only key function. - - Returns an object that identifies this address' network and - netmask. This function is a suitable "key" argument for sorted() - and list.sort(). - - """ - return (self._version, self.network_address, self.netmask) - - def subnets(self, prefixlen_diff=1, new_prefix=None): - """The subnets which join to make the current subnet. 
- - In the case that self contains only one IP - (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 - for IPv6), yield an iterator with just ourself. - - Args: - prefixlen_diff: An integer, the amount the prefix length - should be increased by. This should not be set if - new_prefix is also set. - new_prefix: The desired new prefix length. This must be a - larger number (smaller prefix) than the existing prefix. - This should not be set if prefixlen_diff is also set. - - Returns: - An iterator of IPv(4|6) objects. - - Raises: - ValueError: The prefixlen_diff is too small or too large. - OR - prefixlen_diff and new_prefix are both set or new_prefix - is a smaller number than the current prefix (smaller - number means a larger network) - - """ - if self._prefixlen == self._max_prefixlen: - yield self - return - - if new_prefix is not None: - if new_prefix < self._prefixlen: - raise ValueError("new prefix must be longer") - if prefixlen_diff != 1: - raise ValueError("cannot set prefixlen_diff and new_prefix") - prefixlen_diff = new_prefix - self._prefixlen - - if prefixlen_diff < 0: - raise ValueError("prefix length diff must be > 0") - new_prefixlen = self._prefixlen + prefixlen_diff - - if new_prefixlen > self._max_prefixlen: - raise ValueError( - "prefix length diff %d is invalid for netblock %s" - % (new_prefixlen, self) - ) - - start = int(self.network_address) - end = int(self.broadcast_address) + 1 - step = (int(self.hostmask) + 1) >> prefixlen_diff - for new_addr in _compat_range(start, end, step): - current = self.__class__((new_addr, new_prefixlen)) - yield current - - def supernet(self, prefixlen_diff=1, new_prefix=None): - """The supernet containing the current network. - - Args: - prefixlen_diff: An integer, the amount the prefix length of - the network should be decreased by. For example, given a - /24 network and a prefixlen_diff of 3, a supernet with a - /21 netmask is returned. - - Returns: - An IPv4 network object. 
- - Raises: - ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have - a negative prefix length. - OR - If prefixlen_diff and new_prefix are both set or new_prefix is a - larger number than the current prefix (larger number means a - smaller network) - - """ - if self._prefixlen == 0: - return self - - if new_prefix is not None: - if new_prefix > self._prefixlen: - raise ValueError("new prefix must be shorter") - if prefixlen_diff != 1: - raise ValueError("cannot set prefixlen_diff and new_prefix") - prefixlen_diff = self._prefixlen - new_prefix - - new_prefixlen = self.prefixlen - prefixlen_diff - if new_prefixlen < 0: - raise ValueError( - "current prefixlen is %d, cannot have a prefixlen_diff of %d" - % (self.prefixlen, prefixlen_diff) - ) - return self.__class__( - ( - int(self.network_address) - & (int(self.netmask) << prefixlen_diff), - new_prefixlen, - ) - ) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is a multicast address. - See RFC 2373 2.7 for details. - - """ - return ( - self.network_address.is_multicast - and self.broadcast_address.is_multicast - ) - - @staticmethod - def _is_subnet_of(a, b): - try: - # Always false if one is v4 and the other is v6. - if a._version != b._version: - raise TypeError( - "%s and %s are not of the same version" % (a, b) - ) - return ( - b.network_address <= a.network_address - and b.broadcast_address >= a.broadcast_address - ) - except AttributeError: - raise TypeError( - "Unable to test subnet containment " - "between %s and %s" % (a, b) - ) - - def subnet_of(self, other): - """Return True if this network is a subnet of other.""" - return self._is_subnet_of(self, other) - - def supernet_of(self, other): - """Return True if this network is a supernet of other.""" - return self._is_subnet_of(other, self) - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. 
- - Returns: - A boolean, True if the address is within one of the - reserved IPv6 Network ranges. - - """ - return ( - self.network_address.is_reserved - and self.broadcast_address.is_reserved - ) - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is reserved per RFC 4291. - - """ - return ( - self.network_address.is_link_local - and self.broadcast_address.is_link_local - ) - - @property - def is_private(self): - """Test if this address is allocated for private networks. - - Returns: - A boolean, True if the address is reserved per - iana-ipv4-special-registry or iana-ipv6-special-registry. - - """ - return ( - self.network_address.is_private - and self.broadcast_address.is_private - ) - - @property - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, True if the address is not reserved per - iana-ipv4-special-registry or iana-ipv6-special-registry. - - """ - return not self.is_private - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 2373 2.5.2. - - """ - return ( - self.network_address.is_unspecified - and self.broadcast_address.is_unspecified - ) - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback address as defined in - RFC 2373 2.5.3. - - """ - return ( - self.network_address.is_loopback - and self.broadcast_address.is_loopback - ) - - -class _BaseV4(object): - - """Base IPv4 object. - - The following methods are used by IPv4 objects in both single IP - addresses and networks. - - """ - - __slots__ = () - _version = 4 - # Equivalent to 255.255.255.255 or 32 bits of 1's. - _ALL_ONES = (2 ** IPV4LENGTH) - 1 - _DECIMAL_DIGITS = frozenset("0123456789") - - # the valid octets for host and netmasks. 
only useful for IPv4. - _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0]) - - _max_prefixlen = IPV4LENGTH - # There are only a handful of valid v4 netmasks, so we cache them all - # when constructed (see _make_netmask()). - _netmask_cache = {} - - def _explode_shorthand_ip_string(self): - return _compat_str(self) - - @classmethod - def _make_netmask(cls, arg): - """Make a (netmask, prefix_len) tuple from the given argument. - - Argument can be: - - an integer (the prefix length) - - a string representing the prefix length (e.g. "24") - - a string representing the prefix netmask (e.g. "255.255.255.0") - """ - if arg not in cls._netmask_cache: - if isinstance(arg, _compat_int_types): - prefixlen = arg - else: - try: - # Check for a netmask in prefix length form - prefixlen = cls._prefix_from_prefix_string(arg) - except NetmaskValueError: - # Check for a netmask or hostmask in dotted-quad form. - # This may raise NetmaskValueError. - prefixlen = cls._prefix_from_ip_string(arg) - netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) - cls._netmask_cache[arg] = netmask, prefixlen - return cls._netmask_cache[arg] - - @classmethod - def _ip_int_from_string(cls, ip_str): - """Turn the given IP string into an integer for comparison. - - Args: - ip_str: A string, the IP ip_str. - - Returns: - The IP ip_str as an integer. - - Raises: - AddressValueError: if ip_str isn't a valid IPv4 Address. - - """ - if not ip_str: - raise AddressValueError("Address cannot be empty") - - octets = ip_str.split(".") - if len(octets) != 4: - raise AddressValueError("Expected 4 octets in %r" % ip_str) - - try: - return _compat_int_from_byte_vals( - map(cls._parse_octet, octets), "big" - ) - except ValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) - - @classmethod - def _parse_octet(cls, octet_str): - """Convert a decimal octet into an integer. - - Args: - octet_str: A string, the number to parse. - - Returns: - The octet as an integer. 
- - Raises: - ValueError: if the octet isn't strictly a decimal from [0..255]. - - """ - if not octet_str: - raise ValueError("Empty octet not permitted") - # Whitelist the characters, since int() allows a lot of bizarre stuff. - if not cls._DECIMAL_DIGITS.issuperset(octet_str): - msg = "Only decimal digits permitted in %r" - raise ValueError(msg % octet_str) - # We do the length check second, since the invalid character error - # is likely to be more informative for the user - if len(octet_str) > 3: - msg = "At most 3 characters permitted in %r" - raise ValueError(msg % octet_str) - # Convert to integer (we know digits are legal) - octet_int = int(octet_str, 10) - # Any octets that look like they *might* be written in octal, - # and which don't look exactly the same in both octal and - # decimal are rejected as ambiguous - if octet_int > 7 and octet_str[0] == "0": - msg = "Ambiguous (octal/decimal) value in %r not permitted" - raise ValueError(msg % octet_str) - if octet_int > 255: - raise ValueError("Octet %d (> 255) not permitted" % octet_int) - return octet_int - - @classmethod - def _string_from_ip_int(cls, ip_int): - """Turns a 32-bit integer into dotted decimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - The IP address as a string in dotted decimal notation. - - """ - return ".".join( - _compat_str( - struct.unpack(b"!B", b)[0] if isinstance(b, bytes) else b - ) - for b in _compat_to_bytes(ip_int, 4, "big") - ) - - def _is_hostmask(self, ip_str): - """Test if the IP string is a hostmask (rather than a netmask). - - Args: - ip_str: A string, the potential hostmask. - - Returns: - A boolean, True if the IP string is a hostmask. 
- - """ - bits = ip_str.split(".") - try: - parts = [x for x in map(int, bits) if x in self._valid_mask_octets] - except ValueError: - return False - if len(parts) != len(bits): - return False - if parts[0] < parts[-1]: - return True - return False - - def _reverse_pointer(self): - """Return the reverse DNS pointer name for the IPv4 address. - - This implements the method described in RFC1035 3.5. - - """ - reverse_octets = _compat_str(self).split(".")[::-1] - return ".".join(reverse_octets) + ".in-addr.arpa" - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def version(self): - return self._version - - -class IPv4Address(_BaseV4, _BaseAddress): - - """Represent and manipulate single IPv4 Addresses.""" - - __slots__ = ("_ip", "__weakref__") - - def __init__(self, address): - - """ - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv4Address('192.0.2.1') == IPv4Address(3221225985). - or, more generally - IPv4Address(int(IPv4Address('192.0.2.1'))) == - IPv4Address('192.0.2.1') - - Raises: - AddressValueError: If ipaddress isn't a valid IPv4 address. - - """ - # Efficient constructor from integer. - if isinstance(address, _compat_int_types): - self._check_int_address(address) - self._ip = address - return - - # Constructing from a packed address - if isinstance(address, bytes): - self._check_packed_address(address, 4) - bvs = _compat_bytes_to_byte_vals(address) - self._ip = _compat_int_from_byte_vals(bvs, "big") - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. 
- addr_str = _compat_str(address) - if "/" in addr_str: - raise AddressValueError("Unexpected '/' in %r" % address) - self._ip = self._ip_int_from_string(addr_str) - - @property - def packed(self): - """The binary representation of this address.""" - return v4_int_to_packed(self._ip) - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within the - reserved IPv4 Network range. - - """ - return self in self._constants._reserved_network - - @property - def is_private(self): - """Test if this address is allocated for private networks. - - Returns: - A boolean, True if the address is reserved per - iana-ipv4-special-registry. - - """ - return any(self in net for net in self._constants._private_networks) - - @property - def is_global(self): - return ( - self not in self._constants._public_network and not self.is_private - ) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is multicast. - See RFC 3171 for details. - - """ - return self in self._constants._multicast_network - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 5735 3. - - """ - return self == self._constants._unspecified_address - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback per RFC 3330. - - """ - return self in self._constants._loopback_network - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is link-local per RFC 3927. 
- - """ - return self in self._constants._linklocal_network - - -class IPv4Interface(IPv4Address): - def __init__(self, address): - if isinstance(address, (bytes, _compat_int_types)): - IPv4Address.__init__(self, address) - self.network = IPv4Network(self._ip) - self._prefixlen = self._max_prefixlen - return - - if isinstance(address, tuple): - IPv4Address.__init__(self, address[0]) - if len(address) > 1: - self._prefixlen = int(address[1]) - else: - self._prefixlen = self._max_prefixlen - - self.network = IPv4Network(address, strict=False) - self.netmask = self.network.netmask - self.hostmask = self.network.hostmask - return - - addr = _split_optional_netmask(address) - IPv4Address.__init__(self, addr[0]) - - self.network = IPv4Network(address, strict=False) - self._prefixlen = self.network._prefixlen - - self.netmask = self.network.netmask - self.hostmask = self.network.hostmask - - def __str__(self): - return "%s/%d" % ( - self._string_from_ip_int(self._ip), - self.network.prefixlen, - ) - - def __eq__(self, other): - address_equal = IPv4Address.__eq__(self, other) - if not address_equal or address_equal is NotImplemented: - return address_equal - try: - return self.network == other.network - except AttributeError: - # An interface with an associated network is NOT the - # same as an unassociated address. That's why the hash - # takes the extra info into account. - return False - - def __lt__(self, other): - address_less = IPv4Address.__lt__(self, other) - if address_less is NotImplemented: - return NotImplemented - try: - return ( - self.network < other.network - or self.network == other.network - and address_less - ) - except AttributeError: - # We *do* allow addresses and interfaces to be sorted. The - # unassociated address is considered less than all interfaces. 
- return False - - def __hash__(self): - return self._ip ^ self._prefixlen ^ int(self.network.network_address) - - __reduce__ = _IPAddressBase.__reduce__ - - @property - def ip(self): - return IPv4Address(self._ip) - - @property - def with_prefixlen(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen) - - @property - def with_netmask(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask) - - @property - def with_hostmask(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask) - - -class IPv4Network(_BaseV4, _BaseNetwork): - - """This class represents and manipulates 32-bit IPv4 network + addresses.. - - Attributes: [examples for IPv4Network('192.0.2.0/27')] - .network_address: IPv4Address('192.0.2.0') - .hostmask: IPv4Address('0.0.0.31') - .broadcast_address: IPv4Address('192.0.2.32') - .netmask: IPv4Address('255.255.255.224') - .prefixlen: 27 - - """ - - # Class to use when creating address objects - _address_class = IPv4Address - - def __init__(self, address, strict=True): - - """Instantiate a new IPv4 network object. - - Args: - address: A string or integer representing the IP [& network]. - '192.0.2.0/24' - '192.0.2.0/255.255.255.0' - '192.0.0.2/0.0.0.255' - are all functionally the same in IPv4. Similarly, - '192.0.2.1' - '192.0.2.1/255.255.255.255' - '192.0.2.1/32' - are also functionally equivalent. That is to say, failing to - provide a subnetmask will create an object with a mask of /32. - - If the mask (portion after the / in the argument) is given in - dotted quad form, it is treated as a netmask if it starts with a - non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it - starts with a zero field (e.g. 0.255.255.255 == /8), with the - single exception of an all-zero mask which is treated as a - netmask == /0. If no mask is given, a default of /32 is used. 
- - Additionally, an integer can be passed, so - IPv4Network('192.0.2.1') == IPv4Network(3221225985) - or, more generally - IPv4Interface(int(IPv4Interface('192.0.2.1'))) == - IPv4Interface('192.0.2.1') - - Raises: - AddressValueError: If ipaddress isn't a valid IPv4 address. - NetmaskValueError: If the netmask isn't valid for - an IPv4 address. - ValueError: If strict is True and a network address is not - supplied. - - """ - _BaseNetwork.__init__(self, address) - - # Constructing from a packed address or integer - if isinstance(address, (_compat_int_types, bytes)): - self.network_address = IPv4Address(address) - self.netmask, self._prefixlen = self._make_netmask( - self._max_prefixlen - ) - # fixme: address/network test here. - return - - if isinstance(address, tuple): - if len(address) > 1: - arg = address[1] - else: - # We weren't given an address[1] - arg = self._max_prefixlen - self.network_address = IPv4Address(address[0]) - self.netmask, self._prefixlen = self._make_netmask(arg) - packed = int(self.network_address) - if packed & int(self.netmask) != packed: - if strict: - raise ValueError("%s has host bits set" % self) - else: - self.network_address = IPv4Address( - packed & int(self.netmask) - ) - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP prefix string. 
- addr = _split_optional_netmask(address) - self.network_address = IPv4Address(self._ip_int_from_string(addr[0])) - - if len(addr) == 2: - arg = addr[1] - else: - arg = self._max_prefixlen - self.netmask, self._prefixlen = self._make_netmask(arg) - - if strict: - if ( - IPv4Address(int(self.network_address) & int(self.netmask)) - != self.network_address - ): - raise ValueError("%s has host bits set" % self) - self.network_address = IPv4Address( - int(self.network_address) & int(self.netmask) - ) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - - @property - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, True if the address is not reserved per - iana-ipv4-special-registry. - - """ - return ( - not ( - self.network_address in IPv4Network("100.64.0.0/10") - and self.broadcast_address in IPv4Network("100.64.0.0/10") - ) - and not self.is_private - ) - - -class _IPv4Constants(object): - - _linklocal_network = IPv4Network("169.254.0.0/16") - - _loopback_network = IPv4Network("127.0.0.0/8") - - _multicast_network = IPv4Network("224.0.0.0/4") - - _public_network = IPv4Network("100.64.0.0/10") - - _private_networks = [ - IPv4Network("0.0.0.0/8"), - IPv4Network("10.0.0.0/8"), - IPv4Network("127.0.0.0/8"), - IPv4Network("169.254.0.0/16"), - IPv4Network("172.16.0.0/12"), - IPv4Network("192.0.0.0/29"), - IPv4Network("192.0.0.170/31"), - IPv4Network("192.0.2.0/24"), - IPv4Network("192.168.0.0/16"), - IPv4Network("198.18.0.0/15"), - IPv4Network("198.51.100.0/24"), - IPv4Network("203.0.113.0/24"), - IPv4Network("240.0.0.0/4"), - IPv4Network("255.255.255.255/32"), - ] - - _reserved_network = IPv4Network("240.0.0.0/4") - - _unspecified_address = IPv4Address("0.0.0.0") - - -IPv4Address._constants = _IPv4Constants - - -class _BaseV6(object): - - """Base IPv6 object. - - The following methods are used by IPv6 objects in both single IP - addresses and networks. 
- - """ - - __slots__ = () - _version = 6 - _ALL_ONES = (2 ** IPV6LENGTH) - 1 - _HEXTET_COUNT = 8 - _HEX_DIGITS = frozenset("0123456789ABCDEFabcdef") - _max_prefixlen = IPV6LENGTH - - # There are only a bunch of valid v6 netmasks, so we cache them all - # when constructed (see _make_netmask()). - _netmask_cache = {} - - @classmethod - def _make_netmask(cls, arg): - """Make a (netmask, prefix_len) tuple from the given argument. - - Argument can be: - - an integer (the prefix length) - - a string representing the prefix length (e.g. "24") - - a string representing the prefix netmask (e.g. "255.255.255.0") - """ - if arg not in cls._netmask_cache: - if isinstance(arg, _compat_int_types): - prefixlen = arg - else: - prefixlen = cls._prefix_from_prefix_string(arg) - netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) - cls._netmask_cache[arg] = netmask, prefixlen - return cls._netmask_cache[arg] - - @classmethod - def _ip_int_from_string(cls, ip_str): - """Turn an IPv6 ip_str into an integer. - - Args: - ip_str: A string, the IPv6 ip_str. - - Returns: - An int, the IPv6 address - - Raises: - AddressValueError: if ip_str isn't a valid IPv6 Address. - - """ - if not ip_str: - raise AddressValueError("Address cannot be empty") - - parts = ip_str.split(":") - - # An IPv6 address needs at least 2 colons (3 parts). - _min_parts = 3 - if len(parts) < _min_parts: - msg = "At least %d parts expected in %r" % (_min_parts, ip_str) - raise AddressValueError(msg) - - # If the address has an IPv4-style suffix, convert it to hexadecimal. - if "." in parts[-1]: - try: - ipv4_int = IPv4Address(parts.pop())._ip - except AddressValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) - parts.append("%x" % ((ipv4_int >> 16) & 0xFFFF)) - parts.append("%x" % (ipv4_int & 0xFFFF)) - - # An IPv6 address can't have more than 8 colons (9 parts). - # The extra colon comes from using the "::" notation for a single - # leading or trailing zero part. 
- _max_parts = cls._HEXTET_COUNT + 1 - if len(parts) > _max_parts: - msg = "At most %d colons permitted in %r" % ( - _max_parts - 1, - ip_str, - ) - raise AddressValueError(msg) - - # Disregarding the endpoints, find '::' with nothing in between. - # This indicates that a run of zeroes has been skipped. - skip_index = None - for i in _compat_range(1, len(parts) - 1): - if not parts[i]: - if skip_index is not None: - # Can't have more than one '::' - msg = "At most one '::' permitted in %r" % ip_str - raise AddressValueError(msg) - skip_index = i - - # parts_hi is the number of parts to copy from above/before the '::' - # parts_lo is the number of parts to copy from below/after the '::' - if skip_index is not None: - # If we found a '::', then check if it also covers the endpoints. - parts_hi = skip_index - parts_lo = len(parts) - skip_index - 1 - if not parts[0]: - parts_hi -= 1 - if parts_hi: - msg = "Leading ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # ^: requires ^:: - if not parts[-1]: - parts_lo -= 1 - if parts_lo: - msg = "Trailing ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # :$ requires ::$ - parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) - if parts_skipped < 1: - msg = "Expected at most %d other parts with '::' in %r" - raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str)) - else: - # Otherwise, allocate the entire address to parts_hi. The - # endpoints could still be empty, but _parse_hextet() will check - # for that. 
- if len(parts) != cls._HEXTET_COUNT: - msg = "Exactly %d parts expected without '::' in %r" - raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) - if not parts[0]: - msg = "Leading ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # ^: requires ^:: - if not parts[-1]: - msg = "Trailing ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # :$ requires ::$ - parts_hi = len(parts) - parts_lo = 0 - parts_skipped = 0 - - try: - # Now, parse the hextets into a 128-bit integer. - ip_int = 0 - for i in range(parts_hi): - ip_int <<= 16 - ip_int |= cls._parse_hextet(parts[i]) - ip_int <<= 16 * parts_skipped - for i in range(-parts_lo, 0): - ip_int <<= 16 - ip_int |= cls._parse_hextet(parts[i]) - return ip_int - except ValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) - - @classmethod - def _parse_hextet(cls, hextet_str): - """Convert an IPv6 hextet string into an integer. - - Args: - hextet_str: A string, the number to parse. - - Returns: - The hextet as an integer. - - Raises: - ValueError: if the input isn't strictly a hex number from - [0..FFFF]. - - """ - # Whitelist the characters, since int() allows a lot of bizarre stuff. - if not cls._HEX_DIGITS.issuperset(hextet_str): - raise ValueError("Only hex digits permitted in %r" % hextet_str) - # We do the length check second, since the invalid character error - # is likely to be more informative for the user - if len(hextet_str) > 4: - msg = "At most 4 characters permitted in %r" - raise ValueError(msg % hextet_str) - # Length check means we can skip checking the integer value - return int(hextet_str, 16) - - @classmethod - def _compress_hextets(cls, hextets): - """Compresses a list of hextets. 
- - Compresses a list of strings, replacing the longest continuous - sequence of "0" in the list with "" and adding empty strings at - the beginning or at the end of the string such that subsequently - calling ":".join(hextets) will produce the compressed version of - the IPv6 address. - - Args: - hextets: A list of strings, the hextets to compress. - - Returns: - A list of strings. - - """ - best_doublecolon_start = -1 - best_doublecolon_len = 0 - doublecolon_start = -1 - doublecolon_len = 0 - for index, hextet in enumerate(hextets): - if hextet == "0": - doublecolon_len += 1 - if doublecolon_start == -1: - # Start of a sequence of zeros. - doublecolon_start = index - if doublecolon_len > best_doublecolon_len: - # This is the longest sequence of zeros so far. - best_doublecolon_len = doublecolon_len - best_doublecolon_start = doublecolon_start - else: - doublecolon_len = 0 - doublecolon_start = -1 - - if best_doublecolon_len > 1: - best_doublecolon_end = ( - best_doublecolon_start + best_doublecolon_len - ) - # For zeros at the end of the address. - if best_doublecolon_end == len(hextets): - hextets += [""] - hextets[best_doublecolon_start:best_doublecolon_end] = [""] - # For zeros at the beginning of the address. - if best_doublecolon_start == 0: - hextets = [""] + hextets - - return hextets - - @classmethod - def _string_from_ip_int(cls, ip_int=None): - """Turns a 128-bit integer into hexadecimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - A string, the hexadecimal representation of the address. - - Raises: - ValueError: The address is bigger than 128 bits of all ones. 
- - """ - if ip_int is None: - ip_int = int(cls._ip) - - if ip_int > cls._ALL_ONES: - raise ValueError("IPv6 address is too large") - - hex_str = "%032x" % ip_int - hextets = ["%x" % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)] - - hextets = cls._compress_hextets(hextets) - return ":".join(hextets) - - def _explode_shorthand_ip_string(self): - """Expand a shortened IPv6 address. - - Args: - ip_str: A string, the IPv6 address. - - Returns: - A string, the expanded IPv6 address. - - """ - if isinstance(self, IPv6Network): - ip_str = _compat_str(self.network_address) - elif isinstance(self, IPv6Interface): - ip_str = _compat_str(self.ip) - else: - ip_str = _compat_str(self) - - ip_int = self._ip_int_from_string(ip_str) - hex_str = "%032x" % ip_int - parts = [hex_str[x:x + 4] for x in range(0, 32, 4)] - if isinstance(self, (_BaseNetwork, IPv6Interface)): - return "%s/%d" % (":".join(parts), self._prefixlen) - return ":".join(parts) - - def _reverse_pointer(self): - """Return the reverse DNS pointer name for the IPv6 address. - - This implements the method described in RFC3596 2.5. - - """ - reverse_chars = self.exploded[::-1].replace(":", "") - return ".".join(reverse_chars) + ".ip6.arpa" - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def version(self): - return self._version - - -class IPv6Address(_BaseV6, _BaseAddress): - - """Represent and manipulate single IPv6 Addresses.""" - - __slots__ = ("_ip", "__weakref__") - - def __init__(self, address): - """Instantiate a new IPv6 address object. - - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv6Address('2001:db8::') == - IPv6Address(42540766411282592856903984951653826560) - or, more generally - IPv6Address(int(IPv6Address('2001:db8::'))) == - IPv6Address('2001:db8::') - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - - """ - # Efficient constructor from integer. 
- if isinstance(address, _compat_int_types): - self._check_int_address(address) - self._ip = address - return - - # Constructing from a packed address - if isinstance(address, bytes): - self._check_packed_address(address, 16) - bvs = _compat_bytes_to_byte_vals(address) - self._ip = _compat_int_from_byte_vals(bvs, "big") - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. - addr_str = _compat_str(address) - if "/" in addr_str: - raise AddressValueError("Unexpected '/' in %r" % address) - self._ip = self._ip_int_from_string(addr_str) - - @property - def packed(self): - """The binary representation of this address.""" - return v6_int_to_packed(self._ip) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is a multicast address. - See RFC 2373 2.7 for details. - - """ - return self in self._constants._multicast_network - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within one of the - reserved IPv6 Network ranges. - - """ - return any(self in x for x in self._constants._reserved_networks) - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is reserved per RFC 4291. - - """ - return self in self._constants._linklocal_network - - @property - def is_site_local(self): - """Test if the address is reserved for site-local. - - Note that the site-local address space has been deprecated by RFC 3879. - Use is_private to test if this address is in the space of unique local - addresses as defined by RFC 4193. - - Returns: - A boolean, True if the address is reserved per RFC 3513 2.5.6. - - """ - return self in self._constants._sitelocal_network - - @property - def is_private(self): - """Test if this address is allocated for private networks. 
- - Returns: - A boolean, True if the address is reserved per - iana-ipv6-special-registry. - - """ - return any(self in net for net in self._constants._private_networks) - - @property - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, true if the address is not reserved per - iana-ipv6-special-registry. - - """ - return not self.is_private - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 2373 2.5.2. - - """ - return self._ip == 0 - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback address as defined in - RFC 2373 2.5.3. - - """ - return self._ip == 1 - - @property - def ipv4_mapped(self): - """Return the IPv4 mapped address. - - Returns: - If the IPv6 address is a v4 mapped address, return the - IPv4 mapped address. Return None otherwise. - - """ - if (self._ip >> 32) != 0xFFFF: - return None - return IPv4Address(self._ip & 0xFFFFFFFF) - - @property - def teredo(self): - """Tuple of embedded teredo IPs. - - Returns: - Tuple of the (server, client) IPs or None if the address - doesn't appear to be a teredo address (doesn't start with - 2001::/32) - - """ - if (self._ip >> 96) != 0x20010000: - return None - return ( - IPv4Address((self._ip >> 64) & 0xFFFFFFFF), - IPv4Address(~self._ip & 0xFFFFFFFF), - ) - - @property - def sixtofour(self): - """Return the IPv4 6to4 embedded address. - - Returns: - The IPv4 6to4-embedded address if present or None if the - address doesn't appear to contain a 6to4 embedded address. 
- - """ - if (self._ip >> 112) != 0x2002: - return None - return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) - - -class IPv6Interface(IPv6Address): - def __init__(self, address): - if isinstance(address, (bytes, _compat_int_types)): - IPv6Address.__init__(self, address) - self.network = IPv6Network(self._ip) - self._prefixlen = self._max_prefixlen - return - if isinstance(address, tuple): - IPv6Address.__init__(self, address[0]) - if len(address) > 1: - self._prefixlen = int(address[1]) - else: - self._prefixlen = self._max_prefixlen - self.network = IPv6Network(address, strict=False) - self.netmask = self.network.netmask - self.hostmask = self.network.hostmask - return - - addr = _split_optional_netmask(address) - IPv6Address.__init__(self, addr[0]) - self.network = IPv6Network(address, strict=False) - self.netmask = self.network.netmask - self._prefixlen = self.network._prefixlen - self.hostmask = self.network.hostmask - - def __str__(self): - return "%s/%d" % ( - self._string_from_ip_int(self._ip), - self.network.prefixlen, - ) - - def __eq__(self, other): - address_equal = IPv6Address.__eq__(self, other) - if not address_equal or address_equal is NotImplemented: - return address_equal - try: - return self.network == other.network - except AttributeError: - # An interface with an associated network is NOT the - # same as an unassociated address. That's why the hash - # takes the extra info into account. - return False - - def __lt__(self, other): - address_less = IPv6Address.__lt__(self, other) - if address_less is NotImplemented: - return NotImplemented - try: - return ( - self.network < other.network - or self.network == other.network - and address_less - ) - except AttributeError: - # We *do* allow addresses and interfaces to be sorted. The - # unassociated address is considered less than all interfaces. 
- return False - - def __hash__(self): - return self._ip ^ self._prefixlen ^ int(self.network.network_address) - - __reduce__ = _IPAddressBase.__reduce__ - - @property - def ip(self): - return IPv6Address(self._ip) - - @property - def with_prefixlen(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen) - - @property - def with_netmask(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask) - - @property - def with_hostmask(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask) - - @property - def is_unspecified(self): - return self._ip == 0 and self.network.is_unspecified - - @property - def is_loopback(self): - return self._ip == 1 and self.network.is_loopback - - -class IPv6Network(_BaseV6, _BaseNetwork): - - """This class represents and manipulates 128-bit IPv6 networks. - - Attributes: [examples for IPv6('2001:db8::1000/124')] - .network_address: IPv6Address('2001:db8::1000') - .hostmask: IPv6Address('::f') - .broadcast_address: IPv6Address('2001:db8::100f') - .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') - .prefixlen: 124 - - """ - - # Class to use when creating address objects - _address_class = IPv6Address - - def __init__(self, address, strict=True): - """Instantiate a new IPv6 Network object. - - Args: - address: A string or integer representing the IPv6 network or the - IP and prefix/netmask. - '2001:db8::/128' - '2001:db8:0000:0000:0000:0000:0000:0000/128' - '2001:db8::' - are all functionally the same in IPv6. That is to say, - failing to provide a subnetmask will create an object with - a mask of /128. - - Additionally, an integer can be passed, so - IPv6Network('2001:db8::') == - IPv6Network(42540766411282592856903984951653826560) - or, more generally - IPv6Network(int(IPv6Network('2001:db8::'))) == - IPv6Network('2001:db8::') - - strict: A boolean. 
If true, ensure that we have been passed - A true network address, eg, 2001:db8::1000/124 and not an - IP address on a network, eg, 2001:db8::1/124. - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - NetmaskValueError: If the netmask isn't valid for - an IPv6 address. - ValueError: If strict was True and a network address was not - supplied. - - """ - _BaseNetwork.__init__(self, address) - - # Efficient constructor from integer or packed address - if isinstance(address, (bytes, _compat_int_types)): - self.network_address = IPv6Address(address) - self.netmask, self._prefixlen = self._make_netmask( - self._max_prefixlen - ) - return - - if isinstance(address, tuple): - if len(address) > 1: - arg = address[1] - else: - arg = self._max_prefixlen - self.netmask, self._prefixlen = self._make_netmask(arg) - self.network_address = IPv6Address(address[0]) - packed = int(self.network_address) - if packed & int(self.netmask) != packed: - if strict: - raise ValueError("%s has host bits set" % self) - else: - self.network_address = IPv6Address( - packed & int(self.netmask) - ) - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP prefix string. - addr = _split_optional_netmask(address) - - self.network_address = IPv6Address(self._ip_int_from_string(addr[0])) - - if len(addr) == 2: - arg = addr[1] - else: - arg = self._max_prefixlen - self.netmask, self._prefixlen = self._make_netmask(arg) - - if strict: - if ( - IPv6Address(int(self.network_address) & int(self.netmask)) - != self.network_address - ): - raise ValueError("%s has host bits set" % self) - self.network_address = IPv6Address( - int(self.network_address) & int(self.netmask) - ) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - - def hosts(self): - """Generate Iterator over usable hosts in a network. - - This is like __iter__ except it doesn't return the - Subnet-Router anycast address. 
- - """ - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in _compat_range(network + 1, broadcast + 1): - yield self._address_class(x) - - @property - def is_site_local(self): - """Test if the address is reserved for site-local. - - Note that the site-local address space has been deprecated by RFC 3879. - Use is_private to test if this address is in the space of unique local - addresses as defined by RFC 4193. - - Returns: - A boolean, True if the address is reserved per RFC 3513 2.5.6. - - """ - return ( - self.network_address.is_site_local - and self.broadcast_address.is_site_local - ) - - -class _IPv6Constants(object): - - _linklocal_network = IPv6Network("fe80::/10") - - _multicast_network = IPv6Network("ff00::/8") - - _private_networks = [ - IPv6Network("::1/128"), - IPv6Network("::/128"), - IPv6Network("::ffff:0:0/96"), - IPv6Network("100::/64"), - IPv6Network("2001::/23"), - IPv6Network("2001:2::/48"), - IPv6Network("2001:db8::/32"), - IPv6Network("2001:10::/28"), - IPv6Network("fc00::/7"), - IPv6Network("fe80::/10"), - ] - - _reserved_networks = [ - IPv6Network("::/8"), - IPv6Network("100::/8"), - IPv6Network("200::/7"), - IPv6Network("400::/6"), - IPv6Network("800::/5"), - IPv6Network("1000::/4"), - IPv6Network("4000::/3"), - IPv6Network("6000::/3"), - IPv6Network("8000::/3"), - IPv6Network("A000::/3"), - IPv6Network("C000::/3"), - IPv6Network("E000::/4"), - IPv6Network("F000::/5"), - IPv6Network("F800::/6"), - IPv6Network("FE00::/9"), - ] - - _sitelocal_network = IPv6Network("fec0::/10") - - -IPv6Address._constants = _IPv6Constants diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py index 054a4d4790..48e2f10ef3 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py +++ b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py @@ -17,11 +17,12 @@ module: scaleway_security_group_rule short_description: 
Scaleway Security Group Rule management module author: Antoine Barbare (@abarbare) description: - - This module manages Security Group Rule on Scaleway account - U(https://developer.scaleway.com) + - This module manages Security Group Rule on Scaleway account + U(https://developer.scaleway.com) extends_documentation_fragment: -- community.general.scaleway - + - community.general.scaleway +requirements: + - ipaddress options: state: @@ -129,10 +130,19 @@ data: } ''' +import traceback + from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object -from ansible_collections.community.general.plugins.module_utils.compat.ipaddress import ip_network from ansible.module_utils._text import to_text -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + from ipaddress import ip_network +except ImportError: + IPADDRESS_IMP_ERR = traceback.format_exc() + HAS_IPADDRESS = False +else: + HAS_IPADDRESS = True def get_sgr_from_api(security_group_rules, security_group_rule): @@ -255,6 +265,8 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, ) + if not HAS_IPADDRESS: + module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR) core(module) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 7beedfa206..a33e194233 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -1,6 +1,4 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time -plugins/module_utils/compat/ipaddress.py no-assert -plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements diff --git 
a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 80975cf389..4678f10294 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -1,5 +1,3 @@ -plugins/module_utils/compat/ipaddress.py no-assert -plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 68684f000d..ec34ff7833 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -1,5 +1,3 @@ -plugins/module_utils/compat/ipaddress.py no-assert -plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 36a0c3e08e..8f18be1c44 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -1,6 +1,4 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time -plugins/module_utils/compat/ipaddress.py no-assert -plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc From 73863262584ba58043e5357d8c45b468dd7de5e2 Mon Sep 17 00:00:00 2001 From: Mike Russell Date: Sat, 8 May 2021 02:58:55 -0700 Subject: [PATCH 0028/2828] Small Documentation Example Of Cask Leveraging (#2462) * Small Documentation Example Of Cask Leveraging - Just a lil' demo showing that we can utilize homebrew/cask/foo syntax for given name of package to grab associated cask pacakge Resolves: 
patch/sml-doc-example-update * Slight Documentation Example Edit - adjusting documentation example to provide better info surrounding installing a given formula from brew via cask Resolves: patch/sml-doc-example-update * Small Edits To Make PEP8 Happy - format code with autopep8 in vs code Resolves: patch/sml-doc-example-update * Only Making Small PEP8 Change - reverting previous mass PEP8 format, focus on trimming whitespace on doc example entry Resolves: patch/sml-doc-example-update * Remove Trailing Whitespace PEP8 - removed trailing whitespace on doc example chunk Resolves: patch/sml-doc-example-update --- plugins/modules/packaging/os/homebrew.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/modules/packaging/os/homebrew.py b/plugins/modules/packaging/os/homebrew.py index 9a41370c3d..47ec930a2c 100644 --- a/plugins/modules/packaging/os/homebrew.py +++ b/plugins/modules/packaging/os/homebrew.py @@ -127,6 +127,11 @@ EXAMPLES = ''' state: present install_options: with-baz,enable-debug +- name: Install formula foo with 'brew' from cask + community.general.homebrew: + name: homebrew/cask/foo + state: present + - name: Use ignored-pinned option while upgrading all community.general.homebrew: upgrade_all: yes From 4cdff8654a8ef793736b95d84b872acf3779bdea Mon Sep 17 00:00:00 2001 From: vbarba Date: Sun, 9 May 2021 22:25:00 +0200 Subject: [PATCH 0029/2828] fix stackpath_compute validate_config (#2448) * fix stackpath_compute validate_config get the lenght for the client_id / client_secret to validate inventory configuration * Add changelog fragment. 
Co-authored-by: Felix Fontein --- changelogs/fragments/2448-stackpath_compute-fix.yml | 2 ++ plugins/inventory/stackpath_compute.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2448-stackpath_compute-fix.yml diff --git a/changelogs/fragments/2448-stackpath_compute-fix.yml b/changelogs/fragments/2448-stackpath_compute-fix.yml new file mode 100644 index 0000000000..196db780b1 --- /dev/null +++ b/changelogs/fragments/2448-stackpath_compute-fix.yml @@ -0,0 +1,2 @@ +bugfixes: +- "stackpath_compute inventory script - fix broken validation checks for client ID and client secret (https://github.com/ansible-collections/community.general/pull/2448)." diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index 393edac384..fb879e869e 100644 --- a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -102,13 +102,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): raise AnsibleError("plugin doesn't match this plugin") try: client_id = config['client_id'] - if client_id != 32: + if len(client_id) != 32: raise AnsibleError("client_id must be 32 characters long") except KeyError: raise AnsibleError("config missing client_id, a required option") try: client_secret = config['client_secret'] - if client_secret != 64: + if len(client_secret) != 64: raise AnsibleError("client_secret must be 64 characters long") except KeyError: raise AnsibleError("config missing client_id, a required option") From 2e58dfe52afd715a967c01b4994c1a3574e835dd Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 10 May 2021 14:45:10 +0200 Subject: [PATCH 0030/2828] Clarify Windows (non-)support. (#2476) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 935f0ecabd..306f307128 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,8 @@ This repo contains the `community.general` Ansible Collection. 
The collection in You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). +Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so. + ## Tested with Ansible Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported. From 8e7aff00b5f3af2ed7dbc377255832da68817144 Mon Sep 17 00:00:00 2001 From: sam-lunt Date: Mon, 10 May 2021 10:55:19 -0500 Subject: [PATCH 0031/2828] Avoid incorrectly marking zfs tasks as changed (#2454) * Avoid incorrectly marking zfs tasks as changed The zfs module will incorrectly mark certain tasks as having been changed. For example, if a dataset has a quota of "1G" and the user changes it to "1024M", the actual quota vale has not changed, but since the module is doing a simple string comparison between "1G" and "1024M", it marks the step as "changed". 
Instead of trying to handle all the corner cases of zfs (another example is when the zpool "altroot" property has been set), this change simply compares the output of "zfs-get" from before and after "zfs-set" is called * update changelog format * Update changelogs/fragments/2454-detect_zfs_changed.yml Co-authored-by: Felix Fontein * add note about check_mode * Update plugins/modules/storage/zfs/zfs.py Co-authored-by: Felix Fontein * Update plugins/modules/storage/zfs/zfs.py Co-authored-by: Felix Fontein * clarify check mode qualifications * rephrase to avoid hypothetical Co-authored-by: Felix Fontein --- .../fragments/2454-detect_zfs_changed.yml | 2 ++ plugins/modules/storage/zfs/zfs.py | 24 +++++++++++++++---- 2 files changed, 21 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2454-detect_zfs_changed.yml diff --git a/changelogs/fragments/2454-detect_zfs_changed.yml b/changelogs/fragments/2454-detect_zfs_changed.yml new file mode 100644 index 0000000000..0604278f6b --- /dev/null +++ b/changelogs/fragments/2454-detect_zfs_changed.yml @@ -0,0 +1,2 @@ +bugfixes: + - zfs - certain ZFS properties, especially sizes, would lead to a task being falsely marked as "changed" even when no actual change was made (https://github.com/ansible-collections/community.general/issues/975, https://github.com/ansible-collections/community.general/pull/2454). diff --git a/plugins/modules/storage/zfs/zfs.py b/plugins/modules/storage/zfs/zfs.py index fe693a5045..2d5d4487dd 100644 --- a/plugins/modules/storage/zfs/zfs.py +++ b/plugins/modules/storage/zfs/zfs.py @@ -37,6 +37,12 @@ options: - A dictionary of zfs properties to be set. - See the zfs(8) man page for more information. type: dict +notes: + - C(check_mode) is supported, but in certain situations it may report a task + as changed that will not be reported as changed when C(check_mode) is disabled. 
+ For example, this might occur when the zpool C(altroot) option is set or when + a size is written using human-readable notation, such as C(1M) or C(1024K), + instead of as an unqualified byte count, such as C(1048576). author: - Johan Wiren (@johanwiren) ''' @@ -184,9 +190,7 @@ class Zfs(object): return cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name] (rc, out, err) = self.module.run_command(cmd) - if rc == 0: - self.changed = True - else: + if rc != 0: self.module.fail_json(msg=err) def set_properties_if_changed(self): @@ -194,15 +198,25 @@ class Zfs(object): for prop, value in self.properties.items(): if current_properties.get(prop, None) != value: self.set_property(prop, value) + if self.module.check_mode: + return + updated_properties = self.get_current_properties() + for prop in self.properties: + value = updated_properties.get(prop, None) + if value is None: + self.module.fail_json(msg="zfsprop was not present after being successfully set: %s" % prop) + if current_properties.get(prop, None) != value: + self.changed = True def get_current_properties(self): - cmd = [self.zfs_cmd, 'get', '-H'] + cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,value,source"] if self.enhanced_sharing: cmd += ['-e'] cmd += ['all', self.name] rc, out, err = self.module.run_command(" ".join(cmd)) properties = dict() - for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]: + for line in out.splitlines(): + prop, value, source = line.split('\t') # include source '-' so that creation-only properties are not removed # to avoids errors when the dataset already exists and the property is not changed # this scenario is most likely when the same playbook is run more than once From 624eb7171e8afc72684695aaad6c12d1a27c3c26 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 11 May 2021 07:52:57 +0200 Subject: [PATCH 0032/2828] Run unit tests also with Python 3.10. 
(#2486) ci_complete --- .azure-pipelines/azure-pipelines.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 8d1b81865e..8dc49e5c03 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -124,6 +124,7 @@ stages: - test: 3.7 - test: 3.8 - test: 3.9 + - test: '3.10' - stage: Units_2_11 displayName: Units 2.11 dependsOn: [] From eea4f4596541fb0a3fc348bf36f6208c2a408b5f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 11 May 2021 19:27:05 +0200 Subject: [PATCH 0033/2828] Add dependent lookup plugin (#2164) * Add dependent lookup plugin. * Use correct YAML booleans. * Began complete rewrite. * Only match start of error msg. * Improve tests. * Work around old Jinja2 versions. * Fix metadata. * Fix filter name. --- plugins/lookup/dependent.py | 208 ++++++++++++++++++ .../targets/lookup_dependent/aliases | 2 + .../targets/lookup_dependent/tasks/main.yml | 179 +++++++++++++++ tests/unit/plugins/lookup/test_dependent.py | 44 ++++ 4 files changed, 433 insertions(+) create mode 100644 plugins/lookup/dependent.py create mode 100644 tests/integration/targets/lookup_dependent/aliases create mode 100644 tests/integration/targets/lookup_dependent/tasks/main.yml create mode 100644 tests/unit/plugins/lookup/test_dependent.py diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py new file mode 100644 index 0000000000..a22a98476c --- /dev/null +++ b/plugins/lookup/dependent.py @@ -0,0 +1,208 @@ +# (c) 2015-2021, Felix Fontein +# (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +name: dependent +short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables +version_added: 3.1.0 +description: + - 
"Takes the input lists and returns a list with elements that are lists, dictionaries, + or template expressions which evaluate to lists or dicts, composed of the elements of + the input evaluated lists and dictionaries." +options: + _raw: + description: + - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. + The name is the index that is used in the result object. The value is iterated over as described below. + - If the value is a list, it is simply iterated over. + - If the value is a dictionary, it is iterated over and returned as if they would be processed by the + R(ansible.builtin.dict2items filter,ansible_collections.ansible.builtin.dict2items_filter). + - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen + elements with C(item.). The result must be a list or a dictionary. + type: list + elements: dict + required: true +""" + +EXAMPLES = """ +- name: Install/remove public keys for active admin users + ansible.posix.authorized_key: + user: "{{ item.admin.key }}" + key: "{{ lookup('file', item.key.public_key) }}" + state: "{{ 'present' if item.key.active else 'absent' }}" + when: item.admin.value.active + with_community.general.dependent: + - admin: admin_user_data + - key: admin_ssh_keys[item.admin.key] + loop_control: + # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists + label: "{{ [item.admin.key, 'active' if item.key.active else 'inactive', item.key.public_key] }}" + vars: + admin_user_data: + admin1: + name: Alice + active: true + admin2: + name: Bob + active: true + admin_ssh_keys: + admin1: + - private_key: keys/private_key_admin1.pem + public_key: keys/private_key_admin1.pub + active: true + admin2: + - private_key: keys/private_key_admin2.pem + public_key: keys/private_key_admin2.pub + active: true + - private_key: keys/private_key_admin2-old.pem + public_key: keys/private_key_admin2-old.pub + active: 
false + +- name: Update DNS records + community.aws.route53: + zone: "{{ item.zone.key }}" + record: "{{ item.prefix.key ~ '.' if item.prefix.key else '' }}{{ item.zone.key }}" + type: "{{ item.entry.key }}" + ttl: "{{ item.entry.value.ttl | default(3600) }}" + value: "{{ item.entry.value.value }}" + state: "{{ 'absent' if (item.entry.value.absent | default(False)) else 'present' }}" + overwrite: true + loop_control: + # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists + label: |- + {{ [item.zone.key, item.prefix.key, item.entry.key, + item.entry.value.ttl | default(3600), + item.entry.value.absent | default(False), item.entry.value.value] }} + with_community.general.dependent: + - zone: dns_setup + - prefix: item.zone.value + - entry: item.prefix.value + vars: + dns_setup: + example.com: + '': + A: + value: + - 1.2.3.4 + AAAA: + value: + - "2a01:1:2:3::1" + 'test._domainkey': + TXT: + ttl: 300 + value: + - '"k=rsa; t=s; p=MIGfMA..."' + example.org: + 'www': + A: + value: + - 1.2.3.4 + - 5.6.7.8 +""" + +RETURN = """ + _list: + description: + - A list composed of dictionaries whose keys are the variable names from the input list. + type: list + elements: dict + sample: + - key1: a + key2: test + - key1: a + key2: foo + - key1: b + key2: bar +""" + +from ansible.errors import AnsibleLookupError +from ansible.module_utils.common._collections_compat import Mapping, Sequence +from ansible.module_utils.six import string_types +from ansible.plugins.lookup import LookupBase +from ansible.template import Templar + + +class LookupModule(LookupBase): + def __evaluate(self, expression, templar, variables): + """Evaluate expression with templar. + + ``expression`` is the expression to evaluate. + ``variables`` are the variables to use. 
+ """ + templar.available_variables = variables or {} + return templar.template("{0}{1}{2}".format("{{", expression, "}}"), cache=False) + + def __process(self, result, terms, index, current, templar, variables): + """Fills ``result`` list with evaluated items. + + ``result`` is a list where the resulting items are placed. + ``terms`` is the parsed list of terms + ``index`` is the current index to be processed in the list. + ``current`` is a dictionary where the first ``index`` values are filled in. + ``variables`` are the variables currently available. + """ + # If we are done, add to result list: + if index == len(terms): + result.append(current.copy()) + return + + key, expression, values = terms[index] + + if expression is not None: + # Evaluate expression in current context + vars = variables.copy() + vars['item'] = current.copy() + try: + values = self.__evaluate(expression, templar, variables=vars) + except Exception as e: + raise AnsibleLookupError( + 'Caught "{error}" while evaluating {key!r} with item == {item!r}'.format( + error=e, key=key, item=current)) + + if isinstance(values, Mapping): + for idx, val in sorted(values.items()): + current[key] = dict([('key', idx), ('value', val)]) + self.__process(result, terms, index + 1, current, templar, variables) + elif isinstance(values, Sequence): + for elt in values: + current[key] = elt + self.__process(result, terms, index + 1, current, templar, variables) + else: + raise AnsibleLookupError( + 'Did not obtain dictionary or list while evaluating {key!r} with item == {item!r}, but {type}'.format( + key=key, item=current, type=type(values))) + + def run(self, terms, variables=None, **kwargs): + """Generate list.""" + result = [] + if len(terms) > 0: + templar = Templar(loader=self._templar._loader) + data = [] + vars_so_far = set() + for index, term in enumerate(terms): + if not isinstance(term, Mapping): + raise AnsibleLookupError( + 'Parameter {index} must be a dictionary, got {type}'.format( + index=index, 
type=type(term))) + if len(term) != 1: + raise AnsibleLookupError( + 'Parameter {index} must be a one-element dictionary, got {count} elements'.format( + index=index, count=len(term))) + k, v = list(term.items())[0] + if k in vars_so_far: + raise AnsibleLookupError( + 'The variable {key!r} appears more than once'.format(key=k)) + vars_so_far.add(k) + if isinstance(v, string_types): + data.append((k, v, None)) + elif isinstance(v, (Sequence, Mapping)): + data.append((k, None, v)) + else: + raise AnsibleLookupError( + 'Parameter {key!r} (index {index}) must have a value of type string, dictionary or list, got type {type}'.format( + index=index, key=k, type=type(v))) + self.__process(result, data, 0, {}, templar, variables) + return result diff --git a/tests/integration/targets/lookup_dependent/aliases b/tests/integration/targets/lookup_dependent/aliases new file mode 100644 index 0000000000..45489be80c --- /dev/null +++ b/tests/integration/targets/lookup_dependent/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/lookup_dependent/tasks/main.yml b/tests/integration/targets/lookup_dependent/tasks/main.yml new file mode 100644 index 0000000000..0f1b8d34fb --- /dev/null +++ b/tests/integration/targets/lookup_dependent/tasks/main.yml @@ -0,0 +1,179 @@ +--- +- name: Test 1 + set_fact: + loop_result: >- + {{ + query('community.general.dependent', + dict(key1=[1, 2]), + dict(key2='[item.key1 + 3, item.key1 + 6]'), + dict(key3='[item.key1 + item.key2 * 10]')) + }} + +- name: Check result of Test 1 + assert: + that: + - loop_result == expected_result + vars: + expected_result: + - key1: 1 + key2: 4 + key3: 41 + - key1: 1 + key2: 7 + key3: 71 + - key1: 2 + key2: 5 + key3: 52 + - key1: 2 + key2: 8 + key3: 82 + +- name: Test 2 + set_fact: + loop_result: >- + {{ query('community.general.dependent', + dict([['a', [1, 2, 3]]]), + dict([['b', '[1, 
2, 3, 4] if item.a == 1 else [2, 3, 4] if item.a == 2 else [3, 4]']])) }} + # The last expression could have been `range(item.a, 5)`, but that's not supported by all Jinja2 versions used in CI + +- name: Check result of Test 2 + assert: + that: + - loop_result == expected_result + vars: + expected_result: + - a: 1 + b: 1 + - a: 1 + b: 2 + - a: 1 + b: 3 + - a: 1 + b: 4 + - a: 2 + b: 2 + - a: 2 + b: 3 + - a: 2 + b: 4 + - a: 3 + b: 3 + - a: 3 + b: 4 + +- name: Test 3 + debug: + var: item + with_community.general.dependent: + - var1: + a: + - 1 + - 2 + b: + - 3 + - 4 + - var2: 'item.var1.value' + - var3: 'dependent_lookup_test[item.var1.key ~ "_" ~ item.var2]' + loop_control: + label: "{{ [item.var1.key, item.var2, item.var3] }}" + register: dependent + vars: + dependent_lookup_test: + a_1: + - A + - B + a_2: + - C + b_3: + - D + b_4: + - E + - F + - G + +- name: Check result of Test 3 + assert: + that: + - (dependent.results | length) == 7 + - dependent.results[0].item.var1.key == "a" + - dependent.results[0].item.var2 == 1 + - dependent.results[0].item.var3 == "A" + - dependent.results[1].item.var1.key == "a" + - dependent.results[1].item.var2 == 1 + - dependent.results[1].item.var3 == "B" + - dependent.results[2].item.var1.key == "a" + - dependent.results[2].item.var2 == 2 + - dependent.results[2].item.var3 == "C" + - dependent.results[3].item.var1.key == "b" + - dependent.results[3].item.var2 == 3 + - dependent.results[3].item.var3 == "D" + - dependent.results[4].item.var1.key == "b" + - dependent.results[4].item.var2 == 4 + - dependent.results[4].item.var3 == "E" + - dependent.results[5].item.var1.key == "b" + - dependent.results[5].item.var2 == 4 + - dependent.results[5].item.var3 == "F" + - dependent.results[6].item.var1.key == "b" + - dependent.results[6].item.var2 == 4 + - dependent.results[6].item.var3 == "G" + +- name: "Test 4: template failure" + debug: + msg: "{{ item }}" + with_community.general.dependent: + - a: + - 1 + - 2 + - b: "[item.a + foo]" + 
ignore_errors: true + register: eval_error + +- name: Check result of Test 4 + assert: + that: + - eval_error is failed + - eval_error.msg.startswith("Caught \"'foo' is undefined\" while evaluating ") + +- name: "Test 5: same variable name reused" + debug: + msg: "{{ item }}" + with_community.general.dependent: + - a: x + - b: x + ignore_errors: true + register: eval_error + +- name: Check result of Test 5 + assert: + that: + - eval_error is failed + - eval_error.msg.startswith("Caught \"'x' is undefined\" while evaluating ") + +- name: "Test 6: multi-value dict" + debug: + msg: "{{ item }}" + with_community.general.dependent: + - a: x + b: x + ignore_errors: true + register: eval_error + +- name: Check result of Test 6 + assert: + that: + - eval_error is failed + - eval_error.msg == 'Parameter 0 must be a one-element dictionary, got 2 elements' + +- name: "Test 7: empty dict" + debug: + msg: "{{ item }}" + with_community.general.dependent: + - {} + ignore_errors: true + register: eval_error + +- name: Check result of Test 7 + assert: + that: + - eval_error is failed + - eval_error.msg == 'Parameter 0 must be a one-element dictionary, got 0 elements' diff --git a/tests/unit/plugins/lookup/test_dependent.py b/tests/unit/plugins/lookup/test_dependent.py new file mode 100644 index 0000000000..f2a31ff4b6 --- /dev/null +++ b/tests/unit/plugins/lookup/test_dependent.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# (c) 2020-2021, Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +from ansible_collections.community.internal_test_tools.tests.unit.compat.unittest import TestCase +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import ( + MagicMock, +) + +from ansible.plugins.loader import lookup_loader + + +class TestLookupModule(TestCase): + def setUp(self): + 
templar = MagicMock() + templar._loader = None + self.lookup = lookup_loader.get("community.general.dependent", templar=templar) + + def test_empty(self): + self.assertListEqual(self.lookup.run([], None), []) + + def test_simple(self): + self.assertListEqual( + self.lookup.run( + [ + {'a': '[1, 2]'}, + {'b': '[item.a + 3, item.a + 6]'}, + {'c': '[item.a + item.b * 10]'}, + ], + {}, + ), + [ + {'a': 1, 'b': 4, 'c': 41}, + {'a': 1, 'b': 7, 'c': 71}, + {'a': 2, 'b': 5, 'c': 52}, + {'a': 2, 'b': 8, 'c': 82}, + ], + ) From 9d46ccf1b2aacac8136432f0abb85ae65082d8a8 Mon Sep 17 00:00:00 2001 From: TrevorSquillario <72882537+TrevorSquillario@users.noreply.github.com> Date: Tue, 11 May 2021 11:30:09 -0600 Subject: [PATCH 0034/2828] modified redfish_config and idrac_redfish_config to skip incorrect attributes (#2334) * modified redfish_config and idrac_redfish_config to skip incorrect attributes Signed-off-by: Trevor Squillario Trevor_Squillario@Dell.com * modified redfish_utils.py and idrac_redfish_config.py to return empty warning message * modified redfish_config.py and idrac_redfish_config.py to use module.warn() * updated changelog fragment for pr 2334 --- ...dfish_config-skip-incorrect-attributes.yml | 4 +++ plugins/module_utils/redfish_utils.py | 28 ++++++++++++++----- .../redfish/idrac_redfish_config.py | 24 ++++++++++++---- .../redfish/redfish_config.py | 3 ++ 4 files changed, 47 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/2334-redfish_config-skip-incorrect-attributes.yml diff --git a/changelogs/fragments/2334-redfish_config-skip-incorrect-attributes.yml b/changelogs/fragments/2334-redfish_config-skip-incorrect-attributes.yml new file mode 100644 index 0000000000..2e609c43fc --- /dev/null +++ b/changelogs/fragments/2334-redfish_config-skip-incorrect-attributes.yml @@ -0,0 +1,4 @@ +minor_changes: + - redfish_utils module utils - modified set_bios_attributes function to skip invalid attribute instead of returning. 
Added skipped attributes to output (https://github.com/ansible-collections/community.general/issues/1995). + - idrac_redfish_config - modified set_manager_attributes function to skip invalid attribute instead of returning. Added skipped attributes to output. Modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995). + - redfish_config - modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995). diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index d8cc4061f8..df7011a0b4 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1671,19 +1671,31 @@ class RedfishUtils(object): # Make a copy of the attributes dict attrs_to_patch = dict(attributes) + # List to hold attributes not found + attrs_bad = {} # Check the attributes - for attr in attributes: - if attr not in data[u'Attributes']: - return {'ret': False, 'msg': "BIOS attribute %s not found" % attr} + for attr_name, attr_value in attributes.items(): + # Check if attribute exists + if attr_name not in data[u'Attributes']: + # Remove and proceed to next attribute if this isn't valid + attrs_bad.update({attr_name: attr_value}) + del attrs_to_patch[attr_name] + continue + # If already set to requested value, remove it from PATCH payload - if data[u'Attributes'][attr] == attributes[attr]: - del attrs_to_patch[attr] + if data[u'Attributes'][attr_name] == attributes[attr_name]: + del attrs_to_patch[attr_name] + + warning = "" + if attrs_bad: + warning = "Incorrect attributes %s" % (attrs_bad) # Return success w/ changed=False if no attrs need to be changed if not attrs_to_patch: return {'ret': True, 'changed': False, - 'msg': "BIOS attributes already set"} + 'msg': "BIOS attributes already set", + 'warning': warning} # Get the SettingsObject URI set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"] @@ 
-1693,7 +1705,9 @@ class RedfishUtils(object): response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) if response['ret'] is False: return response - return {'ret': True, 'changed': True, 'msg': "Modified BIOS attribute"} + return {'ret': True, 'changed': True, + 'msg': "Modified BIOS attributes %s" % (attrs_to_patch), + 'warning': warning} def set_boot_order(self, boot_list): if not boot_list: diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/plugins/modules/remote_management/redfish/idrac_redfish_config.py index e27ef6a2a6..b16401311b 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_config.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_config.py @@ -179,6 +179,7 @@ class IdracRedfishUtils(RedfishUtils): attrs_to_patch = {} attrs_skipped = {} + attrs_bad = {} # Store attrs which were not found in the system # Search for key entry and extract URI from it response = self.get_request(self.root_uri + manager_uri + "/" + key) @@ -189,13 +190,15 @@ class IdracRedfishUtils(RedfishUtils): if key not in data: return {'ret': False, - 'msg': "%s: Key %s not found" % (command, key)} + 'msg': "%s: Key %s not found" % (command, key), + 'warning': ""} for attr_name, attr_value in attributes.items(): # Check if attribute exists if attr_name not in data[u'Attributes']: - return {'ret': False, - 'msg': "%s: Manager attribute %s not found" % (command, attr_name)} + # Skip and proceed to next attribute if this isn't valid + attrs_bad.update({attr_name: attr_value}) + continue # Find out if value is already set to what we want. If yes, exclude # those attributes @@ -204,16 +207,23 @@ class IdracRedfishUtils(RedfishUtils): else: attrs_to_patch.update({attr_name: attr_value}) + warning = "" + if attrs_bad: + warning = "Incorrect attributes %s" % (attrs_bad) + if not attrs_to_patch: return {'ret': True, 'changed': False, - 'msg': "Manager attributes already set"} + 'msg': "No changes made. 
Manager attributes already set.", + 'warning': warning} payload = {"Attributes": attrs_to_patch} response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload) if response['ret'] is False: return response + return {'ret': True, 'changed': True, - 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch)} + 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch), + 'warning': warning} CATEGORY_COMMANDS_ALL = { @@ -221,6 +231,7 @@ CATEGORY_COMMANDS_ALL = { "SetSystemAttributes"] } + # list of mutually exclusive commands for a category CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = { "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes", @@ -308,6 +319,9 @@ def main(): # Return data back or fail with proper message if result['ret'] is True: + if result.get('warning'): + module.warn(to_native(result['warning'])) + module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) else: module.fail_json(msg=to_native(result['msg'])) diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py index 5c1df16c4e..e084c670f4 100644 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ b/plugins/modules/remote_management/redfish/redfish_config.py @@ -321,6 +321,9 @@ def main(): # Return data back or fail with proper message if result['ret'] is True: + if result.get('warning'): + module.warn(to_native(result['warning'])) + module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) else: module.fail_json(msg=to_native(result['msg'])) From d22dd5056e62d6b2b8929f732b453214354253b9 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 12 May 2021 05:31:10 +1200 Subject: [PATCH 0035/2828] module_helper.py Breakdown (#2393) * break down of module_helper into smaller pieces, keeping compatibility * removed abc.ABC (py3 only) from code + fixed reference to vars.py * 
multiple changes: - mh.base - moved more functionalities to ModuleHelperBase - mh.mixins.(cmd, state) - CmdMixin no longer inherits from ModuleHelperBase - mh.mixins.deps - DependencyMixin now overrides run() method to test dependency - mh.mixins.vars - created class VarsMixin - mh.module_helper - moved functions to base class, added VarsMixin - module_helper - importing AnsibleModule as well, for backward compatibility in test * removed unnecessary __all__ * make pylint happy * PR adjustments + bot config + changelog frag * Update plugins/module_utils/mh/module_helper.py Co-authored-by: Felix Fontein * Update plugins/module_utils/mh/module_helper.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 3 + .../2393-module_helper-breakdown.yml | 2 + plugins/module_utils/mh/__init__.py | 0 plugins/module_utils/mh/base.py | 56 ++ plugins/module_utils/mh/deco.py | 54 ++ plugins/module_utils/mh/exceptions.py | 22 + plugins/module_utils/mh/mixins/__init__.py | 0 plugins/module_utils/mh/mixins/cmd.py | 167 ++++++ plugins/module_utils/mh/mixins/deps.py | 58 ++ plugins/module_utils/mh/mixins/state.py | 39 ++ plugins/module_utils/mh/mixins/vars.py | 132 +++++ plugins/module_utils/mh/module_helper.py | 79 +++ plugins/module_utils/module_helper.py | 511 +----------------- .../module_utils/test_module_helper.py | 6 +- .../plugins/modules/system/test_xfconf.py | 4 +- 15 files changed, 625 insertions(+), 508 deletions(-) create mode 100644 changelogs/fragments/2393-module_helper-breakdown.yml create mode 100644 plugins/module_utils/mh/__init__.py create mode 100644 plugins/module_utils/mh/base.py create mode 100644 plugins/module_utils/mh/deco.py create mode 100644 plugins/module_utils/mh/exceptions.py create mode 100644 plugins/module_utils/mh/mixins/__init__.py create mode 100644 plugins/module_utils/mh/mixins/cmd.py create mode 100644 plugins/module_utils/mh/mixins/deps.py create mode 100644 plugins/module_utils/mh/mixins/state.py create mode 
100644 plugins/module_utils/mh/mixins/vars.py create mode 100644 plugins/module_utils/mh/module_helper.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index f27c96e049..cdef437f90 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -142,6 +142,9 @@ files: $module_utils/memset.py: maintainers: glitchcrab labels: cloud memset + $module_utils/mh/: + maintainers: russoz + labels: module_helper $module_utils/module_helper.py: maintainers: russoz labels: module_helper diff --git a/changelogs/fragments/2393-module_helper-breakdown.yml b/changelogs/fragments/2393-module_helper-breakdown.yml new file mode 100644 index 0000000000..472a1c3569 --- /dev/null +++ b/changelogs/fragments/2393-module_helper-breakdown.yml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper module utils - break down of the long file into smaller pieces (https://github.com/ansible-collections/community.general/pull/2393). diff --git a/plugins/module_utils/mh/__init__.py b/plugins/module_utils/mh/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py new file mode 100644 index 0000000000..2a2dd88f7b --- /dev/null +++ b/plugins/module_utils/mh/base.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException as _MHE +from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception + + +class ModuleHelperBase(object): + module = None + ModuleHelperException = _MHE + + def __init__(self, module=None): + self._changed = False + + if module: + self.module = 
module + + if not isinstance(self.module, AnsibleModule): + self.module = AnsibleModule(**self.module) + + def __init_module__(self): + pass + + def __run__(self): + raise NotImplementedError() + + def __quit_module__(self): + pass + + @property + def changed(self): + return self._changed + + @changed.setter + def changed(self, value): + self._changed = value + + def has_changed(self): + raise NotImplementedError() + + @property + def output(self): + raise NotImplementedError() + + @module_fails_on_exception + def run(self): + self.__init_module__() + self.__run__() + self.__quit_module__() + self.module.exit_json(changed=self.has_changed(), **self.output) diff --git a/plugins/module_utils/mh/deco.py b/plugins/module_utils/mh/deco.py new file mode 100644 index 0000000000..91f0d97744 --- /dev/null +++ b/plugins/module_utils/mh/deco.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import traceback +from functools import wraps + +from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException + + +def cause_changes(on_success=None, on_failure=None): + + def deco(func): + if on_success is None and on_failure is None: + return func + + @wraps(func) + def wrapper(*args, **kwargs): + try: + self = args[0] + func(*args, **kwargs) + if on_success is not None: + self.changed = on_success + except Exception: + if on_failure is not None: + self.changed = on_failure + raise + + return wrapper + + return deco + + +def module_fails_on_exception(func): + @wraps(func) + def wrapper(self, *args, **kwargs): + try: + func(self, *args, **kwargs) + except SystemExit: + raise + except ModuleHelperException as e: + if e.update_output: + self.update_output(e.update_output) + 
self.module.fail_json(msg=e.msg, exception=traceback.format_exc(), + output=self.output, vars=self.vars.output(), **self.output) + except Exception as e: + msg = "Module failed with exception: {0}".format(str(e).strip()) + self.module.fail_json(msg=msg, exception=traceback.format_exc(), + output=self.output, vars=self.vars.output(), **self.output) + return wrapper diff --git a/plugins/module_utils/mh/exceptions.py b/plugins/module_utils/mh/exceptions.py new file mode 100644 index 0000000000..558dcca05f --- /dev/null +++ b/plugins/module_utils/mh/exceptions.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleHelperException(Exception): + @staticmethod + def _get_remove(key, kwargs): + if key in kwargs: + result = kwargs[key] + del kwargs[key] + return result + return None + + def __init__(self, *args, **kwargs): + self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self) + self.update_output = self._get_remove('update_output', kwargs) or {} + super(ModuleHelperException, self).__init__(*args) diff --git a/plugins/module_utils/mh/mixins/__init__.py b/plugins/module_utils/mh/mixins/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py new file mode 100644 index 0000000000..fc66638f69 --- /dev/null +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from 
functools import partial + + +class ArgFormat(object): + """ + Argument formatter for use as a command line parameter. Used in CmdMixin. + """ + BOOLEAN = 0 + PRINTF = 1 + FORMAT = 2 + + @staticmethod + def stars_deco(num): + if num == 1: + def deco(f): + return lambda v: f(*v) + return deco + elif num == 2: + def deco(f): + return lambda v: f(**v) + return deco + + return lambda f: f + + def __init__(self, name, fmt=None, style=FORMAT, stars=0): + """ + Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for + the CLI command execution. + :param name: Name of the argument to be formatted + :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that + :param style: Whether arg_format (as str) should use printf-style formatting. + Ignored if arg_format is None or not a str (should be callable). + :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value + """ + def printf_fmt(_fmt, v): + try: + return [_fmt % v] + except TypeError as e: + if e.args[0] != 'not all arguments converted during string formatting': + raise + return [_fmt] + + _fmts = { + ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []), + ArgFormat.PRINTF: printf_fmt, + ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)], + } + + self.name = name + self.stars = stars + + if fmt is None: + fmt = "{0}" + style = ArgFormat.FORMAT + + if isinstance(fmt, str): + func = _fmts[style] + self.arg_format = partial(func, fmt) + elif isinstance(fmt, list) or isinstance(fmt, tuple): + self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt] + elif hasattr(fmt, '__call__'): + self.arg_format = fmt + else: + raise TypeError('Parameter fmt must be either: a string, a list/tuple of ' + 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt)) + + if stars: + self.arg_format = (self.stars_deco(stars))(self.arg_format) + + def to_text(self, 
value): + if value is None: + return [] + func = self.arg_format + return [str(p) for p in func(value)] + + +class CmdMixin(object): + """ + Mixin for mapping module options to running a CLI command with its arguments. + """ + command = None + command_args_formats = {} + run_command_fixed_options = {} + check_rc = False + force_lang = "C" + + @property + def module_formats(self): + result = {} + for param in self.module.params.keys(): + result[param] = ArgFormat(param) + return result + + @property + def custom_formats(self): + result = {} + for param, fmt_spec in self.command_args_formats.items(): + result[param] = ArgFormat(param, **fmt_spec) + return result + + def _calculate_args(self, extra_params=None, params=None): + def add_arg_formatted_param(_cmd_args, arg_format, _value): + args = list(arg_format.to_text(_value)) + return _cmd_args + args + + def find_format(_param): + return self.custom_formats.get(_param, self.module_formats.get(_param)) + + extra_params = extra_params or dict() + cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command) + try: + cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True) + except ValueError: + pass + param_list = params if params else self.module.params.keys() + + for param in param_list: + if isinstance(param, dict): + if len(param) != 1: + raise self.ModuleHelperException("run_command parameter as a dict must " + "contain only one key: {0}".format(param)) + _param = list(param.keys())[0] + fmt = find_format(_param) + value = param[_param] + elif isinstance(param, str): + if param in self.module.argument_spec: + fmt = find_format(param) + value = self.module.params[param] + elif param in extra_params: + fmt = find_format(param) + value = extra_params[param] + else: + self.module.deprecate("Cannot determine value for parameter: {0}. 
" + "From version 4.0.0 onwards this will generate an exception".format(param), + version="4.0.0", collection_name="community.general") + continue + + else: + raise self.ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param)) + cmd_args = add_arg_formatted_param(cmd_args, fmt, value) + + return cmd_args + + def process_command_output(self, rc, out, err): + return rc, out, err + + def run_command(self, extra_params=None, params=None, *args, **kwargs): + self.vars.cmd_args = self._calculate_args(extra_params, params) + options = dict(self.run_command_fixed_options) + env_update = dict(options.get('environ_update', {})) + options['check_rc'] = options.get('check_rc', self.check_rc) + if self.force_lang: + env_update.update({'LANGUAGE': self.force_lang}) + self.update_output(force_lang=self.force_lang) + options['environ_update'] = env_update + options.update(kwargs) + rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options) + self.update_output(rc=rc, stdout=out, stderr=err) + return self.process_command_output(rc, out, err) diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py new file mode 100644 index 0000000000..1c6c9ae484 --- /dev/null +++ b/plugins/module_utils/mh/mixins/deps.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import traceback + +from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase +from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception + + +class DependencyCtxMgr(object): + def __init__(self, name, msg=None): + self.name = name + self.msg = msg + self.has_it = False + self.exc_type = None + 
self.exc_val = None + self.exc_tb = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.has_it = exc_type is None + self.exc_type = exc_type + self.exc_val = exc_val + self.exc_tb = exc_tb + return not self.has_it + + @property + def text(self): + return self.msg or str(self.exc_val) + + +class DependencyMixin(ModuleHelperBase): + _dependencies = [] + + @classmethod + def dependency(cls, name, msg): + cls._dependencies.append(DependencyCtxMgr(name, msg)) + return cls._dependencies[-1] + + def fail_on_missing_deps(self): + for d in self._dependencies: + if not d.has_it: + self.module.fail_json(changed=False, + exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)), + msg=d.text, + **self.output) + + @module_fails_on_exception + def run(self): + self.fail_on_missing_deps() + super(DependencyMixin, self).run() diff --git a/plugins/module_utils/mh/mixins/state.py b/plugins/module_utils/mh/mixins/state.py new file mode 100644 index 0000000000..b946090ac9 --- /dev/null +++ b/plugins/module_utils/mh/mixins/state.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class StateMixin(object): + state_param = 'state' + default_state = None + + def _state(self): + state = self.module.params.get(self.state_param) + return self.default_state if state is None else state + + def _method(self, state): + return "{0}_{1}".format(self.state_param, state) + + def __run__(self): + state = self._state() + self.vars.state = state + + # resolve aliases + if state not in self.module.params: + aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])] + if aliased: + state = aliased[0] + 
self.vars.effective_state = state + + method = self._method(state) + if not hasattr(self, method): + return self.__state_fallback__() + func = getattr(self, method) + return func() + + def __state_fallback__(self): + raise ValueError("Cannot find method: {0}".format(self._method(self._state()))) diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py new file mode 100644 index 0000000000..7c936e04ac --- /dev/null +++ b/plugins/module_utils/mh/mixins/vars.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class VarMeta(object): + NOTHING = object() + + def __init__(self, diff=False, output=True, change=None, fact=False): + self.init = False + self.initial_value = None + self.value = None + + self.diff = diff + self.change = diff if change is None else change + self.output = output + self.fact = fact + + def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING): + if diff is not None: + self.diff = diff + if output is not None: + self.output = output + if change is not None: + self.change = change + if fact is not None: + self.fact = fact + if initial_value is not self.NOTHING: + self.initial_value = initial_value + + def set_value(self, value): + if not self.init: + self.initial_value = value + self.init = True + self.value = value + return self + + @property + def has_changed(self): + return self.change and (self.initial_value != self.value) + + @property + def diff_result(self): + return None if not (self.diff and self.has_changed) else { + 'before': self.initial_value, + 'after': self.value, + } + + def __str__(self): + return "".format( + self.value, self.initial_value, self.diff, self.output, self.change + ) + + +class VarDict(object): 
+ def __init__(self): + self._data = dict() + self._meta = dict() + + def __getitem__(self, item): + return self._data[item] + + def __setitem__(self, key, value): + self.set(key, value) + + def __getattr__(self, item): + try: + return self._data[item] + except KeyError: + return getattr(self._data, item) + + def __setattr__(self, key, value): + if key in ('_data', '_meta'): + super(VarDict, self).__setattr__(key, value) + else: + self.set(key, value) + + def meta(self, name): + return self._meta[name] + + def set_meta(self, name, **kwargs): + self.meta(name).set(**kwargs) + + def set(self, name, value, **kwargs): + if name in ('_data', '_meta'): + raise ValueError("Names _data and _meta are reserved for use by ModuleHelper") + self._data[name] = value + if name in self._meta: + meta = self.meta(name) + else: + meta = VarMeta(**kwargs) + meta.set_value(value) + self._meta[name] = meta + + def output(self): + return dict((k, v) for k, v in self._data.items() if self.meta(k).output) + + def diff(self): + diff_results = [(k, self.meta(k).diff_result) for k in self._data] + diff_results = [dr for dr in diff_results if dr[1] is not None] + if diff_results: + before = dict((dr[0], dr[1]['before']) for dr in diff_results) + after = dict((dr[0], dr[1]['after']) for dr in diff_results) + return {'before': before, 'after': after} + return None + + def facts(self): + facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact) + return facts_result if facts_result else None + + def change_vars(self): + return [v for v in self._data if self.meta(v).change] + + def has_changed(self, v): + return self._meta[v].has_changed + + +class VarsMixin(object): + + def __init__(self, module=None): + self.vars = VarDict() + super(VarsMixin, self).__init__(module) + + def update_vars(self, meta=None, **kwargs): + if meta is None: + meta = {} + for k, v in kwargs.items(): + self.vars.set(k, v, **meta) diff --git a/plugins/module_utils/mh/module_helper.py 
b/plugins/module_utils/mh/module_helper.py new file mode 100644 index 0000000000..b27b60df9a --- /dev/null +++ b/plugins/module_utils/mh/module_helper.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.common.dict_transformations import dict_merge + +from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule +from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin +from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin +from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin +from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD + + +class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase): + _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed') + facts_name = None + output_params = () + diff_params = () + change_params = () + facts_params = () + + VarDict = _VD # for backward compatibility, will be deprecated at some point + + def __init__(self, module=None): + super(ModuleHelper, self).__init__(module) + for name, value in self.module.params.items(): + self.vars.set( + name, value, + diff=name in self.diff_params, + output=name in self.output_params, + change=None if not self.change_params else name in self.change_params, + fact=name in self.facts_params, + ) + + def update_output(self, **kwargs): + self.update_vars(meta={"output": True}, **kwargs) + + def update_facts(self, **kwargs): + self.update_vars(meta={"fact": True}, **kwargs) + + def _vars_changed(self): + return any(self.vars.has_changed(v) for v in 
self.vars.change_vars()) + + def has_changed(self): + return self.changed or self._vars_changed() + + @property + def output(self): + result = dict(self.vars.output()) + if self.facts_name: + facts = self.vars.facts() + if facts is not None: + result['ansible_facts'] = {self.facts_name: facts} + if self.module._diff: + diff = result.get('diff', {}) + vars_diff = self.vars.diff() or {} + result['diff'] = dict_merge(dict(diff), vars_diff) + + for varname in result: + if varname in self._output_conflict_list: + result["_" + varname] = result[varname] + del result[varname] + return result + + +class StateModuleHelper(StateMixin, ModuleHelper): + pass + + +class CmdModuleHelper(CmdMixin, ModuleHelper): + pass + + +class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper): + pass diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index d241eba5af..a6b35bdd33 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -6,506 +6,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -from functools import partial, wraps -import traceback -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.dict_transformations import dict_merge - - -class ModuleHelperException(Exception): - @staticmethod - def _get_remove(key, kwargs): - if key in kwargs: - result = kwargs[key] - del kwargs[key] - return result - return None - - def __init__(self, *args, **kwargs): - self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self) - self.update_output = self._get_remove('update_output', kwargs) or {} - super(ModuleHelperException, self).__init__(*args) - - -class ArgFormat(object): - """ - Argument formatter for use as a command line parameter. Used in CmdMixin. 
- """ - BOOLEAN = 0 - PRINTF = 1 - FORMAT = 2 - - @staticmethod - def stars_deco(num): - if num == 1: - def deco(f): - return lambda v: f(*v) - return deco - elif num == 2: - def deco(f): - return lambda v: f(**v) - return deco - - return lambda f: f - - def __init__(self, name, fmt=None, style=FORMAT, stars=0): - """ - Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for - the CLI command execution. - :param name: Name of the argument to be formatted - :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that - :param style: Whether arg_format (as str) should use printf-style formatting. - Ignored if arg_format is None or not a str (should be callable). - :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value - """ - def printf_fmt(_fmt, v): - try: - return [_fmt % v] - except TypeError as e: - if e.args[0] != 'not all arguments converted during string formatting': - raise - return [_fmt] - - _fmts = { - ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []), - ArgFormat.PRINTF: printf_fmt, - ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)], - } - - self.name = name - self.stars = stars - - if fmt is None: - fmt = "{0}" - style = ArgFormat.FORMAT - - if isinstance(fmt, str): - func = _fmts[style] - self.arg_format = partial(func, fmt) - elif isinstance(fmt, list) or isinstance(fmt, tuple): - self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt] - elif hasattr(fmt, '__call__'): - self.arg_format = fmt - else: - raise TypeError('Parameter fmt must be either: a string, a list/tuple of ' - 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt)) - - if stars: - self.arg_format = (self.stars_deco(stars))(self.arg_format) - - def to_text(self, value): - if value is None: - return [] - func = self.arg_format - return [str(p) for p in func(value)] - - -def 
cause_changes(on_success=None, on_failure=None): - - def deco(func): - if on_success is None and on_failure is None: - return func - - @wraps(func) - def wrapper(*args, **kwargs): - try: - self = args[0] - func(*args, **kwargs) - if on_success is not None: - self.changed = on_success - except Exception: - if on_failure is not None: - self.changed = on_failure - raise - - return wrapper - - return deco - - -def module_fails_on_exception(func): - @wraps(func) - def wrapper(self, *args, **kwargs): - try: - func(self, *args, **kwargs) - except SystemExit: - raise - except ModuleHelperException as e: - if e.update_output: - self.update_output(e.update_output) - self.module.fail_json(msg=e.msg, exception=traceback.format_exc(), - output=self.output, vars=self.vars.output(), **self.output) - except Exception as e: - msg = "Module failed with exception: {0}".format(str(e).strip()) - self.module.fail_json(msg=msg, exception=traceback.format_exc(), - output=self.output, vars=self.vars.output(), **self.output) - return wrapper - - -class DependencyCtxMgr(object): - def __init__(self, name, msg=None): - self.name = name - self.msg = msg - self.has_it = False - self.exc_type = None - self.exc_val = None - self.exc_tb = None - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.has_it = exc_type is None - self.exc_type = exc_type - self.exc_val = exc_val - self.exc_tb = exc_tb - return not self.has_it - - @property - def text(self): - return self.msg or str(self.exc_val) - - -class VarMeta(object): - NOTHING = object() - - def __init__(self, diff=False, output=True, change=None, fact=False): - self.init = False - self.initial_value = None - self.value = None - - self.diff = diff - self.change = diff if change is None else change - self.output = output - self.fact = fact - - def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING): - if diff is not None: - self.diff = diff - if output is not None: - 
self.output = output - if change is not None: - self.change = change - if fact is not None: - self.fact = fact - if initial_value is not self.NOTHING: - self.initial_value = initial_value - - def set_value(self, value): - if not self.init: - self.initial_value = value - self.init = True - self.value = value - return self - - @property - def has_changed(self): - return self.change and (self.initial_value != self.value) - - @property - def diff_result(self): - return None if not (self.diff and self.has_changed) else { - 'before': self.initial_value, - 'after': self.value, - } - - def __str__(self): - return "".format( - self.value, self.initial_value, self.diff, self.output, self.change - ) - - -class ModuleHelper(object): - _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed') - _dependencies = [] - module = None - facts_name = None - output_params = () - diff_params = () - change_params = () - facts_params = () - - class VarDict(object): - def __init__(self): - self._data = dict() - self._meta = dict() - - def __getitem__(self, item): - return self._data[item] - - def __setitem__(self, key, value): - self.set(key, value) - - def __getattr__(self, item): - try: - return self._data[item] - except KeyError: - return getattr(self._data, item) - - def __setattr__(self, key, value): - if key in ('_data', '_meta'): - super(ModuleHelper.VarDict, self).__setattr__(key, value) - else: - self.set(key, value) - - def meta(self, name): - return self._meta[name] - - def set_meta(self, name, **kwargs): - self.meta(name).set(**kwargs) - - def set(self, name, value, **kwargs): - if name in ('_data', '_meta'): - raise ValueError("Names _data and _meta are reserved for use by ModuleHelper") - self._data[name] = value - if name in self._meta: - meta = self.meta(name) - else: - meta = VarMeta(**kwargs) - meta.set_value(value) - self._meta[name] = meta - - def output(self): - return dict((k, v) for k, v in self._data.items() if self.meta(k).output) - - def 
diff(self): - diff_results = [(k, self.meta(k).diff_result) for k in self._data] - diff_results = [dr for dr in diff_results if dr[1] is not None] - if diff_results: - before = dict((dr[0], dr[1]['before']) for dr in diff_results) - after = dict((dr[0], dr[1]['after']) for dr in diff_results) - return {'before': before, 'after': after} - return None - - def facts(self): - facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact) - return facts_result if facts_result else None - - def change_vars(self): - return [v for v in self._data if self.meta(v).change] - - def has_changed(self, v): - return self._meta[v].has_changed - - def __init__(self, module=None): - self.vars = ModuleHelper.VarDict() - self._changed = False - - if module: - self.module = module - - if not isinstance(self.module, AnsibleModule): - self.module = AnsibleModule(**self.module) - - for name, value in self.module.params.items(): - self.vars.set( - name, value, - diff=name in self.diff_params, - output=name in self.output_params, - change=None if not self.change_params else name in self.change_params, - fact=name in self.facts_params, - ) - - def update_vars(self, meta=None, **kwargs): - if meta is None: - meta = {} - for k, v in kwargs.items(): - self.vars.set(k, v, **meta) - - def update_output(self, **kwargs): - self.update_vars(meta={"output": True}, **kwargs) - - def update_facts(self, **kwargs): - self.update_vars(meta={"fact": True}, **kwargs) - - def __init_module__(self): - pass - - def __run__(self): - raise NotImplementedError() - - def __quit_module__(self): - pass - - def _vars_changed(self): - return any(self.vars.has_changed(v) for v in self.vars.change_vars()) - - @property - def changed(self): - return self._changed - - @changed.setter - def changed(self, value): - self._changed = value - - def has_changed(self): - return self.changed or self._vars_changed() - - @property - def output(self): - result = dict(self.vars.output()) - if self.facts_name: - facts 
= self.vars.facts() - if facts is not None: - result['ansible_facts'] = {self.facts_name: facts} - if self.module._diff: - diff = result.get('diff', {}) - vars_diff = self.vars.diff() or {} - result['diff'] = dict_merge(dict(diff), vars_diff) - - for varname in result: - if varname in self._output_conflict_list: - result["_" + varname] = result[varname] - del result[varname] - return result - - @module_fails_on_exception - def run(self): - self.fail_on_missing_deps() - self.__init_module__() - self.__run__() - self.__quit_module__() - self.module.exit_json(changed=self.has_changed(), **self.output) - - @classmethod - def dependency(cls, name, msg): - cls._dependencies.append(DependencyCtxMgr(name, msg)) - return cls._dependencies[-1] - - def fail_on_missing_deps(self): - for d in self._dependencies: - if not d.has_it: - self.module.fail_json(changed=False, - exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)), - msg=d.text, - **self.output) - - -class StateMixin(object): - state_param = 'state' - default_state = None - - def _state(self): - state = self.module.params.get(self.state_param) - return self.default_state if state is None else state - - def _method(self, state): - return "{0}_{1}".format(self.state_param, state) - - def __run__(self): - state = self._state() - self.vars.state = state - - # resolve aliases - if state not in self.module.params: - aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])] - if aliased: - state = aliased[0] - self.vars.effective_state = state - - method = self._method(state) - if not hasattr(self, method): - return self.__state_fallback__() - func = getattr(self, method) - return func() - - def __state_fallback__(self): - raise ValueError("Cannot find method: {0}".format(self._method(self._state()))) - - -class CmdMixin(object): - """ - Mixin for mapping module options to running a CLI command with its arguments. 
- """ - command = None - command_args_formats = {} - run_command_fixed_options = {} - check_rc = False - force_lang = "C" - - @property - def module_formats(self): - result = {} - for param in self.module.params.keys(): - result[param] = ArgFormat(param) - return result - - @property - def custom_formats(self): - result = {} - for param, fmt_spec in self.command_args_formats.items(): - result[param] = ArgFormat(param, **fmt_spec) - return result - - def _calculate_args(self, extra_params=None, params=None): - def add_arg_formatted_param(_cmd_args, arg_format, _value): - args = list(arg_format.to_text(_value)) - return _cmd_args + args - - def find_format(_param): - return self.custom_formats.get(_param, self.module_formats.get(_param)) - - extra_params = extra_params or dict() - cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command) - try: - cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True) - except ValueError: - pass - param_list = params if params else self.module.params.keys() - - for param in param_list: - if isinstance(param, dict): - if len(param) != 1: - raise ModuleHelperException("run_command parameter as a dict must " - "contain only one key: {0}".format(param)) - _param = list(param.keys())[0] - fmt = find_format(_param) - value = param[_param] - elif isinstance(param, str): - if param in self.module.argument_spec: - fmt = find_format(param) - value = self.module.params[param] - elif param in extra_params: - fmt = find_format(param) - value = extra_params[param] - else: - self.module.deprecate("Cannot determine value for parameter: {0}. 
" - "From version 4.0.0 onwards this will generate an exception".format(param), - version="4.0.0", collection_name="community.general") - continue - - else: - raise ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param)) - cmd_args = add_arg_formatted_param(cmd_args, fmt, value) - - return cmd_args - - def process_command_output(self, rc, out, err): - return rc, out, err - - def run_command(self, extra_params=None, params=None, *args, **kwargs): - self.vars.cmd_args = self._calculate_args(extra_params, params) - options = dict(self.run_command_fixed_options) - env_update = dict(options.get('environ_update', {})) - options['check_rc'] = options.get('check_rc', self.check_rc) - if self.force_lang: - env_update.update({'LANGUAGE': self.force_lang}) - self.update_output(force_lang=self.force_lang) - options['environ_update'] = env_update - options.update(kwargs) - rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options) - self.update_output(rc=rc, stdout=out, stderr=err) - return self.process_command_output(rc, out, err) - - -class StateModuleHelper(StateMixin, ModuleHelper): - pass - - -class CmdModuleHelper(CmdMixin, ModuleHelper): - pass - - -class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper): - pass +from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( + ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule +) +from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat +from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin +from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr +from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException +from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, 
module_fails_on_exception +from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index 6f77ca7662..6452784182 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -6,12 +6,10 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from collections import namedtuple - import pytest from ansible_collections.community.general.plugins.module_utils.module_helper import ( - ArgFormat, DependencyCtxMgr, ModuleHelper, VarMeta, cause_changes + ArgFormat, DependencyCtxMgr, VarMeta, VarDict, cause_changes ) @@ -144,7 +142,7 @@ def test_variable_meta_diff(): def test_vardict(): - vd = ModuleHelper.VarDict() + vd = VarDict() vd.set('a', 123) assert vd['a'] == 123 assert vd.a == 123 diff --git a/tests/unit/plugins/modules/system/test_xfconf.py b/tests/unit/plugins/modules/system/test_xfconf.py index 1002952ce3..dee387bd7d 100644 --- a/tests/unit/plugins/modules/system/test_xfconf.py +++ b/tests/unit/plugins/modules/system/test_xfconf.py @@ -21,7 +21,7 @@ def patch_xfconf(mocker): """ Function used for mocking some parts of redhat_subscribtion module """ - mocker.patch('ansible_collections.community.general.plugins.module_utils.module_helper.AnsibleModule.get_bin_path', + mocker.patch('ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.get_bin_path', return_value='/testbin/xfconf-query') @@ -332,7 +332,7 @@ def test_xfconf(mocker, capfd, patch_xfconf, testcase): # Mock function used for running commands first call_results = [item[2] for item in testcase['run_command.calls']] mock_run_command = mocker.patch( - 'ansible_collections.community.general.plugins.module_utils.module_helper.AnsibleModule.run_command', + 
'ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.run_command', side_effect=call_results) # Try to run test case From 0912e8cc7ab1bc2b70f6552e84b3430fb7cd1dfc Mon Sep 17 00:00:00 2001 From: CWollinger Date: Tue, 11 May 2021 19:31:46 +0200 Subject: [PATCH 0036/2828] discord.py: Add new module for discord notifications (#2398) * first push: add discord module and test for notifications * fix the yaml docs and edit the result output * add link * fix link * fix docs and remove required=False in argument spec * add elements specd and more info about embeds * called str... * elements for embeds oc. * fix typo's in description and set checkmode to false * edit docs and module return * support checkmode with get method * fix unit test * handle exception and add new example for embeds * quote line * fix typos * fix yaml --- plugins/modules/discord.py | 1 + plugins/modules/notification/discord.py | 215 ++++++++++++++++++ .../modules/notification/test_discord.py | 103 +++++++++ 3 files changed, 319 insertions(+) create mode 120000 plugins/modules/discord.py create mode 100644 plugins/modules/notification/discord.py create mode 100644 tests/unit/plugins/modules/notification/test_discord.py diff --git a/plugins/modules/discord.py b/plugins/modules/discord.py new file mode 120000 index 0000000000..1acf222f94 --- /dev/null +++ b/plugins/modules/discord.py @@ -0,0 +1 @@ +./notification/discord.py \ No newline at end of file diff --git a/plugins/modules/notification/discord.py b/plugins/modules/notification/discord.py new file mode 100644 index 0000000000..27dc6fc85c --- /dev/null +++ b/plugins/modules/notification/discord.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Christian Wollinger +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- 
+module: discord +short_description: Send Discord messages +version_added: 3.1.0 +description: + - Sends a message to a Discord channel using the Discord webhook API. +author: Christian Wollinger (@cwollinger) +seealso: + - name: API documentation + description: Documentation for Discord API + link: https://discord.com/developers/docs/resources/webhook#execute-webhook +options: + webhook_id: + description: + - The webhook ID. + - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." + required: yes + type: str + webhook_token: + description: + - The webhook token. + - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." + required: yes + type: str + content: + description: + - Content of the message to the Discord channel. + - At least one of I(content) and I(embeds) must be specified. + type: str + username: + description: + - Overrides the default username of the webhook. + type: str + avatar_url: + description: + - Overrides the default avatar of the webhook. + type: str + tts: + description: + - Set this to C(true) if this is a TTS (Text to Speech) message. + type: bool + default: false + embeds: + description: + - Send messages as Embeds to the Discord channel. + - Embeds can have a colored border, embedded images, text fields and more. + - "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)" + - At least one of I(content) and I(embeds) must be specified. 
+ type: list + elements: dict +''' + +EXAMPLES = """ +- name: Send a message to the Discord channel + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + content: "This is a message from ansible" + +- name: Send a message to the Discord channel with specific username and avatar + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + content: "This is a message from ansible" + username: Ansible + avatar_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + +- name: Send a embedded message to the Discord channel + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + embeds: + - title: "Embedded message" + description: "This is an embedded message" + footer: + text: "Author: Ansible" + image: + url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + +- name: Send two embedded messages + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + embeds: + - title: "First message" + description: "This is my first embedded message" + footer: + text: "Author: Ansible" + image: + url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + - title: "Second message" + description: "This is my first second message" + footer: + text: "Author: Ansible" + icon_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + fields: + - name: "Field 1" + value: "Value of my first field" + - name: "Field 2" + value: "Value of my second field" + timestamp: "{{ ansible_date_time.iso8601 }}" +""" + +RETURN = """ +http_code: + description: + - Response Code returned by Discord API. 
+ returned: always + type: int + sample: 204 +""" + +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.basic import AnsibleModule + + +def discord_check_mode(module): + + webhook_id = module.params['webhook_id'] + webhook_token = module.params['webhook_token'] + + headers = { + 'content-type': 'application/json' + } + + url = "https://discord.com/api/webhooks/%s/%s" % ( + webhook_id, webhook_token) + + response, info = fetch_url(module, url, method='GET', headers=headers) + return response, info + + +def discord_text_msg(module): + + webhook_id = module.params['webhook_id'] + webhook_token = module.params['webhook_token'] + content = module.params['content'] + user = module.params['username'] + avatar_url = module.params['avatar_url'] + tts = module.params['tts'] + embeds = module.params['embeds'] + + headers = { + 'content-type': 'application/json' + } + + url = "https://discord.com/api/webhooks/%s/%s" % ( + webhook_id, webhook_token) + + payload = { + 'content': content, + 'username': user, + 'avatar_url': avatar_url, + 'tts': tts, + 'embeds': embeds, + } + + payload = module.jsonify(payload) + + response, info = fetch_url(module, url, data=payload, headers=headers, method='POST') + return response, info + + +def main(): + module = AnsibleModule( + argument_spec=dict( + webhook_id=dict(type='str', required=True), + webhook_token=dict(type='str', required=True, no_log=True), + content=dict(type='str'), + username=dict(type='str'), + avatar_url=dict(type='str'), + tts=dict(type='bool', default=False), + embeds=dict(type='list', elements='dict'), + ), + required_one_of=[['content', 'embeds']], + supports_check_mode=True + ) + + result = dict( + changed=False, + http_code='', + ) + + if module.check_mode: + response, info = discord_check_mode(module) + if info['status'] != 200: + try: + module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info) + except Exception: + 
module.fail_json(http_code=info['status'], msg=info['msg'], info=info) + else: + module.exit_json(msg=info['msg'], changed=False, http_code=info['status'], response=module.from_json(response.read())) + else: + response, info = discord_text_msg(module) + if info['status'] != 204: + try: + module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info) + except Exception: + module.fail_json(http_code=info['status'], msg=info['msg'], info=info) + else: + module.exit_json(msg=info['msg'], changed=True, http_code=info['status']) + + +if __name__ == "__main__": + main() diff --git a/tests/unit/plugins/modules/notification/test_discord.py b/tests/unit/plugins/modules/notification/test_discord.py new file mode 100644 index 0000000000..257b0d4dab --- /dev/null +++ b/tests/unit/plugins/modules/notification/test_discord.py @@ -0,0 +1,103 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import pytest +from ansible_collections.community.general.tests.unit.compat.mock import Mock, patch +from ansible_collections.community.general.plugins.modules.notification import discord +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + + +class TestDiscordModule(ModuleTestCase): + + def setUp(self): + super(TestDiscordModule, self).setUp() + self.module = discord + + def tearDown(self): + super(TestDiscordModule, self).tearDown() + + @pytest.fixture + def fetch_url_mock(self, mocker): + return mocker.patch('ansible.module_utils.notification.discord.fetch_url') + + def test_without_parameters(self): + """Failure if no parameters set""" + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + self.module.main() + + def test_without_content(self): + """Failure if content and embeds 
both are missing""" + set_module_args({ + 'webhook_id': 'xxx', + 'webhook_token': 'xxx' + }) + with self.assertRaises(AnsibleFailJson): + self.module.main() + + def test_successful_message(self): + """Test a basic message successfully.""" + set_module_args({ + 'webhook_id': 'xxx', + 'webhook_token': 'xxx', + 'content': 'test' + }) + + with patch.object(discord, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'}) + with self.assertRaises(AnsibleExitJson): + self.module.main() + + self.assertTrue(fetch_url_mock.call_count, 1) + call_data = json.loads(fetch_url_mock.call_args[1]['data']) + assert call_data['content'] == "test" + + def test_message_with_username(self): + """Test a message with username set successfully.""" + set_module_args({ + 'webhook_id': 'xxx', + 'webhook_token': 'xxx', + 'content': 'test', + 'username': 'Ansible Bot' + }) + + with patch.object(discord, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'}) + with self.assertRaises(AnsibleExitJson): + self.module.main() + + self.assertTrue(fetch_url_mock.call_count, 1) + call_data = json.loads(fetch_url_mock.call_args[1]['data']) + assert call_data['username'] == "Ansible Bot" + assert call_data['content'] == "test" + + def test_failed_message(self): + """Test failure because webhook id is wrong.""" + + set_module_args({ + 'webhook_id': 'wrong', + 'webhook_token': 'xxx', + 'content': 'test' + }) + + with patch.object(discord, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'HTTP Error 404: Not Found', 'body': '{"message": "Unknown Webhook", "code": 10015}'}) + with self.assertRaises(AnsibleFailJson): + self.module.main() + + def test_failed_message_without_body(self): + """Test failure with empty response body.""" + + set_module_args({ + 'webhook_id': 'wrong', + 'webhook_token': 'xxx', + 'content': 'test' + }) + + with patch.object(discord, 
"fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'HTTP Error 404: Not Found'}) + with self.assertRaises(AnsibleFailJson): + self.module.main() From b9fa9116c15f5d6e90eb8626ec67650854429b14 Mon Sep 17 00:00:00 2001 From: spike77453 Date: Tue, 11 May 2021 19:35:30 +0200 Subject: [PATCH 0037/2828] nmcli: Remove dead code, 'options' never contains keys from 'param_alias' (#2417) * nmcli: Remove dead code, 'options' never contains keys from 'param_alias' * Update changelogs/fragments/2417-nmcli_remove_dead_code.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2417-nmcli_remove_dead_code.yml | 2 ++ plugins/modules/net_tools/nmcli.py | 18 ------------------ 2 files changed, 2 insertions(+), 18 deletions(-) create mode 100644 changelogs/fragments/2417-nmcli_remove_dead_code.yml diff --git a/changelogs/fragments/2417-nmcli_remove_dead_code.yml b/changelogs/fragments/2417-nmcli_remove_dead_code.yml new file mode 100644 index 0000000000..9d94c393fa --- /dev/null +++ b/changelogs/fragments/2417-nmcli_remove_dead_code.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - remove dead code, ``options`` never contains keys from ``param_alias`` (https://github.com/ansible-collections/community.general/pull/2417). 
diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index e2ed4ad572..929d88c654 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -1036,17 +1036,6 @@ class Nmcli(object): return conn_info def _compare_conn_params(self, conn_info, options): - # See nmcli(1) for details - param_alias = { - 'type': 'connection.type', - 'con-name': 'connection.id', - 'autoconnect': 'connection.autoconnect', - 'ifname': 'connection.interface-name', - 'master': 'connection.master', - 'slave-type': 'connection.slave-type', - 'zone': 'connection.zone', - } - changed = False diff_before = dict() diff_after = dict() @@ -1070,13 +1059,6 @@ class Nmcli(object): value = value.upper() # ensure current_value is also converted to uppercase in case nmcli changes behaviour current_value = current_value.upper() - elif key in param_alias: - real_key = param_alias[key] - if real_key in conn_info: - current_value = conn_info[real_key] - else: - # alias parameter does not exist - current_value = None else: # parameter does not exist current_value = None From 83a0c32269210c33c978ecf1a11096608ff91a20 Mon Sep 17 00:00:00 2001 From: Xabier Napal Date: Wed, 12 May 2021 17:33:27 +0200 Subject: [PATCH 0038/2828] influxdb_retention_policy - add state argument to module spec (#2383) (#2385) * influxdb_retention_policy: add state option to module argument spec * influxdb_retention_policy: simplify duration parsing logic (suggested in #2284) * add changelog * fix documentation and changelog * add constants for duration and sgduration validations * restyle ansible module spec Co-authored-by: Felix Fontein * improve changelog Co-authored-by: Felix Fontein * set changed result in check mode for state absent * remove required flag in optional module arguments * influxdb_retention_policy: improve examples readability Co-authored-by: Felix Fontein --- ...uxdb_retention_policy-add-state-option.yml | 6 + .../influxdb/influxdb_retention_policy.py | 
132 ++++++++++++------ 2 files changed, 92 insertions(+), 46 deletions(-) create mode 100644 changelogs/fragments/2383-influxdb_retention_policy-add-state-option.yml diff --git a/changelogs/fragments/2383-influxdb_retention_policy-add-state-option.yml b/changelogs/fragments/2383-influxdb_retention_policy-add-state-option.yml new file mode 100644 index 0000000000..b8e358848e --- /dev/null +++ b/changelogs/fragments/2383-influxdb_retention_policy-add-state-option.yml @@ -0,0 +1,6 @@ +minor_changes: + - influxdb_retention_policy - add ``state`` parameter with allowed values + ``present`` and ``absent`` to support deletion of existing retention policies + (https://github.com/ansible-collections/community.general/issues/2383). + - influxdb_retention_policy - simplify duration logic parsing + (https://github.com/ansible-collections/community.general/pull/2385). diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py b/plugins/modules/database/influxdb/influxdb_retention_policy.py index 883adaffa6..3ff48cbad0 100644 --- a/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ b/plugins/modules/database/influxdb/influxdb_retention_policy.py @@ -29,17 +29,24 @@ options: - Name of the retention policy. required: true type: str + state: + description: + - State of the retention policy. + choices: [ absent, present ] + default: present + type: str + version_added: 3.1.0 duration: description: - Determines how long InfluxDB should keep the data. If specified, it should be C(INF) or at least one hour. If not specified, C(INF) is assumed. Supports complex duration expressions with multiple units. - required: true + - Required only if I(state) is set to C(present). type: str replication: description: - Determines how many independent copies of each point are stored in the cluster. - required: true + - Required only if I(state) is set to C(present). 
type: int default: description: @@ -63,53 +70,65 @@ EXAMPLES = r''' # Example influxdb_retention_policy command from Ansible Playbooks - name: Create 1 hour retention policy community.general.influxdb_retention_policy: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" policy_name: test duration: 1h replication: 1 ssl: yes validate_certs: yes + state: present - name: Create 1 day retention policy with 1 hour shard group duration community.general.influxdb_retention_policy: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" policy_name: test duration: 1d replication: 1 shard_group_duration: 1h + state: present - name: Create 1 week retention policy with 1 day shard group duration community.general.influxdb_retention_policy: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" policy_name: test duration: 1w replication: 1 shard_group_duration: 1d + state: present - name: Create infinite retention policy with 1 week of shard group duration community.general.influxdb_retention_policy: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" policy_name: test duration: INF replication: 1 ssl: no validate_certs: no shard_group_duration: 1w + state: present - name: Create retention policy with complex durations community.general.influxdb_retention_policy: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" policy_name: test duration: 5d1h30m replication: 1 ssl: no validate_certs: no 
shard_group_duration: 1d10h30m + state: present + +- name: Drop retention policy + community.general.influxdb_retention_policy: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + state: absent ''' RETURN = r''' @@ -134,6 +153,21 @@ VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$') DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)') EXTENDED_DURATION_REGEX = re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))') +DURATION_UNIT_NANOSECS = { + 'ns': 1, + 'u': 1000, + 'µ': 1000, + 'ms': 1000 * 1000, + 's': 1000 * 1000 * 1000, + 'm': 1000 * 1000 * 1000 * 60, + 'h': 1000 * 1000 * 1000 * 60 * 60, + 'd': 1000 * 1000 * 1000 * 60 * 60 * 24, + 'w': 1000 * 1000 * 1000 * 60 * 60 * 24 * 7, +} + +MINIMUM_VALID_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] +MINIMUM_VALID_SHARD_GROUP_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] + def check_duration_literal(value): return VALID_DURATION_REGEX.search(value) is not None @@ -148,28 +182,9 @@ def parse_duration_literal(value, extended=False): lookup = (EXTENDED_DURATION_REGEX if extended else DURATION_REGEX).findall(value) for duration_literal in lookup: - if extended and duration_literal[3] == 's': - duration_val = float(duration_literal[2]) - duration += duration_val * 1000 * 1000 * 1000 - else: - duration_val = int(duration_literal[0]) - - if duration_literal[1] == 'ns': - duration += duration_val - elif duration_literal[1] == 'u' or duration_literal[1] == 'µ': - duration += duration_val * 1000 - elif duration_literal[1] == 'ms': - duration += duration_val * 1000 * 1000 - elif duration_literal[1] == 's': - duration += duration_val * 1000 * 1000 * 1000 - elif duration_literal[1] == 'm': - duration += duration_val * 1000 * 1000 * 1000 * 60 - elif duration_literal[1] == 'h': - duration += duration_val * 1000 * 1000 * 1000 * 60 * 60 - elif duration_literal[1] == 'd': - duration += duration_val * 1000 * 1000 * 1000 * 60 * 60 * 24 - elif 
duration_literal[1] == 'w': - duration += duration_val * 1000 * 1000 * 1000 * 60 * 60 * 24 * 7 + filtered_literal = list(filter(None, duration_literal)) + duration_val = float(filtered_literal[0]) + duration += duration_val * DURATION_UNIT_NANOSECS[filtered_literal[1]] return duration @@ -208,7 +223,7 @@ def create_retention_policy(module, client): module.fail_json(msg="Failed to parse value of duration") influxdb_duration_format = parse_duration_literal(duration) - if influxdb_duration_format != 0 and influxdb_duration_format < 3600000000000: + if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION: module.fail_json(msg="duration value must be at least 1h") if shard_group_duration is not None: @@ -216,7 +231,7 @@ def create_retention_policy(module, client): module.fail_json(msg="Failed to parse value of shard_group_duration") influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) - if influxdb_shard_group_duration_format < 3600000000000: + if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION: module.fail_json(msg="shard_group_duration value must be finite and at least 1h") if not module.check_mode: @@ -245,7 +260,7 @@ def alter_retention_policy(module, client, retention_policy): module.fail_json(msg="Failed to parse value of duration") influxdb_duration_format = parse_duration_literal(duration) - if influxdb_duration_format != 0 and influxdb_duration_format < 3600000000000: + if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION: module.fail_json(msg="duration value must be at least 1h") if shard_group_duration is None: @@ -255,7 +270,7 @@ def alter_retention_policy(module, client, retention_policy): module.fail_json(msg="Failed to parse value of shard_group_duration") influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) - if influxdb_shard_group_duration_format < 3600000000000: + if 
influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION: module.fail_json(msg="shard_group_duration value must be finite and at least 1h") if (retention_policy['duration'] != influxdb_duration_format or @@ -272,30 +287,55 @@ def alter_retention_policy(module, client, retention_policy): module.exit_json(changed=changed) +def drop_retention_policy(module, client): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + + if not module.check_mode: + try: + client.drop_retention_policy(policy_name, database_name) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + module.exit_json(changed=True) + + def main(): argument_spec = InfluxDb.influxdb_argument_spec() argument_spec.update( + state=dict(default='present', type='str', choices=['present', 'absent']), database_name=dict(required=True, type='str'), policy_name=dict(required=True, type='str'), - duration=dict(required=True, type='str'), - replication=dict(required=True, type='int'), + duration=dict(type='str'), + replication=dict(type='int'), default=dict(default=False, type='bool'), - shard_group_duration=dict(required=False, type='str'), + shard_group_duration=dict(type='str'), ) module = AnsibleModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, + required_if=( + ('state', 'present', ['duration', 'replication']), + ), ) + state = module.params['state'] + influxdb = InfluxDb(module) client = influxdb.connect_to_influxdb() retention_policy = find_retention_policy(module, client) - if retention_policy: - alter_retention_policy(module, client, retention_policy) - else: - create_retention_policy(module, client) + if state == 'present': + if retention_policy: + alter_retention_policy(module, client, retention_policy) + else: + create_retention_policy(module, client) + + if state == 'absent': + if retention_policy: + drop_retention_policy(module, client) + else: + module.exit_json(changed=False) 
if __name__ == '__main__': From 265d034e310fd3704d4779108be910f611bca91e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 13 May 2021 03:37:31 +1200 Subject: [PATCH 0039/2828] linode - docs/validation changes + minor refactorings (#2410) * multiple changes: - documentation fixes - minor refactorings * added param deprecation note to the documentation * added changelog fragment * Update changelogs/fragments/2410-linode-improvements.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/2410-linode-improvements.yml Co-authored-by: Felix Fontein * Update plugins/modules/cloud/linode/linode.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2410-linode-improvements.yml | 5 ++ plugins/modules/cloud/linode/linode.py | 71 +++++++++---------- tests/sanity/ignore-2.10.txt | 3 - tests/sanity/ignore-2.11.txt | 3 - tests/sanity/ignore-2.12.txt | 3 - tests/sanity/ignore-2.9.txt | 2 - 6 files changed, 39 insertions(+), 48 deletions(-) create mode 100644 changelogs/fragments/2410-linode-improvements.yml diff --git a/changelogs/fragments/2410-linode-improvements.yml b/changelogs/fragments/2410-linode-improvements.yml new file mode 100644 index 0000000000..cdf8551b08 --- /dev/null +++ b/changelogs/fragments/2410-linode-improvements.yml @@ -0,0 +1,5 @@ +deprecated_features: + - linode - parameter ``backupsenabled`` is deprecated and will be removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2410). +minor_changes: + - linode - added proper traceback when failing due to exceptions (https://github.com/ansible-collections/community.general/pull/2410). + - linode - parameter ``additional_disks`` is now validated as a list of dictionaries (https://github.com/ansible-collections/community.general/pull/2410). 
diff --git a/plugins/modules/cloud/linode/linode.py b/plugins/modules/cloud/linode/linode.py index a35b25b6c7..c9ee0e61ed 100644 --- a/plugins/modules/cloud/linode/linode.py +++ b/plugins/modules/cloud/linode/linode.py @@ -21,8 +21,10 @@ options: type: str api_key: description: - - Linode API key + - Linode API key. + - C(LINODE_API_KEY) env variable can be used instead. type: str + required: yes name: description: - Name to give the instance (alphanumeric, dashes, underscore). @@ -46,6 +48,7 @@ options: - List of dictionaries for creating additional disks that are added to the Linode configuration settings. - Dictionary takes Size, Label, Type. Size is in MB. type: list + elements: dict alert_bwin_enabled: description: - Set status of bandwidth in alerts. @@ -86,9 +89,18 @@ options: description: - Set threshold for average IO ops/sec over 2 hour period. type: int + backupsenabled: + description: + - Deprecated parameter, it will be removed in community.general C(5.0.0). + - To enable backups pass values to either I(backupweeklyday) or I(backupwindow). + type: int backupweeklyday: description: - - Integer value for what day of the week to store weekly backups. + - Day of the week to take backups. + type: int + backupwindow: + description: + - The time window in which backups will be taken. type: int plan: description: @@ -153,7 +165,6 @@ author: notes: - Please note, linode-python does not have python 3 support. - This module uses the now deprecated v3 of the Linode API. - - C(LINODE_API_KEY) env variable can be used instead. - Please review U(https://www.linode.com/api/linode) for determining the required parameters. 
''' @@ -262,7 +273,6 @@ EXAMPLES = ''' delegate_to: localhost ''' -import os import time import traceback @@ -274,7 +284,7 @@ except ImportError: LINODE_IMP_ERR = traceback.format_exc() HAS_LINODE = False -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback def randompass(): @@ -358,7 +368,7 @@ def linodeServers(module, api, state, name, if not servers: for arg in (name, plan, distribution, datacenter): if not arg: - module.fail_json(msg='%s is required for %s state' % (arg, state)) # @TODO use required_if instead + module.fail_json(msg='%s is required for %s state' % (arg, state)) # Create linode entity new_server = True @@ -383,7 +393,7 @@ def linodeServers(module, api, state, name, try: res = api.linode_ip_addprivate(LinodeID=linode_id) except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) if not disks: for arg in (name, linode_id, distribution): @@ -428,7 +438,7 @@ def linodeServers(module, api, state, name, jobs.append(res['JobID']) except Exception as e: # TODO: destroy linode ? 
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) if not configs: for arg in (name, linode_id, distribution): @@ -471,7 +481,7 @@ def linodeServers(module, api, state, name, Disklist=disks_list, Label='%s config' % name) configs = api.linode_config_list(LinodeId=linode_id) except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) # Start / Ensure servers are running for server in servers: @@ -517,10 +527,7 @@ def linodeServers(module, api, state, name, instance['password'] = password instances.append(instance) - elif state in ('stopped'): - if not linode_id: - module.fail_json(msg='linode_id is required for stopped state') - + elif state in ('stopped',): if not servers: module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) @@ -530,17 +537,14 @@ def linodeServers(module, api, state, name, try: res = api.linode_shutdown(LinodeId=linode_id) except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) instance['status'] = 'Stopping' changed = True else: instance['status'] = 'Stopped' instances.append(instance) - elif state in ('restarted'): - if not linode_id: - module.fail_json(msg='linode_id is required for restarted state') - + elif state in ('restarted',): if not servers: module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) @@ -549,7 +553,7 @@ def linodeServers(module, api, state, name, try: res = api.linode_reboot(LinodeId=server['LINODEID']) except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) instance['status'] = 'Restarting' changed = True instances.append(instance) @@ -560,7 +564,7 @@ def 
linodeServers(module, api, state, name, try: api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) instance['status'] = 'Deleting' changed = True instances.append(instance) @@ -577,7 +581,7 @@ def main(): argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']), - api_key=dict(type='str', no_log=True), + api_key=dict(type='str', no_log=True, required=True, fallback=(env_fallback, ['LINODE_API_KEY'])), name=dict(type='str', required=True), alert_bwin_enabled=dict(type='bool'), alert_bwin_threshold=dict(type='int'), @@ -589,12 +593,12 @@ def main(): alert_cpu_threshold=dict(type='int'), alert_diskio_enabled=dict(type='bool'), alert_diskio_threshold=dict(type='int'), - backupsenabled=dict(type='int'), + backupsenabled=dict(type='int', removed_in_version='5.0.0', removed_from_collection='community.general'), backupweeklyday=dict(type='int'), backupwindow=dict(type='int'), displaygroup=dict(type='str', default=''), plan=dict(type='int'), - additional_disks=dict(type='list'), + additional_disks=dict(type='list', elements='dict'), distribution=dict(type='int'), datacenter=dict(type='int'), kernel_id=dict(type='int'), @@ -608,6 +612,10 @@ def main(): wait_timeout=dict(type='int', default=300), watchdog=dict(type='bool', default=True), ), + required_if=[ + ('state', 'restarted', ['linode_id']), + ('state', 'stopped', ['linode_id']), + ] ) if not HAS_LINODE: @@ -626,7 +634,6 @@ def main(): alert_cpu_threshold = module.params.get('alert_cpu_threshold') alert_diskio_enabled = module.params.get('alert_diskio_enabled') alert_diskio_threshold = module.params.get('alert_diskio_threshold') - backupsenabled = module.params.get('backupsenabled') backupweeklyday = module.params.get('backupweeklyday') backupwindow = 
module.params.get('backupwindow') displaygroup = module.params.get('displaygroup') @@ -642,10 +649,9 @@ def main(): ssh_pub_key = module.params.get('ssh_pub_key') swap = module.params.get('swap') wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) + wait_timeout = module.params.get('wait_timeout') watchdog = int(module.params.get('watchdog')) - kwargs = dict() check_items = dict( alert_bwin_enabled=alert_bwin_enabled, alert_bwin_threshold=alert_bwin_threshold, @@ -661,23 +667,14 @@ def main(): backupwindow=backupwindow, ) - for key, value in check_items.items(): - if value is not None: - kwargs[key] = value - - # Setup the api_key - if not api_key: - try: - api_key = os.environ['LINODE_API_KEY'] - except KeyError as e: - module.fail_json(msg='Unable to load %s' % e.message) + kwargs = dict((k, v) for k, v in check_items.items() if v is not None) # setup the auth try: api = linode_api.Api(api_key) api.test_echo() except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) linodeServers(module, api, state, name, displaygroup, plan, diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index a33e194233..da611904bb 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -1,9 +1,6 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py 
validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 4678f10294..a7d85904ae 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -1,8 +1,5 @@ plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index ec34ff7833..cf5d588e9a 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -1,8 +1,5 @@ plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 8f18be1c44..5c759d2095 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -1,8 +1,6 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over 
time plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error From 23dda56913c19fdd8f1156be9496b7762fc7c11a Mon Sep 17 00:00:00 2001 From: Kogelvis Date: Thu, 13 May 2021 21:48:49 +0200 Subject: [PATCH 0040/2828] Add proxmox_nic module (#2449) * Add proxmox_nic module Add proxmox_nic module to manage NIC's on Qemu(KVM) VM's in a Proxmox VE cluster. Update proxmox integration tests and add tests for proxmox_nic module. This partially solves https://github.com/ansible-collections/community.general/issues/1964#issuecomment-790499397 and allows for adding/updating/deleting network interface cards after creating/cloning a VM. The proxmox_nic module will keep MAC-addresses the same when updating a NIC. It only changes when explicitly setting a MAC-address. * Apply suggestions from code review Co-authored-by: Felix Fontein * Add check_mode and implement review comments - check_mode added - some documentation updates - when MTU is set, check if the model is virtio, else fail - trunks can now be provided as list of ints instead of vlanid[;vlanid...] 
* Make returns on update_nic and delete_nic more readable Co-authored-by: Felix Fontein * Increase readability on update_nic and delete_nic * Implement check in get_vmid - get_vmid will now fail when multiple vmid's are returned as proxmox doesn't guarantee uniqueness - remove an unused import - fix a typo in an error message * Add some error checking to get_vmid - get_vmid will now return the error message when proxmoxer fails - get_vmid will return the vmid directly instead of a list of one - Some minor documentation updates * Warn instead of fail when setting mtu on unsupported nic - When setting the MTU on an unsupported NIC model (virtio is the only supported model) this module will now print a warning instead of failing. - Some minor documentation updates. * Take advantage of proxmox_auth_argument_spec Make use of proxmox_auth_argument_spec from plugins/module_utils/proxmox.py This provides some extra environment fallbacks. * Add blank line to conform with pep8 Co-authored-by: Felix Fontein --- plugins/modules/cloud/misc/proxmox_nic.py | 349 ++++++++++++++++++ plugins/modules/proxmox_nic.py | 1 + .../targets/proxmox/tasks/main.yml | 88 ++++- 3 files changed, 437 insertions(+), 1 deletion(-) create mode 100644 plugins/modules/cloud/misc/proxmox_nic.py create mode 120000 plugins/modules/proxmox_nic.py diff --git a/plugins/modules/cloud/misc/proxmox_nic.py b/plugins/modules/cloud/misc/proxmox_nic.py new file mode 100644 index 0000000000..a9c9f14ddc --- /dev/null +++ b/plugins/modules/cloud/misc/proxmox_nic.py @@ -0,0 +1,349 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Lammert Hellinga (@Kogelvis) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: proxmox_nic +short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster. 
+version_added: 3.1.0 +description: + - Allows you to create/update/delete a NIC on Qemu(KVM) Virtual Machines in a Proxmox VE cluster. +author: "Lammert Hellinga (@Kogelvis) " +options: + bridge: + description: + - Add this interface to the specified bridge device. The Proxmox VE default bridge is called C(vmbr0). + type: str + firewall: + description: + - Whether this interface should be protected by the firewall. + type: bool + default: false + interface: + description: + - Name of the interface, should be C(net[n]) where C(1 ≤ n ≤ 31). + type: str + required: true + link_down: + description: + - Whether this interface should be disconnected (like pulling the plug). + type: bool + default: false + mac: + description: + - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified. + - When not specified this module will keep the MAC address the same when changing an existing interface. + type: str + model: + description: + - The NIC emulator model. + type: str + choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', + 'rtl8139', 'virtio', 'vmxnet3'] + default: virtio + mtu: + description: + - Force MTU, for C(virtio) model only, setting will be ignored otherwise. + - Set to C(1) to use the bridge MTU. + - Value should be C(1 ≤ n ≤ 65520). + type: int + name: + description: + - Specifies the VM name. Only used on the configuration web interface. + - Required only for I(state=present). + type: str + queues: + description: + - Number of packet queues to be used on the device. + - Value should be C(0 ≤ n ≤ 16). + type: int + rate: + description: + - Rate limit in MBps (MegaBytes per second) as floating point number. + type: float + state: + description: + - Indicates desired state of the NIC. + type: str + choices: ['present', 'absent'] + default: present + tag: + description: + - VLAN tag to apply to packets on this interface. 
+ - Value should be C(1 ≤ n ≤ 4094). + type: int + trunks: + description: + - List of VLAN trunks to pass through this interface. + type: list + elements: int + vmid: + description: + - Specifies the instance ID. + type: int +extends_documentation_fragment: + - community.general.proxmox.documentation +''' + +EXAMPLES = ''' +- name: Create NIC net0 targeting the vm by name + community.general.proxmox_nic: + api_user: root@pam + api_password: secret + api_host: proxmoxhost + name: my_vm + interface: net0 + bridge: vmbr0 + tag: 3 + +- name: Create NIC net0 targeting the vm by id + community.general.proxmox_nic: + api_user: root@pam + api_password: secret + api_host: proxmoxhost + vmid: 103 + interface: net0 + bridge: vmbr0 + mac: "12:34:56:C0:FF:EE" + firewall: true + +- name: Delete NIC net0 targeting the vm by name + community.general.proxmox_nic: + api_user: root@pam + api_password: secret + api_host: proxmoxhost + name: my_vm + interface: net0 + state: absent +''' + +RETURN = ''' +vmid: + description: The VM vmid. 
+ returned: success + type: int + sample: 115 +msg: + description: A short message + returned: always + type: str + sample: "Nic net0 unchanged on VM with vmid 103" +''' + +try: + from proxmoxer import ProxmoxAPI + HAS_PROXMOXER = True +except ImportError: + HAS_PROXMOXER = False + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.proxmox import proxmox_auth_argument_spec + + +def get_vmid(module, proxmox, name): + try: + vms = [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm.get('name') == name] + except Exception as e: + module.fail_json(msg='Error: %s occurred while retrieving VM with name = %s' % (e, name)) + + if not vms: + module.fail_json(msg='No VM found with name: %s' % name) + elif len(vms) > 1: + module.fail_json(msg='Multiple VMs found with name: %s, provide vmid instead' % name) + + return vms[0] + + +def get_vm(proxmox, vmid): + return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] + + +def update_nic(module, proxmox, vmid, interface, model, **kwargs): + vm = get_vm(proxmox, vmid) + + try: + vminfo = proxmox.nodes(vm[0]['node']).qemu(vmid).config.get() + except Exception as e: + module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) + + if interface in vminfo: + # Convert the current config to a dictionary + config = vminfo[interface].split(',') + config.sort() + + config_current = {} + + for i in config: + kv = i.split('=') + try: + config_current[kv[0]] = kv[1] + except IndexError: + config_current[kv[0]] = '' + + # determine the current model nic and mac-address + models = ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', + 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', 'rtl8139', 'virtio', 'vmxnet3'] + current_model = set(models) & set(config_current.keys()) + current_model = current_model.pop() + current_mac = 
config_current[current_model] + + # build nic config string + config_provided = "{0}={1}".format(model, current_mac) + else: + config_provided = model + + if kwargs['mac']: + config_provided = "{0}={1}".format(model, kwargs['mac']) + + if kwargs['bridge']: + config_provided += ",bridge={0}".format(kwargs['bridge']) + + if kwargs['firewall']: + config_provided += ",firewall=1" + + if kwargs['link_down']: + config_provided += ',link_down=1' + + if kwargs['mtu']: + if model == 'virtio': + config_provided += ",mtu={0}".format(kwargs['mtu']) + else: + module.warn( + 'Ignoring MTU for nic {0} on VM with vmid {1}, ' + 'model should be set to \'virtio\': '.format(interface, vmid)) + + if kwargs['queues']: + config_provided += ",queues={0}".format(kwargs['queues']) + + if kwargs['rate']: + config_provided += ",rate={0}".format(kwargs['rate']) + + if kwargs['tag']: + config_provided += ",tag={0}".format(kwargs['tag']) + + if kwargs['trunks']: + config_provided += ",trunks={0}".format(';'.join(str(x) for x in kwargs['trunks'])) + + net = {interface: config_provided} + vm = get_vm(proxmox, vmid) + + if ((interface not in vminfo) or (vminfo[interface] != config_provided)): + if not module.check_mode: + proxmox.nodes(vm[0]['node']).qemu(vmid).config.set(**net) + return True + + return False + + +def delete_nic(module, proxmox, vmid, interface): + vm = get_vm(proxmox, vmid) + vminfo = proxmox.nodes(vm[0]['node']).qemu(vmid).config.get() + + if interface in vminfo: + if not module.check_mode: + proxmox.nodes(vm[0]['node']).qemu(vmid).config.set(vmid=vmid, delete=interface) + return True + + return False + + +def main(): + module_args = proxmox_auth_argument_spec() + nic_args = dict( + bridge=dict(type='str'), + firewall=dict(type='bool', default=False), + interface=dict(type='str', required=True), + link_down=dict(type='bool', default=False), + mac=dict(type='str'), + model=dict(choices=['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', + 'i82551', 'i82557b', 'i82559er', 
'ne2k_isa', 'ne2k_pci', 'pcnet', + 'rtl8139', 'virtio', 'vmxnet3'], default='virtio'), + mtu=dict(type='int'), + name=dict(type='str'), + queues=dict(type='int'), + rate=dict(type='float'), + state=dict(default='present', choices=['present', 'absent']), + tag=dict(type='int'), + trunks=dict(type='list', elements='int'), + vmid=dict(type='int'), + ) + module_args.update(nic_args) + + module = AnsibleModule( + argument_spec=module_args, + required_together=[('api_token_id', 'api_token_secret')], + required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')], + supports_check_mode=True, + ) + + if not HAS_PROXMOXER: + module.fail_json(msg='proxmoxer required for this module') + + api_host = module.params['api_host'] + api_password = module.params['api_password'] + api_token_id = module.params['api_token_id'] + api_token_secret = module.params['api_token_secret'] + api_user = module.params['api_user'] + interface = module.params['interface'] + model = module.params['model'] + name = module.params['name'] + state = module.params['state'] + validate_certs = module.params['validate_certs'] + vmid = module.params['vmid'] + + auth_args = {'user': api_user} + if not (api_token_id and api_token_secret): + auth_args['password'] = api_password + else: + auth_args['token_name'] = api_token_id + auth_args['token_value'] = api_token_secret + + try: + proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args) + except Exception as e: + module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) + + # If vmid is not defined then retrieve its value from the vm name, + if not vmid: + vmid = get_vmid(module, proxmox, name) + + # Ensure VM id exists + if not get_vm(proxmox, vmid): + module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid) + + if state == 'present': + try: + if update_nic(module, proxmox, vmid, interface, model, + bridge=module.params['bridge'], + firewall=module.params['firewall'], + 
link_down=module.params['link_down'], + mac=module.params['mac'], + mtu=module.params['mtu'], + queues=module.params['queues'], + rate=module.params['rate'], + tag=module.params['tag'], + trunks=module.params['trunks']): + module.exit_json(changed=True, vmid=vmid, msg="Nic {0} updated on VM with vmid {1}".format(interface, vmid)) + else: + module.exit_json(vmid=vmid, msg="Nic {0} unchanged on VM with vmid {1}".format(interface, vmid)) + except Exception as e: + module.fail_json(vmid=vmid, msg='Unable to change nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) + + elif state == 'absent': + try: + if delete_nic(module, proxmox, vmid, interface): + module.exit_json(changed=True, vmid=vmid, msg="Nic {0} deleted on VM with vmid {1}".format(interface, vmid)) + else: + module.exit_json(vmid=vmid, msg="Nic {0} does not exist on VM with vmid {1}".format(interface, vmid)) + except Exception as e: + module.fail_json(vmid=vmid, msg='Unable to delete nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/proxmox_nic.py b/plugins/modules/proxmox_nic.py new file mode 120000 index 0000000000..88756ab636 --- /dev/null +++ b/plugins/modules/proxmox_nic.py @@ -0,0 +1 @@ +cloud/misc/proxmox_nic.py \ No newline at end of file diff --git a/tests/integration/targets/proxmox/tasks/main.yml b/tests/integration/targets/proxmox/tasks/main.yml index 6301cb66ef..5954d3f11f 100644 --- a/tests/integration/targets/proxmox/tasks/main.yml +++ b/tests/integration/targets/proxmox/tasks/main.yml @@ -48,7 +48,7 @@ api_token_secret: "{{ api_token_secret | default(omit) }}" validate_certs: "{{ validate_certs }}" register: results - + - assert: that: - results is not changed @@ -226,6 +226,92 @@ - results_action_current.vmid == {{ vmid }} - results_action_current.msg == "VM test-instance with vmid = {{ vmid }} is running" +- name: VM add/change/delete NIC + tags: [ 'nic' ] + block: + - name: Add NIC to test VM + 
proxmox_nic: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + vmid: "{{ vmid }}" + state: present + interface: net5 + bridge: vmbr0 + tag: 42 + register: results + + - assert: + that: + - results is changed + - results.vmid == {{ vmid }} + - results.msg == "Nic net5 updated on VM with vmid {{ vmid }}" + + - name: Update NIC no changes + proxmox_nic: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + vmid: "{{ vmid }}" + state: present + interface: net5 + bridge: vmbr0 + tag: 42 + register: results + + - assert: + that: + - results is not changed + - results.vmid == {{ vmid }} + - results.msg == "Nic net5 unchanged on VM with vmid {{ vmid }}" + + - name: Update NIC with changes + proxmox_nic: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + vmid: "{{ vmid }}" + state: present + interface: net5 + bridge: vmbr0 + tag: 24 + firewall: True + register: results + + - assert: + that: + - results is changed + - results.vmid == {{ vmid }} + - results.msg == "Nic net5 updated on VM with vmid {{ vmid }}" + + - name: Delete NIC + proxmox_nic: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs 
}}" + vmid: "{{ vmid }}" + state: absent + interface: net5 + register: results + + - assert: + that: + - results is changed + - results.vmid == {{ vmid }} + - results.msg == "Nic net5 deleted on VM with vmid {{ vmid }}" + - name: VM stop tags: [ 'stop' ] block: From 384655e15c7e36e5b4c56578c534053404f9f1d1 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 13 May 2021 21:49:57 +0200 Subject: [PATCH 0041/2828] Add groupby_as_dict filter (#2323) * Add groupby_as_dict filter. * Test all error cases. --- .../fragments/2323-groupby_as_dict-filter.yml | 3 ++ plugins/filter/groupby.py | 42 +++++++++++++++++ .../targets/filter_groupby/aliases | 2 + .../targets/filter_groupby/tasks/main.yml | 45 +++++++++++++++++++ .../targets/filter_groupby/vars/main.yml | 31 +++++++++++++ 5 files changed, 123 insertions(+) create mode 100644 changelogs/fragments/2323-groupby_as_dict-filter.yml create mode 100644 plugins/filter/groupby.py create mode 100644 tests/integration/targets/filter_groupby/aliases create mode 100644 tests/integration/targets/filter_groupby/tasks/main.yml create mode 100644 tests/integration/targets/filter_groupby/vars/main.yml diff --git a/changelogs/fragments/2323-groupby_as_dict-filter.yml b/changelogs/fragments/2323-groupby_as_dict-filter.yml new file mode 100644 index 0000000000..e72f323a60 --- /dev/null +++ b/changelogs/fragments/2323-groupby_as_dict-filter.yml @@ -0,0 +1,3 @@ +add plugin.filter: + - name: groupby_as_dict + description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute diff --git a/plugins/filter/groupby.py b/plugins/filter/groupby.py new file mode 100644 index 0000000000..a2a85aa905 --- /dev/null +++ b/plugins/filter/groupby.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = 
type + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.common._collections_compat import Mapping, Sequence + + +def groupby_as_dict(sequence, attribute): + ''' + Given a sequence of dictionaries and an attribute name, returns a dictionary mapping + the value of this attribute to the dictionary. + + If multiple dictionaries in the sequence have the same value for this attribute, + the filter will fail. + ''' + if not isinstance(sequence, Sequence): + raise AnsibleFilterError('Input is not a sequence') + + result = dict() + for list_index, element in enumerate(sequence): + if not isinstance(element, Mapping): + raise AnsibleFilterError('Sequence element #{0} is not a mapping'.format(list_index)) + if attribute not in element: + raise AnsibleFilterError('Attribute not contained in element #{0} of sequence'.format(list_index)) + result_index = element[attribute] + if result_index in result: + raise AnsibleFilterError('Multiple sequence entries have attribute value {0!r}'.format(result_index)) + result[result_index] = element + return result + + +class FilterModule(object): + ''' Ansible list filters ''' + + def filters(self): + return { + 'groupby_as_dict': groupby_as_dict, + } diff --git a/tests/integration/targets/filter_groupby/aliases b/tests/integration/targets/filter_groupby/aliases new file mode 100644 index 0000000000..6e79abdd02 --- /dev/null +++ b/tests/integration/targets/filter_groupby/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/filter_groupby/tasks/main.yml b/tests/integration/targets/filter_groupby/tasks/main.yml new file mode 100644 index 0000000000..29036a3bc5 --- /dev/null +++ b/tests/integration/targets/filter_groupby/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: Test functionality + assert: + that: + - list1 | community.general.groupby_as_dict('name') == dict1 + +- name: 'Test error: 
not a list' + set_fact: + test: "{{ list_no_list | community.general.groupby_as_dict('name') }}" + ignore_errors: true + register: result + +- assert: + that: + - result.msg == 'Input is not a sequence' + +- name: 'Test error: list element not a mapping' + set_fact: + test: "{{ list_no_dict | community.general.groupby_as_dict('name') }}" + ignore_errors: true + register: result + +- assert: + that: + - "result.msg == 'Sequence element #0 is not a mapping'" + +- name: 'Test error: list element does not have attribute' + set_fact: + test: "{{ list_no_attribute | community.general.groupby_as_dict('name') }}" + ignore_errors: true + register: result + +- assert: + that: + - "result.msg == 'Attribute not contained in element #1 of sequence'" + +- name: 'Test error: attribute collision' + set_fact: + test: "{{ list_collision | community.general.groupby_as_dict('name') }}" + ignore_errors: true + register: result + +- assert: + that: + - result.msg == "Multiple sequence entries have attribute value 'a'" diff --git a/tests/integration/targets/filter_groupby/vars/main.yml b/tests/integration/targets/filter_groupby/vars/main.yml new file mode 100644 index 0000000000..15d38a351a --- /dev/null +++ b/tests/integration/targets/filter_groupby/vars/main.yml @@ -0,0 +1,31 @@ +--- +list1: + - name: a + x: y + - name: b + z: 1 + +dict1: + a: + name: a + x: y + b: + name: b + z: 1 + +list_no_list: + a: + name: a + +list_no_dict: + - [] + - 1 + +list_no_attribute: + - name: a + foo: baz + - foo: bar + +list_collision: + - name: a + - name: a From ee9770cff720259cb781f0b1d9705f33a5d83fb1 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 13 May 2021 21:50:40 +0200 Subject: [PATCH 0042/2828] Deprecate nios content (#2458) * Deprecate nios content. * Make 2.9's ansible-test happy. * Add module_utils deprecation. 
--- changelogs/fragments/nios-deprecation.yml | 2 + meta/runtime.yml | 88 ++++++++++++++++++- plugins/lookup/nios.py | 4 + plugins/lookup/nios_next_ip.py | 4 + plugins/lookup/nios_next_network.py | 4 + .../modules/net_tools/nios/nios_a_record.py | 4 + .../net_tools/nios/nios_aaaa_record.py | 4 + .../net_tools/nios/nios_cname_record.py | 4 + .../modules/net_tools/nios/nios_dns_view.py | 4 + .../net_tools/nios/nios_fixed_address.py | 4 + .../net_tools/nios/nios_host_record.py | 4 + plugins/modules/net_tools/nios/nios_member.py | 4 + .../modules/net_tools/nios/nios_mx_record.py | 4 + .../net_tools/nios/nios_naptr_record.py | 4 + .../modules/net_tools/nios/nios_network.py | 4 + .../net_tools/nios/nios_network_view.py | 4 + .../modules/net_tools/nios/nios_nsgroup.py | 4 + .../modules/net_tools/nios/nios_ptr_record.py | 4 + .../modules/net_tools/nios/nios_srv_record.py | 4 + .../modules/net_tools/nios/nios_txt_record.py | 4 + plugins/modules/net_tools/nios/nios_zone.py | 4 + tests/sanity/ignore-2.9.txt | 32 +++++++ 22 files changed, 196 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/nios-deprecation.yml diff --git a/changelogs/fragments/nios-deprecation.yml b/changelogs/fragments/nios-deprecation.yml new file mode 100644 index 0000000000..bcfc2b4128 --- /dev/null +++ b/changelogs/fragments/nios-deprecation.yml @@ -0,0 +1,2 @@ +deprecated_features: +- "The nios, nios_next_ip, nios_next_network lookup plugins, the nios documentation fragment, and the nios_host_record, nios_ptr_record, nios_mx_record, nios_fixed_address, nios_zone, nios_member, nios_a_record, nios_aaaa_record, nios_network, nios_dns_view, nios_txt_record, nios_naptr_record, nios_srv_record, nios_cname_record, nios_nsgroup, and nios_network_view module have been deprecated and will be removed from community.general 5.0.0. Please install the `infoblox.nios_modules `_ collection instead and use its plugins and modules (https://github.com/ansible-collections/community.general/pull/2458)." 
diff --git a/meta/runtime.yml b/meta/runtime.yml index c116029974..e5b59bc046 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -37,6 +37,18 @@ plugin_routing: redirect: community.google.gcp_storage_file hashi_vault: redirect: community.hashi_vault.hashi_vault + nios: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_lookup instead. + nios_next_ip: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_next_ip lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_next_ip instead. + nios_next_network: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_next_network lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_next_network instead. modules: ali_instance_facts: tombstone: @@ -283,6 +295,70 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use netapp.ontap.na_ontap_info instead. + nios_a_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_a_record module has been deprecated. Please use infoblox.nios_modules.nios_a_record instead. + nios_aaaa_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_aaaa_record module has been deprecated. Please use infoblox.nios_modules.nios_aaaa_record instead. + nios_cname_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_cname_record module has been deprecated. Please use infoblox.nios_modules.nios_cname_record instead. + nios_dns_view: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_dns_view module has been deprecated. Please use infoblox.nios_modules.nios_dns_view instead. + nios_fixed_address: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_fixed_address module has been deprecated. 
Please use infoblox.nios_modules.nios_fixed_address instead. + nios_host_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_host_record module has been deprecated. Please use infoblox.nios_modules.nios_host_record instead. + nios_member: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_member module has been deprecated. Please use infoblox.nios_modules.nios_member instead. + nios_mx_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_mx_record module has been deprecated. Please use infoblox.nios_modules.nios_mx_record instead. + nios_naptr_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_naptr_record module has been deprecated. Please use infoblox.nios_modules.nios_naptr_record instead. + nios_network: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_network module has been deprecated. Please use infoblox.nios_modules.nios_network instead. + nios_network_view: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_network_view module has been deprecated. Please use infoblox.nios_modules.nios_network_view instead. + nios_nsgroup: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_nsgroup module has been deprecated. Please use infoblox.nios_modules.nios_nsgroup instead. + nios_ptr_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_ptr_record module has been deprecated. Please use infoblox.nios_modules.nios_ptr_record instead. + nios_srv_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_srv_record module has been deprecated. Please use infoblox.nios_modules.nios_srv_record instead. + nios_txt_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_txt_record module has been deprecated. 
Please use infoblox.nios_modules.nios_txt_record instead. + nios_zone: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_zone module has been deprecated. Please use infoblox.nios_modules.nios_zone instead. nginx_status_facts: tombstone: removal_version: 3.0.0 @@ -568,11 +644,13 @@ plugin_routing: redirect: community.kubevirt.kubevirt_common_options kubevirt_vm_options: redirect: community.kubevirt.kubevirt_vm_options + nios: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios document fragment has been deprecated. Please use infoblox.nios_modules.nios instead. postgresql: redirect: community.postgresql.postgresql module_utils: - remote_management.dellemc.dellemc_idrac: - redirect: dellemc.openmanage.dellemc_idrac docker.common: redirect: community.docker.common docker.swarm: @@ -587,6 +665,12 @@ plugin_routing: redirect: community.hrobot.robot kubevirt: redirect: community.kubevirt.kubevirt + net_tools.nios.api: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.net_tools.nios.api module_utils has been deprecated. Please use infoblox.nios_modules.api instead. + remote_management.dellemc.dellemc_idrac: + redirect: dellemc.openmanage.dellemc_idrac remote_management.dellemc.ome: redirect: dellemc.openmanage.ome postgresql: diff --git a/plugins/lookup/nios.py b/plugins/lookup/nios.py index 4b606e78ba..819d8077e6 100644 --- a/plugins/lookup/nios.py +++ b/plugins/lookup/nios.py @@ -25,6 +25,10 @@ DOCUMENTATION = ''' author: Unknown (!UNKNOWN) name: nios short_description: Query Infoblox NIOS objects +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it. + alternative: infoblox.nios_modules.nios_lookup + removed_in: 5.0.0 description: - Uses the Infoblox WAPI API to fetch NIOS specified objects. 
This lookup supports adding additional keywords to filter the return data and specify diff --git a/plugins/lookup/nios_next_ip.py b/plugins/lookup/nios_next_ip.py index 5b979b8d07..21773cb53e 100644 --- a/plugins/lookup/nios_next_ip.py +++ b/plugins/lookup/nios_next_ip.py @@ -25,6 +25,10 @@ DOCUMENTATION = ''' author: Unknown (!UNKNOWN) name: nios_next_ip short_description: Return the next available IP address for a network +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it. + alternative: infoblox.nios_modules.nios_next_ip + removed_in: 5.0.0 description: - Uses the Infoblox WAPI API to return the next available IP addresses for a given network CIDR diff --git a/plugins/lookup/nios_next_network.py b/plugins/lookup/nios_next_network.py index 84b230d1fe..2aa22ab704 100644 --- a/plugins/lookup/nios_next_network.py +++ b/plugins/lookup/nios_next_network.py @@ -25,6 +25,10 @@ DOCUMENTATION = ''' author: Unknown (!UNKNOWN) name: nios_next_network short_description: Return the next available network range for a network-container +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it. + alternative: infoblox.nios_modules.nios_next_network + removed_in: 5.0.0 description: - Uses the Infoblox WAPI API to return the next available network addresses for a given network CIDR diff --git a/plugins/modules/net_tools/nios/nios_a_record.py b/plugins/modules/net_tools/nios/nios_a_record.py index 7e8b273024..b4adfe0103 100644 --- a/plugins/modules/net_tools/nios/nios_a_record.py +++ b/plugins/modules/net_tools/nios/nios_a_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_a_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS A records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. 
+ alternative: infoblox.nios_modules.nios_a_record + removed_in: 5.0.0 description: - Adds and/or removes instances of A record objects from Infoblox NIOS servers. This module manages NIOS C(record:a) objects diff --git a/plugins/modules/net_tools/nios/nios_aaaa_record.py b/plugins/modules/net_tools/nios/nios_aaaa_record.py index d35b779f10..9b22f86948 100644 --- a/plugins/modules/net_tools/nios/nios_aaaa_record.py +++ b/plugins/modules/net_tools/nios/nios_aaaa_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_aaaa_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS AAAA records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_aaaa_record + removed_in: 5.0.0 description: - Adds and/or removes instances of AAAA record objects from Infoblox NIOS servers. This module manages NIOS C(record:aaaa) objects diff --git a/plugins/modules/net_tools/nios/nios_cname_record.py b/plugins/modules/net_tools/nios/nios_cname_record.py index 2ab38473f3..099cb02572 100644 --- a/plugins/modules/net_tools/nios/nios_cname_record.py +++ b/plugins/modules/net_tools/nios/nios_cname_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_cname_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS CNAME records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_cname_record + removed_in: 5.0.0 description: - Adds and/or removes instances of CNAME record objects from Infoblox NIOS servers. 
This module manages NIOS C(record:cname) objects diff --git a/plugins/modules/net_tools/nios/nios_dns_view.py b/plugins/modules/net_tools/nios/nios_dns_view.py index af5d56d4ca..46c56fc7bb 100644 --- a/plugins/modules/net_tools/nios/nios_dns_view.py +++ b/plugins/modules/net_tools/nios/nios_dns_view.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_dns_view author: "Peter Sprygada (@privateip)" short_description: Configure Infoblox NIOS DNS views +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_dns_view + removed_in: 5.0.0 description: - Adds and/or removes instances of DNS view objects from Infoblox NIOS servers. This module manages NIOS C(view) objects diff --git a/plugins/modules/net_tools/nios/nios_fixed_address.py b/plugins/modules/net_tools/nios/nios_fixed_address.py index cab3b5e1b5..bc2969bbe5 100644 --- a/plugins/modules/net_tools/nios/nios_fixed_address.py +++ b/plugins/modules/net_tools/nios/nios_fixed_address.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_fixed_address author: "Sumit Jaiswal (@sjaiswal)" short_description: Configure Infoblox NIOS DHCP Fixed Address +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. 
+ alternative: infoblox.nios_modules.nios_fixed_address + removed_in: 5.0.0 description: - A fixed address is a specific IP address that a DHCP server always assigns when a lease request comes from a particular diff --git a/plugins/modules/net_tools/nios/nios_host_record.py b/plugins/modules/net_tools/nios/nios_host_record.py index d3e9d3de95..6fed663657 100644 --- a/plugins/modules/net_tools/nios/nios_host_record.py +++ b/plugins/modules/net_tools/nios/nios_host_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_host_record author: "Peter Sprygada (@privateip)" short_description: Configure Infoblox NIOS host records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_host_record + removed_in: 5.0.0 description: - Adds and/or removes instances of host record objects from Infoblox NIOS servers. This module manages NIOS C(record:host) objects diff --git a/plugins/modules/net_tools/nios/nios_member.py b/plugins/modules/net_tools/nios/nios_member.py index f8bf3e2595..186933864a 100644 --- a/plugins/modules/net_tools/nios/nios_member.py +++ b/plugins/modules/net_tools/nios/nios_member.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_member author: "Krishna Vasudevan (@krisvasudevan)" short_description: Configure Infoblox NIOS members +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_member + removed_in: 5.0.0 description: - Adds and/or removes Infoblox NIOS servers. This module manages NIOS C(member) objects using the Infoblox WAPI interface over REST. 
requirements: diff --git a/plugins/modules/net_tools/nios/nios_mx_record.py b/plugins/modules/net_tools/nios/nios_mx_record.py index a5c93b92bf..6e54ff2bda 100644 --- a/plugins/modules/net_tools/nios/nios_mx_record.py +++ b/plugins/modules/net_tools/nios/nios_mx_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_mx_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS MX records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_mx_record + removed_in: 5.0.0 description: - Adds and/or removes instances of MX record objects from Infoblox NIOS servers. This module manages NIOS C(record:mx) objects diff --git a/plugins/modules/net_tools/nios/nios_naptr_record.py b/plugins/modules/net_tools/nios/nios_naptr_record.py index 387dd1dd98..f943d3d6d9 100644 --- a/plugins/modules/net_tools/nios/nios_naptr_record.py +++ b/plugins/modules/net_tools/nios/nios_naptr_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_naptr_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS NAPTR records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_naptr_record + removed_in: 5.0.0 description: - Adds and/or removes instances of NAPTR record objects from Infoblox NIOS servers. 
This module manages NIOS C(record:naptr) objects diff --git a/plugins/modules/net_tools/nios/nios_network.py b/plugins/modules/net_tools/nios/nios_network.py index 98d06a2ede..6a7decb894 100644 --- a/plugins/modules/net_tools/nios/nios_network.py +++ b/plugins/modules/net_tools/nios/nios_network.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_network author: "Peter Sprygada (@privateip)" short_description: Configure Infoblox NIOS network object +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_network + removed_in: 5.0.0 description: - Adds and/or removes instances of network objects from Infoblox NIOS servers. This module manages NIOS C(network) objects diff --git a/plugins/modules/net_tools/nios/nios_network_view.py b/plugins/modules/net_tools/nios/nios_network_view.py index c8925adcfb..a27f8519a0 100644 --- a/plugins/modules/net_tools/nios/nios_network_view.py +++ b/plugins/modules/net_tools/nios/nios_network_view.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_network_view author: "Peter Sprygada (@privateip)" short_description: Configure Infoblox NIOS network views +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_network_view + removed_in: 5.0.0 description: - Adds and/or removes instances of network view objects from Infoblox NIOS servers. 
This module manages NIOS C(networkview) objects diff --git a/plugins/modules/net_tools/nios/nios_nsgroup.py b/plugins/modules/net_tools/nios/nios_nsgroup.py index b56c3f0b8d..8e8cde399c 100644 --- a/plugins/modules/net_tools/nios/nios_nsgroup.py +++ b/plugins/modules/net_tools/nios/nios_nsgroup.py @@ -11,6 +11,10 @@ DOCUMENTATION = ''' --- module: nios_nsgroup short_description: Configure InfoBlox DNS Nameserver Groups +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_nsgroup + removed_in: 5.0.0 extends_documentation_fragment: - community.general.nios diff --git a/plugins/modules/net_tools/nios/nios_ptr_record.py b/plugins/modules/net_tools/nios/nios_ptr_record.py index 04c1370920..22550f129a 100644 --- a/plugins/modules/net_tools/nios/nios_ptr_record.py +++ b/plugins/modules/net_tools/nios/nios_ptr_record.py @@ -11,6 +11,10 @@ DOCUMENTATION = ''' module: nios_ptr_record author: "Trebuchet Clement (@clementtrebuchet)" short_description: Configure Infoblox NIOS PTR records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_ptr_record + removed_in: 5.0.0 description: - Adds and/or removes instances of PTR record objects from Infoblox NIOS servers. This module manages NIOS C(record:ptr) objects diff --git a/plugins/modules/net_tools/nios/nios_srv_record.py b/plugins/modules/net_tools/nios/nios_srv_record.py index 8a12aa7fd3..574a5fcf8b 100644 --- a/plugins/modules/net_tools/nios/nios_srv_record.py +++ b/plugins/modules/net_tools/nios/nios_srv_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_srv_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS SRV records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. 
+ alternative: infoblox.nios_modules.nios_srv_record + removed_in: 5.0.0 description: - Adds and/or removes instances of SRV record objects from Infoblox NIOS servers. This module manages NIOS C(record:srv) objects diff --git a/plugins/modules/net_tools/nios/nios_txt_record.py b/plugins/modules/net_tools/nios/nios_txt_record.py index 761a895052..b3267af41f 100644 --- a/plugins/modules/net_tools/nios/nios_txt_record.py +++ b/plugins/modules/net_tools/nios/nios_txt_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_txt_record author: "Corey Wanless (@coreywan)" short_description: Configure Infoblox NIOS txt records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_txt_record + removed_in: 5.0.0 description: - Adds and/or removes instances of txt record objects from Infoblox NIOS servers. This module manages NIOS C(record:txt) objects diff --git a/plugins/modules/net_tools/nios/nios_zone.py b/plugins/modules/net_tools/nios/nios_zone.py index 3c59aab298..f97098351b 100644 --- a/plugins/modules/net_tools/nios/nios_zone.py +++ b/plugins/modules/net_tools/nios/nios_zone.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_zone author: "Peter Sprygada (@privateip)" short_description: Configure Infoblox NIOS DNS zones +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_zone + removed_in: 5.0.0 description: - Adds and/or removes instances of DNS zone objects from Infoblox NIOS servers. 
This module manages NIOS C(zone_auth) objects diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 5c759d2095..32e13b1a1e 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -41,6 +41,38 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter +plugins/modules/net_tools/nios/nios_a_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_a_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_host_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_host_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_member.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_member.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:deprecation-mismatch 
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_network.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_network.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_network_view.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_network_view.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_zone.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_zone.py validate-modules:invalid-documentation plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter plugins/modules/system/launchd.py use-argspec-type-path # False positive From 054eb90ae52b6065caf7d5a52dc887bff06b46a2 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Fri, 14 May 2021 12:30:59 +0430 Subject: [PATCH 0043/2828] gitlab_user: add expires_at option (#2450) * gitlab_user: add expires_at option * Add 
changelog * Add integration test * Add expires_at to addSshKeyToUser function * password is required if state is set to present * Check expires_at will not be added to a present ssh key * add documentation about present ssh key * add expires_at to unit tests * Improve documentation Co-authored-by: Felix Fontein * Only pass expires_at to api when it is not None * Emphasize on SSH public key * Apply felixfontein suggestion Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...450-gitlab_user-add_expires_at_option.yaml | 3 + .../source_control/gitlab/gitlab_user.py | 28 +++- .../targets/gitlab_user/defaults/main.yml | 3 + .../targets/gitlab_user/tasks/main.yml | 12 +- .../targets/gitlab_user/tasks/sshkey.yml | 134 ++++++++++++++++++ .../source_control/gitlab/test_gitlab_user.py | 6 +- 6 files changed, 172 insertions(+), 14 deletions(-) create mode 100644 changelogs/fragments/2450-gitlab_user-add_expires_at_option.yaml create mode 100644 tests/integration/targets/gitlab_user/tasks/sshkey.yml diff --git a/changelogs/fragments/2450-gitlab_user-add_expires_at_option.yaml b/changelogs/fragments/2450-gitlab_user-add_expires_at_option.yaml new file mode 100644 index 0000000000..290e13847a --- /dev/null +++ b/changelogs/fragments/2450-gitlab_user-add_expires_at_option.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - gitlab_user - add ``expires_at`` option (https://github.com/ansible-collections/community.general/issues/2325). diff --git a/plugins/modules/source_control/gitlab/gitlab_user.py b/plugins/modules/source_control/gitlab/gitlab_user.py index 9fefe1aff9..4d300ea842 100644 --- a/plugins/modules/source_control/gitlab/gitlab_user.py +++ b/plugins/modules/source_control/gitlab/gitlab_user.py @@ -57,16 +57,22 @@ options: type: str sshkey_name: description: - - The name of the sshkey + - The name of the SSH public key. type: str sshkey_file: description: - - The ssh key itself. + - The SSH public key itself. 
type: str + sshkey_expires_at: + description: + - The expiration date of the SSH public key in ISO 8601 format C(YYYY-MM-DDTHH:MM:SSZ). + - This is only used when adding new SSH public keys. + type: str + version_added: 3.1.0 group: description: - Id or Full path of parent group in the form of group/name. - - Add user as an member to this group. + - Add user as a member to this group. type: str access_level: description: @@ -254,7 +260,8 @@ class GitLabUser(object): if options['sshkey_name'] and options['sshkey_file']: key_changed = self.addSshKeyToUser(user, { 'name': options['sshkey_name'], - 'file': options['sshkey_file']}) + 'file': options['sshkey_file'], + 'expires_at': options['sshkey_expires_at']}) changed = changed or key_changed # Assign group @@ -295,7 +302,7 @@ class GitLabUser(object): ''' @param user User object - @param sshkey Dict containing sshkey infos {"name": "", "file": ""} + @param sshkey Dict containing sshkey infos {"name": "", "file": "", "expires_at": ""} ''' def addSshKeyToUser(self, user, sshkey): if not self.sshKeyExists(user, sshkey['name']): @@ -303,9 +310,13 @@ class GitLabUser(object): return True try: - user.keys.create({ + parameter = { 'title': sshkey['name'], - 'key': sshkey['file']}) + 'key': sshkey['file'], + } + if sshkey['expires_at'] is not None: + parameter['expires_at'] = sshkey['expires_at'] + user.keys.create(parameter) except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e)) return True @@ -471,6 +482,7 @@ def main(): email=dict(type='str'), sshkey_name=dict(type='str'), sshkey_file=dict(type='str', no_log=False), + sshkey_expires_at=dict(type='str', no_log=False), group=dict(type='str'), access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]), confirm=dict(type='bool', default=True), @@ -503,6 +515,7 @@ def main(): user_email = module.params['email'] user_sshkey_name = 
module.params['sshkey_name'] user_sshkey_file = module.params['sshkey_file'] + user_sshkey_expires_at = module.params['sshkey_expires_at'] group_path = module.params['group'] access_level = module.params['access_level'] confirm = module.params['confirm'] @@ -549,6 +562,7 @@ def main(): "email": user_email, "sshkey_name": user_sshkey_name, "sshkey_file": user_sshkey_file, + "sshkey_expires_at": user_sshkey_expires_at, "group_path": group_path, "access_level": access_level, "confirm": confirm, diff --git a/tests/integration/targets/gitlab_user/defaults/main.yml b/tests/integration/targets/gitlab_user/defaults/main.yml index a6755cf412..bbe016b0a8 100644 --- a/tests/integration/targets/gitlab_user/defaults/main.yml +++ b/tests/integration/targets/gitlab_user/defaults/main.yml @@ -1,3 +1,6 @@ gitlab_user: ansible_test_user gitlab_user_pass: Secr3tPassw00rd gitlab_user_email: root@localhost +gitlab_sshkey_name: ansibletest +gitlab_sshkey_file: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDI8GIMlrirf+zsvBpxnF0daykP6YEJ5wytZXhDGD2dZXg9Tln0KUSDgreT3FDgoabjlOmG1L/nhu6ML76WCsmc/wnVMlXlDlQpVJSQ2PCxGNs9WRW7Y/Pk6t9KtV/VSYr0LaPgLEU8VkffSUBJezbKa1cssjb4CmRRqcePRNYpgCXdK05TEgFvmXl9qIM8Domf1ak1PlbyMmi/MytzHmnVFzxgUKv5c0Mr+vguCi131gPdh3QSf5AHPLEoO9LcMfu2IO1zvl61wYfsJ0Wn2Fncw+tJQfUin0ffTFgUIsGqki04/YjXyWynjSwQf5Jym4BYM0i2zlDUyRxs4/Tfp4yvJFik42ambzjLK6poq+iCpQReeYih9WZUaZwUQe7zYWhTOuoV7ydsk8+kDRMPidF9K5zWkQnglGrOzdbTqnhxNpwHCg2eSRJ49kPYLOH76g8P7IQvl+zluG0o8Nndir1WcYil4D4CCBskM8WbmrElZH1CRyP/NQMNIf4hFMItTjk= ansible@ansible +gitlab_sshkey_expires_at: 2030-01-01T00:00:00.000Z diff --git a/tests/integration/targets/gitlab_user/tasks/main.yml b/tests/integration/targets/gitlab_user/tasks/main.yml index 6cbcd14c34..dddf7aaea8 100644 --- a/tests/integration/targets/gitlab_user/tasks/main.yml +++ b/tests/integration/targets/gitlab_user/tasks/main.yml @@ -56,7 +56,7 @@ - gitlab_user_state_again.user.is_admin == False -- name: Update User Test => Make User Admin +- name: Update User Test => Make User 
Admin gitlab_user: api_url: "{{ gitlab_host }}" email: "{{ gitlab_user_email }}" @@ -189,8 +189,8 @@ api_url: "{{ gitlab_host }}" validate_certs: False - # note: the only way to check if a password really is what it is expected - # to be is to use it for login, so we use it here instead of the + # note: the only way to check if a password really is what it is expected + # to be is to use it for login, so we use it here instead of the # default token assuming that a user can always change its own password api_username: "{{ gitlab_user }}" api_password: "{{ gitlab_user_pass }}" @@ -205,8 +205,8 @@ - name: Check PW setting return state assert: that: - # note: there is no way to determine if a password has changed or - # not, so it can only be always yellow or always green, we + # note: there is no way to determine if a password has changed or + # not, so it can only be always yellow or always green, we # decided for always green for now - gitlab_user_state is not changed @@ -248,3 +248,5 @@ assert: that: - gitlab_user_state is not changed + +- include_tasks: sshkey.yml diff --git a/tests/integration/targets/gitlab_user/tasks/sshkey.yml b/tests/integration/targets/gitlab_user/tasks/sshkey.yml new file mode 100644 index 0000000000..2d2067e74b --- /dev/null +++ b/tests/integration/targets/gitlab_user/tasks/sshkey.yml @@ -0,0 +1,134 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Create gitlab user with sshkey credentials + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + password: "{{ gitlab_user_pass }}" + validate_certs: false + sshkey_name: "{{ gitlab_sshkey_name }}" + sshkey_file: "{{ 
gitlab_sshkey_file }}" + state: present + register: gitlab_user_sshkey + +- name: Check user has been created correctly + assert: + that: + - gitlab_user_sshkey is changed + +- name: Create gitlab user again + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + password: "{{ gitlab_user_pass }}" + validate_certs: false + sshkey_name: "{{ gitlab_sshkey_name }}" + sshkey_file: "{{ gitlab_sshkey_file }}" + state: present + register: gitlab_user_sshkey_again + +- name: Check state is not changed + assert: + that: + - gitlab_user_sshkey_again is not changed + +- name: Add expires_at to an already created gitlab user with ssh key + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + password: "{{ gitlab_user_pass }}" + validate_certs: false + sshkey_name: "{{ gitlab_sshkey_name }}" + sshkey_file: "{{ gitlab_sshkey_file }}" + sshkey_expires_at: "{{ gitlab_sshkey_expires_at }}" + state: present + register: gitlab_user_created_user_sshkey_expires_at + +- name: Check expires_at will not be added to a present ssh key + assert: + that: + - gitlab_user_created_user_sshkey_expires_at is not changed + +- name: Remove created gitlab user + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + validate_certs: false + state: absent + register: gitlab_user_sshkey_remove + +- name: Check user has been removed correctly + assert: + that: + - gitlab_user_sshkey_remove is changed + +- name: Create gitlab user with sshkey and expires_at + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user 
}}" + password: "{{ gitlab_user_pass }}" + validate_certs: false + sshkey_name: "{{ gitlab_sshkey_name }}" + sshkey_file: "{{ gitlab_sshkey_file }}" + sshkey_expires_at: "{{ gitlab_sshkey_expires_at }}" + state: present + register: gitlab_user_sshkey_expires_at + +- name: Check user has been created correctly + assert: + that: + - gitlab_user_sshkey_expires_at is changed + +- name: Create gitlab user with sshkey and expires_at again + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + password: "{{ gitlab_user_pass }}" + validate_certs: false + sshkey_name: "{{ gitlab_sshkey_name }}" + sshkey_file: "{{ gitlab_sshkey_file }}" + sshkey_expires_at: "{{ gitlab_sshkey_expires_at }}" + state: present + register: gitlab_user_sshkey_expires_at_again + +- name: Check state is not changed + assert: + that: + - gitlab_user_sshkey_expires_at_again is not changed + +- name: Remove created gitlab user + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + validate_certs: false + state: absent + register: gitlab_user_sshkey_expires_at_remove + +- name: Check user has been removed correctly + assert: + that: + - gitlab_user_sshkey_expires_at_remove is changed diff --git a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py index 4a47654a8c..5722854e17 100644 --- a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py +++ b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py @@ -144,7 +144,8 @@ class TestGitlabUser(GitlabModuleTestCase): 'name': "Public key", 'file': "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJe" 
"jgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4" - "soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0="}) + "soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=", + 'expires_at': ""}) self.assertEqual(rvalue, False) rvalue = self.moduleUtil.addSshKeyToUser(user, { @@ -153,7 +154,8 @@ class TestGitlabUser(GitlabModuleTestCase): "dRuSuA5zszUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+" "xawxKWmI7hJ5S0tOv6MJ+IxyTa4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2j" "TiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH2WOKBw6za0az6XoG75obUdFVdW3qcD0x" - "c809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF"}) + "c809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF", + 'expires_at': "2027-01-01"}) self.assertEqual(rvalue, True) @with_httmock(resp_get_group) From e2dfd42dd49115dfb4b07d484c98dfd7b300da49 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Fri, 14 May 2021 13:03:30 +0430 Subject: [PATCH 0044/2828] proxmox_nic: set mtu on interface even if it's not virtio (#2505) * Set mtu on interface whatsoever * add changelog fragment * Revert "add changelog fragment" This reverts commit 5f2f1e7febd848b1fd095635a85bf5215fbcd17d. 
--- plugins/modules/cloud/misc/proxmox_nic.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/modules/cloud/misc/proxmox_nic.py b/plugins/modules/cloud/misc/proxmox_nic.py index a9c9f14ddc..23be9473eb 100644 --- a/plugins/modules/cloud/misc/proxmox_nic.py +++ b/plugins/modules/cloud/misc/proxmox_nic.py @@ -211,9 +211,8 @@ def update_nic(module, proxmox, vmid, interface, model, **kwargs): config_provided += ',link_down=1' if kwargs['mtu']: - if model == 'virtio': - config_provided += ",mtu={0}".format(kwargs['mtu']) - else: + config_provided += ",mtu={0}".format(kwargs['mtu']) + if model != 'virtio': module.warn( 'Ignoring MTU for nic {0} on VM with vmid {1}, ' 'model should be set to \'virtio\': '.format(interface, vmid)) From 5d0a7f40f2dd87e60b59f4ca7c3390fcc168d9ff Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Fri, 14 May 2021 14:25:27 +0530 Subject: [PATCH 0045/2828] random_pet: Random pet name generator (#2479) A lookup plugin to generate random pet names based upon criteria. 
Signed-off-by: Abhijeet Kasurde --- plugins/lookup/random_pet.py | 99 +++++++++++++++++++ .../targets/lookup_random_pet/aliases | 3 + .../lookup_random_pet/dependencies.yml | 6 ++ .../targets/lookup_random_pet/runme.sh | 9 ++ .../targets/lookup_random_pet/test.yml | 25 +++++ 5 files changed, 142 insertions(+) create mode 100644 plugins/lookup/random_pet.py create mode 100644 tests/integration/targets/lookup_random_pet/aliases create mode 100644 tests/integration/targets/lookup_random_pet/dependencies.yml create mode 100755 tests/integration/targets/lookup_random_pet/runme.sh create mode 100644 tests/integration/targets/lookup_random_pet/test.yml diff --git a/plugins/lookup/random_pet.py b/plugins/lookup/random_pet.py new file mode 100644 index 0000000000..6caf178e4b --- /dev/null +++ b/plugins/lookup/random_pet.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Abhijeet Kasurde +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' + name: random_pet + author: + - Abhijeet Kasurde (@Akasurde) + short_description: Generates random pet names + version_added: '3.1.0' + requirements: + - petname U(https://github.com/dustinkirkland/python-petname) + description: + - Generates random pet names that can be used as unique identifiers for the resources. + options: + words: + description: + - The number of words in the pet name. + default: 2 + type: int + length: + description: + - The maximal length of every component of the pet name. + - Values below 3 will be set to 3 by petname. + default: 6 + type: int + prefix: + description: A string to prefix with the name. + type: str + separator: + description: The character to separate words in the pet name. 
+ default: "-" + type: str +''' + +EXAMPLES = r''' +- name: Generate pet name + ansible.builtin.debug: + var: lookup('community.general.random_pet') + # Example result: 'loving-raptor' + +- name: Generate pet name with 3 words + ansible.builtin.debug: + var: lookup('community.general.random_pet', words=3) + # Example result: 'fully-fresh-macaw' + +- name: Generate pet name with separator + ansible.builtin.debug: + var: lookup('community.general.random_pet', separator="_") + # Example result: 'causal_snipe' + +- name: Generate pet name with length + ansible.builtin.debug: + var: lookup('community.general.random_pet', length=7) + # Example result: 'natural-peacock' +''' + +RETURN = r''' + _raw: + description: A one-element list containing a random pet name + type: list + elements: str +''' + +try: + import petname + + HAS_PETNAME = True +except ImportError: + HAS_PETNAME = False + +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + + if not HAS_PETNAME: + raise AnsibleError('Python petname library is required. 
' + 'Please install using "pip install petname"') + + self.set_options(var_options=variables, direct=kwargs) + words = self.get_option('words') + length = self.get_option('length') + prefix = self.get_option('prefix') + separator = self.get_option('separator') + + values = petname.Generate(words=words, separator=separator, letters=length) + if prefix: + values = "%s%s%s" % (prefix, separator, values) + + return [values] diff --git a/tests/integration/targets/lookup_random_pet/aliases b/tests/integration/targets/lookup_random_pet/aliases new file mode 100644 index 0000000000..bc987654d9 --- /dev/null +++ b/tests/integration/targets/lookup_random_pet/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/lookup_random_pet/dependencies.yml b/tests/integration/targets/lookup_random_pet/dependencies.yml new file mode 100644 index 0000000000..b6b679d966 --- /dev/null +++ b/tests/integration/targets/lookup_random_pet/dependencies.yml @@ -0,0 +1,6 @@ +--- +- hosts: localhost + tasks: + - name: Install Petname Python package + pip: + name: petname \ No newline at end of file diff --git a/tests/integration/targets/lookup_random_pet/runme.sh b/tests/integration/targets/lookup_random_pet/runme.sh new file mode 100755 index 0000000000..afdff7bb9d --- /dev/null +++ b/tests/integration/targets/lookup_random_pet/runme.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +set -eux + +ANSIBLE_ROLES_PATH=../ \ + ansible-playbook dependencies.yml -v "$@" + +ANSIBLE_ROLES_PATH=../ \ + ansible-playbook test.yml -v "$@" diff --git a/tests/integration/targets/lookup_random_pet/test.yml b/tests/integration/targets/lookup_random_pet/test.yml new file mode 100644 index 0000000000..1ab619d2f4 --- /dev/null +++ b/tests/integration/targets/lookup_random_pet/test.yml @@ -0,0 
+1,25 @@ +- hosts: localhost + gather_facts: no + tasks: + - name: Call plugin + set_fact: + result1: "{{ query('community.general.random_pet', words=3) }}" + result2: "{{ query('community.general.random_pet', length=3) }}" + result3: "{{ query('community.general.random_pet', prefix='kubernetes') }}" + result4: "{{ query('community.general.random_pet', separator='_') }}" + result5: "{{ query('community.general.random_pet', words=2, length=6, prefix='kubernetes', separator='_') }}" + + - name: Check results + assert: + that: + - result1 | length == 1 + - result1[0].split('-') | length == 3 + - result2 | length == 1 + - result2[0].split('-')[0] | length <= 3 + - result3 | length == 1 + - result3[0].split('-')[0] == 'kubernetes' + - result4 | length == 1 + - result4[0].split('_') | length == 2 + - result5 | length == 1 + - result5[0].split('_') | length == 3 + - result5[0].split('_')[0] == 'kubernetes' From a385cbb11dd22953451890c9c2157538977972a8 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Fri, 14 May 2021 16:31:44 -0400 Subject: [PATCH 0046/2828] java_keystore: New ssl_backend option for cryptography (#2485) * Adding cryptography as a backend for OpenSSL operations * Updating unit tests and adding changelog fragment * Allowing private key password option when using unprotected key * Incorporating suggestions from initial review * Centralizing module exit path --- ...85-java_keystore-ssl_backend-parameter.yml | 2 + plugins/modules/system/java_keystore.py | 461 ++++++++++++------ .../targets/java_keystore/tasks/main.yml | 13 + .../targets/java_keystore/tasks/tests.yml | 1 + .../modules/system/test_java_keystore.py | 168 ++++--- 5 files changed, 414 insertions(+), 231 deletions(-) create mode 100644 changelogs/fragments/2485-java_keystore-ssl_backend-parameter.yml diff --git a/changelogs/fragments/2485-java_keystore-ssl_backend-parameter.yml b/changelogs/fragments/2485-java_keystore-ssl_backend-parameter.yml new file mode 100644 index 0000000000..b446476f82 --- 
/dev/null +++ b/changelogs/fragments/2485-java_keystore-ssl_backend-parameter.yml @@ -0,0 +1,2 @@ +minor_changes: + - java_keystore - added ``ssl_backend`` parameter for using the cryptography library instead of the OpenSSL binary (https://github.com/ansible-collections/community.general/pull/2485). diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py index ebfe6abdd7..78bcfb6af6 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/system/java_keystore.py @@ -88,9 +88,19 @@ options: description: - Mode the file should be. required: false + ssl_backend: + description: + - Backend for loading private keys and certificates. + type: str + default: openssl + choices: + - openssl + - cryptography + version_added: 3.1.0 requirements: - - openssl in PATH + - openssl in PATH (when I(ssl_backend=openssl)) - keytool in PATH + - cryptography >= 3.0 (when I(ssl_backend=cryptography)) author: - Guillaume Grossetie (@Mogztter) - quidame (@quidame) @@ -164,55 +174,283 @@ import os import re import tempfile -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import PY2 +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text + +try: + from cryptography.hazmat.primitives.serialization.pkcs12 import serialize_key_and_certificates + from cryptography.hazmat.primitives.serialization import ( + BestAvailableEncryption, + NoEncryption, + load_pem_private_key, + load_der_private_key, + ) + from cryptography.x509 import ( + load_pem_x509_certificate, + load_der_x509_certificate, + ) + from cryptography.hazmat.primitives import hashes + from cryptography.exceptions import UnsupportedAlgorithm + from cryptography.hazmat.backends.openssl import backend + HAS_CRYPTOGRAPHY_PKCS12 = True +except ImportError: + HAS_CRYPTOGRAPHY_PKCS12 = False -def read_certificate_fingerprint(module, openssl_bin, 
certificate_path): - current_certificate_fingerprint_cmd = [openssl_bin, "x509", "-noout", "-in", certificate_path, "-fingerprint", "-sha256"] - (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = run_commands(module, current_certificate_fingerprint_cmd) - if rc != 0: - return module.fail_json(msg=current_certificate_fingerprint_out, - err=current_certificate_fingerprint_err, - cmd=current_certificate_fingerprint_cmd, - rc=rc) +class JavaKeystore: + def __init__(self, module): + self.module = module - current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out) - if not current_certificate_match: - return module.fail_json(msg="Unable to find the current certificate fingerprint in %s" % current_certificate_fingerprint_out, - cmd=current_certificate_fingerprint_cmd, - rc=rc) + self.keytool_bin = module.get_bin_path('keytool', True) - return current_certificate_match.group(1) - - -def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_password): - stored_certificate_fingerprint_cmd = [keytool_bin, "-list", "-alias", alias, "-keystore", keystore_path, "-storepass:env", "STOREPASS", "-v"] - (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = run_commands( - module, stored_certificate_fingerprint_cmd, environ_update=dict(STOREPASS=keystore_password)) - if rc != 0: - if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias in stored_certificate_fingerprint_out: - return "alias mismatch" - if re.match(r'keytool error: java\.io\.IOException: [Kk]eystore( was tampered with, or)? 
password was incorrect', - stored_certificate_fingerprint_out): - return "password mismatch" - return module.fail_json(msg=stored_certificate_fingerprint_out, - err=stored_certificate_fingerprint_err, - cmd=stored_certificate_fingerprint_cmd, - rc=rc) - - stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out) - if not stored_certificate_match: - return module.fail_json(msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out, - cmd=stored_certificate_fingerprint_cmd, - rc=rc) - - return stored_certificate_match.group(1) - - -def run_commands(module, cmd, data=None, environ_update=None, check_rc=False): - return module.run_command(cmd, check_rc=check_rc, data=data, environ_update=environ_update) + self.certificate = module.params['certificate'] + self.keypass = module.params['private_key_passphrase'] + self.keystore_path = module.params['dest'] + self.name = module.params['name'] + self.password = module.params['password'] + self.private_key = module.params['private_key'] + self.ssl_backend = module.params['ssl_backend'] + + if self.ssl_backend == 'openssl': + self.openssl_bin = module.get_bin_path('openssl', True) + else: + if not HAS_CRYPTOGRAPHY_PKCS12: + self.module.fail_json(msg=missing_required_lib('cryptography >= 3.0')) + + if module.params['certificate_path'] is None: + self.certificate_path = create_file(self.certificate) + self.module.add_cleanup_file(self.certificate_path) + else: + self.certificate_path = module.params['certificate_path'] + + if module.params['private_key_path'] is None: + self.private_key_path = create_file(self.private_key) + self.module.add_cleanup_file(self.private_key_path) + else: + self.private_key_path = module.params['private_key_path'] + + def update_permissions(self): + try: + file_args = self.module.load_file_common_arguments(self.module.params, path=self.keystore_path) + except TypeError: + # The path argument is only supported in Ansible-base 
2.10+. Fall back to + # pre-2.10 behavior for older Ansible versions. + self.module.params['path'] = self.keystore_path + file_args = self.module.load_file_common_arguments(self.module.params) + return self.module.set_fs_attributes_if_different(file_args, False) + + def read_certificate_fingerprint(self, cert_format='PEM'): + if self.ssl_backend == 'cryptography': + if cert_format == 'PEM': + cert_loader = load_pem_x509_certificate + else: + cert_loader = load_der_x509_certificate + + try: + with open(self.certificate_path, 'rb') as cert_file: + cert = cert_loader( + cert_file.read(), + backend=backend + ) + except (OSError, ValueError) as e: + self.module.fail_json(msg="Unable to read the provided certificate: %s" % to_native(e)) + + fp = hex_decode(cert.fingerprint(hashes.SHA256())).upper() + fingerprint = ':'.join([fp[i:i + 2] for i in range(0, len(fp), 2)]) + else: + current_certificate_fingerprint_cmd = [ + self.openssl_bin, "x509", "-noout", "-in", self.certificate_path, "-fingerprint", "-sha256" + ] + (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = self.module.run_command( + current_certificate_fingerprint_cmd, + environ_update=None, + check_rc=False + ) + if rc != 0: + return self.module.fail_json( + msg=current_certificate_fingerprint_out, + err=current_certificate_fingerprint_err, + cmd=current_certificate_fingerprint_cmd, + rc=rc + ) + + current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out) + if not current_certificate_match: + return self.module.fail_json( + msg="Unable to find the current certificate fingerprint in %s" % ( + current_certificate_fingerprint_out + ), + cmd=current_certificate_fingerprint_cmd, + rc=rc + ) + + fingerprint = current_certificate_match.group(1) + return fingerprint + + def read_stored_certificate_fingerprint(self): + stored_certificate_fingerprint_cmd = [ + self.keytool_bin, "-list", "-alias", self.name, "-keystore", + self.keystore_path, "-storepass:env", 
"STOREPASS", "-v" + ] + (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = self.module.run_command( + stored_certificate_fingerprint_cmd, environ_update=dict(STOREPASS=self.password), check_rc=False) + if rc != 0: + if "keytool error: java.lang.Exception: Alias <%s> does not exist" % self.name \ + in stored_certificate_fingerprint_out: + return "alias mismatch" + if re.match( + r'keytool error: java\.io\.IOException: ' + + '[Kk]eystore( was tampered with, or)? password was incorrect', + stored_certificate_fingerprint_out + ): + return "password mismatch" + return self.module.fail_json( + msg=stored_certificate_fingerprint_out, + err=stored_certificate_fingerprint_err, + cmd=stored_certificate_fingerprint_cmd, + rc=rc + ) + + stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out) + if not stored_certificate_match: + return self.module.fail_json( + msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out, + cmd=stored_certificate_fingerprint_cmd, + rc=rc + ) + + return stored_certificate_match.group(1) + + def cert_changed(self): + current_certificate_fingerprint = self.read_certificate_fingerprint() + stored_certificate_fingerprint = self.read_stored_certificate_fingerprint() + return current_certificate_fingerprint != stored_certificate_fingerprint + + def cryptography_create_pkcs12_bundle(self, keystore_p12_path, key_format='PEM', cert_format='PEM'): + if key_format == 'PEM': + key_loader = load_pem_private_key + else: + key_loader = load_der_private_key + + if cert_format == 'PEM': + cert_loader = load_pem_x509_certificate + else: + cert_loader = load_der_x509_certificate + + try: + with open(self.private_key_path, 'rb') as key_file: + private_key = key_loader( + key_file.read(), + password=to_bytes(self.keypass), + backend=backend + ) + except TypeError: + # Re-attempt with no password to match existing behavior + try: + with open(self.private_key_path, 
'rb') as key_file: + private_key = key_loader( + key_file.read(), + password=None, + backend=backend + ) + except (OSError, TypeError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided private_key: %s" % to_native(e) + ) + except (OSError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided private_key: %s" % to_native(e) + ) + try: + with open(self.certificate_path, 'rb') as cert_file: + cert = cert_loader( + cert_file.read(), + backend=backend + ) + except (OSError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided certificate: %s" % to_native(e) + ) + + if self.password: + encryption = BestAvailableEncryption(to_bytes(self.password)) + else: + encryption = NoEncryption() + + pkcs12_bundle = serialize_key_and_certificates( + name=to_bytes(self.name), + key=private_key, + cert=cert, + cas=None, + encryption_algorithm=encryption + ) + + with open(keystore_p12_path, 'wb') as p12_file: + p12_file.write(pkcs12_bundle) + + def openssl_create_pkcs12_bundle(self, keystore_p12_path): + export_p12_cmd = [self.openssl_bin, "pkcs12", "-export", "-name", self.name, "-in", self.certificate_path, + "-inkey", self.private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] + + # when keypass is provided, add -passin + cmd_stdin = "" + if self.keypass: + export_p12_cmd.append("-passin") + export_p12_cmd.append("stdin") + cmd_stdin = "%s\n" % self.keypass + cmd_stdin += "%s\n%s" % (self.password, self.password) + + (rc, export_p12_out, dummy) = self.module.run_command( + export_p12_cmd, data=cmd_stdin, environ_update=None, check_rc=False + ) + + if rc != 0: + self.module.fail_json(msg=export_p12_out, cmd=export_p12_cmd, rc=rc) + + def create(self): + if self.module.check_mode: + return {'changed': True} + + if os.path.exists(self.keystore_path): + 
os.remove(self.keystore_path) + + keystore_p12_path = create_path() + self.module.add_cleanup_file(keystore_p12_path) + + if self.ssl_backend == 'cryptography': + self.cryptography_create_pkcs12_bundle(keystore_p12_path) + else: + self.openssl_create_pkcs12_bundle(keystore_p12_path) + + import_keystore_cmd = [self.keytool_bin, "-importkeystore", + "-destkeystore", self.keystore_path, + "-srckeystore", keystore_p12_path, + "-srcstoretype", "pkcs12", + "-alias", self.name, + "-deststorepass:env", "STOREPASS", + "-srcstorepass:env", "STOREPASS", + "-noprompt"] + + (rc, import_keystore_out, dummy) = self.module.run_command( + import_keystore_cmd, data=None, environ_update=dict(STOREPASS=self.password), check_rc=False + ) + if rc != 0: + return self.module.fail_json(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc) + + self.update_permissions() + return { + 'changed': True, + 'msg': import_keystore_out, + 'cmd': import_keystore_cmd, + 'rc': rc + } + + def exists(self): + return os.path.exists(self.keystore_path) +# Utility functions def create_path(): dummy, tmpfile = tempfile.mkstemp() os.remove(tmpfile) @@ -226,123 +464,11 @@ def create_file(content): return tmpfile -def create_tmp_certificate(module): - return create_file(module.params['certificate']) - - -def create_tmp_private_key(module): - return create_file(module.params['private_key']) - - -def cert_changed(module, openssl_bin, keytool_bin, keystore_path, keystore_pass, alias): - certificate_path = module.params['certificate_path'] - if certificate_path is None: - certificate_path = create_tmp_certificate(module) - try: - current_certificate_fingerprint = read_certificate_fingerprint(module, openssl_bin, certificate_path) - stored_certificate_fingerprint = read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_pass) - return current_certificate_fingerprint != stored_certificate_fingerprint - finally: - if module.params['certificate_path'] is None: - 
os.remove(certificate_path) - - -def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass): - if module.check_mode: - return module.exit_json(changed=True) - - certificate_path = module.params['certificate_path'] - if certificate_path is None: - certificate_path = create_tmp_certificate(module) - - private_key_path = module.params['private_key_path'] - if private_key_path is None: - private_key_path = create_tmp_private_key(module) - - keystore_p12_path = create_path() - - try: - if os.path.exists(keystore_path): - os.remove(keystore_path) - - export_p12_cmd = [openssl_bin, "pkcs12", "-export", "-name", name, "-in", certificate_path, - "-inkey", private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] - - # when keypass is provided, add -passin - cmd_stdin = "" - if keypass: - export_p12_cmd.append("-passin") - export_p12_cmd.append("stdin") - cmd_stdin = "%s\n" % keypass - cmd_stdin += "%s\n%s" % (password, password) - - (rc, export_p12_out, dummy) = run_commands(module, export_p12_cmd, data=cmd_stdin) - if rc != 0: - return module.fail_json(msg=export_p12_out, - cmd=export_p12_cmd, - rc=rc) - - import_keystore_cmd = [keytool_bin, "-importkeystore", - "-destkeystore", keystore_path, - "-srckeystore", keystore_p12_path, - "-srcstoretype", "pkcs12", - "-alias", name, - "-deststorepass:env", "STOREPASS", - "-srcstorepass:env", "STOREPASS", - "-noprompt"] - - (rc, import_keystore_out, dummy) = run_commands(module, import_keystore_cmd, data=None, - environ_update=dict(STOREPASS=password)) - if rc != 0: - return module.fail_json(msg=import_keystore_out, - cmd=import_keystore_cmd, - rc=rc) - - update_jks_perm(module, keystore_path) - return module.exit_json(changed=True, - msg=import_keystore_out, - cmd=import_keystore_cmd, - rc=rc) - finally: - if module.params['certificate_path'] is None: - os.remove(certificate_path) - if module.params['private_key_path'] is None: - os.remove(private_key_path) - os.remove(keystore_p12_path) - - 
-def update_jks_perm(module, keystore_path): - try: - file_args = module.load_file_common_arguments(module.params, path=keystore_path) - except TypeError: - # The path argument is only supported in Ansible-base 2.10+. Fall back to - # pre-2.10 behavior for older Ansible versions. - module.params['path'] = keystore_path - file_args = module.load_file_common_arguments(module.params) - module.set_fs_attributes_if_different(file_args, False) - - -def process_jks(module): - name = module.params['name'] - password = module.params['password'] - keypass = module.params['private_key_passphrase'] - keystore_path = module.params['dest'] - force = module.params['force'] - openssl_bin = module.get_bin_path('openssl', True) - keytool_bin = module.get_bin_path('keytool', True) - - if os.path.exists(keystore_path): - if force: - create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass) - else: - if cert_changed(module, openssl_bin, keytool_bin, keystore_path, password, name): - create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass) - else: - if not module.check_mode: - update_jks_perm(module, keystore_path) - module.exit_json(changed=False) +def hex_decode(s): + if PY2: + return s.decode('hex') else: - create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass) + return s.hex() class ArgumentSpec(object): @@ -358,6 +484,7 @@ class ArgumentSpec(object): private_key_path=dict(type='path', no_log=False), private_key_passphrase=dict(type='str', no_log=True), password=dict(type='str', required=True, no_log=True), + ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']), force=dict(type='bool', default=False), ) choose_between = ( @@ -379,7 +506,19 @@ def main(): add_file_common_args=spec.add_file_common_args, ) module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - process_jks(module) + + result = dict() + jks = JavaKeystore(module) + + if 
jks.exists(): + if module.params['force'] or jks.cert_changed(): + result = jks.create() + else: + result['changed'] = jks.update_permissions() + else: + result = jks.create() + + module.exit_json(**result) if __name__ == '__main__': diff --git a/tests/integration/targets/java_keystore/tasks/main.yml b/tests/integration/targets/java_keystore/tasks/main.yml index 358222aea8..b5f1f01624 100644 --- a/tests/integration/targets/java_keystore/tasks/main.yml +++ b/tests/integration/targets/java_keystore/tasks/main.yml @@ -9,12 +9,22 @@ - name: Include tasks to create ssl materials on the controller include_tasks: prepare.yml +- set_fact: + ssl_backends: ['openssl'] + +- set_fact: + ssl_backends: "{{ ssl_backends + ['cryptography'] }}" + when: cryptography_version.stdout is version('3.0', '>=') + - when: has_java_keytool block: - name: Include tasks to play with 'certificate' and 'private_key' contents include_tasks: tests.yml vars: remote_cert: false + loop: "{{ ssl_backends }}" + loop_control: + loop_var: ssl_backend - name: Include tasks to create ssl materials on the remote host include_tasks: prepare.yml @@ -23,3 +33,6 @@ include_tasks: tests.yml vars: remote_cert: true + loop: "{{ ssl_backends }}" + loop_control: + loop_var: ssl_backend diff --git a/tests/integration/targets/java_keystore/tasks/tests.yml b/tests/integration/targets/java_keystore/tasks/tests.yml index e0de1c6836..b892dd1d29 100644 --- a/tests/integration/targets/java_keystore/tasks/tests.yml +++ b/tests/integration/targets/java_keystore/tasks/tests.yml @@ -23,6 +23,7 @@ private_key_path: "{{ omit if not remote_cert else output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" private_key_passphrase: "{{ item.passphrase | d(omit) }}" password: changeit + ssl_backend: "{{ ssl_backend }}" loop: "{{ java_keystore_certs }}" check_mode: yes register: result_check diff --git a/tests/unit/plugins/modules/system/test_java_keystore.py b/tests/unit/plugins/modules/system/test_java_keystore.py index 
ec14b3734d..5e99074c95 100644 --- a/tests/unit/plugins/modules/system/test_java_keystore.py +++ b/tests/unit/plugins/modules/system/test_java_keystore.py @@ -14,7 +14,7 @@ from ansible_collections.community.general.tests.unit.plugins.modules.utils impo from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible_collections.community.general.tests.unit.compat.mock import Mock from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec +from ansible_collections.community.general.plugins.modules.system.java_keystore import JavaKeystore, ArgumentSpec class TestCreateJavaKeystore(ModuleTestCase): @@ -28,14 +28,16 @@ class TestCreateJavaKeystore(ModuleTestCase): self.spec = ArgumentSpec() self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file') self.mock_create_path = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_path') - self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands') + self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') + self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') self.mock_os_path_exists = patch('os.path.exists', side_effect=lambda path: True if path == '/path/to/keystore.jks' else orig_exists(path)) self.mock_selinux_context = patch('ansible.module_utils.basic.AnsibleModule.selinux_context', side_effect=lambda path: ['unconfined_u', 'object_r', 'user_home_t', 's0']) self.mock_is_special_selinux_path = patch('ansible.module_utils.basic.AnsibleModule.is_special_selinux_path', side_effect=lambda path: (False, None)) - self.run_commands = self.mock_run_commands.start() + self.run_command = self.mock_run_command.start() + self.get_bin_path = 
self.mock_get_bin_path.start() self.create_file = self.mock_create_file.start() self.create_path = self.mock_create_path.start() self.selinux_context = self.mock_selinux_context.start() @@ -47,7 +49,8 @@ class TestCreateJavaKeystore(ModuleTestCase): super(TestCreateJavaKeystore, self).tearDown() self.mock_create_file.stop() self.mock_create_path.stop() - self.mock_run_commands.stop() + self.mock_run_command.stop() + self.mock_get_bin_path.stop() self.mock_selinux_context.stop() self.mock_is_special_selinux_path.stop() self.mock_os_path_exists.stop() @@ -57,7 +60,38 @@ class TestCreateJavaKeystore(ModuleTestCase): certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', - name='foo', + name='test', + password='changeit' + )) + + module = AnsibleModule( + argument_spec=self.spec.argument_spec, + supports_check_mode=self.spec.supports_check_mode + ) + + with patch('os.remove', return_value=True): + self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] + self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp', ''] + self.run_command.side_effect = [(0, '', ''), (0, '', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + assert jks.create() == { + 'changed': True, + 'cmd': ["keytool", "-importkeystore", + "-destkeystore", "/path/to/keystore.jks", + "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", + "-deststorepass:env", "STOREPASS", "-srcstorepass:env", "STOREPASS", "-noprompt"], + 'msg': '', + 'rc': 0 + } + + def test_create_jks_keypass_fail_export_pkcs12(self): + set_module_args(dict( + certificate='cert-foo', + private_key='private-foo', + private_key_passphrase='passphrase-foo', + dest='/path/to/keystore.jks', + name='test', password='changeit' )) @@ -67,44 +101,15 @@ class TestCreateJavaKeystore(ModuleTestCase): ) module.exit_json = Mock() - - with patch('os.remove', return_value=True): - self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] - 
self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp'] - self.run_commands.side_effect = [(0, '', ''), (0, '', '')] - create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "") - module.exit_json.assert_called_once_with( - changed=True, - cmd=["keytool", "-importkeystore", - "-destkeystore", "/path/to/keystore.jks", - "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", - "-deststorepass:env", "STOREPASS", "-srcstorepass:env", "STOREPASS", "-noprompt"], - msg='', - rc=0 - ) - - def test_create_jks_keypass_fail_export_pkcs12(self): - set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - private_key_passphrase='passphrase-foo', - dest='/path/to/keystore.jks', - name='foo', - password='changeit' - )) - - module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode - ) - module.fail_json = Mock() with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] - self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c'] - self.run_commands.side_effect = [(1, '', ''), (0, '', '')] - create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "passphrase-foo") + self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', ''] + self.run_command.side_effect = [(1, '', ''), (0, '', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + jks.create() module.fail_json.assert_called_once_with( cmd=["openssl", "pkcs12", "-export", "-name", "test", "-in", "/tmp/tmpvalcrt32", @@ -121,7 +126,7 @@ class TestCreateJavaKeystore(ModuleTestCase): certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', - name='foo', + name='test', password='changeit' )) @@ -130,13 +135,16 @@ class TestCreateJavaKeystore(ModuleTestCase): supports_check_mode=self.spec.supports_check_mode ) + 
module.exit_json = Mock() module.fail_json = Mock() with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] - self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c'] - self.run_commands.side_effect = [(1, '', ''), (0, '', '')] - create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "") + self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', ''] + self.run_command.side_effect = [(1, '', ''), (0, '', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + jks.create() module.fail_json.assert_called_once_with( cmd=["openssl", "pkcs12", "-export", "-name", "test", "-in", "/tmp/tmpvalcrt32", @@ -152,7 +160,7 @@ class TestCreateJavaKeystore(ModuleTestCase): certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', - name='foo', + name='test', password='changeit' )) @@ -161,13 +169,16 @@ class TestCreateJavaKeystore(ModuleTestCase): supports_check_mode=self.spec.supports_check_mode ) + module.exit_json = Mock() module.fail_json = Mock() with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] - self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp'] - self.run_commands.side_effect = [(0, '', ''), (1, '', '')] - create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "") + self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp', ''] + self.run_command.side_effect = [(0, '', ''), (1, '', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + jks.create() module.fail_json.assert_called_once_with( cmd=["keytool", "-importkeystore", "-destkeystore", "/path/to/keystore.jks", @@ -186,15 +197,18 @@ class TestCertChanged(ModuleTestCase): super(TestCertChanged, self).setUp() self.spec = ArgumentSpec() self.mock_create_file = 
patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file') - self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands') - self.run_commands = self.mock_run_commands.start() + self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') + self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.run_command = self.mock_run_command.start() self.create_file = self.mock_create_file.start() + self.get_bin_path = self.mock_get_bin_path.start() def tearDown(self): """Teardown.""" super(TestCertChanged, self).tearDown() self.mock_create_file.stop() - self.mock_run_commands.stop() + self.mock_run_command.stop() + self.mock_get_bin_path.stop() def test_cert_unchanged_same_fingerprint(self): set_module_args(dict( @@ -211,9 +225,11 @@ class TestCertChanged(ModuleTestCase): ) with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder'] - self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')] - result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.create_file.side_effect = ['/tmp/placeholder', ''] + self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + result = jks.cert_changed() self.assertFalse(result, 'Fingerprint is identical') def test_cert_changed_fingerprint_mismatch(self): @@ -231,9 +247,11 @@ class TestCertChanged(ModuleTestCase): ) with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder'] - self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')] - result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + 
self.create_file.side_effect = ['/tmp/placeholder', ''] + self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + result = jks.cert_changed() self.assertTrue(result, 'Fingerprint mismatch') def test_cert_changed_fail_alias_does_not_exist(self): @@ -251,10 +269,12 @@ class TestCertChanged(ModuleTestCase): ) with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder'] - self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), - (1, 'keytool error: java.lang.Exception: Alias does not exist', '')] - result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.create_file.side_effect = ['/tmp/placeholder', ''] + self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), + (1, 'keytool error: java.lang.Exception: Alias does not exist', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + result = jks.cert_changed() self.assertTrue(result, 'Alias mismatch detected') def test_cert_changed_password_mismatch(self): @@ -272,10 +292,12 @@ class TestCertChanged(ModuleTestCase): ) with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder'] - self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), - (1, 'keytool error: java.io.IOException: Keystore password was incorrect', '')] - result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.create_file.side_effect = ['/tmp/placeholder', ''] + self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), + (1, 'keytool error: java.io.IOException: Keystore password was incorrect', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + result = jks.cert_changed() self.assertTrue(result, 'Password mismatch detected') def 
test_cert_changed_fail_read_cert(self): @@ -292,12 +314,15 @@ class TestCertChanged(ModuleTestCase): supports_check_mode=self.spec.supports_check_mode ) + module.exit_json = Mock() module.fail_json = Mock() with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/tmpdj6bvvme'] - self.run_commands.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')] - cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.create_file.side_effect = ['/tmp/tmpdj6bvvme', ''] + self.run_command.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + jks.cert_changed() module.fail_json.assert_called_once_with( cmd=["openssl", "x509", "-noout", "-in", "/tmp/tmpdj6bvvme", "-fingerprint", "-sha256"], msg='', @@ -319,12 +344,15 @@ class TestCertChanged(ModuleTestCase): supports_check_mode=self.spec.supports_check_mode ) + module.exit_json = Mock() module.fail_json = Mock(return_value=True) with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder'] - self.run_commands.side_effect = [(0, 'foo: wxyz:9876:stuv', ''), (1, '', 'Oops')] - cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.create_file.side_effect = ['/tmp/placeholder', ''] + self.run_command.side_effect = [(0, 'foo: wxyz:9876:stuv', ''), (1, '', 'Oops')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + jks.cert_changed() module.fail_json.assert_called_with( cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-storepass:env", "STOREPASS", "-v"], msg='', From c8f402806fe1f7d8ee8a0a716c986b1b47860cae Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 16 May 2021 23:24:37 +1200 Subject: [PATCH 0047/2828] Cleanup connections plugins (#2520) * minor refactors 
* minor refactors in plugins/connection/saltstack.py * minor refactors in plugins/connection/qubes.py * minor refactor in plugins/connection/lxc.py * minor refactors in plugins/connection/chroot.py * minor refactors in plugins/connection/funcd.py * minor refactors in plugins/connection/iocage.py * minor refactors in plugins/connection/jail.py * added changelog fragment --- .../fragments/2520-connection-refactors.yml | 9 ++++++ plugins/connection/chroot.py | 27 ++++++++-------- plugins/connection/funcd.py | 18 ++++++----- plugins/connection/iocage.py | 2 +- plugins/connection/jail.py | 26 +++++++-------- plugins/connection/lxc.py | 10 +++--- plugins/connection/qubes.py | 8 +---- plugins/connection/saltstack.py | 32 ++++++++----------- plugins/connection/zone.py | 27 ++++++++-------- 9 files changed, 79 insertions(+), 80 deletions(-) create mode 100644 changelogs/fragments/2520-connection-refactors.yml diff --git a/changelogs/fragments/2520-connection-refactors.yml b/changelogs/fragments/2520-connection-refactors.yml new file mode 100644 index 0000000000..2e5c8273d7 --- /dev/null +++ b/changelogs/fragments/2520-connection-refactors.yml @@ -0,0 +1,9 @@ +minor_changes: + - chroot connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - funcd connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - iocage connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - jail connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - lxc connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). 
+ - qubes connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - saltstack connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - zone connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index ffaea2b198..a18506cb80 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -62,7 +62,7 @@ display = Display() class Connection(ConnectionBase): - ''' Local chroot based connections ''' + """ Local chroot based connections """ transport = 'community.general.chroot' has_pipelining = True @@ -95,7 +95,7 @@ class Connection(ConnectionBase): raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot) def _connect(self): - ''' connect to the chroot ''' + """ connect to the chroot """ if os.path.isabs(self.get_option('chroot_exe')): self.chroot_cmd = self.get_option('chroot_exe') else: @@ -110,17 +110,17 @@ class Connection(ConnectionBase): self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): - ''' run a command on the chroot. This is only needed for implementing + """ run a command on the chroot. This is only needed for implementing put_file() get_file() so that we don't have to read the whole file into memory. compared to exec_command() it looses some niceties like being able to return the process's exit code immediately. 
- ''' + """ executable = self.get_option('executable') local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] - display.vvv("EXEC %s" % (local_cmd), host=self.chroot) + display.vvv("EXEC %s" % local_cmd, host=self.chroot) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -128,16 +128,17 @@ class Connection(ConnectionBase): return p def exec_command(self, cmd, in_data=None, sudoable=False): - ''' run a command on the chroot ''' + """ run a command on the chroot """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) p = self._buffered_exec_command(cmd) stdout, stderr = p.communicate(in_data) - return (p.returncode, stdout, stderr) + return p.returncode, stdout, stderr - def _prefix_login_path(self, remote_path): - ''' Make sure that we put files into a standard path + @staticmethod + def _prefix_login_path(remote_path): + """ Make sure that we put files into a standard path If a path is relative, then we need to choose where to put it. ssh chooses $HOME but we aren't guaranteed that a home dir will @@ -145,13 +146,13 @@ class Connection(ConnectionBase): This also happens to be the former default. 
Can revisit using $HOME instead if it's a problem - ''' + """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) return os.path.normpath(remote_path) def put_file(self, in_path, out_path): - ''' transfer a file from local to chroot ''' + """ transfer a file from local to chroot """ super(Connection, self).put_file(in_path, out_path) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot) @@ -177,7 +178,7 @@ class Connection(ConnectionBase): raise AnsibleError("file or module does not exist at: %s" % in_path) def fetch_file(self, in_path, out_path): - ''' fetch a file from chroot to local ''' + """ fetch a file from chroot to local """ super(Connection, self).fetch_file(in_path, out_path) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot) @@ -201,6 +202,6 @@ class Connection(ConnectionBase): raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) def close(self): - ''' terminate the connection; nothing to do here ''' + """ terminate the connection; nothing to do here """ super(Connection, self).close() self._connected = False diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 3aed7145cb..109e251146 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -44,7 +44,7 @@ display = Display() class Connection(ConnectionBase): - ''' Func-based connections ''' + """ Func-based connections """ has_pipelining = False @@ -53,6 +53,7 @@ class Connection(ConnectionBase): self.host = host # port is unused, this go on func self.port = port + self.client = None def connect(self, port=None): if not HAVE_FUNC: @@ -62,31 +63,32 @@ class Connection(ConnectionBase): return self def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): - ''' run a command on the remote minion ''' + """ run a command on the remote minion """ if in_data: raise AnsibleError("Internal Error: this 
module does not support optimized module pipelining") # totally ignores privlege escalation - display.vvv("EXEC %s" % (cmd), host=self.host) + display.vvv("EXEC %s" % cmd, host=self.host) p = self.client.command.run(cmd)[self.host] - return (p[0], p[1], p[2]) + return p[0], p[1], p[2] - def _normalize_path(self, path, prefix): + @staticmethod + def _normalize_path(path, prefix): if not path.startswith(os.path.sep): path = os.path.join(os.path.sep, path) normpath = os.path.normpath(path) return os.path.join(prefix, normpath[1:]) def put_file(self, in_path, out_path): - ''' transfer a file from local to remote ''' + """ transfer a file from local to remote """ out_path = self._normalize_path(out_path, '/') display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) self.client.local.copyfile.send(in_path, out_path) def fetch_file(self, in_path, out_path): - ''' fetch a file from remote to local ''' + """ fetch a file from remote to local """ in_path = self._normalize_path(in_path, '/') display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) @@ -99,5 +101,5 @@ class Connection(ConnectionBase): shutil.rmtree(tmpdir) def close(self): - ''' terminate the connection; nothing to do here ''' + """ terminate the connection; nothing to do here """ pass diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index 435c789fd2..beb440eae3 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -40,7 +40,7 @@ display = Display() class Connection(Jail): - ''' Local iocage based connections ''' + """ Local iocage based connections """ transport = 'community.general.iocage' diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index 5252e3c4eb..f5d787b62f 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -35,7 +35,6 @@ import os import os.path import subprocess import traceback -import ansible.constants as C from ansible.errors import AnsibleError from ansible.module_utils.six.moves 
import shlex_quote @@ -47,7 +46,7 @@ display = Display() class Connection(ConnectionBase): - ''' Local BSD Jail based connections ''' + """ Local BSD Jail based connections """ modified_jailname_key = 'conn_jail_name' @@ -90,20 +89,20 @@ class Connection(ConnectionBase): return to_text(stdout, errors='surrogate_or_strict').split() def _connect(self): - ''' connect to the jail; nothing to do here ''' + """ connect to the jail; nothing to do here """ super(Connection, self)._connect() if not self._connected: display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): - ''' run a command on the jail. This is only needed for implementing + """ run a command on the jail. This is only needed for implementing put_file() get_file() so that we don't have to read the whole file into memory. compared to exec_command() it looses some niceties like being able to return the process's exit code immediately. - ''' + """ local_cmd = [self.jexec_cmd] set_env = '' @@ -123,16 +122,17 @@ class Connection(ConnectionBase): return p def exec_command(self, cmd, in_data=None, sudoable=False): - ''' run a command on the jail ''' + """ run a command on the jail """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) p = self._buffered_exec_command(cmd) stdout, stderr = p.communicate(in_data) - return (p.returncode, stdout, stderr) + return p.returncode, stdout, stderr - def _prefix_login_path(self, remote_path): - ''' Make sure that we put files into a standard path + @staticmethod + def _prefix_login_path(remote_path): + """ Make sure that we put files into a standard path If a path is relative, then we need to choose where to put it. ssh chooses $HOME but we aren't guaranteed that a home dir will @@ -140,13 +140,13 @@ class Connection(ConnectionBase): This also happens to be the former default. 
Can revisit using $HOME instead if it's a problem - ''' + """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) return os.path.normpath(remote_path) def put_file(self, in_path, out_path): - ''' transfer a file from local to jail ''' + """ transfer a file from local to jail """ super(Connection, self).put_file(in_path, out_path) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail) @@ -172,7 +172,7 @@ class Connection(ConnectionBase): raise AnsibleError("file or module does not exist at: %s" % in_path) def fetch_file(self, in_path, out_path): - ''' fetch a file from jail to local ''' + """ fetch a file from jail to local """ super(Connection, self).fetch_file(in_path, out_path) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail) @@ -196,6 +196,6 @@ class Connection(ConnectionBase): raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr))) def close(self): - ''' terminate the connection; nothing to do here ''' + """ terminate the connection; nothing to do here """ super(Connection, self).close() self._connected = False diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index 8de1acc35d..6512a87c6d 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -42,14 +42,13 @@ try: except ImportError: pass -from ansible import constants as C from ansible import errors from ansible.module_utils._text import to_bytes, to_native from ansible.plugins.connection import ConnectionBase class Connection(ConnectionBase): - ''' Local lxc based connections ''' + """ Local lxc based connections """ transport = 'community.general.lxc' has_pipelining = True @@ -62,7 +61,7 @@ class Connection(ConnectionBase): self.container = None def _connect(self): - ''' connect to the lxc; nothing to do here ''' + """ connect to the lxc; nothing to do here """ super(Connection, self)._connect() if not HAS_LIBLXC: @@ -77,7 +76,8 @@ 
class Connection(ConnectionBase): if self.container.state == "STOPPED": raise errors.AnsibleError("%s is not running" % self.container_name) - def _communicate(self, pid, in_data, stdin, stdout, stderr): + @staticmethod + def _communicate(pid, in_data, stdin, stdout, stderr): buf = {stdout: [], stderr: []} read_fds = [stdout, stderr] if in_data: @@ -111,7 +111,7 @@ class Connection(ConnectionBase): return fd def exec_command(self, cmd, in_data=None, sudoable=False): - ''' run a command on the chroot ''' + """ run a command on the chroot """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) # python2-lxc needs bytes. python3-lxc needs text. diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index aa0075b674..d3f934b601 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -37,15 +37,9 @@ DOCUMENTATION = ''' # - name: hosts ''' -import shlex -import shutil - -import os -import base64 import subprocess -import ansible.constants as C -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils._text import to_bytes from ansible.plugins.connection import ConnectionBase, ensure_connect from ansible.errors import AnsibleConnectionFailure from ansible.utils.display import Display diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index 6be7a79949..f8e3680aea 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -16,14 +16,11 @@ DOCUMENTATION = ''' - This allows you to use existing Saltstack infrastructure to connect to targets. 
''' -import re import os -import pty -import codecs -import subprocess +import base64 -from ansible.module_utils._text import to_bytes, to_text -from ansible.module_utils.six.moves import cPickle +from ansible import errors +from ansible.plugins.connection import ConnectionBase HAVE_SALTSTACK = False try: @@ -32,13 +29,9 @@ try: except ImportError: pass -import os -from ansible import errors -from ansible.plugins.connection import ConnectionBase - class Connection(ConnectionBase): - ''' Salt-based connections ''' + """ Salt-based connections """ has_pipelining = False # while the name of the product is salt, naming that module salt cause @@ -58,29 +51,30 @@ class Connection(ConnectionBase): return self def exec_command(self, cmd, sudoable=False, in_data=None): - ''' run a command on the remote minion ''' + """ run a command on the remote minion """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - self._display.vvv("EXEC %s" % (cmd), host=self.host) + self._display.vvv("EXEC %s" % cmd, host=self.host) # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077 res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd]) if self.host not in res: raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host) p = res[self.host] - return (p['retcode'], p['stdout'], p['stderr']) + return p['retcode'], p['stdout'], p['stderr'] - def _normalize_path(self, path, prefix): + @staticmethod + def _normalize_path(path, prefix): if not path.startswith(os.path.sep): path = os.path.join(os.path.sep, path) normpath = os.path.normpath(path) return os.path.join(prefix, normpath[1:]) def put_file(self, in_path, out_path): - ''' transfer a file from local to remote ''' + """ transfer a file from local to remote """ super(Connection, 
self).put_file(in_path, out_path) @@ -88,11 +82,11 @@ class Connection(ConnectionBase): self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) with open(in_path, 'rb') as in_fh: content = in_fh.read() - self.client.cmd(self.host, 'hashutil.base64_decodefile', [codecs.encode(content, 'base64'), out_path]) + self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path]) # TODO test it def fetch_file(self, in_path, out_path): - ''' fetch a file from remote to local ''' + """ fetch a file from remote to local """ super(Connection, self).fetch_file(in_path, out_path) @@ -102,5 +96,5 @@ class Connection(ConnectionBase): open(out_path, 'wb').write(content) def close(self): - ''' terminate the connection; nothing to do here ''' + """ terminate the connection; nothing to do here """ pass diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index 7a7a36331d..b101ec5cf3 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -31,7 +31,6 @@ import os.path import subprocess import traceback -from ansible import constants as C from ansible.errors import AnsibleError from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils._text import to_bytes @@ -42,7 +41,7 @@ display = Display() class Connection(ConnectionBase): - ''' Local zone based connections ''' + """ Local zone based connections """ transport = 'community.general.zone' has_pipelining = True @@ -75,9 +74,9 @@ class Connection(ConnectionBase): stdout=subprocess.PIPE, stderr=subprocess.PIPE) zones = [] - for l in process.stdout.readlines(): + for line in process.stdout.readlines(): # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared - s = l.split(':') + s = line.split(':') if s[1] != 'global': zones.append(s[1]) @@ -95,20 +94,20 @@ class Connection(ConnectionBase): return path + '/root' def _connect(self): - ''' connect to the zone; nothing to do here ''' + """ connect to the zone; 
nothing to do here """ super(Connection, self)._connect() if not self._connected: display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): - ''' run a command on the zone. This is only needed for implementing + """ run a command on the zone. This is only needed for implementing put_file() get_file() so that we don't have to read the whole file into memory. compared to exec_command() it looses some niceties like being able to return the process's exit code immediately. - ''' + """ # NOTE: zlogin invokes a shell (just like ssh does) so we do not pass # this through /bin/sh -c here. Instead it goes through the shell # that zlogin selects. @@ -122,16 +121,16 @@ class Connection(ConnectionBase): return p def exec_command(self, cmd, in_data=None, sudoable=False): - ''' run a command on the zone ''' + """ run a command on the zone """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) p = self._buffered_exec_command(cmd) stdout, stderr = p.communicate(in_data) - return (p.returncode, stdout, stderr) + return p.returncode, stdout, stderr def _prefix_login_path(self, remote_path): - ''' Make sure that we put files into a standard path + """ Make sure that we put files into a standard path If a path is relative, then we need to choose where to put it. ssh chooses $HOME but we aren't guaranteed that a home dir will @@ -139,13 +138,13 @@ class Connection(ConnectionBase): This also happens to be the former default. 
Can revisit using $HOME instead if it's a problem - ''' + """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) return os.path.normpath(remote_path) def put_file(self, in_path, out_path): - ''' transfer a file from local to zone ''' + """ transfer a file from local to zone """ super(Connection, self).put_file(in_path, out_path) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone) @@ -171,7 +170,7 @@ class Connection(ConnectionBase): raise AnsibleError("file or module does not exist at: %s" % in_path) def fetch_file(self, in_path, out_path): - ''' fetch a file from zone to local ''' + """ fetch a file from zone to local """ super(Connection, self).fetch_file(in_path, out_path) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone) @@ -195,6 +194,6 @@ class Connection(ConnectionBase): raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) def close(self): - ''' terminate the connection; nothing to do here ''' + """ terminate the connection; nothing to do here """ super(Connection, self).close() self._connected = False From 5b7751530819ce066f8edf899ef3ecd2d7791e6d Mon Sep 17 00:00:00 2001 From: iridian <442359+iridian-ks@users.noreply.github.com> Date: Sun, 16 May 2021 22:32:51 -0700 Subject: [PATCH 0048/2828] 1085 updating the hcl whitelist to include all supported options (#2495) * 1085 updating the hcl whitelist to include all supported options * Update changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml Co-authored-by: Felix Fontein Co-authored-by: Dillon Gilmore Co-authored-by: Felix Fontein --- .../1085-consul-acl-hcl-whitelist-update.yml | 2 ++ .../modules/clustering/consul/consul_acl.py | 19 ++++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml diff --git a/changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml 
b/changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml new file mode 100644 index 0000000000..78db43da7d --- /dev/null +++ b/changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml @@ -0,0 +1,2 @@ +bugfixes: + - consul_acl - update the hcl allowlist to include all supported options (https://github.com/ansible-collections/community.general/pull/2495). diff --git a/plugins/modules/clustering/consul/consul_acl.py b/plugins/modules/clustering/consul/consul_acl.py index cb5395ed31..5a37ca0eb9 100644 --- a/plugins/modules/clustering/consul/consul_acl.py +++ b/plugins/modules/clustering/consul/consul_acl.py @@ -189,7 +189,24 @@ from collections import defaultdict from ansible.module_utils.basic import to_text, AnsibleModule -RULE_SCOPES = ["agent", "event", "key", "keyring", "node", "operator", "query", "service", "session"] +RULE_SCOPES = [ + "agent", + "agent_prefix", + "event", + "event_prefix", + "key", + "key_prefix", + "keyring", + "node", + "node_prefix", + "operator", + "query", + "query_prefix", + "service", + "service_prefix", + "session", + "session_prefix", +] MANAGEMENT_PARAMETER_NAME = "mgmt_token" HOST_PARAMETER_NAME = "host" From ea200c9d8c35ac084a5f4841ff82558572d14ee3 Mon Sep 17 00:00:00 2001 From: sgalea87 <43749726+sgalea87@users.noreply.github.com> Date: Mon, 17 May 2021 07:33:40 +0200 Subject: [PATCH 0049/2828] Update influxdb_user.py Fixed Multiple No Privileges (#2499) * Update influxdb_user.py Fixed Multiple No Privileges * Update influxdb_user.py Fixed line spaces * Update influxdb_user.py Fixed whitespace * Create 2499-influxdb_user-fix-multiple-no-privileges.yml Added changelog --- ...99-influxdb_user-fix-multiple-no-privileges.yml | 2 ++ plugins/modules/database/influxdb/influxdb_user.py | 14 +++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/2499-influxdb_user-fix-multiple-no-privileges.yml diff --git a/changelogs/fragments/2499-influxdb_user-fix-multiple-no-privileges.yml 
b/changelogs/fragments/2499-influxdb_user-fix-multiple-no-privileges.yml new file mode 100644 index 0000000000..d4575ea711 --- /dev/null +++ b/changelogs/fragments/2499-influxdb_user-fix-multiple-no-privileges.yml @@ -0,0 +1,2 @@ +bugfixes: + - influxdb_user - fix bug where an influxdb user has no privileges for 2 or more databases (https://github.com/ansible-collections/community.general/pull/2499). diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py index e17e3753f2..8aec04533b 100644 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ b/plugins/modules/database/influxdb/influxdb_user.py @@ -166,16 +166,16 @@ def set_user_grants(module, client, user_name, grants): try: current_grants = client.get_list_privileges(user_name) + parsed_grants = [] # Fix privileges wording for i, v in enumerate(current_grants): - if v['privilege'] == 'ALL PRIVILEGES': - v['privilege'] = 'ALL' - current_grants[i] = v - elif v['privilege'] == 'NO PRIVILEGES': - del(current_grants[i]) + if v['privilege'] != 'NO PRIVILEGES': + if v['privilege'] == 'ALL PRIVILEGES': + v['privilege'] = 'ALL' + parsed_grants.add(v) # check if the current grants are included in the desired ones - for current_grant in current_grants: + for current_grant in parsed_grants: if current_grant not in grants: if not module.check_mode: client.revoke_privilege(current_grant['privilege'], @@ -185,7 +185,7 @@ def set_user_grants(module, client, user_name, grants): # check if the desired grants are included in the current ones for grant in grants: - if grant not in current_grants: + if grant not in parsed_grants: if not module.check_mode: client.grant_privilege(grant['privilege'], grant['database'], From 448b8cbcda019e1d89eb715dfb6fc2f754440613 Mon Sep 17 00:00:00 2001 From: Dennis Israelsson Date: Mon, 17 May 2021 07:35:15 +0200 Subject: [PATCH 0050/2828] fix error when cache is disabled (#2518) --- 
changelogs/fragments/2518-nmap-fix-cache-disabled.yml | 2 ++ plugins/inventory/nmap.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2518-nmap-fix-cache-disabled.yml diff --git a/changelogs/fragments/2518-nmap-fix-cache-disabled.yml b/changelogs/fragments/2518-nmap-fix-cache-disabled.yml new file mode 100644 index 0000000000..8f4680b6a6 --- /dev/null +++ b/changelogs/fragments/2518-nmap-fix-cache-disabled.yml @@ -0,0 +1,2 @@ +bugfixes: + - nmap inventory plugin - fix local variable error when cache is disabled (https://github.com/ansible-collections/community.general/issues/2512). diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 687317abfa..39a6ff3a67 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -130,7 +130,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated cache_needs_update = True - if cache_needs_update: + if not user_cache_setting or cache_needs_update: # setup command cmd = [self._nmap] if not self._options['ports']: @@ -207,6 +207,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): except Exception as e: raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e))) + if cache_needs_update: self._cache[cache_key] = results self._populate(results) From 2cc848fe1a32959d54b23afe2aa153c2fd79b35c Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 17 May 2021 08:11:17 +0200 Subject: [PATCH 0051/2828] Use --assumeyes with explicit yum call. 
(#2533) --- tests/integration/targets/yum_versionlock/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/targets/yum_versionlock/tasks/main.yml b/tests/integration/targets/yum_versionlock/tasks/main.yml index dda5a11bf0..3ea170b145 100644 --- a/tests/integration/targets/yum_versionlock/tasks/main.yml +++ b/tests/integration/targets/yum_versionlock/tasks/main.yml @@ -24,7 +24,7 @@ register: lock_all_packages - name: Update all packages - command: yum update --setopt=obsoletes=0 + command: yum update --assumeyes --setopt=obsoletes=0 register: update_all_locked_packages changed_when: - '"No packages marked for update" not in update_all_locked_packages.stdout' From da7e4e1dc2306ae047cac87912f5b7c805ee2233 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Mon, 17 May 2021 12:32:20 +0430 Subject: [PATCH 0052/2828] yum_versionlock: disable fedora34 integration test (#2536) * Disable yum_versionlock integration test on Fedora 34 * Remove --assumeyes and add a comment regarding this * Update update task name --- tests/integration/targets/yum_versionlock/tasks/main.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/integration/targets/yum_versionlock/tasks/main.yml b/tests/integration/targets/yum_versionlock/tasks/main.yml index 3ea170b145..4084bdcb91 100644 --- a/tests/integration/targets/yum_versionlock/tasks/main.yml +++ b/tests/integration/targets/yum_versionlock/tasks/main.yml @@ -23,8 +23,9 @@ state: present register: lock_all_packages - - name: Update all packages - command: yum update --assumeyes --setopt=obsoletes=0 + # This should fail when it needs user interaction and missing -y is on purpose. 
+ - name: Update all packages (not really) + command: yum update --setopt=obsoletes=0 register: update_all_locked_packages changed_when: - '"No packages marked for update" not in update_all_locked_packages.stdout' @@ -59,4 +60,4 @@ state: absent when: yum_versionlock_install is changed when: (ansible_distribution in ['CentOS', 'RedHat'] and ansible_distribution_major_version is version('7', '>=')) or - (ansible_distribution == 'Fedora') + (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('33', '<=')) From 350380ba8c91030b69ec4fe2b087fb62ee82c389 Mon Sep 17 00:00:00 2001 From: Jan Baier <7996094+baierjan@users.noreply.github.com> Date: Mon, 17 May 2021 13:50:40 +0200 Subject: [PATCH 0053/2828] Add option missing to passwordstore lookup (#2500) Add ability to ignore error on missing pass file to allow processing the output further via another filters (mainly the default filter) without updating the pass file itself. It also contains the option to create the pass file, like the option create=true does. Finally, it also allows to issue a warning only, if the pass file is not found. --- ...asswordstore-add_option_ignore_missing.yml | 3 + plugins/lookup/passwordstore.py | 56 +++++++++++++++++-- .../lookup_passwordstore/tasks/tests.yml | 31 ++++++++++ 3 files changed, 85 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2500-passwordstore-add_option_ignore_missing.yml diff --git a/changelogs/fragments/2500-passwordstore-add_option_ignore_missing.yml b/changelogs/fragments/2500-passwordstore-add_option_ignore_missing.yml new file mode 100644 index 0000000000..6141ac7747 --- /dev/null +++ b/changelogs/fragments/2500-passwordstore-add_option_ignore_missing.yml @@ -0,0 +1,3 @@ +minor_changes: + - passwordstore lookup - add option ``missing`` to choose what to do if the password file is missing + (https://github.com/ansible-collections/community.general/pull/2500). 
diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 79c69ed962..976dfb837e 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -25,9 +25,9 @@ DOCUMENTATION = ''' env: - name: PASSWORD_STORE_DIR create: - description: Create the password if it does not already exist. + description: Create the password if it does not already exist. Takes precedence over C(missing). type: bool - default: 'no' + default: false overwrite: description: Overwrite the password if it does already exist. type: bool @@ -60,6 +60,22 @@ DOCUMENTATION = ''' description: use alphanumeric characters. type: bool default: 'no' + missing: + description: + - List of preference about what to do if the password file is missing. + - If I(create=true), the value for this option is ignored and assumed to be C(create). + - If set to C(error), the lookup will error out if the passname does not exist. + - If set to C(create), the passname will be created with the provided length I(length) if it does not exist. + - If set to C(empty) or C(warn), will return a C(none) in case the passname does not exist. + When using C(lookup) and not C(query), this will be translated to an empty string. + version_added: 3.1.0 + type: str + default: error + choices: + - error + - warn + - empty + - create ''' EXAMPLES = """ # Debug is used for examples, BAD IDEA to show passwords on screen @@ -67,12 +83,28 @@ EXAMPLES = """ ansible.builtin.debug: msg: "{{ lookup('community.general.passwordstore', 'example/test')}}" +- name: Basic lookup. Warns if example/test does not exist and returns empty string + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test missing=warn')}}" + - name: Create pass with random 16 character password. 
If password exists just give the password ansible.builtin.debug: var: mypassword vars: mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}" +- name: Create pass with random 16 character password. If password exists just give the password + ansible.builtin.debug: + var: mypassword + vars: + mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=create')}}" + +- name: Prints 'abc' if example/test does not exist, just give the password otherwise + ansible.builtin.debug: + var: mypassword + vars: + mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=empty') | default('abc', true) }}" + - name: Different size password ansible.builtin.debug: msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}" @@ -111,10 +143,13 @@ import yaml from distutils import util from ansible.errors import AnsibleError, AnsibleAssertionError from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.utils.display import Display from ansible.utils.encrypt import random_password from ansible.plugins.lookup import LookupBase from ansible import constants as C +display = Display() + # backhacked check_output with input for python 2.7 # http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output @@ -178,12 +213,17 @@ class LookupModule(LookupBase): self.paramvals[key] = util.strtobool(self.paramvals[key]) except (ValueError, AssertionError) as e: raise AnsibleError(e) + if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']: + raise AnsibleError("{0} is not a valid option for missing".format(self.paramvals['missing'])) if not isinstance(self.paramvals['length'], int): if self.paramvals['length'].isdigit(): self.paramvals['length'] = int(self.paramvals['length']) else: raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length'])) + if self.paramvals['create']: + 
self.paramvals['missing'] = 'create' + # Collect pass environment variables from the plugin's parameters. self.env = os.environ.copy() @@ -224,9 +264,11 @@ class LookupModule(LookupBase): if e.returncode != 0 and 'not in the password store' in e.output: # if pass returns 1 and return string contains 'is not in the password store.' # We need to determine if this is valid or Error. - if not self.paramvals['create']: - raise AnsibleError('passname: {0} not found, use create=True'.format(self.passname)) + if self.paramvals['missing'] == 'error': + raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname)) else: + if self.paramvals['missing'] == 'warn': + display.warning('passwordstore: passname {0} not found'.format(self.passname)) return False else: raise AnsibleError(e) @@ -294,6 +336,7 @@ class LookupModule(LookupBase): 'userpass': '', 'length': 16, 'backup': False, + 'missing': 'error', } for term in terms: @@ -304,6 +347,9 @@ class LookupModule(LookupBase): else: result.append(self.get_passresult()) else: # password does not exist - if self.paramvals['create']: + if self.paramvals['missing'] == 'create': result.append(self.generate_password()) + else: + result.append(None) + return result diff --git a/tests/integration/targets/lookup_passwordstore/tasks/tests.yml b/tests/integration/targets/lookup_passwordstore/tasks/tests.yml index aba5457c0a..e69ba5e572 100644 --- a/tests/integration/targets/lookup_passwordstore/tasks/tests.yml +++ b/tests/integration/targets/lookup_passwordstore/tasks/tests.yml @@ -61,6 +61,37 @@ that: - readpass == newpass +- name: Create a password using missing=create + set_fact: + newpass: "{{ lookup('community.general.passwordstore', 'test-missing-create missing=create length=8') }}" + +- name: Fetch password from an existing file + set_fact: + readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create') }}" + +- name: Verify password + assert: + that: + - readpass == 
newpass + +- name: Fetch password from existing file using missing=empty + set_fact: + readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create missing=empty') }}" + +- name: Verify password + assert: + that: + - readpass == newpass + +- name: Fetch password from non-existing file using missing=empty + set_fact: + readpass: "{{ query('community.general.passwordstore', 'test-missing-pass missing=empty') }}" + +- name: Verify password + assert: + that: + - readpass == [ none ] + # As inserting multiline passwords on the commandline would require something # like expect, simply create it by using default gpg on a file with the correct # structure. From 345d5f2dfa8e8ea4d624da18a92ac69d298522b8 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 18 May 2021 00:03:15 +1200 Subject: [PATCH 0054/2828] snap - revamp + implementing enabled/disabled states (#2411) * revamp of snap module * added changelog fragment * fixed description * Update changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...11-snap-revamp-enabled-disabled-states.yml | 2 + plugins/modules/packaging/os/snap.py | 277 ++++++++++-------- 2 files changed, 157 insertions(+), 122 deletions(-) create mode 100644 changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml diff --git a/changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml b/changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml new file mode 100644 index 0000000000..a52b377817 --- /dev/null +++ b/changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml @@ -0,0 +1,2 @@ +minor_changes: + - snap - added ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1990). 
diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py index 9776b4e50a..fab2558ccf 100644 --- a/plugins/modules/packaging/os/snap.py +++ b/plugins/modules/packaging/os/snap.py @@ -31,7 +31,7 @@ options: - Desired state of the package. required: false default: present - choices: [ absent, present ] + choices: [ absent, present, enabled, disabled ] type: str classic: description: @@ -105,151 +105,184 @@ snaps_removed: returned: When any snaps have been removed ''' -import operator import re -from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.module_helper import ( + CmdStateModuleHelper, ArgFormat, ModuleHelperException +) -def validate_input_snaps(module): - """Ensure that all exist.""" - for snap_name in module.params['name']: - if not snap_exists(module, snap_name): - module.fail_json(msg="No snap matching '%s' available." % snap_name) +__state_map = dict( + present='install', + absent='remove', + info='info', # not public + list='list', # not public + enabled='enable', + disabled='disable', +) -def snap_exists(module, snap_name): - snap_path = module.get_bin_path("snap", True) - cmd_parts = [snap_path, 'info', snap_name] - cmd = ' '.join(cmd_parts) - rc, out, err = module.run_command(cmd, check_rc=False) - - return rc == 0 +def _state_map(value): + return __state_map[value] -def is_snap_installed(module, snap_name): - snap_path = module.get_bin_path("snap", True) - cmd_parts = [snap_path, 'list', snap_name] - cmd = ' '.join(cmd_parts) - rc, out, err = module.run_command(cmd, check_rc=False) +class Snap(CmdStateModuleHelper): + __disable_re = re.compile(r'(?:\S+\s+){5}(?P\S+)') + module = dict( + argument_spec={ + 'name': dict(type='list', elements='str', required=True), + 'state': dict(type='str', required=False, default='present', + choices=['absent', 'present', 'enabled', 'disabled']), + 'classic': dict(type='bool', required=False, default=False), + 'channel': 
dict(type='str', required=False, default='stable'), + }, + supports_check_mode=True, + ) + command = "snap" + command_args_formats = dict( + actionable_snaps=dict(fmt=lambda v: v), + state=dict(fmt=_state_map), + classic=dict(fmt="--classic", style=ArgFormat.BOOLEAN), + channel=dict(fmt=lambda v: [] if v == 'stable' else ['--channel', '{0}']), + ) + check_rc = False - return rc == 0 + @staticmethod + def _first_non_zero(a): + for elem in a: + if elem != 0: + return elem + return 0 -def get_snap_for_action(module): - """Construct a list of snaps to use for current action.""" - snaps = module.params['name'] + def _run_multiple_commands(self, commands): + outputs = [(c,) + self.run_command(params=c) for c in commands] + results = ([], [], [], []) + for output in outputs: + for i in range(4): + results[i].append(output[i]) - is_present_state = module.params['state'] == 'present' - negation_predicate = operator.not_ if is_present_state else bool + return [ + '; '.join(results[0]), + self._first_non_zero(results[1]), + '\n'.join(results[2]), + '\n'.join(results[3]), + ] - def predicate(s): - return negation_predicate(is_snap_installed(module, s)) + def snap_exists(self, snap_name): + return 0 == self.run_command(params=[{'state': 'info'}, {'name': [snap_name]}])[0] - return [s for s in snaps if predicate(s)] + def is_snap_installed(self, snap_name): + return 0 == self.run_command(params=[{'state': 'list'}, {'name': [snap_name]}])[0] + def is_snap_enabled(self, snap_name): + rc, out, err = self.run_command(params=[{'state': 'list'}, {'name': [snap_name]}]) + if rc != 0: + return None + result = out.splitlines()[1] + match = self.__disable_re.match(result) + if not match: + raise ModuleHelperException(msg="Unable to parse 'snap list {0}' output:\n{1}".format(snap_name, out)) + notes = match.group('notes') + return "disabled" not in notes.split(',') -def get_base_cmd_parts(module): - action_map = { - 'present': 'install', - 'absent': 'remove', - } + def 
validate_input_snaps(self): + """Ensure that all exist.""" + for snap_name in self.vars.name: + if not self.snap_exists(snap_name): + raise ModuleHelperException(msg="No snap matching '%s' available." % snap_name) - state = module.params['state'] + def state_present(self): + self.validate_input_snaps() # if snap doesnt exist, it will explode when trying to install + self.vars.meta('classic').set(output=True) + self.vars.meta('channel').set(output=True) + actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)] + if not actionable_snaps: + return + self.changed = True + self.vars.snaps_installed = actionable_snaps + if self.module.check_mode: + return + params = ['classic', 'channel', 'state'] # get base cmd parts + has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable' + has_multiple_snaps = len(actionable_snaps) > 1 + if has_one_pkg_params and has_multiple_snaps: + commands = [params + [s] for s in actionable_snaps] + else: + commands = [params + actionable_snaps] + self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) + if rc == 0: + return - classic = ['--classic'] if module.params['classic'] else [] - channel = ['--channel', module.params['channel']] if module.params['channel'] and module.params['channel'] != 'stable' else [] + classic_snap_pattern = re.compile(r'^error: This revision of snap "(?P\w+)"' + r' was published using classic confinement') + match = classic_snap_pattern.match(err) + if match: + err_pkg = match.group('package_name') + msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg) + else: + msg = "Ooops! 
Snap installation failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + raise ModuleHelperException(msg=msg) - snap_path = module.get_bin_path("snap", True) - snap_action = action_map[state] + def state_absent(self): + self.validate_input_snaps() # if snap doesnt exist, it will be absent by definition + actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)] + if not actionable_snaps: + return + self.changed = True + self.vars.snaps_removed = actionable_snaps + if self.module.check_mode: + return + params = ['classic', 'channel', 'state'] # get base cmd parts + commands = [params + actionable_snaps] + self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) + if rc == 0: + return + msg = "Ooops! Snap removal failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + raise ModuleHelperException(msg=msg) - cmd_parts = [snap_path, snap_action] - if snap_action == 'install': - cmd_parts += classic + channel + def state_enabled(self): + self.validate_input_snaps() + actionable_snaps = [s for s in self.vars.name if self.is_snap_enabled(s) is False] + if not actionable_snaps: + return + self.changed = True + self.vars.snaps_enabled = actionable_snaps + if self.module.check_mode: + return + params = ['classic', 'channel', 'state'] # get base cmd parts + commands = [params + actionable_snaps] + self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) + if rc == 0: + return + msg = "Ooops! 
Snap enabling failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + raise ModuleHelperException(msg=msg) - return cmd_parts - - -def get_cmd_parts(module, snap_names): - """Return list of cmds to run in exec format.""" - is_install_mode = module.params['state'] == 'present' - has_multiple_snaps = len(snap_names) > 1 - - cmd_parts = get_base_cmd_parts(module) - has_one_pkg_params = '--classic' in cmd_parts or '--channel' in cmd_parts - - if not (is_install_mode and has_one_pkg_params and has_multiple_snaps): - return [cmd_parts + snap_names] - - return [cmd_parts + [s] for s in snap_names] - - -def run_cmd_for(module, snap_names): - cmds_parts = get_cmd_parts(module, snap_names) - cmd = '; '.join(' '.join(c) for c in cmds_parts) - cmd = 'sh -c "{0}"'.format(cmd) - - # Actually execute the snap command - return (cmd, ) + module.run_command(cmd, check_rc=False) - - -def execute_action(module): - is_install_mode = module.params['state'] == 'present' - exit_kwargs = { - 'classic': module.params['classic'], - 'channel': module.params['channel'], - } if is_install_mode else {} - - actionable_snaps = get_snap_for_action(module) - if not actionable_snaps: - module.exit_json(changed=False, **exit_kwargs) - - changed_def_args = { - 'changed': True, - 'snaps_{result}'. - format(result='installed' if is_install_mode - else 'removed'): actionable_snaps, - } - - if module.check_mode: - module.exit_json(**dict(changed_def_args, **exit_kwargs)) - - cmd, rc, out, err = run_cmd_for(module, actionable_snaps) - cmd_out_args = { - 'cmd': cmd, - 'rc': rc, - 'stdout': out, - 'stderr': err, - } - - if rc == 0: - module.exit_json(**dict(changed_def_args, **dict(cmd_out_args, **exit_kwargs))) - else: - msg = "Ooops! 
Snap installation failed while executing '{cmd}', please examine logs and error output for more details.".format(cmd=cmd) - if is_install_mode: - m = re.match(r'^error: This revision of snap "(?P\w+)" was published using classic confinement', err) - if m is not None: - err_pkg = m.group('package_name') - msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg) - module.fail_json(msg=msg, **dict(cmd_out_args, **exit_kwargs)) + def state_disabled(self): + self.validate_input_snaps() + actionable_snaps = [s for s in self.vars.name if self.is_snap_enabled(s) is True] + if not actionable_snaps: + return + self.changed = True + self.vars.snaps_enabled = actionable_snaps + if self.module.check_mode: + return + params = ['classic', 'channel', 'state'] # get base cmd parts + commands = [params + actionable_snaps] + self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) + if rc == 0: + return + msg = "Ooops! Snap disabling failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + raise ModuleHelperException(msg=msg) def main(): - module_args = { - 'name': dict(type='list', elements='str', required=True), - 'state': dict(type='str', required=False, default='present', choices=['absent', 'present']), - 'classic': dict(type='bool', required=False, default=False), - 'channel': dict(type='str', required=False, default='stable'), - } - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - ) - - validate_input_snaps(module) - - # Apply changes to the snaps - execute_action(module) + snap = Snap() + snap.run() if __name__ == '__main__': From dc0a56141fd41c94ff7c79dc902f97255da295cb Mon Sep 17 00:00:00 2001 From: Lauri Tirkkonen Date: Mon, 17 May 2021 19:55:00 +0300 Subject: [PATCH 0055/2828] zfs_delegate_admin: drop choices from permissions (#2540) instead of whitelisting some subset of known existing permissions, just allow any string to be 
used as permissions. this way, any permission supported by the underlying zfs commands can be used, eg. 'bookmark', 'load-key', 'change-key' and all property permissions, which were missing from the choices list. --- changelogs/fragments/2540-zfs-delegate-choices.yml | 2 ++ plugins/modules/storage/zfs/zfs_delegate_admin.py | 8 +++----- 2 files changed, 5 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2540-zfs-delegate-choices.yml diff --git a/changelogs/fragments/2540-zfs-delegate-choices.yml b/changelogs/fragments/2540-zfs-delegate-choices.yml new file mode 100644 index 0000000000..8e0138420c --- /dev/null +++ b/changelogs/fragments/2540-zfs-delegate-choices.yml @@ -0,0 +1,2 @@ +minor_changes: + - zfs_delegate_admin - drop choices from permissions, allowing any permission supported by the underlying zfs commands (https://github.com/ansible-collections/community.general/pull/2540). diff --git a/plugins/modules/storage/zfs/zfs_delegate_admin.py b/plugins/modules/storage/zfs/zfs_delegate_admin.py index 71225fa155..ead4041150 100644 --- a/plugins/modules/storage/zfs/zfs_delegate_admin.py +++ b/plugins/modules/storage/zfs/zfs_delegate_admin.py @@ -51,8 +51,9 @@ options: permissions: description: - The list of permission(s) to delegate (required if C(state) is C(present)). + - Supported permissions depend on the ZFS version in use. See for example + U(https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html) for OpenZFS. 
type: list - choices: [ allow, clone, create, destroy, diff, hold, mount, promote, readonly, receive, release, rename, rollback, send, share, snapshot, unallow ] elements: str local: description: @@ -248,10 +249,7 @@ def main(): users=dict(type='list', elements='str'), groups=dict(type='list', elements='str'), everyone=dict(type='bool', default=False), - permissions=dict(type='list', elements='str', - choices=['allow', 'clone', 'create', 'destroy', 'diff', 'hold', 'mount', 'promote', - 'readonly', 'receive', 'release', 'rename', 'rollback', 'send', 'share', - 'snapshot', 'unallow']), + permissions=dict(type='list', elements='str'), local=dict(type='bool'), descendents=dict(type='bool'), recursive=dict(type='bool', default=False), From 2b1eff2783b6f6c8b6d4ef0552afc35d5eac9146 Mon Sep 17 00:00:00 2001 From: quidame Date: Mon, 17 May 2021 20:05:24 +0200 Subject: [PATCH 0056/2828] java_keystore: pass in secret to keytool via stdin (#2526) * java_keystore: pass in secret to keytool via stdin * add changelog fragment --- .../2526-java_keystore-password-via-stdin.yml | 4 ++++ plugins/modules/system/java_keystore.py | 10 ++++------ .../unit/plugins/modules/system/test_java_keystore.py | 6 +++--- 3 files changed, 11 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/2526-java_keystore-password-via-stdin.yml diff --git a/changelogs/fragments/2526-java_keystore-password-via-stdin.yml b/changelogs/fragments/2526-java_keystore-password-via-stdin.yml new file mode 100644 index 0000000000..1e45e306af --- /dev/null +++ b/changelogs/fragments/2526-java_keystore-password-via-stdin.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - "java_keystore - replace envvar by stdin to pass secret to ``keytool`` + (https://github.com/ansible-collections/community.general/pull/2526)." 
diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py index 78bcfb6af6..8293801f1b 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/system/java_keystore.py @@ -290,11 +290,11 @@ class JavaKeystore: def read_stored_certificate_fingerprint(self): stored_certificate_fingerprint_cmd = [ - self.keytool_bin, "-list", "-alias", self.name, "-keystore", - self.keystore_path, "-storepass:env", "STOREPASS", "-v" + self.keytool_bin, "-list", "-alias", self.name, + "-keystore", self.keystore_path, "-v" ] (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = self.module.run_command( - stored_certificate_fingerprint_cmd, environ_update=dict(STOREPASS=self.password), check_rc=False) + stored_certificate_fingerprint_cmd, data=self.password, check_rc=False) if rc != 0: if "keytool error: java.lang.Exception: Alias <%s> does not exist" % self.name \ in stored_certificate_fingerprint_out: @@ -428,12 +428,10 @@ class JavaKeystore: "-srckeystore", keystore_p12_path, "-srcstoretype", "pkcs12", "-alias", self.name, - "-deststorepass:env", "STOREPASS", - "-srcstorepass:env", "STOREPASS", "-noprompt"] (rc, import_keystore_out, dummy) = self.module.run_command( - import_keystore_cmd, data=None, environ_update=dict(STOREPASS=self.password), check_rc=False + import_keystore_cmd, data='%s\n%s\n%s' % (self.password, self.password, self.password), check_rc=False ) if rc != 0: return self.module.fail_json(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc) diff --git a/tests/unit/plugins/modules/system/test_java_keystore.py b/tests/unit/plugins/modules/system/test_java_keystore.py index 5e99074c95..7d582a3e99 100644 --- a/tests/unit/plugins/modules/system/test_java_keystore.py +++ b/tests/unit/plugins/modules/system/test_java_keystore.py @@ -80,7 +80,7 @@ class TestCreateJavaKeystore(ModuleTestCase): 'cmd': ["keytool", "-importkeystore", "-destkeystore", "/path/to/keystore.jks", "-srckeystore", 
"/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", - "-deststorepass:env", "STOREPASS", "-srcstorepass:env", "STOREPASS", "-noprompt"], + "-noprompt"], 'msg': '', 'rc': 0 } @@ -183,7 +183,7 @@ class TestCreateJavaKeystore(ModuleTestCase): cmd=["keytool", "-importkeystore", "-destkeystore", "/path/to/keystore.jks", "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", - "-deststorepass:env", "STOREPASS", "-srcstorepass:env", "STOREPASS", "-noprompt"], + "-noprompt"], msg='', rc=1 ) @@ -354,7 +354,7 @@ class TestCertChanged(ModuleTestCase): jks = JavaKeystore(module) jks.cert_changed() module.fail_json.assert_called_with( - cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-storepass:env", "STOREPASS", "-v"], + cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-v"], msg='', err='Oops', rc=1 From 2a376642ddc8be103e57eb688da86bfb71bf790a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 18 May 2021 06:28:21 +1200 Subject: [PATCH 0057/2828] ModuleHelper - better mechanism for customizing "changed" behaviour (#2514) * better mechanism for customizing "changed" behaviour * dont drink and code: silly mistake from late at night * added changelog fragment --- changelogs/fragments/2514-mh-improved-changed.yml | 2 ++ plugins/module_utils/mh/base.py | 8 +++++++- 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2514-mh-improved-changed.yml diff --git a/changelogs/fragments/2514-mh-improved-changed.yml b/changelogs/fragments/2514-mh-improved-changed.yml new file mode 100644 index 0000000000..b540600130 --- /dev/null +++ b/changelogs/fragments/2514-mh-improved-changed.yml @@ -0,0 +1,2 @@ +minor_changes: + - ModuleHelper module utils - improved mechanism for customizing the calculation of ``changed`` (https://github.com/ansible-collections/community.general/pull/2514). 
diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py index 2a2dd88f7b..e0de7f2fdd 100644 --- a/plugins/module_utils/mh/base.py +++ b/plugins/module_utils/mh/base.py @@ -33,9 +33,15 @@ class ModuleHelperBase(object): def __quit_module__(self): pass + def __changed__(self): + raise NotImplementedError() + @property def changed(self): - return self._changed + try: + return self.__changed__() + except NotImplementedError: + return self._changed @changed.setter def changed(self, value): From b89eb87ad6872dcfed1bd2a7969ba5ce091ddf9e Mon Sep 17 00:00:00 2001 From: Xabier Napal Date: Mon, 17 May 2021 21:00:35 +0200 Subject: [PATCH 0058/2828] influxdb_user: allow creation of first user with auth enabled (#2364) (#2368) * influxdb_user: allow creation of first user with auth enabled (#2364) * handle potential exceptions while parsing influxdb client error * fix changelog Co-authored-by: Felix Fontein * influxdb_user: use generic exceptions to be compatible with python 2.7 Co-authored-by: Felix Fontein --- .../2364-influxdb_user-first_user.yml | 5 ++++ .../database/influxdb/influxdb_user.py | 25 +++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2364-influxdb_user-first_user.yml diff --git a/changelogs/fragments/2364-influxdb_user-first_user.yml b/changelogs/fragments/2364-influxdb_user-first_user.yml new file mode 100644 index 0000000000..905688643b --- /dev/null +++ b/changelogs/fragments/2364-influxdb_user-first_user.yml @@ -0,0 +1,5 @@ +bugfixes: + - influxdb_user - allow creation of admin users when InfluxDB authentication + is enabled but no other user exists on the database. In this scenario, + InfluxDB 1.x allows only ``CREATE USER`` queries and rejects any other query + (https://github.com/ansible-collections/community.general/issues/2364). 
diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py index 8aec04533b..d9e6b58051 100644 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ b/plugins/modules/database/influxdb/influxdb_user.py @@ -100,6 +100,8 @@ RETURN = r''' #only defaults ''' +import json + from ansible.module_utils.urls import ConnectionError from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native @@ -115,7 +117,7 @@ def find_user(module, client, user_name): if user['user'] == user_name: user_result = user break - except (ConnectionError, influx.exceptions.InfluxDBClientError) as e: + except ConnectionError as e: module.fail_json(msg=to_native(e)) return user_result @@ -198,6 +200,9 @@ def set_user_grants(module, client, user_name, grants): return changed +INFLUX_AUTH_FIRST_USER_REQUIRED = "error authorizing query: create admin user first or disable authentication" + + def main(): argument_spec = influx.InfluxDb.influxdb_argument_spec() argument_spec.update( @@ -219,7 +224,23 @@ def main(): grants = module.params['grants'] influxdb = influx.InfluxDb(module) client = influxdb.connect_to_influxdb() - user = find_user(module, client, user_name) + + user = None + try: + user = find_user(module, client, user_name) + except influx.exceptions.InfluxDBClientError as e: + if e.code == 403: + reason = None + try: + msg = json.loads(e.content) + reason = msg["error"] + except (KeyError, ValueError): + module.fail_json(msg=to_native(e)) + + if reason != INFLUX_AUTH_FIRST_USER_REQUIRED: + module.fail_json(msg=to_native(e)) + else: + module.fail_json(msg=to_native(e)) changed = False From d24fc92466cc48d8dc436b80a2613635061b8f07 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 18 May 2021 08:44:00 +1200 Subject: [PATCH 0059/2828] ModuleHelper - cmd params now taken from self.vars instead of self.module.params (#2517) * cmd params now taken 
from self.vars instead of self.module.params * added changelog fragment * Update changelogs/fragments/2517-cmd-params-from-vars.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/2517-cmd-params-from-vars.yml | 2 ++ plugins/module_utils/mh/mixins/cmd.py | 6 +++--- plugins/modules/system/xfconf.py | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2517-cmd-params-from-vars.yml diff --git a/changelogs/fragments/2517-cmd-params-from-vars.yml b/changelogs/fragments/2517-cmd-params-from-vars.yml new file mode 100644 index 0000000000..95a2f7165d --- /dev/null +++ b/changelogs/fragments/2517-cmd-params-from-vars.yml @@ -0,0 +1,2 @@ +minor_changes: + - cmd (Module Helper) module utils - ``CmdMixin`` now pulls the value for ``run_command()`` params from ``self.vars``, as opposed to previously retrieving those from ``self.module.params`` (https://github.com/ansible-collections/community.general/pull/2517). diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py index fc66638f69..eb7cc698cc 100644 --- a/plugins/module_utils/mh/mixins/cmd.py +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -120,7 +120,7 @@ class CmdMixin(object): cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True) except ValueError: pass - param_list = params if params else self.module.params.keys() + param_list = params if params else self.vars.keys() for param in param_list: if isinstance(param, dict): @@ -131,9 +131,9 @@ class CmdMixin(object): fmt = find_format(_param) value = param[_param] elif isinstance(param, str): - if param in self.module.argument_spec: + if param in self.vars.keys(): fmt = find_format(param) - value = self.module.params[param] + value = self.vars[param] elif param in extra_params: fmt = find_format(param) value = extra_params[param] diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index f2975df050..dc560e7775 100644 --- 
a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -258,7 +258,7 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): params = ['channel', 'property', {'create': True}] if self.vars.is_array: - params.append({'is_array': True}) + params.append('is_array') params.append({'values_and_types': (self.vars.value, value_type)}) if not self.module.check_mode: From f6db0745fcfb59dc2dff26a1d86fc60b142b65d3 Mon Sep 17 00:00:00 2001 From: quidame Date: Tue, 18 May 2021 06:46:45 +0200 Subject: [PATCH 0060/2828] filesystem: revamp module (#2472) * revamp filesystem module to prepare next steps * pass all commands to module.run_command() as lists * refactor grow() and grow_cmd() to not need to override them so much * refactor all existing get_fs_size() overrides to raise a ValueError if not able to parse command output and return an integer. * override MKFS_FORCE_FLAGS the same way for all fstypes that require it * improve documentation of limitations of the module regarding FreeBSD * fix indentation in DOCUMENTATION * add/update function/method docstrings * fix pylint hints filesystem: refactor integration tests * Include *reiserfs* and *swap* in tests. * Fix reiserfs related code and tests accordingly. * Replace "other fs" (unhandled by this module), from *swap* to *minix* (both mkswap and mkfs.minix being provided by util-linux). * Replace *dd* commands by *filesize* dedicated module. * Use FQCNs and name the tasks. * Update main tests conditionals. 
* add a changelog fragment * Apply suggestions from code review Co-authored-by: Felix Fontein * declare variables as lists when lists are needed * fix construction without useless conversion Co-authored-by: Felix Fontein --- .../2472_filesystem_module_revamp.yml | 9 + plugins/modules/system/filesystem.py | 296 ++++++++++-------- .../targets/filesystem/defaults/main.yml | 2 + .../filesystem/tasks/create_device.yml | 20 +- .../targets/filesystem/tasks/create_fs.yml | 69 ++-- .../targets/filesystem/tasks/main.yml | 50 +-- .../filesystem/tasks/overwrite_another_fs.yml | 37 ++- .../targets/filesystem/tasks/remove_fs.yml | 60 ++-- .../targets/filesystem/tasks/setup.yml | 179 +++++++---- 9 files changed, 434 insertions(+), 288 deletions(-) create mode 100644 changelogs/fragments/2472_filesystem_module_revamp.yml diff --git a/changelogs/fragments/2472_filesystem_module_revamp.yml b/changelogs/fragments/2472_filesystem_module_revamp.yml new file mode 100644 index 0000000000..691c861078 --- /dev/null +++ b/changelogs/fragments/2472_filesystem_module_revamp.yml @@ -0,0 +1,9 @@ +--- +minor_changes: + - "filesystem - cleanup and revamp module, tests and doc. Pass all commands to + ``module.run_command()`` as lists. Move the device-vs-mountpoint logic to + ``grow()`` method. Give to all ``get_fs_size()`` the same logic and error + handling. (https://github.com/ansible-collections/community.general/pull/2472)." +bugfixes: + - "filesystem - repair ``reiserfs`` fstype support after adding it to integration + tests (https://github.com/ansible-collections/community.general/pull/2472)." 
diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/system/filesystem.py index 6944178da1..97fe2dc1ab 100644 --- a/plugins/modules/system/filesystem.py +++ b/plugins/modules/system/filesystem.py @@ -7,10 +7,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type + DOCUMENTATION = ''' --- author: -- Alexander Bulimov (@abulimov) + - Alexander Bulimov (@abulimov) module: filesystem short_description: Makes a filesystem description: @@ -18,13 +19,12 @@ description: options: state: description: - - If C(state=present), the filesystem is created if it doesn't already - exist, that is the default behaviour if I(state) is omitted. - - If C(state=absent), filesystem signatures on I(dev) are wiped if it - contains a filesystem (as known by C(blkid)). - - When C(state=absent), all other options but I(dev) are ignored, and the - module doesn't fail if the device I(dev) doesn't actually exist. - - C(state=absent) is not supported and will fail on FreeBSD systems. + - If C(state=present), the filesystem is created if it doesn't already + exist, that is the default behaviour if I(state) is omitted. + - If C(state=absent), filesystem signatures on I(dev) are wiped if it + contains a filesystem (as known by C(blkid)). + - When C(state=absent), all other options but I(dev) are ignored, and the + module doesn't fail if the device I(dev) doesn't actually exist. type: str choices: [ present, absent ] default: present @@ -32,48 +32,56 @@ options: fstype: choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ] description: - - Filesystem type to be created. This option is required with - C(state=present) (or if I(state) is omitted). - - reiserfs support was added in 2.2. - - lvm support was added in 2.5. - - since 2.5, I(dev) can be an image file. 
- - vfat support was added in 2.5 - - ocfs2 support was added in 2.6 - - f2fs support was added in 2.7 - - swap support was added in 2.8 + - Filesystem type to be created. This option is required with + C(state=present) (or if I(state) is omitted). + - reiserfs support was added in 2.2. + - lvm support was added in 2.5. + - since 2.5, I(dev) can be an image file. + - vfat support was added in 2.5 + - ocfs2 support was added in 2.6 + - f2fs support was added in 2.7 + - swap support was added in 2.8 type: str aliases: [type] dev: description: - - Target path to device or image file. + - Target path to block device or regular file. + - On systems not using block devices but character devices instead (as + FreeBSD), this module only works when applying to regular files, aka + disk images. type: path required: yes aliases: [device] force: description: - - If C(yes), allows to create new filesystem on devices that already has filesystem. + - If C(yes), allows to create new filesystem on devices that already has filesystem. type: bool default: 'no' resizefs: description: - - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. - - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs) and C(vfat) filesystems. - Attempts to resize other filesystem types will fail. - - XFS Will only grow if mounted. Currently, the module is based on commands - from C(util-linux) package to perform operations, so resizing of XFS is - not supported on FreeBSD systems. - - vFAT will likely fail if fatresize < 1.04. + - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. + - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs) and C(vfat) filesystems. + Attempts to resize other filesystem types will fail. + - XFS Will only grow if mounted. 
Currently, the module is based on commands + from C(util-linux) package to perform operations, so resizing of XFS is + not supported on FreeBSD systems. + - vFAT will likely fail if fatresize < 1.04. type: bool default: 'no' opts: description: - - List of options to be passed to mkfs command. + - List of options to be passed to mkfs command. type: str requirements: - - Uses tools related to the I(fstype) (C(mkfs)) and C(blkid) command. When I(resizefs) is enabled, C(blockdev) command is required too. + - Uses tools related to the I(fstype) (C(mkfs)) and the C(blkid) command. + - When I(resizefs) is enabled, C(blockdev) command is required too. notes: - - Potential filesystem on I(dev) are checked using C(blkid), in case C(blkid) isn't able to detect an existing filesystem, - this filesystem is overwritten even if I(force) is C(no). + - Potential filesystem on I(dev) are checked using C(blkid). In case C(blkid) + isn't able to detect an existing filesystem, this filesystem is overwritten + even if I(force) is C(no). + - On FreeBSD systems, either C(e2fsprogs) or C(util-linux) packages provide + a C(blkid) command that is compatible with this module, when applied to + regular files. - This module supports I(check_mode). 
''' @@ -102,6 +110,7 @@ import re import stat from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native class Device(object): @@ -114,13 +123,15 @@ class Device(object): statinfo = os.stat(self.path) if stat.S_ISBLK(statinfo.st_mode): blockdev_cmd = self.module.get_bin_path("blockdev", required=True) - dummy, devsize_in_bytes, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) - return int(devsize_in_bytes) + dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) + devsize_in_bytes = int(out) elif os.path.isfile(self.path): - return os.path.getsize(self.path) + devsize_in_bytes = os.path.getsize(self.path) else: self.module.fail_json(changed=False, msg="Target device not supported: %s" % self) + return devsize_in_bytes + def get_mountpoint(self): """Return (first) mountpoint of device. Returns None when not mounted.""" cmd_findmnt = self.module.get_bin_path("findmnt", required=True) @@ -141,9 +152,12 @@ class Device(object): class Filesystem(object): - GROW = None MKFS = None - MKFS_FORCE_FLAGS = '' + MKFS_FORCE_FLAGS = [] + INFO = None + GROW = None + GROW_MAX_SPACE_FLAGS = [] + GROW_MOUNTPOINT_ONLY = False LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'} @@ -155,7 +169,11 @@ class Filesystem(object): return type(self).__name__ def get_fs_size(self, dev): - """ Return size in bytes of filesystem on device. Returns int """ + """Return size in bytes of filesystem on device (integer). + Should query the info with a per-fstype command that can access the + device whenever it is mounted or not, and parse the command output. + Parser must ensure to return an integer, or raise a ValueError. 
+ """ raise NotImplementedError() def create(self, opts, dev): @@ -163,31 +181,27 @@ class Filesystem(object): return mkfs = self.module.get_bin_path(self.MKFS, required=True) - if opts is None: - cmd = "%s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, dev) - else: - cmd = "%s %s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, opts, dev) + cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)] self.module.run_command(cmd, check_rc=True) def wipefs(self, dev): - if platform.system() == 'FreeBSD': - msg = "module param state=absent is currently not supported on this OS (FreeBSD)." - self.module.fail_json(msg=msg) - if self.module.check_mode: return # wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above) - # so it is not supported on FreeBSD. Even the use of dd as a fallback is + # that is ported to FreeBSD. The use of dd as a portable fallback is # not doable here if it needs get_mountpoint() (to prevent corruption of - # a mounted filesystem), since 'findmnt' is not available on FreeBSD. + # a mounted filesystem), since 'findmnt' is not available on FreeBSD, + # even in util-linux port for this OS. wipefs = self.module.get_bin_path('wipefs', required=True) - cmd = [wipefs, "--all", dev.__str__()] + cmd = [wipefs, "--all", str(dev)] self.module.run_command(cmd, check_rc=True) - def grow_cmd(self, dev): - cmd = self.module.get_bin_path(self.GROW, required=True) - return [cmd, str(dev)] + def grow_cmd(self, target): + """Build and return the resizefs commandline as list.""" + cmdline = [self.module.get_bin_path(self.GROW, required=True)] + cmdline += self.GROW_MAX_SPACE_FLAGS + [target] + return cmdline def grow(self, dev): """Get dev and fs size and compare. Returns stdout of used command.""" @@ -196,31 +210,50 @@ class Filesystem(object): try: fssize_in_bytes = self.get_fs_size(dev) except NotImplementedError: - self.module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." 
% self.fstype) + self.module.fail_json(msg="module does not support resizing %s filesystem yet" % self.fstype) + except ValueError as err: + self.module.warn("unable to process %s output '%s'" % (self.INFO, to_native(err))) + self.module.fail_json(msg="unable to process %s output for %s" % (self.INFO, dev)) if not fssize_in_bytes < devsize_in_bytes: self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev)) elif self.module.check_mode: - self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev)) + self.module.exit_json(changed=True, msg="resizing filesystem %s on device %s" % (self.fstype, dev)) + + if self.GROW_MOUNTPOINT_ONLY: + mountpoint = dev.get_mountpoint() + if not mountpoint: + self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype)) + grow_target = mountpoint else: - dummy, out, dummy = self.module.run_command(self.grow_cmd(dev), check_rc=True) - return out + grow_target = str(dev) + + dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True) + return out class Ext(Filesystem): - MKFS_FORCE_FLAGS = '-F' + MKFS_FORCE_FLAGS = ['-F'] + INFO = 'tune2fs' GROW = 'resize2fs' def get_fs_size(self, dev): - cmd = self.module.get_bin_path('tune2fs', required=True) - # Get Block count and Block size - dummy, size, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) - for line in size.splitlines(): + """Get Block count and Block size and return their product.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + + block_count = block_size = None + for line in out.splitlines(): if 'Block count:' in line: block_count = int(line.split(':')[1].strip()) elif 'Block size:' in line: block_size = int(line.split(':')[1].strip()) - return block_size * 
block_count + if None not in (block_size, block_count): + break + else: + raise ValueError(out) + + return block_size * block_count class Ext2(Ext): @@ -237,52 +270,46 @@ class Ext4(Ext): class XFS(Filesystem): MKFS = 'mkfs.xfs' - MKFS_FORCE_FLAGS = '-f' + MKFS_FORCE_FLAGS = ['-f'] + INFO = 'xfs_info' GROW = 'xfs_growfs' + GROW_MOUNTPOINT_ONLY = True def get_fs_size(self, dev): - cmd = self.module.get_bin_path('xfs_info', required=True) + """Get bsize and blocks and return their product.""" + cmdline = [self.module.get_bin_path(self.INFO, required=True)] + # Depending on the versions, xfs_info is able to get info from the + # device, whenever it is mounted or not, or only if unmounted, or + # only if mounted, or not at all. For any version until now, it is + # able to query info from the mountpoint. So try it first, and use + # device as the last resort: it may or may not work. mountpoint = dev.get_mountpoint() if mountpoint: - rc, out, err = self.module.run_command([cmd, str(mountpoint)], environ_update=self.LANG_ENV) + cmdline += [mountpoint] else: - # Recent GNU/Linux distros support access to unmounted XFS filesystems - rc, out, err = self.module.run_command([cmd, str(dev)], environ_update=self.LANG_ENV) - if rc != 0: - self.module.fail_json(msg="Error while attempting to query size of XFS filesystem: %s" % err) + cmdline += [str(dev)] + dummy, out, dummy = self.module.run_command(cmdline, check_rc=True, environ_update=self.LANG_ENV) + block_size = block_count = None for line in out.splitlines(): col = line.split('=') if col[0].strip() == 'data': - if col[1].strip() != 'bsize': - self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "bsize")') - if col[2].split()[1] != 'blocks': - self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "blocks")') - block_size = int(col[2].split()[0]) - block_count = int(col[3].split(',')[0]) - return block_size * block_count + if col[1].strip() == 'bsize': + block_size 
= int(col[2].split()[0]) + if col[2].split()[1] == 'blocks': + block_count = int(col[3].split(',')[0]) + if None not in (block_size, block_count): + break + else: + raise ValueError(out) - def grow_cmd(self, dev): - # Check first if growing is needed, and then if it is doable or not. - devsize_in_bytes = dev.size() - fssize_in_bytes = self.get_fs_size(dev) - if not fssize_in_bytes < devsize_in_bytes: - self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev)) - - mountpoint = dev.get_mountpoint() - if not mountpoint: - # xfs filesystem needs to be mounted - self.module.fail_json(msg="%s needs to be mounted for xfs operations" % dev) - - cmd = self.module.get_bin_path(self.GROW, required=True) - - return [cmd, str(mountpoint)] + return block_size * block_count class Reiserfs(Filesystem): MKFS = 'mkfs.reiserfs' - MKFS_FORCE_FLAGS = '-f' + MKFS_FORCE_FLAGS = ['-q'] class Btrfs(Filesystem): @@ -290,7 +317,8 @@ class Btrfs(Filesystem): def __init__(self, module): super(Btrfs, self).__init__(module) - dummy, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True) + mkfs = self.module.get_bin_path(self.MKFS, required=True) + dummy, stdout, stderr = self.module.run_command([mkfs, '--version'], check_rc=True) match = re.search(r" v([0-9.]+)", stdout) if not match: # v0.20-rc1 use stderr @@ -298,29 +326,27 @@ class Btrfs(Filesystem): if match: # v0.20-rc1 doesn't have --force parameter added in following version v3.12 if LooseVersion(match.group(1)) >= LooseVersion('3.12'): - self.MKFS_FORCE_FLAGS = '-f' - else: - self.MKFS_FORCE_FLAGS = '' + self.MKFS_FORCE_FLAGS = ['-f'] else: # assume version is greater or equal to 3.12 - self.MKFS_FORCE_FLAGS = '-f' + self.MKFS_FORCE_FLAGS = ['-f'] self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr)) class Ocfs2(Filesystem): MKFS = 'mkfs.ocfs2' - MKFS_FORCE_FLAGS = '-Fx' + MKFS_FORCE_FLAGS = ['-Fx'] class F2fs(Filesystem): MKFS 
= 'mkfs.f2fs' + INFO = 'dump.f2fs' GROW = 'resize.f2fs' - @property - def MKFS_FORCE_FLAGS(self): + def __init__(self, module): + super(F2fs, self).__init__(module) mkfs = self.module.get_bin_path(self.MKFS, required=True) - cmd = "%s %s" % (mkfs, os.devnull) - dummy, out, dummy = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV) + dummy, out, dummy = self.module.run_command([mkfs, os.devnull], check_rc=False, environ_update=self.LANG_ENV) # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)" # mkfs.f2fs displays version since v1.2.0 match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out) @@ -328,69 +354,73 @@ class F2fs(Filesystem): # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem # before that version -f switch wasn't used if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'): - return '-f' - - return '' + self.MKFS_FORCE_FLAGS = ['-f'] def get_fs_size(self, dev): - cmd = self.module.get_bin_path('dump.f2fs', required=True) - # Get sector count and sector size - dummy, dump, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) - sector_size = None - sector_count = None - for line in dump.splitlines(): + """Get sector size and total FS sectors and return their product.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) + sector_size = sector_count = None + for line in out.splitlines(): if 'Info: sector size = ' in line: # expected: 'Info: sector size = 512' sector_size = int(line.split()[4]) elif 'Info: total FS sectors = ' in line: # expected: 'Info: total FS sectors = 102400 (50 MB)' sector_count = int(line.split()[5]) - if None not in (sector_size, sector_count): break else: - self.module.warn("Unable to process dump.f2fs output '%s'", '\n'.join(dump)) - self.module.fail_json(msg="Unable to process dump.f2fs output for %s" % dev) + raise 
ValueError(out) return sector_size * sector_count class VFAT(Filesystem): - if platform.system() == 'FreeBSD': - MKFS = "newfs_msdos" - else: - MKFS = 'mkfs.vfat' + INFO = 'fatresize' GROW = 'fatresize' + GROW_MAX_SPACE_FLAGS = ['-s', 'max'] + + def __init__(self, module): + super(VFAT, self).__init__(module) + if platform.system() == 'FreeBSD': + self.MKFS = 'newfs_msdos' + else: + self.MKFS = 'mkfs.vfat' def get_fs_size(self, dev): - cmd = self.module.get_bin_path(self.GROW, required=True) - dummy, output, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) - for line in output.splitlines()[1:]: + """Get and return size of filesystem, in bytes.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + fssize = None + for line in out.splitlines()[1:]: param, value = line.split(':', 1) if param.strip() == 'Size': - return int(value.strip()) - self.module.fail_json(msg="fatresize failed to provide filesystem size for %s" % dev) + fssize = int(value.strip()) + break + else: + raise ValueError(out) - def grow_cmd(self, dev): - cmd = self.module.get_bin_path(self.GROW) - return [cmd, "-s", str(dev.size()), str(dev.path)] + return fssize class LVM(Filesystem): MKFS = 'pvcreate' - MKFS_FORCE_FLAGS = '-f' + MKFS_FORCE_FLAGS = ['-f'] + INFO = 'pvs' GROW = 'pvresize' def get_fs_size(self, dev): - cmd = self.module.get_bin_path('pvs', required=True) + """Get and return PV size, in bytes.""" + cmd = self.module.get_bin_path(self.INFO, required=True) dummy, size, dummy = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True) - block_count = int(size) - return block_count + pv_size = int(size) + return pv_size class Swap(Filesystem): MKFS = 'mkswap' - MKFS_FORCE_FLAGS = '-f' + MKFS_FORCE_FLAGS = ['-f'] FILESYSTEMS = { @@ -439,6 +469,10 @@ def 
main(): force = module.params['force'] resizefs = module.params['resizefs'] + mkfs_opts = [] + if opts is not None: + mkfs_opts = opts.split() + changed = False if not os.path.exists(dev): @@ -451,7 +485,7 @@ def main(): dev = Device(module, dev) cmd = module.get_bin_path('blkid', required=True) - rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev)) + rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)]) # In case blkid isn't able to identify an existing filesystem, device is considered as empty, # then this existing filesystem would be overwritten even if force isn't enabled. fs = raw_fs.strip() @@ -481,7 +515,7 @@ def main(): module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err) # create fs - filesystem.create(opts, dev) + filesystem.create(mkfs_opts, dev) changed = True elif fs: diff --git a/tests/integration/targets/filesystem/defaults/main.yml b/tests/integration/targets/filesystem/defaults/main.yml index 764b98b6ba..15ef85aa0e 100644 --- a/tests/integration/targets/filesystem/defaults/main.yml +++ b/tests/integration/targets/filesystem/defaults/main.yml @@ -17,7 +17,9 @@ tested_filesystems: ext2: {fssize: 10, grow: True} xfs: {fssize: 20, grow: False} # grow requires a mounted filesystem btrfs: {fssize: 150, grow: False} # grow not implemented + reiserfs: {fssize: 33, grow: False} # grow not implemented vfat: {fssize: 20, grow: True} ocfs2: {fssize: '{{ ocfs2_fssize }}', grow: False} # grow not implemented f2fs: {fssize: '{{ f2fs_fssize|default(60) }}', grow: 'f2fs_version is version("1.10.0", ">=")'} lvm: {fssize: 20, grow: True} + swap: {fssize: 10, grow: False} # grow not implemented diff --git a/tests/integration/targets/filesystem/tasks/create_device.yml b/tests/integration/targets/filesystem/tasks/create_device.yml index e49861e7ca..30fd62e33a 100644 --- a/tests/integration/targets/filesystem/tasks/create_device.yml +++ 
b/tests/integration/targets/filesystem/tasks/create_device.yml @@ -1,6 +1,9 @@ --- - name: 'Create a "disk" file' - command: 'dd if=/dev/zero of={{ image_file }} bs=1M count={{ fssize }}' + community.general.filesize: + path: '{{ image_file }}' + size: '{{ fssize }}M' + force: true - vars: dev: '{{ image_file }}' @@ -8,26 +11,29 @@ - when: fstype == 'lvm' block: - name: 'Create a loop device for LVM' - command: 'losetup --show -f {{ dev }}' + ansible.builtin.command: + cmd: 'losetup --show -f {{ dev }}' register: loop_device_cmd - - set_fact: + - name: 'Switch to loop device target for further tasks' + ansible.builtin.set_fact: dev: "{{ loop_device_cmd.stdout }}" - include_tasks: '{{ action }}.yml' always: - name: 'Detach loop device used for LVM' - command: 'losetup -d {{ dev }}' - args: + ansible.builtin.command: + cmd: 'losetup -d {{ dev }}' removes: '{{ dev }}' when: fstype == 'lvm' - name: 'Clean correct device for LVM' - set_fact: + ansible.builtin.set_fact: dev: '{{ image_file }}' when: fstype == 'lvm' - - file: + - name: 'Remove disk image file' + ansible.builtin.file: name: '{{ image_file }}' state: absent diff --git a/tests/integration/targets/filesystem/tasks/create_fs.yml b/tests/integration/targets/filesystem/tasks/create_fs.yml index 688a4462db..de1a9f18a0 100644 --- a/tests/integration/targets/filesystem/tasks/create_fs.yml +++ b/tests/integration/targets/filesystem/tasks/create_fs.yml @@ -1,43 +1,58 @@ -- name: filesystem creation - filesystem: +--- +- name: "Create filesystem" + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' register: fs_result -- assert: +- name: "Assert that results are as expected" + ansible.builtin.assert: that: - 'fs_result is changed' - 'fs_result is success' -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: "Get UUID of created filesystem" + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid - name: "Check that 
filesystem isn't created if force isn't used" - filesystem: + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' register: fs2_result -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: "Get UUID of the filesystem" + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid2 -- assert: +- name: "Assert that filesystem UUID is not changed" + ansible.builtin.assert: that: - - 'not (fs2_result is changed)' + - 'fs2_result is not changed' - 'fs2_result is success' - 'uuid.stdout == uuid2.stdout' -- name: Check that filesystem is recreated if force is used - filesystem: +- name: "Check that filesystem is recreated if force is used" + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' force: yes register: fs3_result -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: "Get UUID of the new filesystem" + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid3 -- assert: +- name: "Assert that filesystem UUID is changed" + # libblkid gets no UUID at all for this fstype on FreeBSD + when: not (ansible_system == 'FreeBSD' and fstype == 'reiserfs') + ansible.builtin.assert: that: - 'fs3_result is changed' - 'fs3_result is success' @@ -46,24 +61,31 @@ - when: 'grow|bool and (fstype != "vfat" or resize_vfat)' block: - - name: increase fake device - shell: 'dd if=/dev/zero bs=1M count=1 >> {{ image_file }}' + - name: "Increase fake device" + community.general.filesize: + path: '{{ image_file }}' + size: '{{ fssize | int + 1 }}M' - - name: Resize loop device for LVM - command: losetup -c {{ dev }} + - name: "Resize loop device for LVM" + ansible.builtin.command: + cmd: 'losetup -c {{ dev }}' when: fstype == 'lvm' - - name: Expand filesystem - filesystem: + - name: "Expand filesystem" + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' resizefs: yes register: fs4_result - - 
command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + - name: "Get UUID of the filesystem" + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid4 - - assert: + - name: "Assert that filesystem UUID is not changed" + ansible.builtin.assert: that: - 'fs4_result is changed' - 'fs4_result is success' @@ -74,14 +96,15 @@ (fstype == "xfs" and ansible_system == "Linux" and ansible_distribution not in ["CentOS", "Ubuntu"]) block: - - name: Check that resizefs does nothing if device size is not changed - filesystem: + - name: "Check that resizefs does nothing if device size is not changed" + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' resizefs: yes register: fs5_result - - assert: + - name: "Assert that the state did not change" + ansible.builtin.assert: that: - 'fs5_result is not changed' - 'fs5_result is succeeded' diff --git a/tests/integration/targets/filesystem/tasks/main.yml b/tests/integration/targets/filesystem/tasks/main.yml index 44e8c49f61..d836c8a15d 100644 --- a/tests/integration/targets/filesystem/tasks/main.yml +++ b/tests/integration/targets/filesystem/tasks/main.yml @@ -4,9 +4,9 @@ # and should not be used as examples of how to write Ansible roles # #################################################################### -- debug: +- ansible.builtin.debug: msg: '{{ role_name }}' -- debug: +- ansible.builtin.debug: msg: '{{ role_path|basename }}' - import_tasks: setup.yml @@ -27,29 +27,35 @@ grow: '{{ item.0.value.grow }}' action: '{{ item.1 }}' when: - - 'not (item.0.key == "btrfs" and ansible_system == "FreeBSD")' # btrfs not available on FreeBSD - # On Ubuntu trusty, blkid is unable to identify filesystem smaller than 256Mo, see - # https://www.kernel.org/pub/linux/utils/util-linux/v2.21/v2.21-ChangeLog - # https://anonscm.debian.org/cgit/collab-maint/pkg-util-linux.git/commit/?id=04f7020eadf31efc731558df92daa0a1c336c46c - - 'not (item.0.key == "btrfs" and 
(ansible_distribution == "Ubuntu" and ansible_distribution_release == "trusty"))' - - 'not (item.0.key == "btrfs" and (ansible_facts.os_family == "RedHat" and ansible_facts.distribution_major_version is version("8", ">=")))' - - 'not (item.0.key == "lvm" and ansible_system == "FreeBSD")' # LVM not available on FreeBSD - - 'not (item.0.key == "lvm" and ansible_virtualization_type in ["docker", "container", "containerd"])' # Tests use losetup which can not be used inside unprivileged container - - 'not (item.0.key == "ocfs2" and ansible_os_family != "Debian")' # ocfs2 only available on Debian based distributions - - 'not (item.0.key == "f2fs" and ansible_system == "FreeBSD")' - # f2fs-tools package not available with RHEL/CentOS - - 'not (item.0.key == "f2fs" and ansible_distribution in ["CentOS", "RedHat"])' - # On Ubuntu trusty, blkid (2.20.1) is unable to identify F2FS filesystem. blkid handles F2FS since v2.23, see: - # https://mirrors.edge.kernel.org/pub/linux/utils/util-linux/v2.23/v2.23-ReleaseNotes - - 'not (item.0.key == "f2fs" and ansible_distribution == "Ubuntu" and ansible_distribution_version is version("14.04", "<="))' - - 'not (item.1 == "overwrite_another_fs" and ansible_system == "FreeBSD")' + # FreeBSD limited support + # Not available: btrfs, lvm, f2fs, ocfs2 + # All BSD systems use swap fs, but only Linux needs mkswap + # Supported: ext2/3/4 (e2fsprogs), xfs (xfsprogs), reiserfs (progsreiserfs), vfat + - 'not (ansible_system == "FreeBSD" and item.0.key in ["btrfs", "f2fs", "swap", "lvm", "ocfs2"])' + # Available on FreeBSD but not on testbed (util-linux conflicts with e2fsprogs): wipefs, mkfs.minix + - 'not (ansible_system == "FreeBSD" and item.1 in ["overwrite_another_fs", "remove_fs"])' + + # Other limitations and corner cases + + # f2fs-tools and reiserfs-utils packages not available with RHEL/CentOS on CI + - 'not (ansible_distribution in ["CentOS", "RedHat"] and item.0.key in ["f2fs", "reiserfs"])' + - 'not (ansible_os_family == "RedHat" and 
ansible_distribution_major_version is version("8", ">=") and + item.0.key == "btrfs")' + # ocfs2 only available on Debian based distributions + - 'not (item.0.key == "ocfs2" and ansible_os_family != "Debian")' + # Tests use losetup which can not be used inside unprivileged container + - 'not (item.0.key == "lvm" and ansible_virtualization_type in ["docker", "container", "containerd"])' - - 'not (item.1 == "remove_fs" and ansible_system == "FreeBSD")' # util-linux not available on FreeBSD # On CentOS 6 shippable containers, wipefs seems unable to remove vfat signatures - - 'not (item.1 == "remove_fs" and item.0.key == "vfat" and ansible_distribution == "CentOS" and - ansible_distribution_version is version("7.0", "<"))' + - 'not (ansible_distribution == "CentOS" and ansible_distribution_version is version("7.0", "<") and + item.1 == "remove_fs" and item.0.key == "vfat")' + # On same systems, mkfs.minix (unhandled by the module) can't find the device/file + - 'not (ansible_distribution == "CentOS" and ansible_distribution_version is version("7.0", "<") and + item.1 == "overwrite_another_fs")' # The xfsprogs package on newer versions of OpenSUSE (15+) require Python 3, we skip this on our Python 2 container # OpenSUSE 42.3 Python2 and the other py3 containers are not affected so we will continue to run that - - 'not (item.0.key == "xfs" and ansible_os_family == "Suse" and ansible_python.version.major == 2 and ansible_distribution_major_version|int != 42)' + - 'not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and + item.0.key == "xfs" and ansible_python.version.major == 2)' + loop: "{{ query('dict', tested_filesystems)|product(['create_fs', 'overwrite_another_fs', 'remove_fs'])|list }}" diff --git a/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml b/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml index 671d9b0bea..4bf92836bb 100644 --- 
a/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml +++ b/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml @@ -1,40 +1,55 @@ --- - name: 'Recreate "disk" file' - command: 'dd if=/dev/zero of={{ image_file }} bs=1M count={{ fssize }}' + community.general.filesize: + path: '{{ image_file }}' + size: '{{ fssize }}M' + force: true -- name: 'Create a swap filesystem' - command: 'mkswap {{ dev }}' +- name: 'Create a minix filesystem' + ansible.builtin.command: + cmd: 'mkfs.minix {{ dev }}' -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: 'Get UUID of the new filesystem' + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid - name: "Check that an existing filesystem (not handled by this module) isn't overwritten when force isn't used" - filesystem: + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' register: fs_result ignore_errors: True -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: 'Get UUID of the filesystem' + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid2 -- assert: +- name: 'Assert that module failed and filesystem UUID is not changed' + ansible.builtin.assert: that: - 'fs_result is failed' - 'uuid.stdout == uuid2.stdout' - name: "Check that an existing filesystem (not handled by this module) is overwritten when force is used" - filesystem: + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' force: yes register: fs_result2 -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: 'Get UUID of the new filesystem' + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid3 -- assert: +- name: 'Assert that module succeeded and filesystem UUID is changed' + ansible.builtin.assert: that: - - 'fs_result2 is successful' + - 'fs_result2 is success' - 
'fs_result2 is changed' - 'uuid2.stdout != uuid3.stdout' diff --git a/tests/integration/targets/filesystem/tasks/remove_fs.yml b/tests/integration/targets/filesystem/tasks/remove_fs.yml index 7d1ca2a19c..338d439d60 100644 --- a/tests/integration/targets/filesystem/tasks/remove_fs.yml +++ b/tests/integration/targets/filesystem/tasks/remove_fs.yml @@ -1,98 +1,98 @@ --- # We assume 'create_fs' tests have passed. -- name: filesystem creation - filesystem: +- name: "Create filesystem" + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' -- name: get filesystem UUID with 'blkid' - command: +- name: "Get filesystem UUID with 'blkid'" + ansible.builtin.command: cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' changed_when: false register: blkid_ref -- name: Assert that a filesystem exists on top of the device - assert: +- name: "Assert that a filesystem exists on top of the device" + ansible.builtin.assert: that: - blkid_ref.stdout | length > 0 # Test check_mode first -- name: filesystem removal (check mode) - filesystem: +- name: "Remove filesystem (check mode)" + community.general.filesystem: dev: '{{ dev }}' state: absent register: wipefs check_mode: yes -- name: get filesystem UUID with 'blkid' (should remain the same) - command: +- name: "Get filesystem UUID with 'blkid' (should remain the same)" + ansible.builtin.command: cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' changed_when: false register: blkid -- name: Assert that the state changed but the filesystem still exists - assert: +- name: "Assert that the state changed but the filesystem still exists" + ansible.builtin.assert: that: - wipefs is changed - blkid.stdout == blkid_ref.stdout # Do it -- name: filesystem removal - filesystem: +- name: "Remove filesystem" + community.general.filesystem: dev: '{{ dev }}' state: absent register: wipefs -- name: get filesystem UUID with 'blkid' (should be empty) - command: +- name: "Get filesystem UUID with 'blkid' (should be empty)" + 
ansible.builtin.command: cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' changed_when: false failed_when: false register: blkid -- name: Assert that the state changed and the device has no filesystem - assert: +- name: "Assert that the state changed and the device has no filesystem" + ansible.builtin.assert: that: - wipefs is changed - blkid.stdout | length == 0 - blkid.rc == 2 # Do it again -- name: filesystem removal (idempotency) - filesystem: +- name: "Remove filesystem (idempotency)" + community.general.filesystem: dev: '{{ dev }}' state: absent register: wipefs -- name: Assert that the state did not change - assert: +- name: "Assert that the state did not change" + ansible.builtin.assert: that: - wipefs is not changed # and again -- name: filesystem removal (idempotency, check mode) - filesystem: +- name: "Remove filesystem (idempotency, check mode)" + community.general.filesystem: dev: '{{ dev }}' state: absent register: wipefs check_mode: yes -- name: Assert that the state did not change - assert: +- name: "Assert that the state did not change" + ansible.builtin.assert: that: - wipefs is not changed # By the way, test removal of a filesystem on unexistent device -- name: filesystem removal (unexistent device) - filesystem: +- name: "Remove filesystem (unexistent device)" + community.general.filesystem: dev: '/dev/unexistent_device' state: absent register: wipefs -- name: Assert that the state did not change - assert: +- name: "Assert that the state did not change" + ansible.builtin.assert: that: - wipefs is not changed diff --git a/tests/integration/targets/filesystem/tasks/setup.yml b/tests/integration/targets/filesystem/tasks/setup.yml index 82fe7c54e6..9ca4b983d0 100644 --- a/tests/integration/targets/filesystem/tasks/setup.yml +++ b/tests/integration/targets/filesystem/tasks/setup.yml @@ -1,6 +1,9 @@ --- -- name: install filesystem tools - package: +# By installing e2fsprogs on FreeBSD, we get a usable blkid command, but this +# package conflicts 
with util-linux, that provides blkid too, but also wipefs +# (required for filesystem state=absent). +- name: "Install filesystem tools" + ansible.builtin.package: name: '{{ item }}' state: present # xfsprogs on OpenSUSE requires Python 3, skip this for our newer Py2 OpenSUSE builds @@ -9,86 +12,134 @@ - e2fsprogs - xfsprogs -- block: - - name: install btrfs progs - package: - name: btrfs-progs - state: present - when: - - ansible_os_family != 'Suse' - - not (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('16.04', '<=')) - - ansible_system != "FreeBSD" - - not (ansible_facts.os_family == "RedHat" and ansible_facts.distribution_major_version is version('8', '>=')) +- name: "Install btrfs progs" + ansible.builtin.package: + name: btrfs-progs + state: present + when: + - ansible_os_family != 'Suse' + - not (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('16.04', '<=')) + - ansible_system != "FreeBSD" + - not (ansible_facts.os_family == "RedHat" and ansible_facts.distribution_major_version is version('8', '>=')) - - name: install btrfs progs (Ubuntu <= 16.04) - package: - name: btrfs-tools - state: present - when: ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('16.04', '<=') +- name: "Install btrfs tools (Ubuntu <= 16.04)" + ansible.builtin.package: + name: btrfs-tools + state: present + when: + - ansible_distribution == 'Ubuntu' + - ansible_distribution_version is version('16.04', '<=') - - name: install btrfs progs (OpenSuse) - package: - name: '{{ item }}' - state: present - when: ansible_os_family == 'Suse' - with_items: - - python{{ ansible_python.version.major }}-xml - - btrfsprogs +- name: "Install btrfs progs (OpenSuse)" + ansible.builtin.package: + name: '{{ item }}' + state: present + when: ansible_os_family == 'Suse' + with_items: + - python{{ ansible_python.version.major }}-xml + - btrfsprogs - - name: install ocfs2 (Debian) - package: - name: ocfs2-tools - state: 
present - when: ansible_os_family == 'Debian' +- name: "Install reiserfs utils (Fedora)" + ansible.builtin.package: + name: reiserfs-utils + state: present + when: + - ansible_distribution == 'Fedora' - - when: - - ansible_os_family != 'RedHat' or ansible_distribution == 'Fedora' - - ansible_distribution != 'Ubuntu' or ansible_distribution_version is version('16.04', '>=') - - ansible_system != "FreeBSD" - block: - - name: install f2fs - package: - name: f2fs-tools - state: present +- name: "Install reiserfs (OpenSuse)" + ansible.builtin.package: + name: reiserfs + state: present + when: + - ansible_os_family == 'Suse' - - name: fetch f2fs version - command: mkfs.f2fs /dev/null - ignore_errors: yes - register: mkfs_f2fs +- name: "Install reiserfs progs (Debian and more)" + ansible.builtin.package: + name: reiserfsprogs + state: present + when: + - ansible_system == 'Linux' + - ansible_os_family not in ['Suse', 'RedHat'] - - set_fact: - f2fs_version: '{{ mkfs_f2fs.stdout | regex_search("F2FS-tools: mkfs.f2fs Ver:.*") | regex_replace("F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) .*", "\1") }}' +- name: "Install reiserfs progs (FreeBSD)" + ansible.builtin.package: + name: progsreiserfs + state: present + when: + - ansible_system == 'FreeBSD' - - name: install dosfstools and lvm2 (Linux) - package: - name: '{{ item }}' - with_items: - - dosfstools - - lvm2 - when: ansible_system == 'Linux' +- name: "Install ocfs2 (Debian)" + ansible.builtin.package: + name: ocfs2-tools + state: present + when: ansible_os_family == 'Debian' -- block: - - name: install fatresize - package: - name: fatresize - state: present - - command: fatresize --help - register: fatresize - - set_fact: - fatresize_version: '{{ fatresize.stdout_lines[0] | regex_search("[0-9]+\.[0-9]+\.[0-9]+") }}' +- name: "Install f2fs tools and get version" + when: + - ansible_os_family != 'RedHat' or ansible_distribution == 'Fedora' + - ansible_distribution != 'Ubuntu' or ansible_distribution_version is version('16.04', 
'>=') + - ansible_system != "FreeBSD" + block: + - name: "Install f2fs tools" + ansible.builtin.package: + name: f2fs-tools + state: present + + - name: "Fetch f2fs version" + ansible.builtin.command: + cmd: mkfs.f2fs /dev/null + changed_when: false + ignore_errors: true + register: mkfs_f2fs + + - name: "Record f2fs_version" + ansible.builtin.set_fact: + f2fs_version: '{{ mkfs_f2fs.stdout + | regex_search("F2FS-tools: mkfs.f2fs Ver:.*") + | regex_replace("F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) .*", "\1") }}' + +- name: "Install dosfstools and lvm2 (Linux)" + ansible.builtin.package: + name: '{{ item }}' + with_items: + - dosfstools + - lvm2 + when: ansible_system == 'Linux' + +- name: "Install fatresize and get version" when: - ansible_system == 'Linux' - ansible_os_family != 'Suse' - ansible_os_family != 'RedHat' or (ansible_distribution == 'CentOS' and ansible_distribution_version is version('7.0', '==')) + block: + - name: "Install fatresize" + ansible.builtin.package: + name: fatresize + state: present -- command: mke2fs -V + - name: "Fetch fatresize version" + ansible.builtin.command: + cmd: fatresize --help + changed_when: false + register: fatresize + + - name: "Record fatresize_version" + ansible.builtin.set_fact: + fatresize_version: '{{ fatresize.stdout_lines[0] | regex_search("[0-9]+\.[0-9]+\.[0-9]+") }}' + +- name: "Fetch e2fsprogs version" + ansible.builtin.command: + cmd: mke2fs -V + changed_when: false register: mke2fs -- set_fact: +- name: "Record e2fsprogs_version" + ansible.builtin.set_fact: # mke2fs 1.43.6 (29-Aug-2017) e2fsprogs_version: '{{ mke2fs.stderr_lines[0] | regex_search("[0-9]{1,2}\.[0-9]{1,2}(\.[0-9]{1,2})?") }}' -- set_fact: +- name: "Set version-related facts to skip further tasks" + ansible.builtin.set_fact: # http://e2fsprogs.sourceforge.net/e2fsprogs-release.html#1.43 # Mke2fs no longer complains if the user tries to create a file system # using the entire block device. 
From 2c1ab2d384cc44136e76a9177a7e87e4c7d1f96a Mon Sep 17 00:00:00 2001 From: quidame Date: Tue, 18 May 2021 11:51:37 +0200 Subject: [PATCH 0061/2828] iptables_state: fix per-table initialization command (#2525) * refactor initialize_from_null_state() * Use a more neutral command (iptables -L) to load per-table needed modules. * fix 'FutureWarning: Possible nested set at position ...' (re.sub) * fix pylints (module + action plugin) * unsubscriptable-object * superfluous-parens * consider-using-in * unused-variable * unused-import * no-else-break * cleanup other internal module_args if they exist * add changelog fragment * Apply suggestions from code review (changelog fragment) Co-authored-by: Felix Fontein * Remove useless plugin type in changelog fragment Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- ...ables_state-fix-initialization-command.yml | 6 +++ plugins/action/system/iptables_state.py | 19 ++++--- plugins/modules/system/iptables_state.py | 49 +++++++++---------- 3 files changed, 37 insertions(+), 37 deletions(-) create mode 100644 changelogs/fragments/2525-iptables_state-fix-initialization-command.yml diff --git a/changelogs/fragments/2525-iptables_state-fix-initialization-command.yml b/changelogs/fragments/2525-iptables_state-fix-initialization-command.yml new file mode 100644 index 0000000000..552c0b26ab --- /dev/null +++ b/changelogs/fragments/2525-iptables_state-fix-initialization-command.yml @@ -0,0 +1,6 @@ +--- +bugfixes: + - "iptables_state - fix initialization of iptables from null state when adressing + more than one table (https://github.com/ansible-collections/community.general/issues/2523)." + - "iptables_state - fix a 'FutureWarning' in a regex and do some basic code clean up + (https://github.com/ansible-collections/community.general/pull/2525)." 
diff --git a/plugins/action/system/iptables_state.py b/plugins/action/system/iptables_state.py index cc174b3bd7..96b6dc689c 100644 --- a/plugins/action/system/iptables_state.py +++ b/plugins/action/system/iptables_state.py @@ -7,7 +7,7 @@ __metaclass__ = type import time from ansible.plugins.action import ActionBase -from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure +from ansible.errors import AnsibleActionFail, AnsibleConnectionFailure from ansible.utils.vars import merge_hash from ansible.utils.display import Display @@ -46,7 +46,7 @@ class ActionModule(ActionBase): the async wrapper results (those with the ansible_job_id key). ''' # At least one iteration is required, even if timeout is 0. - for i in range(max(1, timeout)): + for dummy in range(max(1, timeout)): async_result = self._execute_module( module_name='ansible.builtin.async_status', module_args=module_args, @@ -76,7 +76,6 @@ class ActionModule(ActionBase): task_async = self._task.async_val check_mode = self._play_context.check_mode max_timeout = self._connection._play_context.timeout - module_name = self._task.action module_args = self._task.args if module_args.get('state', None) == 'restored': @@ -133,7 +132,7 @@ class ActionModule(ActionBase): # The module is aware to not process the main iptables-restore # command before finding (and deleting) the 'starter' cookie on # the host, so the previous query will not reach ssh timeout. - garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE) + dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE) # As the main command is not yet executed on the target, here # 'finished' means 'failed before main command be executed'. @@ -143,7 +142,7 @@ class ActionModule(ActionBase): except AttributeError: pass - for x in range(max_timeout): + for dummy in range(max_timeout): time.sleep(1) remaining_time -= 1 # - AnsibleConnectionFailure covers rejected requests (i.e. 
@@ -151,7 +150,7 @@ class ActionModule(ActionBase): # - ansible_timeout is able to cover dropped requests (due # to a rule or policy DROP) if not lower than async_val. try: - garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE) + dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE) break except AnsibleConnectionFailure: continue @@ -164,12 +163,12 @@ class ActionModule(ActionBase): del result[key] if result.get('invocation', {}).get('module_args'): - if '_timeout' in result['invocation']['module_args']: - del result['invocation']['module_args']['_back'] - del result['invocation']['module_args']['_timeout'] + for key in ('_back', '_timeout', '_async_dir', 'jid'): + if result['invocation']['module_args'].get(key): + del result['invocation']['module_args'][key] async_status_args['mode'] = 'cleanup' - garbage = self._execute_module( + dummy = self._execute_module( module_name='ansible.builtin.async_status', module_args=async_status_args, task_vars=task_vars, diff --git a/plugins/modules/system/iptables_state.py b/plugins/modules/system/iptables_state.py index 5647526819..326db862bc 100644 --- a/plugins/modules/system/iptables_state.py +++ b/plugins/modules/system/iptables_state.py @@ -232,7 +232,7 @@ import filecmp import shutil from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils._text import to_bytes, to_native IPTABLES = dict( @@ -262,7 +262,7 @@ def read_state(b_path): lines = text.splitlines() while '' in lines: lines.remove('') - return (lines) + return lines def write_state(b_path, lines, changed): @@ -282,9 +282,9 @@ def write_state(b_path, lines, changed): if b_destdir and not os.path.exists(b_destdir) and not module.check_mode: try: os.makedirs(b_destdir) - except Exception as e: + except Exception as err: module.fail_json( - msg='Error creating %s. Error code: %s. 
Error description: %s' % (destdir, e[0], e[1]), + msg='Error creating %s: %s' % (destdir, to_native(err)), initial_state=lines) changed = True @@ -295,10 +295,10 @@ def write_state(b_path, lines, changed): if changed and not module.check_mode: try: shutil.copyfile(tmpfile, b_path) - except Exception as e: + except Exception as err: path = to_native(b_path, errors='surrogate_or_strict') module.fail_json( - msg='Error saving state into %s. Error code: %s. Error description: %s' % (path, e[0], e[1]), + msg='Error saving state into %s: %s' % (path, to_native(err)), initial_state=lines) return changed @@ -313,14 +313,11 @@ def initialize_from_null_state(initializer, initcommand, table): if table is None: table = 'filter' - tmpfd, tmpfile = tempfile.mkstemp() - with os.fdopen(tmpfd, 'w') as f: - f.write('*%s\nCOMMIT\n' % table) - - initializer.append(tmpfile) - (rc, out, err) = module.run_command(initializer, check_rc=True) + commandline = list(initializer) + commandline += ['-t', table] + (rc, out, err) = module.run_command(commandline, check_rc=True) (rc, out, err) = module.run_command(initcommand, check_rc=True) - return (rc, out, err) + return rc, out, err def filter_and_format_state(string): @@ -328,13 +325,13 @@ def filter_and_format_state(string): Remove timestamps to ensure idempotence between runs. Also remove counters by default. And return the result as a list. 
''' - string = re.sub('((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', '\\1', string) + string = re.sub(r'((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', r'\1', string) if not module.params['counters']: - string = re.sub('[[][0-9]+:[0-9]+[]]', '[0:0]', string) + string = re.sub(r'\[[0-9]+:[0-9]+\]', r'[0:0]', string) lines = string.splitlines() while '' in lines: lines.remove('') - return (lines) + return lines def per_table_state(command, state): @@ -347,14 +344,14 @@ def per_table_state(command, state): COMMAND = list(command) if '*%s' % t in state.splitlines(): COMMAND.extend(['--table', t]) - (rc, out, err) = module.run_command(COMMAND, check_rc=True) - out = re.sub('(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, '', out) - out = re.sub(' *[[][0-9]+:[0-9]+[]] *', '', out) + dummy, out, dummy = module.run_command(COMMAND, check_rc=True) + out = re.sub(r'(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, r'', out) + out = re.sub(r' *\[[0-9]+:[0-9]+\] *', r'', out) table = out.splitlines() while '' in table: table.remove('') tables[t] = table - return (tables) + return tables def main(): @@ -402,7 +399,7 @@ def main(): changed = False COMMANDARGS = [] INITCOMMAND = [bin_iptables_save] - INITIALIZER = [bin_iptables_restore] + INITIALIZER = [bin_iptables, '-L', '-n'] TESTCOMMAND = [bin_iptables_restore, '--test'] if counters: @@ -502,7 +499,7 @@ def main(): if _back is not None: b_back = to_bytes(_back, errors='surrogate_or_strict') - garbage = write_state(b_back, initref_state, changed) + dummy = write_state(b_back, initref_state, changed) BACKCOMMAND = list(MAINCOMMAND) BACKCOMMAND.append(_back) @@ -559,9 +556,7 @@ def main(): if os.path.exists(b_starter): os.remove(b_starter) break - else: - time.sleep(0.01) - continue + time.sleep(0.01) (rc, stdout, stderr) = module.run_command(MAINCOMMAND) if 'Another app is currently holding the xtables lock' in stderr: @@ -579,7 +574,7 @@ def main(): (rc, stdout, stderr) = module.run_command(SAVECOMMAND, 
check_rc=True) restored_state = filter_and_format_state(stdout) - if restored_state != initref_state and restored_state != initial_state: + if restored_state not in (initref_state, initial_state): if module.check_mode: changed = True else: @@ -609,7 +604,7 @@ def main(): # timeout # * task attribute 'poll' equals 0 # - for x in range(_timeout): + for dummy in range(_timeout): if os.path.exists(b_back): time.sleep(1) continue From 31687a524ea22ac313e18af067b413c313f8c714 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 18 May 2021 11:57:59 +0200 Subject: [PATCH 0062/2828] Next planned release is 3.2.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index a4b4cad7e0..ba1969d712 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.1.0 +version: 3.2.0 readme: README.md authors: - Ansible (https://github.com/ansible) From c4624d3ad8db66a3f7d21656fef8a8f60a907aeb Mon Sep 17 00:00:00 2001 From: Andre Lehmann Date: Tue, 18 May 2021 12:59:11 +0200 Subject: [PATCH 0063/2828] pacman: add 'executable' option to use an alternative pacman binary (#2524) * Add 'bin' option to use an alternative pacman binary * Add changelog entry * Incorporate recommendations * Update plugins/modules/packaging/os/pacman.py * Apply suggestions from code review Co-authored-by: Felix Fontein --- .../fragments/2524-pacman_add_bin_option.yml | 2 ++ plugins/modules/packaging/os/pacman.py | 26 ++++++++++++++++--- 2 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2524-pacman_add_bin_option.yml diff --git a/changelogs/fragments/2524-pacman_add_bin_option.yml b/changelogs/fragments/2524-pacman_add_bin_option.yml new file mode 100644 index 0000000000..1a7c78f7ec --- /dev/null +++ b/changelogs/fragments/2524-pacman_add_bin_option.yml @@ -0,0 +1,2 @@ +minor_changes: + - pacman - add ``executable`` option to use an alternative pacman binary 
(https://github.com/ansible-collections/community.general/issues/2524). diff --git a/plugins/modules/packaging/os/pacman.py b/plugins/modules/packaging/os/pacman.py index b19528ba9e..859c90a6c4 100644 --- a/plugins/modules/packaging/os/pacman.py +++ b/plugins/modules/packaging/os/pacman.py @@ -44,6 +44,14 @@ options: default: no type: bool + executable: + description: + - Name of binary to use. This can either be C(pacman) or a pacman compatible AUR helper. + - Beware that AUR helpers might behave unexpectedly and are therefore not recommended. + default: pacman + type: str + version_added: 3.1.0 + extra_args: description: - Additional option to pass to pacman when enforcing C(state). @@ -79,8 +87,10 @@ options: type: str notes: - - When used with a `loop:` each package will be processed individually, - it is much more efficient to pass the list directly to the `name` option. + - When used with a C(loop:) each package will be processed individually, + it is much more efficient to pass the list directly to the I(name) option. + - To use an AUR helper (I(executable) option), a few extra setup steps might be required beforehand. + For example, a dedicated build user with permissions to install packages could be necessary. 
''' RETURN = ''' @@ -109,6 +119,13 @@ EXAMPLES = ''' - ~/bar-1.0-1-any.pkg.tar.xz state: present +- name: Install package from AUR using a Pacman compatible AUR helper + community.general.pacman: + name: foo + state: present + executable: yay + extra_args: --builddir /var/cache/yay + - name: Upgrade package foo community.general.pacman: name: foo @@ -419,6 +436,7 @@ def main(): name=dict(type='list', elements='str', aliases=['pkg', 'package']), state=dict(type='str', default='present', choices=['present', 'installed', 'latest', 'absent', 'removed']), force=dict(type='bool', default=False), + executable=dict(type='str', default='pacman'), extra_args=dict(type='str', default=''), upgrade=dict(type='bool', default=False), upgrade_extra_args=dict(type='str', default=''), @@ -432,11 +450,13 @@ def main(): supports_check_mode=True, ) - pacman_path = module.get_bin_path('pacman', True) module.run_command_environ_update = dict(LC_ALL='C') p = module.params + # find pacman binary + pacman_path = module.get_bin_path(p['executable'], True) + # normalize the state parameter if p['state'] in ['present', 'installed']: p['state'] = 'present' From 452a185a2364b6c404093a1d5c6a6efa0e092c18 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 20 May 2021 05:38:11 +1200 Subject: [PATCH 0064/2828] removed supporting code for testing module "nuage" - no longer exists here (#2559) --- .../prepare_nuage_tests/tasks/main.yml | 24 ------------------- 1 file changed, 24 deletions(-) delete mode 100644 tests/integration/targets/prepare_nuage_tests/tasks/main.yml diff --git a/tests/integration/targets/prepare_nuage_tests/tasks/main.yml b/tests/integration/targets/prepare_nuage_tests/tasks/main.yml deleted file mode 100644 index 2a902dc828..0000000000 --- a/tests/integration/targets/prepare_nuage_tests/tasks/main.yml +++ /dev/null @@ -1,24 +0,0 @@ -#################################################################### -# WARNING: These are designed 
specifically for Ansible tests # -# and should not be used as examples of how to write Ansible roles # -#################################################################### - -- block: - - name: Install Nuage VSD API Simulator - pip: - name: nuage-vsd-sim - - - name: Start Nuage VSD API Simulator - shell: "(cd /; nuage-vsd-sim >/dev/null 2>&1)" - async: 1800 - poll: 0 - - - name: Wait for API to be ready - uri: - url: http://localhost:5000 - register: api - delay: 3 - retries: 10 - until: api.status == 200 - - when: "ansible_python_version is version('2.7', '>=')" From 1403f5edccd34027b25dfda9fa61309e16b0f3d2 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 21 May 2021 05:43:16 +1200 Subject: [PATCH 0065/2828] ModuleHelper: CmdMixin custom function for processing cmd results (#2564) * MH: custom function for processing cmd results * added changelog fragment * removed case of process_output being a str --- changelogs/fragments/2564-mh-cmd-process-output.yml | 2 ++ plugins/module_utils/mh/mixins/cmd.py | 9 +++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2564-mh-cmd-process-output.yml diff --git a/changelogs/fragments/2564-mh-cmd-process-output.yml b/changelogs/fragments/2564-mh-cmd-process-output.yml new file mode 100644 index 0000000000..717c0d7fbb --- /dev/null +++ b/changelogs/fragments/2564-mh-cmd-process-output.yml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper module utils - method ``CmdMixin.run_command()`` now accepts ``process_output`` specifying a function to process the outcome of the underlying ``module.run_command()`` (https://github.com/ansible-collections/community.general/pull/2564). 
diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py index eb7cc698cc..724708868e 100644 --- a/plugins/module_utils/mh/mixins/cmd.py +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -152,7 +152,7 @@ class CmdMixin(object): def process_command_output(self, rc, out, err): return rc, out, err - def run_command(self, extra_params=None, params=None, *args, **kwargs): + def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs): self.vars.cmd_args = self._calculate_args(extra_params, params) options = dict(self.run_command_fixed_options) env_update = dict(options.get('environ_update', {})) @@ -164,4 +164,9 @@ class CmdMixin(object): options.update(kwargs) rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options) self.update_output(rc=rc, stdout=out, stderr=err) - return self.process_command_output(rc, out, err) + if process_output is None: + _process = self.process_command_output + else: + _process = process_output + + return _process(rc, out, err) From 7a169af0534d21142e2ddd5b89a882aedd2b6256 Mon Sep 17 00:00:00 2001 From: momcilo78 Date: Thu, 20 May 2021 22:06:00 +0200 Subject: [PATCH 0066/2828] Add comment_visibility parameter for comment operation for jira module (#2556) * Add comment_visibility parameter for comment operation for jira module Co-authored-by: felixfontein * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein * addressed pep8 E711 * Added missing parameter. * params is not in use anymore. * It appears other modules are using options, where in documentation they use suboptions. Inconsistancy? 
* adjusted indentation * tweaked suboptions, fixed documentation * Added fragment * Update changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml Co-authored-by: Felix Fontein * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein Co-authored-by: felixfontein --- ...r-for-comment-operation-of-jira-module.yml | 2 + plugins/modules/web_infrastructure/jira.py | 39 +++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml diff --git a/changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml b/changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml new file mode 100644 index 0000000000..e31fad744a --- /dev/null +++ b/changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml @@ -0,0 +1,2 @@ +minor_changes: + - jira - add comment visibility parameter for comment operation (https://github.com/ansible-collections/community.general/pull/2556). diff --git a/plugins/modules/web_infrastructure/jira.py b/plugins/modules/web_infrastructure/jira.py index 6acf0c7f51..4c10974126 100644 --- a/plugins/modules/web_infrastructure/jira.py +++ b/plugins/modules/web_infrastructure/jira.py @@ -86,6 +86,25 @@ options: - The comment text to add. - Note that JIRA may not allow changing field values on specific transitions or states. + comment_visibility: + type: dict + description: + - Used to specify comment comment visibility. + - See U(https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-comments/#api-rest-api-2-issue-issueidorkey-comment-post) for details. + suboptions: + type: + description: + - Use type to specify which of the JIRA visibility restriction types will be used. 
+ type: str + required: true + choices: [group, role] + value: + description: + - Use value to specify value corresponding to the type of visibility restriction. For example name of the group or role. + type: str + required: true + version_added: '3.2.0' + status: type: str required: false @@ -223,6 +242,18 @@ EXAMPLES = r""" operation: comment comment: A comment added by Ansible +- name: Comment on issue with restricted visibility + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: comment + comment: A comment added by Ansible + comment_visibility: + type: role + value: Developers + # Assign an existing issue using edit - name: Assign an issue using free-form fields community.general.jira: @@ -385,6 +416,10 @@ class JIRA(StateModuleHelper): issuetype=dict(type='str', ), issue=dict(type='str', aliases=['ticket']), comment=dict(type='str', ), + comment_visibility=dict(type='dict', options=dict( + type=dict(type='str', choices=['group', 'role'], required=True), + value=dict(type='str', required=True) + )), status=dict(type='str', ), assignee=dict(type='str', ), fields=dict(default={}, type='dict'), @@ -445,6 +480,10 @@ class JIRA(StateModuleHelper): data = { 'body': self.vars.comment } + # if comment_visibility is specified restrict visibility + if self.vars.comment_visibility is not None: + data['visibility'] = self.vars.comment_visibility + url = self.vars.restbase + '/issue/' + self.vars.issue + '/comment' self.vars.meta = self.post(url, data) From 852e2405256b661a1a306a3e7656a60e7fba6803 Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Fri, 21 May 2021 22:15:22 +0530 Subject: [PATCH 0067/2828] Add missing author name (#2570) Signed-off-by: Abhijeet Kasurde --- plugins/inventory/stackpath_compute.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index fb879e869e..8e6b5bf953 100644 --- 
a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -10,6 +10,8 @@ DOCUMENTATION = ''' name: stackpath_compute short_description: StackPath Edge Computing inventory source version_added: 1.2.0 + author: + - UNKNOWN (@shayrybak) extends_documentation_fragment: - inventory_cache - constructed From 8f083d5d85ddf4f98aee8221bf4cb3c4a721e7d6 Mon Sep 17 00:00:00 2001 From: absynth76 <58172580+absynth76@users.noreply.github.com> Date: Sat, 22 May 2021 13:33:27 +0200 Subject: [PATCH 0068/2828] java_cert - fix incorrect certificate alias on pkcs12 import (#2560) * fix wrong certificate alias used when importing pkcs12, modify error output, stdout is more relevant than stderr * add changelog fragment * fix changelog fragment --- .../2560-java_cert-pkcs12-alias-bugfix.yml | 2 + plugins/modules/system/java_cert.py | 4 +- .../targets/java_cert/tasks/state_change.yml | 138 ++++++++++++------ 3 files changed, 98 insertions(+), 46 deletions(-) create mode 100644 changelogs/fragments/2560-java_cert-pkcs12-alias-bugfix.yml diff --git a/changelogs/fragments/2560-java_cert-pkcs12-alias-bugfix.yml b/changelogs/fragments/2560-java_cert-pkcs12-alias-bugfix.yml new file mode 100644 index 0000000000..471962d74f --- /dev/null +++ b/changelogs/fragments/2560-java_cert-pkcs12-alias-bugfix.yml @@ -0,0 +1,2 @@ +bugfixes: + - "java_cert - fix issue with incorrect alias used on PKCS#12 certificate import (https://github.com/ansible-collections/community.general/pull/2560)." 
diff --git a/plugins/modules/system/java_cert.py b/plugins/modules/system/java_cert.py index ad56358034..1c507f9277 100644 --- a/plugins/modules/system/java_cert.py +++ b/plugins/modules/system/java_cert.py @@ -278,7 +278,7 @@ def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, passwo (export_rc, export_stdout, export_err) = module.run_command(export_cmd, data=password, check_rc=False) if export_rc != 0: - module.fail_json(msg="Internal module failure, cannot extract public certificate from pkcs12, error: %s" % export_err, + module.fail_json(msg="Internal module failure, cannot extract public certificate from pkcs12, error: %s" % export_stdout, rc=export_rc) with open(dest, 'w') as f: @@ -498,7 +498,7 @@ def main(): if pkcs12_path: # Extracting certificate with openssl - _export_public_cert_from_pkcs12(module, executable, pkcs12_path, cert_alias, pkcs12_pass, new_certificate) + _export_public_cert_from_pkcs12(module, executable, pkcs12_path, pkcs12_alias, pkcs12_pass, new_certificate) elif path: # Extracting the X509 digest is a bit easier. 
Keytool will print the PEM diff --git a/tests/integration/targets/java_cert/tasks/state_change.yml b/tests/integration/targets/java_cert/tasks/state_change.yml index 3c37fc6727..8cee41106f 100644 --- a/tests/integration/targets/java_cert/tasks/state_change.yml +++ b/tests/integration/targets/java_cert/tasks/state_change.yml @@ -4,52 +4,11 @@ args: creates: "{{ test_key_path }}" -- name: Create the test keystore - java_keystore: - name: placeholder - dest: "{{ test_keystore2_path }}" - password: "{{ test_keystore2_password }}" - private_key: "{{ lookup('file', '{{ test_key_path }}') }}" - certificate: "{{ lookup('file', '{{ test_cert_path }}') }}" - - name: Generate the self signed cert we will use for testing command: openssl req -x509 -newkey rsa:4096 -keyout '{{ test_key2_path }}' -out '{{ test_cert2_path }}' -days 365 -nodes -subj '/CN=localhost' args: creates: "{{ test_key2_path }}" -- name: | - Import the newly created certificate. This is our main test. - If the java_cert has been updated properly, then this task will report changed each time - since the module will be comparing the hash of the certificate instead of validating that the alias - simply exists - java_cert: - cert_alias: test_cert - cert_path: "{{ test_cert2_path }}" - keystore_path: "{{ test_keystore2_path }}" - keystore_pass: "{{ test_keystore2_password }}" - state: present - register: result_x509_changed - -- name: Verify the x509 status has changed - assert: - that: - - result_x509_changed is changed - -- name: | - We also want to make sure that the status doesnt change if we import the same cert - java_cert: - cert_alias: test_cert - cert_path: "{{ test_cert2_path }}" - keystore_path: "{{ test_keystore2_path }}" - keystore_pass: "{{ test_keystore2_password }}" - state: present - register: result_x509_succeeded - -- name: Verify the x509 status is ok - assert: - that: - - result_x509_succeeded is succeeded - - name: Create the pkcs12 archive from the test x509 cert command: > openssl pkcs12 
@@ -70,6 +29,97 @@ -out {{ test_pkcs2_path }} -passout pass:"{{ test_keystore2_password }}" +- name: try to create the test keystore based on the just created pkcs12, keystore_create flag not enabled + java_cert: + cert_alias: test_pkcs12_cert + pkcs12_alias: test_pkcs12_cert + pkcs12_path: "{{ test_pkcs_path }}" + pkcs12_password: "{{ test_keystore2_password }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + ignore_errors: true + register: result_x509_changed + +- name: Verify the x509 status is failed + assert: + that: + - result_x509_changed is failed + +- name: Create the test keystore based on the just created pkcs12 + java_cert: + cert_alias: test_pkcs12_cert + pkcs12_alias: test_pkcs12_cert + pkcs12_path: "{{ test_pkcs_path }}" + pkcs12_password: "{{ test_keystore2_password }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + keystore_create: yes + +- name: try to import from pkcs12 a non existing alias + java_cert: + cert_alias: test_pkcs12_cert + pkcs12_alias: non_existing_alias + pkcs12_path: "{{ test_pkcs_path }}" + pkcs12_password: "{{ test_keystore2_password }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + keystore_create: yes + ignore_errors: yes + register: result_x509_changed + +- name: Verify the x509 status is failed + assert: + that: + - result_x509_changed is failed + +- name: import initial test certificate from file path + java_cert: + cert_alias: test_cert + cert_path: "{{ test_cert_path }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + keystore_create: yes + state: present + register: result_x509_changed + +- name: Verify the x509 status is changed + assert: + that: + - result_x509_changed is changed + +- name: | + Import the newly created certificate. This is our main test. 
+ If the java_cert has been updated properly, then this task will report changed each time + since the module will be comparing the hash of the certificate instead of validating that the alias + simply exists + java_cert: + cert_alias: test_cert + cert_path: "{{ test_cert2_path }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + state: present + register: result_x509_changed + +- name: Verify the x509 status is changed + assert: + that: + - result_x509_changed is changed + +- name: | + We also want to make sure that the status doesnt change if we import the same cert + java_cert: + cert_alias: test_cert + cert_path: "{{ test_cert2_path }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + state: present + register: result_x509_succeeded + +- name: Verify the x509 status is ok + assert: + that: + - result_x509_succeeded is succeeded + - name: > Ensure the original pkcs12 cert is in the keystore java_cert: @@ -83,7 +133,7 @@ - name: | Perform the same test, but we will now be testing the pkcs12 functionality - If we add a different pkcs12 cert with the same alias, we should have a chnaged result, NOT the same + If we add a different pkcs12 cert with the same alias, we should have a changed result, NOT the same java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert @@ -94,7 +144,7 @@ state: present register: result_pkcs12_changed -- name: Verify the pkcs12 status has changed +- name: Verify the pkcs12 status is changed assert: that: - result_pkcs12_changed is changed @@ -155,7 +205,7 @@ that: - result_x509_absent is changed -- name: Ensure we can remove the pkcs12 archive +- name: Ensure we can remove the certificate imported from pkcs12 archive java_cert: cert_alias: test_pkcs12_cert keystore_path: "{{ test_keystore2_path }}" From 3100c32a00d6a350274884aba06afe51a71d5253 Mon Sep 17 00:00:00 2001 From: abikouo <79859644+abikouo@users.noreply.github.com> Date: 
Sat, 22 May 2021 13:34:19 +0200 Subject: [PATCH 0069/2828] ovir4 inventory script (#2461) * update configparser * changelog * handle multiple python version * Update changelogs/fragments/2461-ovirt4-fix-configparser.yml Co-authored-by: Felix Fontein * Update ovirt4.py Co-authored-by: Felix Fontein --- .../2461-ovirt4-fix-configparser.yml | 3 +++ scripts/inventory/ovirt4.py | 27 +++++++++++++------ 2 files changed, 22 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2461-ovirt4-fix-configparser.yml diff --git a/changelogs/fragments/2461-ovirt4-fix-configparser.yml b/changelogs/fragments/2461-ovirt4-fix-configparser.yml new file mode 100644 index 0000000000..6e3845b21a --- /dev/null +++ b/changelogs/fragments/2461-ovirt4-fix-configparser.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - ovir4 inventory script - improve configparser creation to avoid crashes for options without values (https://github.com/ansible-collections/community.general/issues/674). diff --git a/scripts/inventory/ovirt4.py b/scripts/inventory/ovirt4.py index afff18dbdb..84b68a1258 100755 --- a/scripts/inventory/ovirt4.py +++ b/scripts/inventory/ovirt4.py @@ -56,6 +56,7 @@ import sys from collections import defaultdict from ansible.module_utils.six.moves import configparser +from ansible.module_utils.six import PY2 import json @@ -106,14 +107,24 @@ def create_connection(): config_path = os.environ.get('OVIRT_INI_PATH', default_path) # Create parser and add ovirt section if it doesn't exist: - config = configparser.SafeConfigParser( - defaults={ - 'ovirt_url': os.environ.get('OVIRT_URL'), - 'ovirt_username': os.environ.get('OVIRT_USERNAME'), - 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), - 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''), - } - ) + if PY2: + config = configparser.SafeConfigParser( + defaults={ + 'ovirt_url': os.environ.get('OVIRT_URL'), + 'ovirt_username': os.environ.get('OVIRT_USERNAME'), + 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), + 'ovirt_ca_file': 
os.environ.get('OVIRT_CAFILE', ''), + }, allow_no_value=True + ) + else: + config = configparser.ConfigParser( + defaults={ + 'ovirt_url': os.environ.get('OVIRT_URL'), + 'ovirt_username': os.environ.get('OVIRT_USERNAME'), + 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), + 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''), + }, allow_no_value=True + ) if not config.has_section('ovirt'): config.add_section('ovirt') config.read(config_path) From d7e55db99b331be30301b9d1f027be63504007be Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 22 May 2021 23:51:36 +1200 Subject: [PATCH 0070/2828] Massive adjustment in integration tests for changed and failed (#2577) * Replaced ".changed ==" with "is [not] changed". Same for failed * Mr Quote refused to go --- .../targets/archive/tasks/main.yml | 16 ++++++------- .../targets/flatpak/tasks/check_mode.yml | 24 +++++++++---------- .../targets/flatpak/tasks/main.yml | 4 ++-- .../targets/flatpak/tasks/test.yml | 16 ++++++------- .../flatpak_remote/tasks/check_mode.yml | 12 +++++----- .../targets/flatpak_remote/tasks/main.yml | 4 ++-- .../targets/flatpak_remote/tasks/test.yml | 10 ++++---- .../git_config/tasks/get_set_no_state.yml | 4 ++-- .../tasks/get_set_state_present.yml | 4 ++-- .../precedence_between_unset_and_value.yml | 2 +- .../git_config/tasks/unset_check_mode.yml | 2 +- .../git_config/tasks/unset_no_value.yml | 2 +- .../targets/git_config/tasks/unset_value.yml | 2 +- .../targets/github_issue/tasks/main.yml | 10 ++++---- .../targets/hwc_ecs_instance/tasks/main.yml | 8 +++---- .../targets/hwc_evs_disk/tasks/main.yml | 10 ++++---- .../targets/hwc_network_vpc/tasks/main.yml | 8 +++---- .../targets/hwc_smn_topic/tasks/main.yml | 8 +++---- .../targets/hwc_vpc_eip/tasks/main.yml | 8 +++---- .../hwc_vpc_peering_connect/tasks/main.yml | 8 +++---- .../targets/hwc_vpc_port/tasks/main.yml | 8 +++---- .../targets/hwc_vpc_private_ip/tasks/main.yml | 8 +++---- 
.../targets/hwc_vpc_route/tasks/main.yml | 8 +++---- .../hwc_vpc_security_group/tasks/main.yml | 8 +++---- .../tasks/main.yml | 8 +++---- .../targets/hwc_vpc_subnet/tasks/main.yml | 8 +++---- .../targets/influxdb_user/tasks/tests.yml | 22 ++++++++--------- .../targets/ipify_facts/tasks/main.yml | 6 ++--- .../targets/iso_create/tasks/main.yml | 16 ++++++------- .../targets/iso_extract/tasks/tests.yml | 6 ++--- .../targets/one_host/tasks/main.yml | 2 +- .../test_buildah_synchronize/tasks/main.yml | 4 ++-- .../test-add-children-elements-unicode.yml | 4 ++-- .../xml/tasks/test-add-children-elements.yml | 4 ++-- .../test-add-children-from-groupvars.yml | 4 ++-- .../tasks/test-add-children-insertafter.yml | 4 ++-- .../tasks/test-add-children-insertbefore.yml | 4 ++-- ...t-add-children-with-attributes-unicode.yml | 4 ++-- .../test-add-children-with-attributes.yml | 4 ++-- .../xml/tasks/test-add-element-implicitly.yml | 2 +- .../test-add-namespaced-children-elements.yml | 6 ++--- .../xml/tasks/test-children-elements-xml.yml | 4 ++-- .../targets/xml/tasks/test-count-unicode.yml | 2 +- .../targets/xml/tasks/test-count.yml | 2 +- .../test-get-element-content-unicode.yml | 4 ++-- .../xml/tasks/test-get-element-content.yml | 4 ++-- .../test-mutually-exclusive-attributes.yml | 4 ++-- .../xml/tasks/test-pretty-print-only.yml | 4 ++-- .../targets/xml/tasks/test-pretty-print.yml | 4 ++-- .../tasks/test-remove-attribute-nochange.yml | 4 ++-- .../xml/tasks/test-remove-attribute.yml | 4 ++-- .../tasks/test-remove-element-nochange.yml | 4 ++-- .../targets/xml/tasks/test-remove-element.yml | 4 ++-- ...t-remove-namespaced-attribute-nochange.yml | 4 ++-- .../test-remove-namespaced-attribute.yml | 4 ++-- ...est-remove-namespaced-element-nochange.yml | 4 ++-- .../tasks/test-remove-namespaced-element.yml | 4 ++-- .../test-set-attribute-value-unicode.yml | 4 ++-- .../xml/tasks/test-set-attribute-value.yml | 4 ++-- .../test-set-children-elements-level.yml | 8 +++---- 
.../test-set-children-elements-unicode.yml | 8 +++---- .../xml/tasks/test-set-children-elements.yml | 8 +++---- .../tasks/test-set-element-value-empty.yml | 4 ++-- .../tasks/test-set-element-value-unicode.yml | 6 ++--- .../xml/tasks/test-set-element-value.yml | 6 ++--- .../test-set-namespaced-attribute-value.yml | 4 ++-- .../test-set-namespaced-children-elements.yml | 6 ++--- .../test-set-namespaced-element-value.yml | 6 ++--- .../targets/xml/tasks/test-xmlstring.yml | 14 +++++------ 69 files changed, 220 insertions(+), 220 deletions(-) diff --git a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml index 19a1f6af0c..2267268715 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -174,7 +174,7 @@ - name: Test that the file modes were changed assert: that: - - "archive_02_gz_stat.changed == False " + - archive_02_gz_stat is not changed - "archive_02_gz_stat.stat.mode == '0600'" - "'archived' in archive_bz2_result_02" - "{{ archive_bz2_result_02['archived']| length}} == 3" @@ -199,7 +199,7 @@ - name: Test that the file modes were changed assert: that: - - "archive_02_zip_stat.changed == False" + - archive_02_zip_stat is not changed - "archive_02_zip_stat.stat.mode == '0600'" - "'archived' in archive_zip_result_02" - "{{ archive_zip_result_02['archived']| length}} == 3" @@ -224,7 +224,7 @@ - name: Test that the file modes were changed assert: that: - - "archive_02_bz2_stat.changed == False" + - archive_02_bz2_stat is not changed - "archive_02_bz2_stat.stat.mode == '0600'" - "'archived' in archive_bz2_result_02" - "{{ archive_bz2_result_02['archived']| length}} == 3" @@ -248,7 +248,7 @@ - name: Test that the file modes were changed assert: that: - - "archive_02_xz_stat.changed == False" + - archive_02_xz_stat is not changed - "archive_02_xz_stat.stat.mode == '0600'" - "'archived' in archive_xz_result_02" - "{{ archive_xz_result_02['archived']| length}} == 
3" @@ -294,7 +294,7 @@ - name: Assert that nonascii tests succeeded assert: that: - - "nonascii_result_0.changed == true" + - nonascii_result_0 is changed - "nonascii_stat0.stat.exists == true" - name: remove nonascii test @@ -315,7 +315,7 @@ - name: Assert that nonascii tests succeeded assert: that: - - "nonascii_result_1.changed == true" + - nonascii_result_1 is changed - "nonascii_stat_1.stat.exists == true" - name: remove nonascii test @@ -336,7 +336,7 @@ - name: Assert that nonascii tests succeeded assert: that: - - "nonascii_result_1.changed == true" + - nonascii_result_1 is changed - "nonascii_stat_1.stat.exists == true" - name: remove nonascii test @@ -357,7 +357,7 @@ - name: Assert that nonascii tests succeeded assert: that: - - "nonascii_result_2.changed == true" + - nonascii_result_2 is changed - "nonascii_stat_2.stat.exists == true" - name: remove nonascii test diff --git a/tests/integration/targets/flatpak/tasks/check_mode.yml b/tests/integration/targets/flatpak/tasks/check_mode.yml index 065f10dfa7..3186fd2830 100644 --- a/tests/integration/targets/flatpak/tasks/check_mode.yml +++ b/tests/integration/targets/flatpak/tasks/check_mode.yml @@ -13,7 +13,7 @@ - name: Verify addition of absent flatpak test result (check mode) assert: that: - - "addition_result.changed == true" + - addition_result is changed msg: "Adding an absent flatpak shall mark module execution as changed" - name: Test non-existent idempotency of addition of absent flatpak (check mode) @@ -27,7 +27,7 @@ - name: Verify non-existent idempotency of addition of absent flatpak test result (check mode) assert: that: - - "double_addition_result.changed == true" + - double_addition_result is changed msg: | Adding an absent flatpak a second time shall still mark module execution as changed in check mode @@ -44,7 +44,7 @@ - name: Verify removal of absent flatpak test result (check mode) assert: that: - - "removal_result.changed == false" + - removal_result is not changed msg: "Removing an absent 
flatpak shall mark module execution as not changed" # state=present with url on absent flatpak @@ -60,7 +60,7 @@ - name: Verify addition of absent flatpak with url test result (check mode) assert: that: - - "url_addition_result.changed == true" + - url_addition_result is changed msg: "Adding an absent flatpak from URL shall mark module execution as changed" - name: Test non-existent idempotency of addition of absent flatpak with url (check mode) @@ -76,7 +76,7 @@ result (check mode) assert: that: - - "double_url_addition_result.changed == true" + - double_url_addition_result is changed msg: | Adding an absent flatpak from URL a second time shall still mark module execution as changed in check mode @@ -93,7 +93,7 @@ - name: Verify removal of absent flatpak with url test result (check mode) assert: that: - - "url_removal_result.changed == false" + - url_removal_result is not changed msg: "Removing an absent flatpak shall mark module execution as not changed" @@ -112,7 +112,7 @@ - name: Verify addition test result of present flatpak (check mode) assert: that: - - "addition_present_result.changed == false" + - addition_present_result is not changed msg: "Adding an present flatpak shall mark module execution as not changed" # state=absent on present flatpak @@ -127,7 +127,7 @@ - name: Verify removal of present flatpak test result (check mode) assert: that: - - "removal_present_result.changed == true" + - removal_present_result is changed msg: "Removing a present flatpak shall mark module execution as changed" - name: Test non-existent idempotency of removal (check mode) @@ -140,7 +140,7 @@ - name: Verify non-existent idempotency of removal (check mode) assert: that: - - "double_removal_present_result.changed == true" + - double_removal_present_result is changed msg: | Removing a present flatpak a second time shall still mark module execution as changed in check mode @@ -158,7 +158,7 @@ - name: Verify addition with url of present flatpak test result (check mode) assert: 
that: - - "url_addition_present_result.changed == false" + - url_addition_present_result is not changed msg: "Adding a present flatpak from URL shall mark module execution as not changed" # state=absent with url on present flatpak @@ -173,7 +173,7 @@ - name: Verify removal with url of present flatpak test result (check mode) assert: that: - - "url_removal_present_result.changed == true" + - url_removal_present_result is changed msg: "Removing an absent flatpak shall mark module execution as not changed" - name: Test non-existent idempotency of removal with url of present flatpak (check mode) @@ -189,5 +189,5 @@ flatpak test result (check mode) assert: that: - - "double_url_removal_present_result.changed == true" + - double_url_removal_present_result is changed msg: Removing an absent flatpak a second time shall still mark module execution as changed diff --git a/tests/integration/targets/flatpak/tasks/main.yml b/tests/integration/targets/flatpak/tasks/main.yml index 45f9ecd501..a1d1bda8a4 100644 --- a/tests/integration/targets/flatpak/tasks/main.yml +++ b/tests/integration/targets/flatpak/tasks/main.yml @@ -40,8 +40,8 @@ - name: Verify executable override test result assert: that: - - "executable_override_result.failed == true" - - "executable_override_result.changed == false" + - executable_override_result is failed + - executable_override_result is not changed msg: "Specifying non-existing executable shall fail module execution" - import_tasks: check_mode.yml diff --git a/tests/integration/targets/flatpak/tasks/test.yml b/tests/integration/targets/flatpak/tasks/test.yml index 20d864a84d..1e7d888bb5 100644 --- a/tests/integration/targets/flatpak/tasks/test.yml +++ b/tests/integration/targets/flatpak/tasks/test.yml @@ -11,7 +11,7 @@ - name: Verify addition test result - {{ method }} assert: that: - - "addition_result.changed == true" + - addition_result is changed msg: "state=present shall add flatpak when absent" - name: Test idempotency of addition - {{ method }} 
@@ -25,7 +25,7 @@ - name: Verify idempotency of addition test result - {{ method }} assert: that: - - "double_addition_result.changed == false" + - double_addition_result is not changed msg: "state=present shall not do anything when flatpak is already present" # state=absent @@ -40,7 +40,7 @@ - name: Verify removal test result - {{ method }} assert: that: - - "removal_result.changed == true" + - removal_result is changed msg: "state=absent shall remove flatpak when present" - name: Test idempotency of removal - {{ method }} @@ -53,7 +53,7 @@ - name: Verify idempotency of removal test result - {{ method }} assert: that: - - "double_removal_result.changed == false" + - double_removal_result is not changed msg: "state=absent shall not do anything when flatpak is not present" # state=present with url as name @@ -69,7 +69,7 @@ - name: Verify addition test result - {{ method }} assert: that: - - "url_addition_result.changed == true" + - url_addition_result is changed msg: "state=present with url as name shall add flatpak when absent" - name: Test idempotency of addition with url - {{ method }} @@ -83,7 +83,7 @@ - name: Verify idempotency of addition with url test result - {{ method }} assert: that: - - "double_url_addition_result.changed == false" + - double_url_addition_result is not changed msg: "state=present with url as name shall not do anything when flatpak is already present" # state=absent with url as name @@ -98,7 +98,7 @@ - name: Verify removal test result - {{ method }} assert: that: - - "url_removal_result.changed == true" + - url_removal_result is changed msg: "state=absent with url as name shall remove flatpak when present" - name: Test idempotency of removal with url - {{ method }} @@ -111,5 +111,5 @@ - name: Verify idempotency of removal with url test result - {{ method }} assert: that: - - "double_url_removal_result.changed == false" + - double_url_removal_result is not changed msg: "state=absent with url as name shall not do anything when flatpak is not 
present" diff --git a/tests/integration/targets/flatpak_remote/tasks/check_mode.yml b/tests/integration/targets/flatpak_remote/tasks/check_mode.yml index 7ce89a8c15..1f4def86d9 100644 --- a/tests/integration/targets/flatpak_remote/tasks/check_mode.yml +++ b/tests/integration/targets/flatpak_remote/tasks/check_mode.yml @@ -13,7 +13,7 @@ - name: Verify addition of absent flatpak remote test result (check mode) assert: that: - - "addition_result.changed == true" + - addition_result is changed msg: "Adding an absent flatpak remote shall mark module execution as changed" - name: Test non-existent idempotency of addition of absent flatpak remote (check mode) @@ -29,7 +29,7 @@ test result (check mode) assert: that: - - "double_addition_result.changed == true" + - double_addition_result is changed msg: | Adding an absent flatpak remote a second time shall still mark module execution as changed in check mode @@ -46,7 +46,7 @@ - name: Verify removal of absent flatpak remote test result (check mode) assert: that: - - "removal_result.changed == false" + - removal_result is not changed msg: "Removing an absent flatpak remote shall mark module execution as not changed" @@ -65,7 +65,7 @@ - name: Verify addition of present flatpak remote test result (check mode) assert: that: - - "addition_result.changed == false" + - addition_result is not changed msg: "Adding a present flatpak remote shall mark module execution as not changed" # state=absent @@ -80,7 +80,7 @@ - name: Verify removal of present flatpak remote test result (check mode) assert: that: - - "removal_result.changed == true" + - removal_result is changed msg: "Removing a present flatpak remote shall mark module execution as changed" - name: Test non-existent idempotency of removal of present flatpak remote (check mode) @@ -95,7 +95,7 @@ test result (check mode) assert: that: - - "double_removal_result.changed == true" + - double_removal_result is changed msg: | Removing a present flatpak remote a second time shall still 
mark module execution as changed in check mode diff --git a/tests/integration/targets/flatpak_remote/tasks/main.yml b/tests/integration/targets/flatpak_remote/tasks/main.yml index aa2219e181..91fa7262df 100644 --- a/tests/integration/targets/flatpak_remote/tasks/main.yml +++ b/tests/integration/targets/flatpak_remote/tasks/main.yml @@ -40,8 +40,8 @@ - name: Verify executable override test result assert: that: - - "executable_override_result.failed == true" - - "executable_override_result.changed == false" + - executable_override_result is failed + - executable_override_result is not changed msg: "Specifying non-existing executable shall fail module execution" - import_tasks: check_mode.yml diff --git a/tests/integration/targets/flatpak_remote/tasks/test.yml b/tests/integration/targets/flatpak_remote/tasks/test.yml index 9570f623a1..66c43649b4 100644 --- a/tests/integration/targets/flatpak_remote/tasks/test.yml +++ b/tests/integration/targets/flatpak_remote/tasks/test.yml @@ -11,7 +11,7 @@ - name: Verify addition test result - {{ method }} assert: that: - - "addition_result.changed == true" + - addition_result is changed msg: "state=present shall add flatpak when absent" - name: Test idempotency of addition - {{ method }} @@ -25,7 +25,7 @@ - name: Verify idempotency of addition test result - {{ method }} assert: that: - - "double_addition_result.changed == false" + - double_addition_result is not changed msg: "state=present shall not do anything when flatpak is already present" - name: Test updating remote url does not do anything - {{ method }} @@ -39,7 +39,7 @@ - name: Verify updating remote url does not do anything - {{ method }} assert: that: - - "url_update_result.changed == false" + - url_update_result is not changed msg: "Trying to update the URL of an existing flatpak remote shall not do anything" @@ -55,7 +55,7 @@ - name: Verify removal test result - {{ method }} assert: that: - - "removal_result.changed == true" + - removal_result is changed msg: 
"state=absent shall remove flatpak when present" - name: Test idempotency of removal - {{ method }} @@ -68,5 +68,5 @@ - name: Verify idempotency of removal test result - {{ method }} assert: that: - - "double_removal_result.changed == false" + - double_removal_result is not changed msg: "state=absent shall not do anything when flatpak is not present" diff --git a/tests/integration/targets/git_config/tasks/get_set_no_state.yml b/tests/integration/targets/git_config/tasks/get_set_no_state.yml index 149a9b2d93..7e9714a75e 100644 --- a/tests/integration/targets/git_config/tasks/get_set_no_state.yml +++ b/tests/integration/targets/git_config/tasks/get_set_no_state.yml @@ -17,9 +17,9 @@ - name: assert set changed and value is correct assert: that: - - set_result.changed == true + - set_result is changed - set_result.diff.before == "\n" - set_result.diff.after == option_value + "\n" - - get_result.changed == false + - get_result is not changed - get_result.config_value == option_value ... diff --git a/tests/integration/targets/git_config/tasks/get_set_state_present.yml b/tests/integration/targets/git_config/tasks/get_set_state_present.yml index 59f3c9c0ee..52d986d633 100644 --- a/tests/integration/targets/git_config/tasks/get_set_state_present.yml +++ b/tests/integration/targets/git_config/tasks/get_set_state_present.yml @@ -19,9 +19,9 @@ - name: assert set changed and value is correct with state=present assert: that: - - set_result.changed == true + - set_result is changed - set_result.diff.before == "\n" - set_result.diff.after == option_value + "\n" - - get_result.changed == false + - get_result is not changed - get_result.config_value == option_value ... 
diff --git a/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml b/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml index 24ef292015..9eb4ca4034 100644 --- a/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml +++ b/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml @@ -18,7 +18,7 @@ - name: assert unset changed and deleted value assert: that: - - unset_result.changed == true + - unset_result is changed - unset_result.diff.before == option_value + "\n" - unset_result.diff.after == "\n" - get_result.config_value == '' diff --git a/tests/integration/targets/git_config/tasks/unset_check_mode.yml b/tests/integration/targets/git_config/tasks/unset_check_mode.yml index c8fe00c0b7..43b9905373 100644 --- a/tests/integration/targets/git_config/tasks/unset_check_mode.yml +++ b/tests/integration/targets/git_config/tasks/unset_check_mode.yml @@ -18,7 +18,7 @@ - name: assert unset changed but dit not delete value assert: that: - - unset_result.changed == true + - unset_result is changed - unset_result.diff.before == option_value + "\n" - unset_result.diff.after == "\n" - get_result.config_value == option_value diff --git a/tests/integration/targets/git_config/tasks/unset_no_value.yml b/tests/integration/targets/git_config/tasks/unset_no_value.yml index 71568e3aa4..5fb6b6bcb6 100644 --- a/tests/integration/targets/git_config/tasks/unset_no_value.yml +++ b/tests/integration/targets/git_config/tasks/unset_no_value.yml @@ -17,7 +17,7 @@ - name: assert unsetting didn't change assert: that: - - unset_result.changed == false + - unset_result is not changed - unset_result.msg == 'no setting to unset' - get_result.config_value == '' ... 
diff --git a/tests/integration/targets/git_config/tasks/unset_value.yml b/tests/integration/targets/git_config/tasks/unset_value.yml index a2308156aa..6dda37736e 100644 --- a/tests/integration/targets/git_config/tasks/unset_value.yml +++ b/tests/integration/targets/git_config/tasks/unset_value.yml @@ -17,7 +17,7 @@ - name: assert unset changed and deleted value assert: that: - - unset_result.changed == true + - unset_result is changed - unset_result.diff.before == option_value + "\n" - unset_result.diff.after == "\n" - get_result.config_value == '' diff --git a/tests/integration/targets/github_issue/tasks/main.yml b/tests/integration/targets/github_issue/tasks/main.yml index 24266128ae..7731a7a955 100644 --- a/tests/integration/targets/github_issue/tasks/main.yml +++ b/tests/integration/targets/github_issue/tasks/main.yml @@ -18,8 +18,8 @@ - assert: that: - - "{{ get_status_0002.changed == True }}" - - "{{ get_status_0002.issue_status == 'closed' }}" + - get_status_0002 is changed + - get_status_0002.issue_status == 'closed' - name: Check if GitHub issue is closed or not github_issue: @@ -32,6 +32,6 @@ - assert: that: - - "{{ get_status_0003.changed == False }}" - - "{{ get_status_0003.failed == True }}" - - "{{ 'Failed' in get_status_0003.msg }}" + - get_status_0003 is not changed + - get_status_0003 is failed + - "'Failed' in get_status_0003.msg" diff --git a/tests/integration/targets/hwc_ecs_instance/tasks/main.yml b/tests/integration/targets/hwc_ecs_instance/tasks/main.yml index 8c8ea2eb3d..4d36c11286 100644 --- a/tests/integration/targets/hwc_ecs_instance/tasks/main.yml +++ b/tests/integration/targets/hwc_ecs_instance/tasks/main.yml @@ -167,8 +167,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a instance (check mode) hwc_ecs_instance: @@ -277,8 +277,8 @@ - name: assert changed is 
false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a disk diff --git a/tests/integration/targets/hwc_evs_disk/tasks/main.yml b/tests/integration/targets/hwc_evs_disk/tasks/main.yml index 79e67d0dc9..e2380450cd 100644 --- a/tests/integration/targets/hwc_evs_disk/tasks/main.yml +++ b/tests/integration/targets/hwc_evs_disk/tasks/main.yml @@ -50,8 +50,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a disk (check mode) hwc_evs_disk: @@ -92,7 +92,7 @@ - name: assert changed is false assert: that: - - result.changed == false + - result is not changed # ---------------------------------------------------------------------------- - name: delete a disk that does not exist hwc_evs_disk: @@ -105,5 +105,5 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed diff --git a/tests/integration/targets/hwc_network_vpc/tasks/main.yml b/tests/integration/targets/hwc_network_vpc/tasks/main.yml index 5c01cf7ad8..e3b979d0b5 100644 --- a/tests/integration/targets/hwc_network_vpc/tasks/main.yml +++ b/tests/integration/targets/hwc_network_vpc/tasks/main.yml @@ -62,8 +62,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a vpc hwc_network_vpc: @@ -97,5 +97,5 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed diff --git a/tests/integration/targets/hwc_smn_topic/tasks/main.yml 
b/tests/integration/targets/hwc_smn_topic/tasks/main.yml index 180f8fad3e..a9879aea54 100644 --- a/tests/integration/targets/hwc_smn_topic/tasks/main.yml +++ b/tests/integration/targets/hwc_smn_topic/tasks/main.yml @@ -44,8 +44,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a smn topic hwc_smn_topic: @@ -77,5 +77,5 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed diff --git a/tests/integration/targets/hwc_vpc_eip/tasks/main.yml b/tests/integration/targets/hwc_vpc_eip/tasks/main.yml index 57de832418..bdf5d763a7 100644 --- a/tests/integration/targets/hwc_vpc_eip/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_eip/tasks/main.yml @@ -96,8 +96,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a eip (check mode) hwc_vpc_eip: @@ -159,8 +159,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a port diff --git a/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml b/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml index 2316a4b25c..cb6a15f750 100644 --- a/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml @@ -78,8 +78,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed 
#---------------------------------------------------------- - name: delete a peering connect (check mode) hwc_vpc_peering_connect: @@ -133,8 +133,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a vpc diff --git a/tests/integration/targets/hwc_vpc_port/tasks/main.yml b/tests/integration/targets/hwc_vpc_port/tasks/main.yml index b7f28360c1..00f5ae8b2e 100644 --- a/tests/integration/targets/hwc_vpc_port/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_port/tasks/main.yml @@ -69,8 +69,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a port (check mode) hwc_vpc_port: @@ -116,8 +116,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a subnet diff --git a/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml b/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml index efd6765c80..5531d575f8 100644 --- a/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml @@ -70,8 +70,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a private ip (check mode) hwc_vpc_private_ip: @@ -117,8 +117,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed 
#--------------------------------------------------------- # Post-test teardown - name: delete a subnet diff --git a/tests/integration/targets/hwc_vpc_route/tasks/main.yml b/tests/integration/targets/hwc_vpc_route/tasks/main.yml index b281000b7a..9c9c37e8c0 100644 --- a/tests/integration/targets/hwc_vpc_route/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_route/tasks/main.yml @@ -81,8 +81,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a route (check mode) hwc_vpc_route: @@ -127,8 +127,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a peering connect diff --git a/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml b/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml index 6b21f8b9a4..9f853ca8e7 100644 --- a/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml @@ -51,8 +51,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a security group (check mode) hwc_vpc_security_group: @@ -83,5 +83,5 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed diff --git a/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml b/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml index 2d774101bf..04213e7162 100644 --- a/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml +++ 
b/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml @@ -85,8 +85,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a security group rule (check mode) hwc_vpc_security_group_rule: @@ -151,8 +151,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a security group diff --git a/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml b/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml index 3b3cf65478..c16ff85241 100644 --- a/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml @@ -77,8 +77,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a subnet (check mode) hwc_vpc_subnet: @@ -136,8 +136,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a vpc diff --git a/tests/integration/targets/influxdb_user/tasks/tests.yml b/tests/integration/targets/influxdb_user/tasks/tests.yml index b980e29094..ad3396642b 100644 --- a/tests/integration/targets/influxdb_user/tasks/tests.yml +++ b/tests/integration/targets/influxdb_user/tasks/tests.yml @@ -13,7 +13,7 @@ - name: Check that admin user adding succeeds with a change assert: that: - - add_admin_user.changed == true + - add_admin_user is changed - name: Test add admin user block: @@ -24,7 +24,7 @@ - name: 
Check that admin user adding succeeds with a change assert: that: - - add_admin_user.changed == true + - add_admin_user is changed - name: Test add admin user idempotence block: @@ -35,7 +35,7 @@ - name: Check that admin user adding succeeds without a change assert: that: - - add_admin_user.changed == false + - add_admin_user is not changed - name: Enable authentication and restart service block: @@ -58,7 +58,7 @@ - name: Check that adding user with enabled authentication succeeds with a change assert: that: - - add_user_with_auth_enabled.changed == true + - add_user_with_auth_enabled is changed - name: Test add user when authentication enabled block: @@ -69,7 +69,7 @@ - name: Check that adding user with enabled authentication succeeds with a change assert: that: - - add_user_with_auth_enabled.changed == true + - add_user_with_auth_enabled is changed - name: Test add user when authentication enabled idempotence block: @@ -80,7 +80,7 @@ - name: Check that adding same user succeeds without a change assert: that: - - same_user.changed == false + - same_user is not changed - name: Test change user password in check mode block: @@ -92,7 +92,7 @@ - name: Check that password changing succeeds with a change assert: that: - - change_password.changed == true + - change_password is changed - name: Test change user password block: @@ -103,7 +103,7 @@ - name: Check that password changing succeeds with a change assert: that: - - change_password.changed == true + - change_password is changed - name: Test remove user in check mode block: @@ -115,7 +115,7 @@ - name: Check that removing user succeeds with a change assert: that: - - remove_user.changed == true + - remove_user is changed - name: Test remove user block: @@ -126,7 +126,7 @@ - name: Check that removing user succeeds with a change assert: that: - - remove_user.changed == true + - remove_user is changed - name: Test remove user idempotence block: @@ -137,4 +137,4 @@ - name: Check that removing user succeeds without a 
change assert: that: - - remove_user.changed == false + - remove_user is not changed diff --git a/tests/integration/targets/ipify_facts/tasks/main.yml b/tests/integration/targets/ipify_facts/tasks/main.yml index 4fbd5ab696..7b620ff9ec 100644 --- a/tests/integration/targets/ipify_facts/tasks/main.yml +++ b/tests/integration/targets/ipify_facts/tasks/main.yml @@ -41,6 +41,6 @@ - name: check if task was successful assert: that: - - "{{ external_ip.changed == false }}" - - "{{ external_ip['ansible_facts'] is defined }}" - - "{{ external_ip['ansible_facts']['ipify_public_ip'] is defined }}" + - external_ip is not changed + - external_ip.ansible_facts is defined + - external_ip.ansible_facts.ipify_public_ip is defined diff --git a/tests/integration/targets/iso_create/tasks/main.yml b/tests/integration/targets/iso_create/tasks/main.yml index de46276743..4a0df3b818 100644 --- a/tests/integration/targets/iso_create/tasks/main.yml +++ b/tests/integration/targets/iso_create/tasks/main.yml @@ -35,7 +35,7 @@ - debug: var=iso_file - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == False - name: Create iso file with a specified file @@ -54,7 +54,7 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True - name: Create iso file with a specified file and folder @@ -74,10 +74,10 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True -- name: Create iso file with volume identification string +- name: Create iso file with volume identification string iso_create: src_files: - "{{ role_path }}/files/test1.cfg" @@ -93,7 +93,7 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True - name: Create iso file with Rock Ridge extention @@ -112,7 +112,7 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True - name: Create iso file with Joliet 
extention @@ -131,7 +131,7 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True - name: Create iso file with UDF enabled @@ -150,5 +150,5 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True diff --git a/tests/integration/targets/iso_extract/tasks/tests.yml b/tests/integration/targets/iso_extract/tasks/tests.yml index f9182ba6ae..18f22422ce 100644 --- a/tests/integration/targets/iso_extract/tasks/tests.yml +++ b/tests/integration/targets/iso_extract/tasks/tests.yml @@ -28,7 +28,7 @@ - assert: that: - - iso_extract_test0 is changed == true + - iso_extract_test0 is changed - name: Extract the iso again iso_extract: @@ -42,11 +42,11 @@ - name: Test iso_extract_test0_again (normal mode) assert: that: - - iso_extract_test0_again is changed == false + - iso_extract_test0_again is not changed when: not in_check_mode - name: Test iso_extract_test0_again (check-mode) assert: that: - - iso_extract_test0_again is changed == true + - iso_extract_test0_again is changed when: in_check_mode diff --git a/tests/integration/targets/one_host/tasks/main.yml b/tests/integration/targets/one_host/tasks/main.yml index a3cea768af..7d38c2a890 100644 --- a/tests/integration/targets/one_host/tasks/main.yml +++ b/tests/integration/targets/one_host/tasks/main.yml @@ -177,7 +177,7 @@ - name: "assert test_{{test_number}} worked" assert: that: - - result.changed == false + - result is not changed # HOST DISABLEMENT diff --git a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml b/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml index 92fd0830c4..a80e218921 100644 --- a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml +++ b/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml @@ -40,7 +40,7 @@ - assert: that: - "'changed' in 
sync_result" - - "sync_result.changed == true" + - sync_result is changed - "'cmd' in sync_result" - "'rsync' in sync_result.cmd" - "'msg' in sync_result" @@ -63,7 +63,7 @@ - assert: that: - - "sync_result.changed == False" + - sync_result is not changed - name: cleanup old files file: diff --git a/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml b/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml index 8ad91501c3..d89c29ae27 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml @@ -24,6 +24,6 @@ - name: Test expected result assert: that: - - add_children_elements_unicode.changed == true - - comparison.changed == false # identical + - add_children_elements_unicode is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-children-elements-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-elements.yml b/tests/integration/targets/xml/tasks/test-add-children-elements.yml index 8d9b06866d..3c439c7ac2 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-elements.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-elements.yml @@ -24,6 +24,6 @@ - name: Test expected result assert: that: - - add_children_elements.changed == true - - comparison.changed == false # identical + - add_children_elements is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml b/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml index e062de8d14..818fdf09b9 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml +++ 
b/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - add_children_from_groupvars.changed == true - - comparison.changed == false # identical + - add_children_from_groupvars is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-children-from-groupvars.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml b/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml index 2d42e2d54e..479052ebdd 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml @@ -28,5 +28,5 @@ - name: Test expected result assert: that: - - add_children_insertafter.changed == true - - comparison.changed == false # identical + - add_children_insertafter is changed + - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml b/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml index 8550f12cf7..9839d7cc91 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml @@ -28,5 +28,5 @@ - name: Test expected result assert: that: - - add_children_insertbefore.changed == true - - comparison.changed == false # identical + - add_children_insertbefore is changed + - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml b/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml index d4a2329f69..585157c970 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml @@ -26,6 +26,6 @@ - name: Test 
expected result assert: that: - - add_children_with_attributes_unicode.changed == true - - comparison.changed == false # identical + - add_children_with_attributes_unicode is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-children-with-attributes-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml b/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml index 91e92637fc..c3704801d9 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml @@ -29,7 +29,7 @@ - name: Test expected result assert: that: - - add_children_with_attributes.changed == true - - comparison.changed == false # identical + - add_children_with_attributes is changed + - comparison is not changed # identical when: lxml_predictable_attribute_order #command: diff -u {{ role_path }}/results/test-add-children-with-attributes.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml b/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml index db674ba4fc..6166cd46b9 100644 --- a/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml +++ b/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml @@ -108,7 +108,7 @@ - name: Test expected result assert: that: - - comparison.changed == false # identical + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-element-implicitly.yml /tmp/ansible-xml-beers-implicit.xml diff --git a/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml b/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml index 25eca47f5b..2cac73e65c 100644 --- a/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml +++ 
b/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml @@ -21,12 +21,12 @@ src: results/test-add-namespaced-children-elements.xml dest: /tmp/ansible-xml-namespaced-beers.xml check_mode: yes - diff: yes + diff: yes register: comparison - name: Test expected result assert: that: - - add_namespaced_children_elements.changed == true - - comparison.changed == false # identical + - add_namespaced_children_elements is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-namespaced-children-elements.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-children-elements-xml.yml b/tests/integration/targets/xml/tasks/test-children-elements-xml.yml index e63100c47c..6b50d819c3 100644 --- a/tests/integration/targets/xml/tasks/test-children-elements-xml.yml +++ b/tests/integration/targets/xml/tasks/test-children-elements-xml.yml @@ -25,6 +25,6 @@ - name: Test expected result assert: that: - - children_elements.changed == true - - comparison.changed == false # identical + - children_elements is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-count-unicode.yml b/tests/integration/targets/xml/tasks/test-count-unicode.yml index 47a806bf98..a9a462b5da 100644 --- a/tests/integration/targets/xml/tasks/test-count-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-count-unicode.yml @@ -15,5 +15,5 @@ - name: Test expected result assert: that: - - beers.changed == false + - beers is not changed - beers.count == 2 diff --git a/tests/integration/targets/xml/tasks/test-count.yml b/tests/integration/targets/xml/tasks/test-count.yml index cbc97e323c..b8a21870f7 100644 --- a/tests/integration/targets/xml/tasks/test-count.yml +++ b/tests/integration/targets/xml/tasks/test-count.yml @@ -15,5 +15,5 @@ - name: Test expected 
result assert: that: - - beers.changed == false + - beers is not changed - beers.count == 3 diff --git a/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml b/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml index 73ae96674f..718f12d640 100644 --- a/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml @@ -15,7 +15,7 @@ - name: Test expected result assert: that: - - get_element_attribute.changed == false + - get_element_attribute is not changed - get_element_attribute.matches[0]['rating'] is defined and get_element_attribute.matches[0]['rating']['subjective'] == 'да' - name: Get element text @@ -28,5 +28,5 @@ - name: Test expected result assert: that: - - get_element_text.changed == false + - get_element_text is not changed - get_element_text.matches[0]['rating'] == 'десять' diff --git a/tests/integration/targets/xml/tasks/test-get-element-content.yml b/tests/integration/targets/xml/tasks/test-get-element-content.yml index 4a40b42dcf..d38aa70d95 100644 --- a/tests/integration/targets/xml/tasks/test-get-element-content.yml +++ b/tests/integration/targets/xml/tasks/test-get-element-content.yml @@ -15,7 +15,7 @@ - name: Test expected result assert: that: - - get_element_attribute.changed == false + - get_element_attribute is not changed - get_element_attribute.matches[0]['rating'] is defined - get_element_attribute.matches[0]['rating']['subjective'] == 'true' @@ -43,5 +43,5 @@ - name: Test expected result assert: that: - - get_element_text.changed == false + - get_element_text is not changed - get_element_text.matches[0]['rating'] == '10' diff --git a/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml b/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml index 3f24b0ac84..07a71f9153 100644 --- a/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml +++ 
b/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml @@ -18,5 +18,5 @@ - name: Test expected result assert: that: - - module_output.changed == false - - module_output.failed == true + - module_output is not changed + - module_output is failed diff --git a/tests/integration/targets/xml/tasks/test-pretty-print-only.yml b/tests/integration/targets/xml/tasks/test-pretty-print-only.yml index 7c0f7d5fd6..16fcf629c5 100644 --- a/tests/integration/targets/xml/tasks/test-pretty-print-only.yml +++ b/tests/integration/targets/xml/tasks/test-pretty-print-only.yml @@ -24,6 +24,6 @@ - name: Test expected result assert: that: - - pretty_print_only.changed == true - - comparison.changed == false # identical + - pretty_print_only is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-pretty-print.yml b/tests/integration/targets/xml/tasks/test-pretty-print.yml index 88b618b25d..fd47ff3d82 100644 --- a/tests/integration/targets/xml/tasks/test-pretty-print.yml +++ b/tests/integration/targets/xml/tasks/test-pretty-print.yml @@ -25,6 +25,6 @@ - name: Test expected result assert: that: - - pretty_print.changed == true - - comparison.changed == false # identical + - pretty_print is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml index d09dee405c..fbd73237f1 100644 --- a/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - remove_attribute.changed == false - - comparison.changed == false # identical + - 
remove_attribute is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-attribute.yml b/tests/integration/targets/xml/tasks/test-remove-attribute.yml index 9aa395e666..52b5214213 100644 --- a/tests/integration/targets/xml/tasks/test-remove-attribute.yml +++ b/tests/integration/targets/xml/tasks/test-remove-attribute.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - remove_attribute.changed == true - - comparison.changed == false # identical + - remove_attribute is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml index 2debc80d51..e548bfabf8 100644 --- a/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - remove_element.changed == false - - comparison.changed == false # identical + - remove_element is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-element.yml b/tests/integration/targets/xml/tasks/test-remove-element.yml index f2e20ea220..092ca3e033 100644 --- a/tests/integration/targets/xml/tasks/test-remove-element.yml +++ b/tests/integration/targets/xml/tasks/test-remove-element.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - remove_element.changed == true - - comparison.changed == false # identical + - remove_element is changed + - comparison is not changed # identical #command: diff -u {{ role_path 
}}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml index 291536d3bf..19c14dec8d 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml @@ -28,6 +28,6 @@ - name: Test expected result assert: that: - - remove_namespaced_attribute.changed == false - - comparison.changed == false # identical + - remove_namespaced_attribute is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml index a7ccdac4e3..9e54911ba5 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml @@ -28,6 +28,6 @@ - name: Test expected result assert: that: - - remove_namespaced_attribute.changed == true - - comparison.changed == false # identical + - remove_namespaced_attribute is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml index b1938e45b7..b96f2a7819 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml @@ -28,6 +28,6 @@ - name: Test expected result assert: that: - - 
remove_namespaced_element.changed == false - - comparison.changed == false # identical + - remove_namespaced_element is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml index be78af6803..660baa9840 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml @@ -28,6 +28,6 @@ - name: Test expected result assert: that: - - remove_namespaced_element.changed == true - - comparison.changed == false # identical + - remove_namespaced_element is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml b/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml index dabf72a1b7..b72d502f12 100644 --- a/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml @@ -24,6 +24,6 @@ - name: Test expected result assert: that: - - set_attribute_value_unicode.changed == true - - comparison.changed == false # identical + - set_attribute_value_unicode is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-attribute-value-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-attribute-value.yml b/tests/integration/targets/xml/tasks/test-set-attribute-value.yml index 2aa39fe22f..6a2aa6c511 100644 --- a/tests/integration/targets/xml/tasks/test-set-attribute-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-attribute-value.yml @@ -24,6 +24,6 
@@ - name: Test expected result assert: that: - - set_attribute_value.changed == true - - comparison.changed == false # identical + - set_attribute_value is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-attribute-value.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml b/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml index 3e2c0adb6f..7fa926e879 100644 --- a/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml +++ b/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml @@ -47,8 +47,8 @@ - name: Test expected result assert: that: - - set_children_elements_level.changed == true - - comparison.changed == false # identical + - set_children_elements_level is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-children-elements-level.xml /tmp/ansible-xml-beers.xml @@ -70,5 +70,5 @@ - name: Test expected result assert: that: - - set_children_again.changed == false - - comparison.changed == false # identical + - set_children_again is not changed + - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml b/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml index 240b894ac7..3cc25cd999 100644 --- a/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml @@ -25,8 +25,8 @@ - name: Test expected result assert: that: - - set_children_elements_unicode.changed == true - - comparison.changed == false # identical + - set_children_elements_unicode is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml @@ -41,6 +41,6 @@ - name: Test expected result assert: 
that: - - set_children_again.changed == false - - comparison.changed == false # identical + - set_children_again is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements.yml b/tests/integration/targets/xml/tasks/test-set-children-elements.yml index 7b0f3247ad..7c305ead74 100644 --- a/tests/integration/targets/xml/tasks/test-set-children-elements.yml +++ b/tests/integration/targets/xml/tasks/test-set-children-elements.yml @@ -25,8 +25,8 @@ - name: Test expected result assert: that: - - set_children_elements.changed == true - - comparison.changed == false # identical + - set_children_elements is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml @@ -48,6 +48,6 @@ - name: Test expected result assert: that: - - set_children_again.changed == false - - comparison.changed == false # identical + - set_children_again is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml b/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml index 5814803cb7..4575d5e75f 100644 --- a/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml +++ b/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - set_element_value_empty.changed == true - - comparison.changed == false # identical + - set_element_value_empty is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-element-value-empty.xml /tmp/ansible-xml-beers.xml diff --git 
a/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml b/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml index c3a40b7d93..139087fcd9 100644 --- a/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml @@ -37,7 +37,7 @@ - name: Test expected result assert: that: - - set_element_first_run.changed == true - - set_element_second_run.changed == false - - comparison.changed == false # identical + - set_element_first_run is changed + - set_element_second_run is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-element-value-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-element-value.yml b/tests/integration/targets/xml/tasks/test-set-element-value.yml index dbd070f139..2f845e949b 100644 --- a/tests/integration/targets/xml/tasks/test-set-element-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-element-value.yml @@ -37,7 +37,7 @@ - name: Test expected result assert: that: - - set_element_first_run.changed == true - - set_element_second_run.changed == false - - comparison.changed == false # identical + - set_element_first_run is changed + - set_element_second_run is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-element-value.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml b/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml index e0086efe3a..2ba83a8330 100644 --- a/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml @@ -29,6 +29,6 @@ - name: Test expected result assert: that: - - set_namespaced_attribute_value.changed == true - - comparison.changed == false # identical + 
- set_namespaced_attribute_value is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-namespaced-attribute-value.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml b/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml index 8e66e70eeb..6204c8c74d 100644 --- a/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml +++ b/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml @@ -52,6 +52,6 @@ - name: Test expected result assert: that: - - set_children_again.changed == false # idempotency - - set_namespaced_attribute_value.changed == true - - comparison.changed == false # identical + - set_children_again is not changed # idempotency + - set_namespaced_attribute_value is changed + - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml b/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml index f77d7537e9..cf6a8a7eb0 100644 --- a/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml @@ -41,6 +41,6 @@ - name: Test expected result assert: that: - - set_element_first_run.changed == true - - set_element_second_run.changed == false - - comparison.changed == false # identical + - set_element_first_run is changed + - set_element_second_run is not changed + - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-xmlstring.yml b/tests/integration/targets/xml/tasks/test-xmlstring.yml index 4620d984fa..82781fa94d 100644 --- a/tests/integration/targets/xml/tasks/test-xmlstring.yml +++ b/tests/integration/targets/xml/tasks/test-xmlstring.yml @@ -25,8 +25,8 @@ - name: Test expected result assert: that: - - xmlresponse.changed == false - - 
comparison.changed == false # identical + - xmlresponse is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml @@ -49,8 +49,8 @@ - name: Test expected result assert: that: - - xmlresponse.changed == true - - comparison.changed == false # identical + - xmlresponse is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml @@ -63,7 +63,7 @@ add_children: - beer: Old Rasputin register: xmlresponse_modification - + - name: Compare to expected result copy: content: '{{ xmlresponse_modification.xmlstring }}' @@ -76,6 +76,6 @@ - name: Test expected result assert: that: - - xmlresponse_modification.changed == true - - comparison.changed == false # identical + - xmlresponse_modification is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml From cc293f90a245aad5c2eae4b1c28b49101563b134 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 23 May 2021 08:20:37 +1200 Subject: [PATCH 0071/2828] ini_file - opening file as utf-8-sig (#2578) * opening file as utf-8-sig * added changelog fragment * using io.open() * Update tests/integration/targets/ini_file/tasks/main.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2578-ini-file-utf8-bom.yml | 2 ++ plugins/modules/files/ini_file.py | 3 +- .../targets/ini_file/tasks/main.yml | 34 +++++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2578-ini-file-utf8-bom.yml diff --git a/changelogs/fragments/2578-ini-file-utf8-bom.yml b/changelogs/fragments/2578-ini-file-utf8-bom.yml new file mode 100644 index 0000000000..00640c0b23 --- /dev/null +++ b/changelogs/fragments/2578-ini-file-utf8-bom.yml @@ -0,0 +1,2 @@ +minor_changes: + - ini_file 
- opening file with encoding ``utf-8-sig`` (https://github.com/ansible-collections/community.general/issues/2189). diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py index ac4c6d0cf3..ea857cefa9 100644 --- a/plugins/modules/files/ini_file.py +++ b/plugins/modules/files/ini_file.py @@ -104,6 +104,7 @@ EXAMPLES = r''' backup: yes ''' +import io import os import re import tempfile @@ -141,7 +142,7 @@ def do_ini(module, filename, section=None, option=None, value=None, os.makedirs(destpath) ini_lines = [] else: - with open(filename, 'r') as ini_file: + with io.open(filename, 'r', encoding="utf-8-sig") as ini_file: ini_lines = ini_file.readlines() if module._diff: diff --git a/tests/integration/targets/ini_file/tasks/main.yml b/tests/integration/targets/ini_file/tasks/main.yml index 2e84147c72..be5835669b 100644 --- a/tests/integration/targets/ini_file/tasks/main.yml +++ b/tests/integration/targets/ini_file/tasks/main.yml @@ -480,3 +480,37 @@ assert: that: - content15 == expected15 + +- name: Create starting ini file + copy: + # The content below is the following text file with BOM: + # [section1] + # var1=aaa + # var2=bbb + # [section2] + # var3=ccc + content: !!binary | + 77u/W3NlY3Rpb24xXQp2YXIxPWFhYQp2YXIyPWJiYgpbc2VjdGlvbjJdCnZhcjM9Y2NjCg== + dest: "{{ output_file }}" +- name: Test ini breakage + ini_file: + path: "{{ output_file }}" + section: section1 + option: var4 + value: 0 + +- name: read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: set expected content and get current ini file content + set_fact: + expected16: "[section1]\nvar1=aaa\nvar2=bbb\nvar4 = 0\n[section2]\nvar3=ccc\n" + content16: "{{ output_content.content | b64decode }}" +- debug: + var: content16 +- name: Verify content of ini file is as expected + assert: + that: + - content16 == expected16 From 593d622438dd2a7aada0ccb762446df4ebb1a6ac Mon Sep 17 00:00:00 2001 From: Tong He 
<68936428+unnecessary-username@users.noreply.github.com> Date: Tue, 25 May 2021 03:59:52 +0800 Subject: [PATCH 0072/2828] rhsm_release: Fix the issue that rhsm_release module considers 8, 7Client and 7Workstation as invalid releases (#2571) * rhsm_release: Fix the issue that rhsm_release module considers 8, 7Client and 7Workstation as invalid releases. * Fix the unit test error: The new release_matcher could pass a wider range of patterns but that would not cause extra issue to the whole module. * Submit the changelog fragment. * Update changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml Co-authored-by: Amin Vakil Co-authored-by: Amin Vakil --- .../fragments/2571-rhsm_release-fix-release_matcher.yaml | 2 ++ plugins/modules/packaging/os/rhsm_release.py | 6 +++--- .../unit/plugins/modules/packaging/os/test_rhsm_release.py | 5 ++--- 3 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml diff --git a/changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml b/changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml new file mode 100644 index 0000000000..764743303f --- /dev/null +++ b/changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml @@ -0,0 +1,2 @@ +bugfixes: + - rhsm_release - fix the issue that module considers 8, 7Client and 7Workstation as invalid releases (https://github.com/ansible-collections/community.general/pull/2571). diff --git a/plugins/modules/packaging/os/rhsm_release.py b/plugins/modules/packaging/os/rhsm_release.py index 22b280f1fc..a4d8f71197 100644 --- a/plugins/modules/packaging/os/rhsm_release.py +++ b/plugins/modules/packaging/os/rhsm_release.py @@ -56,9 +56,9 @@ from ansible.module_utils.basic import AnsibleModule import re -# Matches release-like values such as 7.2, 6.10, 10Server, -# but rejects unlikely values, like 100Server, 100.0, 1.100, etc. 
-release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server)\b') +# Matches release-like values such as 7.2, 5.10, 6Server, 8 +# but rejects unlikely values, like 100Server, 1.100, 7server etc. +release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server|Client|Workstation|)\b') def _sm_release(module, *args): diff --git a/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py b/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py index a75ec69448..98db6e2840 100644 --- a/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py +++ b/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py @@ -125,13 +125,12 @@ class RhsmRepositoryReleaseModuleTestCase(ModuleTestCase): def test_release_matcher(self): # throw a few values at the release matcher -- only sane_values should match - sane_values = ['1Server', '10Server', '1.10', '10.0'] + sane_values = ['1Server', '1Client', '10Server', '1.10', '10.0', '9'] insane_values = [ '6server', # lowercase 's' '100Server', # excessively long 'x' component - '100.0', # excessively long 'x' component - '6.100', # excessively long 'y' component '100.100', # excessively long 'x' and 'y' components + '+.-', # illegal characters ] matches = self.module.release_matcher.findall(' '.join(sane_values + insane_values)) From 63012eef82ad127e06e7a3e5a51eeb7a0f30a0c3 Mon Sep 17 00:00:00 2001 From: DasSkelett Date: Tue, 25 May 2021 12:58:20 +0200 Subject: [PATCH 0073/2828] Use str() to get exception message (#2590) --- .../fragments/2590-netcup_dns-exception-no-message-attr.yml | 2 ++ plugins/modules/net_tools/netcup_dns.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2590-netcup_dns-exception-no-message-attr.yml diff --git a/changelogs/fragments/2590-netcup_dns-exception-no-message-attr.yml b/changelogs/fragments/2590-netcup_dns-exception-no-message-attr.yml new file mode 100644 index 0000000000..06cac9ad1b --- /dev/null +++ 
b/changelogs/fragments/2590-netcup_dns-exception-no-message-attr.yml @@ -0,0 +1,2 @@ +bugfixes: + - netcup_dns - use ``str(ex)`` instead of unreliable ``ex.message`` in exception handling to fix ``AttributeError`` in error cases (https://github.com/ansible-collections/community.general/pull/2590). diff --git a/plugins/modules/net_tools/netcup_dns.py b/plugins/modules/net_tools/netcup_dns.py index 5d63a5b38e..5ec5cbb246 100644 --- a/plugins/modules/net_tools/netcup_dns.py +++ b/plugins/modules/net_tools/netcup_dns.py @@ -255,7 +255,7 @@ def main(): has_changed = True except Exception as ex: - module.fail_json(msg=ex.message) + module.fail_json(msg=str(ex)) module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]}) From d8713992209ccce44b884093967749249bca960f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 25 May 2021 07:04:19 -0400 Subject: [PATCH 0074/2828] json_query, no more 'unknown type' errors (#2607) Signed-off-by: Abhijeet Kasurde --- changelogs/fragments/json_query_more_types.yml | 3 +++ plugins/filter/json_query.py | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/json_query_more_types.yml diff --git a/changelogs/fragments/json_query_more_types.yml b/changelogs/fragments/json_query_more_types.yml new file mode 100644 index 0000000000..4ac69b67c0 --- /dev/null +++ b/changelogs/fragments/json_query_more_types.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - json_query filter plugin - avoid 'unknown type' errors for more Ansible internal types (https://github.com/ansible-collections/community.general/pull/2607). 
diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py index 972109a045..673cafa587 100644 --- a/plugins/filter/json_query.py +++ b/plugins/filter/json_query.py @@ -35,9 +35,11 @@ def json_query(data, expr): raise AnsibleError('You need to install "jmespath" prior to running ' 'json_query filter') - # Hack to handle Ansible String Types + # Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence # See issue: https://github.com/ansible-collections/community.general/issues/320 jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', ) + jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', ) + jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', ) try: return jmespath.search(expr, data) except jmespath.exceptions.JMESPathError as e: From 6df3685d42f35147b73f08237b3dea73e8d36e9a Mon Sep 17 00:00:00 2001 From: Alexander Moiseenko Date: Wed, 26 May 2021 08:00:53 +0300 Subject: [PATCH 0075/2828] jenkins_plugin: HTTP Error 405: Method Not Allowed on disable/enable plugin #2510 (#2511) * define POST method for pluginManager api requests Jenkins makeEnable/makeDisable api requests requires to use POST method * add changelog fragment * fix my yoda lang thx to aminvakil Co-authored-by: Amin Vakil * update changelog fragment Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein --- changelogs/fragments/2510-jenkins_plugin_use_post_method.yml | 2 ++ plugins/modules/web_infrastructure/jenkins_plugin.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2510-jenkins_plugin_use_post_method.yml diff --git a/changelogs/fragments/2510-jenkins_plugin_use_post_method.yml b/changelogs/fragments/2510-jenkins_plugin_use_post_method.yml new file mode 100644 index 
0000000000..b310e27061 --- /dev/null +++ b/changelogs/fragments/2510-jenkins_plugin_use_post_method.yml @@ -0,0 +1,2 @@ +bugfixes: + - jenkins_plugin - use POST method for sending request to jenkins API when ``state`` option is one of ``enabled``, ``disabled``, ``pinned``, ``unpinned``, or ``absent`` (https://github.com/ansible-collections/community.general/issues/2510). diff --git a/plugins/modules/web_infrastructure/jenkins_plugin.py b/plugins/modules/web_infrastructure/jenkins_plugin.py index c9946023ac..be335fcfd3 100644 --- a/plugins/modules/web_infrastructure/jenkins_plugin.py +++ b/plugins/modules/web_infrastructure/jenkins_plugin.py @@ -696,7 +696,8 @@ class JenkinsPlugin(object): self._get_url_data( url, msg_status="Plugin not found. %s" % url, - msg_exception="%s has failed." % msg) + msg_exception="%s has failed." % msg, + method="POST") def main(): From aa74cf4d61b1e22e55ca1fa0b0d18744da493b4f Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 26 May 2021 17:06:23 +1200 Subject: [PATCH 0076/2828] ini_file - added note in documentation for utf-8 bom (#2599) * added note in documentation for utf-8 bom * Update plugins/modules/files/ini_file.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- plugins/modules/files/ini_file.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py index ea857cefa9..d318d04d57 100644 --- a/plugins/modules/files/ini_file.py +++ b/plugins/modules/files/ini_file.py @@ -79,6 +79,7 @@ options: notes: - While it is possible to add an I(option) without specifying a I(value), this makes no sense. - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well. + - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files. 
author: - Jan-Piet Mens (@jpmens) - Ales Nosek (@noseka1) From 4764a5deba6b44c988ab21b1b8b2951e71b8499b Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 26 May 2021 17:07:09 +1200 Subject: [PATCH 0077/2828] redis cache - better parsing of connection uri (#2579) * better parsing of connection uri * added changelog fragment * fixed tests for ansible 2.9 * Update tests/unit/plugins/cache/test_redis.py Co-authored-by: Felix Fontein * Update tests/unit/plugins/cache/test_redis.py Co-authored-by: Felix Fontein * Adjustments from PR * Update test_redis.py * Update test_redis.py * Update plugins/cache/redis.py Co-authored-by: Felix Fontein * Update plugins/cache/redis.py Co-authored-by: Felix Fontein * Update tests/unit/plugins/cache/test_redis.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/2579-redis-cache-ipv6.yml | 2 ++ plugins/cache/redis.py | 14 ++++++++++++-- tests/unit/plugins/cache/test_redis.py | 15 ++++++++++++++- 3 files changed, 28 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2579-redis-cache-ipv6.yml diff --git a/changelogs/fragments/2579-redis-cache-ipv6.yml b/changelogs/fragments/2579-redis-cache-ipv6.yml new file mode 100644 index 0000000000..aaa5e78b34 --- /dev/null +++ b/changelogs/fragments/2579-redis-cache-ipv6.yml @@ -0,0 +1,2 @@ +bugfixes: + - redis cache - improved connection string parsing (https://github.com/ansible-collections/community.general/issues/497). diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 7a376d6d7c..6af7c731e4 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -61,6 +61,7 @@ DOCUMENTATION = ''' type: integer ''' +import re import time import json @@ -91,6 +92,8 @@ class CacheModule(BaseCacheModule): performance. 
""" _sentinel_service_name = None + re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$') + re_sent_conn = re.compile(r'^(.*):(\d+)$') def __init__(self, *args, **kwargs): uri = '' @@ -130,11 +133,18 @@ class CacheModule(BaseCacheModule): self._db = self._get_sentinel_connection(uri, kw) # normal connection else: - connection = uri.split(':') + connection = self._parse_connection(self.re_url_conn, uri) self._db = StrictRedis(*connection, **kw) display.vv('Redis connection: %s' % self._db) + @staticmethod + def _parse_connection(re_patt, uri): + match = re_patt.match(uri) + if not match: + raise AnsibleError("Unable to parse connection string") + return match.groups() + def _get_sentinel_connection(self, uri, kw): """ get sentinel connection details from _uri @@ -158,7 +168,7 @@ class CacheModule(BaseCacheModule): except IndexError: pass # password is optional - sentinels = [tuple(shost.split(':')) for shost in connections] + sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections] display.vv('\nUsing redis sentinels: %s' % sentinels) scon = Sentinel(sentinels, **kw) try: diff --git a/tests/unit/plugins/cache/test_redis.py b/tests/unit/plugins/cache/test_redis.py index e665826769..ee7e1f7913 100644 --- a/tests/unit/plugins/cache/test_redis.py +++ b/tests/unit/plugins/cache/test_redis.py @@ -23,10 +23,23 @@ import pytest pytest.importorskip('redis') +from ansible import constants as C from ansible.plugins.loader import cache_loader +from ansible.release import __version__ as ansible_version from ansible_collections.community.general.plugins.cache.redis import CacheModule as RedisCache def test_redis_cachemodule(): # The _uri option is required for the redis plugin - assert isinstance(cache_loader.get('community.general.redis', **{'_uri': '127.0.0.1:6379:1'}), RedisCache) + connection = '127.0.0.1:6379:1' + if ansible_version.startswith('2.9.'): + C.CACHE_PLUGIN_CONNECTION = connection + assert 
isinstance(cache_loader.get('community.general.redis', **{'_uri': connection}), RedisCache) + + +def test_redis_cachemodule(): + # The _uri option is required for the redis plugin + connection = '[::1]:6379:1' + if ansible_version.startswith('2.9.'): + C.CACHE_PLUGIN_CONNECTION = connection + assert isinstance(cache_loader.get('community.general.redis', **{'_uri': connection}), RedisCache) From d0f8eac7fdf264ba04ce536d4de8b146dc3f86e4 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Wed, 26 May 2021 12:12:21 +0430 Subject: [PATCH 0078/2828] Add CONTRIBUTING.md (#2602) * Initial file shamelessly copied from community.mysql * Add some notes on pull requests * Add CONTRIBUTING.md link to README.md * Add quick-start development guide link * Apply felixfontein's suggestions Co-authored-by: Felix Fontein * add note about rebasing and merge commits Co-authored-by: Felix Fontein * add note about easyfix and waiting_on_contributor tags Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- CONTRIBUTING.md | 32 ++++++++++++++++++++++++++++++++ README.md | 2 ++ 2 files changed, 34 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..959d363236 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,32 @@ +# Contributing + +We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our contributions and interactions within this repository. + +If you are a committer, also refer to the [collection's committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md). + +## Issue tracker + +Whether you are looking for an opportunity to contribute or you found a bug and already know how to solve it, please go to the [issue tracker](https://github.com/ansible-collections/community.general/issues). 
+There you can find feature ideas to implement, reports about bugs to solve, or submit an issue to discuss your idea before implementing it which can help choose a right direction at the beginning of your work and potentially save a lot of time and effort. +Also somebody may already have started discussing or working on implementing the same or a similar idea, +so you can cooperate to create a better solution together. + +* If you are interested in starting with an easy issue, look for [issues with an `easyfix` label](https://github.com/ansible-collections/community.general/labels/easyfix). +* Often issues that are waiting for contributors to pick up have [the `waiting_on_contributor` label](https://github.com/ansible-collections/community.general/labels/waiting_on_contributor). + +## Open pull requests + +Look through currently [open pull requests](https://github.com/ansible-collections/community.general/pulls). +You can help by reviewing them. Reviews help move pull requests to merge state. Some good pull requests cannot be merged only due to a lack of reviews. And it is always worth saying that good reviews are often more valuable than pull requests themselves. +Note that reviewing does not only mean code review, but also offering comments on new interfaces added to existing plugins/modules, interfaces of new plugins/modules, improving language (not everyone is a native English speaker), or testing bugfixes and new features! + +Also, consider taking up a valuable, reviewed, but abandoned pull request which you could politely ask the original authors to complete yourself. + +* Try committing your changes with an informative but short commit message. +* All commits of a pull request branch will be squashed into one commit at last. That does not mean you must have only one commit on your pull request, though! +* Please try not to force-push if it is not needed, so reviewers and other users looking at your pull request later can see the pull request commit history.
+* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout. + +You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst). + +If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it. diff --git a/README.md b/README.md index 306f307128..e6e4eb880e 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,8 @@ export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html). +Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md). + ### Running tests See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections). From 0b4a2bea01ef4f7a5d1f78e3f38f0d2b55955d39 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 26 May 2021 10:34:35 +0200 Subject: [PATCH 0079/2828] Use become test framework for sudosu tests.
(#2629) --- tests/unit/plugins/become/test_sudosu.py | 37 ++++++++++++++---------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/tests/unit/plugins/become/test_sudosu.py b/tests/unit/plugins/become/test_sudosu.py index 4e5c998f09..6adf200d8e 100644 --- a/tests/unit/plugins/become/test_sudosu.py +++ b/tests/unit/plugins/become/test_sudosu.py @@ -10,36 +10,41 @@ __metaclass__ = type import re from ansible import context -from ansible.playbook.play_context import PlayContext -from ansible.plugins.loader import become_loader + +from .helper import call_become_plugin def test_sudosu(mocker, parser, reset_cli_args): options = parser.parse_args([]) context._init_global_context(options) - play_context = PlayContext() default_cmd = "/bin/foo" default_exe = "/bin/bash" sudo_exe = 'sudo' sudo_flags = '-H -s -n' - cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) - assert cmd == default_cmd - success = 'BECOME-SUCCESS-.+?' - play_context.become = True - play_context.become_user = 'foo' - play_context.set_become_plugin(become_loader.get('community.general.sudosu')) - play_context.become_flags = sudo_flags - cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) - - assert (re.match("""%s %s su -l %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags, play_context.become_user, + task = { + 'become_user': 'foo', + 'become_method': 'community.general.sudosu', + 'become_flags': sudo_flags, + } + var_options = {} + cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) + print(cmd) + assert (re.match("""%s %s su -l %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags, task['become_user'], default_exe, success, default_cmd), cmd) is not None) - play_context.become_pass = 'testpass' - cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) + task = { + 'become_user': 'foo', + 'become_method': 'community.general.sudosu', + 'become_flags': sudo_flags, + 'become_pass': 'testpass', + 
} + var_options = {} + cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) + print(cmd) assert (re.match("""%s %s -p "%s" su -l %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags.replace('-n', ''), - r"\[sudo via ansible, key=.+?\] password:", play_context.become_user, + r"\[sudo via ansible, key=.+?\] password:", task['become_user'], default_exe, success, default_cmd), cmd) is not None) From 26757edfb27b8d871963aae45e41eddf05a06775 Mon Sep 17 00:00:00 2001 From: Sylvia van Os Date: Thu, 27 May 2021 07:57:06 +0200 Subject: [PATCH 0080/2828] Add one-liner lookup example (#2615) * Add one-liner lookup example * Remove trailing whitespace * Update plugins/lookup/tss.py Co-authored-by: Felix Fontein * Update plugins/lookup/tss.py Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- plugins/lookup/tss.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index 2c25532699..b7b7cd85e0 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -103,6 +103,14 @@ EXAMPLES = r""" | items2dict(key_name='slug', value_name='itemValue'))['password'] }} + +- hosts: localhost + vars: + secret_password: >- + {{ ((lookup('community.general.tss', 1) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] }}" + tasks: + - ansible.builtin.debug: + msg: the password is {{ secret_password }} """ from ansible.errors import AnsibleError, AnsibleOptionsError From 4aa50962cb54b903c807b6a000cb41c28d4b1806 Mon Sep 17 00:00:00 2001 From: sgalea87 <43749726+sgalea87@users.noreply.github.com> Date: Thu, 27 May 2021 08:01:28 +0200 Subject: [PATCH 0081/2828] influxdb_user: Fix bug introduced by PR 2499 (#2614) * Update influxdb_user.py Fixed function name * Create 2614-influxdb_user-fix-issue-introduced-in-PR#2499 Added changelog * Rename 2614-influxdb_user-fix-issue-introduced-in-PR#2499 to 
2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml Fixed extension * Update changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml Co-authored-by: Amin Vakil Co-authored-by: Amin Vakil --- .../2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml | 2 ++ plugins/modules/database/influxdb/influxdb_user.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml diff --git a/changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml b/changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml new file mode 100644 index 0000000000..dfae3f2bdf --- /dev/null +++ b/changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml @@ -0,0 +1,2 @@ +bugfixes: + - influxdb_user - fix bug which removed current privileges instead of appending them to existing ones (https://github.com/ansible-collections/community.general/issues/2609, https://github.com/ansible-collections/community.general/pull/2614). diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py index d9e6b58051..cb35ea7ce6 100644 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ b/plugins/modules/database/influxdb/influxdb_user.py @@ -174,7 +174,7 @@ def set_user_grants(module, client, user_name, grants): if v['privilege'] != 'NO PRIVILEGES': if v['privilege'] == 'ALL PRIVILEGES': v['privilege'] = 'ALL' - parsed_grants.add(v) + parsed_grants.append(v) # check if the current grants are included in the desired ones for current_grant in parsed_grants: From b45298bc4355d0ab95ed37e0eade699b2b665289 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 27 May 2021 08:23:35 +0200 Subject: [PATCH 0082/2828] Temporarily disable iptables_state tests. 
(#2641) --- tests/integration/targets/iptables_state/aliases | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/targets/iptables_state/aliases b/tests/integration/targets/iptables_state/aliases index 3cac4af522..12765cec47 100644 --- a/tests/integration/targets/iptables_state/aliases +++ b/tests/integration/targets/iptables_state/aliases @@ -5,3 +5,4 @@ skip/freebsd # no iptables/netfilter (Linux specific) skip/osx # no iptables/netfilter (Linux specific) skip/macos # no iptables/netfilter (Linux specific) skip/aix # no iptables/netfilter (Linux specific) +disabled # FIXME From 909e9fe9508804b2e18b755ef36060861cde5228 Mon Sep 17 00:00:00 2001 From: quidame Date: Thu, 27 May 2021 08:47:16 +0200 Subject: [PATCH 0083/2828] fix a regression in initialization_from_null_state() (iptables-nft > 1.8.2) (#2604) --- plugins/modules/system/iptables_state.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/plugins/modules/system/iptables_state.py b/plugins/modules/system/iptables_state.py index 326db862bc..66ba2c9b20 100644 --- a/plugins/modules/system/iptables_state.py +++ b/plugins/modules/system/iptables_state.py @@ -304,7 +304,7 @@ def write_state(b_path, lines, changed): return changed -def initialize_from_null_state(initializer, initcommand, table): +def initialize_from_null_state(initializer, initcommand, fallbackcmd, table): ''' This ensures iptables-state output is suitable for iptables-restore to roll back to it, i.e. iptables-save output is not empty. This also works for the @@ -315,8 +315,14 @@ def initialize_from_null_state(initializer, initcommand, table): commandline = list(initializer) commandline += ['-t', table] - (rc, out, err) = module.run_command(commandline, check_rc=True) + dummy = module.run_command(commandline, check_rc=True) (rc, out, err) = module.run_command(initcommand, check_rc=True) + if '*%s' % table not in out.splitlines(): + # The last resort. 
+ iptables_input = '*%s\n:OUTPUT ACCEPT\nCOMMIT\n' % table + dummy = module.run_command(fallbackcmd, data=iptables_input, check_rc=True) + (rc, out, err) = module.run_command(initcommand, check_rc=True) + return rc, out, err @@ -401,6 +407,7 @@ def main(): INITCOMMAND = [bin_iptables_save] INITIALIZER = [bin_iptables, '-L', '-n'] TESTCOMMAND = [bin_iptables_restore, '--test'] + FALLBACKCMD = [bin_iptables_restore] if counters: COMMANDARGS.append('--counters') @@ -425,6 +432,7 @@ def main(): INITIALIZER.extend(['--modprobe', modprobe]) INITCOMMAND.extend(['--modprobe', modprobe]) TESTCOMMAND.extend(['--modprobe', modprobe]) + FALLBACKCMD.extend(['--modprobe', modprobe]) SAVECOMMAND = list(COMMANDARGS) SAVECOMMAND.insert(0, bin_iptables_save) @@ -458,15 +466,15 @@ def main(): for t in TABLES: if '*%s' % t in state_to_restore: if len(stdout) == 0 or '*%s' % t not in stdout.splitlines(): - (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, t) + (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, t) elif len(stdout) == 0: - (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, 'filter') + (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, 'filter') elif state == 'restored' and '*%s' % table not in state_to_restore: module.fail_json(msg="Table %s to restore not defined in %s" % (table, path)) elif len(stdout) == 0 or '*%s' % table not in stdout.splitlines(): - (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, table) + (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, table) initial_state = filter_and_format_state(stdout) if initial_state is None: From b79969da68ddaa73cfabdb866f80ac7f414b9f62 Mon Sep 17 00:00:00 2001 From: rainerleber <39616583+rainerleber@users.noreply.github.com> Date: Thu, 27 May 2021 18:46:12 +0200 Subject: [PATCH 0084/2828] Add module hana_query to make SAP HANA 
administration easier. (#2623) * new * move link * Apply suggestions from code review Co-authored-by: Felix Fontein * add more interesting return value in test * remove unused objects * removed unneeded function * extend test output * Update tests/unit/plugins/modules/database/saphana/test_hana_query.py Co-authored-by: Felix Fontein Co-authored-by: Rainer Leber Co-authored-by: Felix Fontein --- .../modules/database/saphana/hana_query.py | 187 ++++++++++++++++++ plugins/modules/hana_query.py | 1 + .../modules/database/saphana/__init__.py | 0 .../database/saphana/test_hana_query.py | 66 +++++++ 4 files changed, 254 insertions(+) create mode 100644 plugins/modules/database/saphana/hana_query.py create mode 120000 plugins/modules/hana_query.py create mode 100644 tests/unit/plugins/modules/database/saphana/__init__.py create mode 100644 tests/unit/plugins/modules/database/saphana/test_hana_query.py diff --git a/plugins/modules/database/saphana/hana_query.py b/plugins/modules/database/saphana/hana_query.py new file mode 100644 index 0000000000..ab147ef3fe --- /dev/null +++ b/plugins/modules/database/saphana/hana_query.py @@ -0,0 +1,187 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Rainer Leber +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: hana_query +short_description: Execute SQL on HANA +version_added: 3.2.0 +description: This module executes SQL statements on HANA with hdbsql. +options: + sid: + description: The system ID. + type: str + required: true + instance: + description: The instance number. + type: str + required: true + user: + description: A dedicated username. Defaults to C(SYSTEM). + type: str + default: SYSTEM + password: + description: The password to connect to the database. + type: str + required: true + autocommit: + description: Autocommit the statement. 
+ type: bool + default: true + host: + description: The Host IP address. The port can be defined as well. + type: str + database: + description: Define the database on which to connect. + type: str + encrypted: + description: Use encrypted connection. Defaults to C(false). + type: bool + default: false + filepath: + description: + - One or more files each containing one SQL query to run. + - Must be a string or list containing strings. + type: list + elements: path + query: + description: + - SQL query to run. + - Must be a string or list containing strings. Please note that if you supply a string, it will be split by commas (C(,)) to a list. + It is better to supply a one-element list instead to avoid mangled input. + type: list + elements: str +notes: + - Does not support C(check_mode). +author: + - Rainer Leber (@rainerleber) +''' + +EXAMPLES = r''' +- name: Simple select query + community.general.hana_query: + sid: "hdb" + instance: "01" + password: "Test123" + query: "select user_name from users" + +- name: Run several queries + community.general.hana_query: + sid: "hdb" + instance: "01" + password: "Test123" + query: + - "select user_name from users;" + - select * from SYSTEM; + host: "localhost" + autocommit: False + +- name: Run several queries from file + community.general.hana_query: + sid: "hdb" + instance: "01" + password: "Test123" + filepath: + - /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt + - /tmp/HANA.txt + host: "localhost" +''' + +RETURN = r''' +query_result: + description: List containing results of all queries executed (one sublist for every query). 
+ returned: on success + type: list + elements: list + sample: [[{"Column": "Value1"}, {"Column": "Value2"}], [{"Column": "Value1"}, {"Column": "Value2"}]] +''' + +import csv +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import StringIO +from ansible.module_utils._text import to_native + + +def csv_to_list(rawcsv): + reader_raw = csv.DictReader(StringIO(rawcsv)) + reader = [dict((k, v.strip()) for k, v in row.items()) for row in reader_raw] + return list(reader) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + sid=dict(type='str', required=True), + instance=dict(type='str', required=True), + encrypted=dict(type='bool', required=False, default=False), + host=dict(type='str', required=False), + user=dict(type='str', required=False, default="SYSTEM"), + password=dict(type='str', required=True, no_log=True), + database=dict(type='str', required=False), + query=dict(type='list', elements='str', required=False), + filepath=dict(type='list', elements='path', required=False), + autocommit=dict(type='bool', required=False, default=True), + ), + required_one_of=[('query', 'filepath')], + supports_check_mode=False, + ) + rc, out, err, out_raw = [0, [], "", ""] + + params = module.params + + sid = (params['sid']).upper() + instance = params['instance'] + user = params['user'] + password = params['password'] + autocommit = params['autocommit'] + host = params['host'] + database = params['database'] + encrypted = params['encrypted'] + + filepath = params['filepath'] + query = params['query'] + + bin_path = "/usr/sap/{sid}/HDB{instance}/exe/hdbsql".format(sid=sid, instance=instance) + + try: + command = [module.get_bin_path(bin_path, required=True)] + except Exception as e: + module.fail_json(msg='Failed to find hdbsql at the expected path "{0}". 
Please check SID and instance number: "{1}"'.format(bin_path, to_native(e))) + + if encrypted is True: + command.extend(['-attemptencrypt']) + if autocommit is False: + command.extend(['-z']) + if host is not None: + command.extend(['-n', host]) + if database is not None: + command.extend(['-d', database]) + # -x Suppresses additional output, such as the number of selected rows in a result set. + command.extend(['-x', '-i', instance, '-u', user, '-p', password]) + + if filepath is not None: + command.extend(['-I']) + for p in filepath: + # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# -I /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt, + # iterates through files and append the output to var out. + query_command = command + [p] + (rc, out_raw, err) = module.run_command(query_command) + out.append(csv_to_list(out_raw)) + if query is not None: + for q in query: + # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# "select user_name from users", + # iterates through multiple commands and append the output to var out. 
+ query_command = command + [q] + (rc, out_raw, err) = module.run_command(query_command) + out.append(csv_to_list(out_raw)) + changed = True + + module.exit_json(changed=changed, rc=rc, query_result=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hana_query.py b/plugins/modules/hana_query.py new file mode 120000 index 0000000000..ea869eb7a4 --- /dev/null +++ b/plugins/modules/hana_query.py @@ -0,0 +1 @@ +./database/saphana/hana_query.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/database/saphana/__init__.py b/tests/unit/plugins/modules/database/saphana/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/plugins/modules/database/saphana/test_hana_query.py b/tests/unit/plugins/modules/database/saphana/test_hana_query.py new file mode 100644 index 0000000000..4d158c028e --- /dev/null +++ b/tests/unit/plugins/modules/database/saphana/test_hana_query.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Rainer Leber (@rainerleber) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.community.general.plugins.modules import hana_query +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible.module_utils import basic + + +def get_bin_path(*args, **kwargs): + """Function to return path of hdbsql""" + return "/usr/sap/HDB/HDB01/exe/hdbsql" + + +class Testhana_query(ModuleTestCase): + """Main class for testing hana_query module.""" + + def setUp(self): + """Setup.""" + super(Testhana_query, self).setUp() + self.module = hana_query + self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', 
get_bin_path) + self.mock_get_bin_path.start() + self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone' + + def tearDown(self): + """Teardown.""" + super(Testhana_query, self).tearDown() + + def test_without_required_parameters(self): + """Failure must occurs when all parameters are missing.""" + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + self.module.main() + + def test_hana_query(self): + """Check that result is processed.""" + set_module_args({ + 'sid': "HDB", + 'instance': "01", + 'encrypted': False, + 'host': "localhost", + 'user': "SYSTEM", + 'password': "1234Qwer", + 'database': "HDB", + 'query': "SELECT * FROM users;" + }) + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.return_value = 0, 'username,name\n testuser,test user \n myuser, my user \n', '' + with self.assertRaises(AnsibleExitJson) as result: + hana_query.main() + self.assertEqual(result.exception.args[0]['query_result'], [[ + {'username': 'testuser', 'name': 'test user'}, + {'username': 'myuser', 'name': 'my user'}, + ]]) + self.assertEqual(run_command.call_count, 1) From dc793ea32b7246904c969eb1768fb5c8aef87990 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Thu, 27 May 2021 19:46:38 +0300 Subject: [PATCH 0085/2828] hana_query module: add a maintainer (#2647) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index cdef437f90..994de0621f 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -347,6 +347,8 @@ files: $modules/database/mssql/mssql_db.py: maintainers: vedit Jmainguy kenichi-ogawa-1988 labels: mssql_db + $modules/database/saphana/hana_query.py: + maintainers: rainerleber $modules/database/vertica/: maintainers: dareko $modules/files/archive.py: From 7cd96d963efe4e6bf7ac9080fdf933dc23664dcf Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 27 May 2021 18:49:26 +0200 Subject: [PATCH 0086/2828] meta/runtime.yml and 
__init__.py cleanup (#2632) * Remove superfluous __init__.py files. * Reformat and sort meta/runtime.yml. * The ovirt modules have been removed. * Add changelog entry. --- changelogs/fragments/2632-cleanup.yml | 2 + meta/runtime.yml | 244 +++++++++--------- plugins/action/__init__.py | 0 plugins/become/__init__.py | 0 plugins/cache/__init__.py | 0 plugins/callback/__init__.py | 0 plugins/connection/__init__.py | 0 plugins/doc_fragments/__init__.py | 0 plugins/filter/__init__.py | 0 plugins/inventory/__init__.py | 0 plugins/lookup/__init__.py | 0 plugins/module_utils/__init__.py | 0 plugins/module_utils/identity/__init__.py | 0 .../identity/keycloak/__init__.py | 0 plugins/module_utils/mh/__init__.py | 0 plugins/module_utils/mh/mixins/__init__.py | 0 plugins/module_utils/net_tools/__init__.py | 0 .../module_utils/net_tools/nios/__init__.py | 0 .../net_tools/pritunl/__init__.py | 0 plugins/module_utils/oracle/__init__.py | 0 .../remote_management/__init__.py | 0 .../remote_management/lxca/__init__.py | 0 .../module_utils/source_control/__init__.py | 0 plugins/module_utils/storage/__init__.py | 0 plugins/module_utils/storage/emc/__init__.py | 0 .../module_utils/storage/hpe3par/__init__.py | 0 plugins/modules/__init__.py | 0 plugins/modules/net_tools/pritunl/__init__.py | 0 28 files changed, 118 insertions(+), 128 deletions(-) create mode 100644 changelogs/fragments/2632-cleanup.yml delete mode 100644 plugins/action/__init__.py delete mode 100644 plugins/become/__init__.py delete mode 100644 plugins/cache/__init__.py delete mode 100644 plugins/callback/__init__.py delete mode 100644 plugins/connection/__init__.py delete mode 100644 plugins/doc_fragments/__init__.py delete mode 100644 plugins/filter/__init__.py delete mode 100644 plugins/inventory/__init__.py delete mode 100644 plugins/lookup/__init__.py delete mode 100644 plugins/module_utils/__init__.py delete mode 100644 plugins/module_utils/identity/__init__.py delete mode 100644 
plugins/module_utils/identity/keycloak/__init__.py delete mode 100644 plugins/module_utils/mh/__init__.py delete mode 100644 plugins/module_utils/mh/mixins/__init__.py delete mode 100644 plugins/module_utils/net_tools/__init__.py delete mode 100644 plugins/module_utils/net_tools/nios/__init__.py delete mode 100644 plugins/module_utils/net_tools/pritunl/__init__.py delete mode 100644 plugins/module_utils/oracle/__init__.py delete mode 100644 plugins/module_utils/remote_management/__init__.py delete mode 100644 plugins/module_utils/remote_management/lxca/__init__.py delete mode 100644 plugins/module_utils/source_control/__init__.py delete mode 100644 plugins/module_utils/storage/__init__.py delete mode 100644 plugins/module_utils/storage/emc/__init__.py delete mode 100644 plugins/module_utils/storage/hpe3par/__init__.py delete mode 100644 plugins/modules/__init__.py delete mode 100644 plugins/modules/net_tools/pritunl/__init__.py diff --git a/changelogs/fragments/2632-cleanup.yml b/changelogs/fragments/2632-cleanup.yml new file mode 100644 index 0000000000..def89de634 --- /dev/null +++ b/changelogs/fragments/2632-cleanup.yml @@ -0,0 +1,2 @@ +minor_changes: +- "Remove unnecessary ``__init__.py`` files from ``plugins/`` (https://github.com/ansible-collections/community.general/pull/2632)." 
diff --git a/meta/runtime.yml b/meta/runtime.yml index e5b59bc046..8b2a0c0ad6 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -1,31 +1,5 @@ --- requires_ansible: '>=2.9.10' -action_groups: - ovirt: - - ovirt_affinity_label_facts - - ovirt_api_facts - - ovirt_cluster_facts - - ovirt_datacenter_facts - - ovirt_disk_facts - - ovirt_event_facts - - ovirt_external_provider_facts - - ovirt_group_facts - - ovirt_host_facts - - ovirt_host_storage_facts - - ovirt_network_facts - - ovirt_nic_facts - - ovirt_permission_facts - - ovirt_quota_facts - - ovirt_scheduling_policy_facts - - ovirt_snapshot_facts - - ovirt_storage_domain_facts - - ovirt_storage_template_facts - - ovirt_storage_vm_facts - - ovirt_tag_facts - - ovirt_template_facts - - ovirt_user_facts - - ovirt_vm_facts - - ovirt_vmpool_facts plugin_routing: connection: docker: @@ -40,15 +14,18 @@ plugin_routing: nios: deprecation: removal_version: 5.0.0 - warning_text: The community.general.nios lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_lookup instead. + warning_text: The community.general.nios lookup plugin has been deprecated. + Please use infoblox.nios_modules.nios_lookup instead. nios_next_ip: deprecation: removal_version: 5.0.0 - warning_text: The community.general.nios_next_ip lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_next_ip instead. + warning_text: The community.general.nios_next_ip lookup plugin has been deprecated. + Please use infoblox.nios_modules.nios_next_ip instead. nios_next_network: deprecation: removal_version: 5.0.0 - warning_text: The community.general.nios_next_network lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_next_network instead. + warning_text: The community.general.nios_next_network lookup plugin has been + deprecated. Please use infoblox.nios_modules.nios_next_network instead. 
modules: ali_instance_facts: tombstone: @@ -153,11 +130,13 @@ plugin_routing: gcp_forwarding_rule: tombstone: removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule instead. + warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule + instead. gcp_healthcheck: tombstone: removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check or google.cloud.gcp_compute_https_health_check instead. + warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check + or google.cloud.gcp_compute_https_health_check instead. gcp_target_proxy: tombstone: removal_version: 2.0.0 @@ -168,37 +147,22 @@ plugin_routing: warning_text: Use google.cloud.gcp_compute_url_map instead. gcpubsub: redirect: community.google.gcpubsub - gcpubsub_info: - redirect: community.google.gcpubsub_info gcpubsub_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.google.gcpubsub_info instead. + gcpubsub_info: + redirect: community.google.gcpubsub_info gcspanner: tombstone: removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance instead. + warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance + instead. github_hooks: tombstone: removal_version: 2.0.0 - warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead. - # Adding tombstones burns the old name, so we simply remove the entries: - # gluster_heal_info: - # tombstone: - # removal_version: 3.0.0 - # warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_heal_info instead. - # gluster_peer: - # tombstone: - # removal_version: 3.0.0 - # warning_text: The gluster modules have migrated to the gluster.gluster collection. 
Use gluster.gluster.gluster_peer instead. - # gluster_volume: - # tombstone: - # removal_version: 3.0.0 - # warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_volume instead. - # helm: - # tombstone: - # removal_version: 3.0.0 - # warning_text: Use community.kubernetes.helm instead. + warning_text: Use community.general.github_webhook and community.general.github_webhook_info + instead. hetzner_failover_ip: redirect: community.hrobot.failover_ip hetzner_failover_ip_info: @@ -246,11 +210,13 @@ plugin_routing: logicmonitor: tombstone: removal_version: 1.0.0 - warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017. + warning_text: The logicmonitor_facts module is no longer maintained and the + API used has been disabled in 2017. logicmonitor_facts: tombstone: removal_version: 1.0.0 - warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017. + warning_text: The logicmonitor_facts module is no longer maintained and the + API used has been disabled in 2017. memset_memstore_facts: tombstone: removal_version: 3.0.0 @@ -295,74 +261,90 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use netapp.ontap.na_ontap_info instead. - nios_a_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_a_record module has been deprecated. Please use infoblox.nios_modules.nios_a_record instead. - nios_aaaa_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_aaaa_record module has been deprecated. Please use infoblox.nios_modules.nios_aaaa_record instead. - nios_cname_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_cname_record module has been deprecated. Please use infoblox.nios_modules.nios_cname_record instead. 
- nios_dns_view: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_dns_view module has been deprecated. Please use infoblox.nios_modules.nios_dns_view instead. - nios_fixed_address: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_fixed_address module has been deprecated. Please use infoblox.nios_modules.nios_fixed_address instead. - nios_host_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_host_record module has been deprecated. Please use infoblox.nios_modules.nios_host_record instead. - nios_member: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_member module has been deprecated. Please use infoblox.nios_modules.nios_member instead. - nios_mx_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_mx_record module has been deprecated. Please use infoblox.nios_modules.nios_mx_record instead. - nios_naptr_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_naptr_record module has been deprecated. Please use infoblox.nios_modules.nios_naptr_record instead. - nios_network: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_network module has been deprecated. Please use infoblox.nios_modules.nios_network instead. - nios_network_view: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_network_view module has been deprecated. Please use infoblox.nios_modules.nios_network_view instead. - nios_nsgroup: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_nsgroup module has been deprecated. Please use infoblox.nios_modules.nios_nsgroup instead. - nios_ptr_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_ptr_record module has been deprecated. Please use infoblox.nios_modules.nios_ptr_record instead. 
- nios_srv_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_srv_record module has been deprecated. Please use infoblox.nios_modules.nios_srv_record instead. - nios_txt_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_txt_record module has been deprecated. Please use infoblox.nios_modules.nios_txt_record instead. - nios_zone: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_zone module has been deprecated. Please use infoblox.nios_modules.nios_zone instead. nginx_status_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.nginx_status_info instead. + nios_a_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_a_record module has been deprecated. + Please use infoblox.nios_modules.nios_a_record instead. + nios_aaaa_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_aaaa_record module has been deprecated. + Please use infoblox.nios_modules.nios_aaaa_record instead. + nios_cname_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_cname_record module has been deprecated. + Please use infoblox.nios_modules.nios_cname_record instead. + nios_dns_view: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_dns_view module has been deprecated. + Please use infoblox.nios_modules.nios_dns_view instead. + nios_fixed_address: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_fixed_address module has been deprecated. + Please use infoblox.nios_modules.nios_fixed_address instead. + nios_host_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_host_record module has been deprecated. + Please use infoblox.nios_modules.nios_host_record instead. 
+ nios_member: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_member module has been deprecated. + Please use infoblox.nios_modules.nios_member instead. + nios_mx_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_mx_record module has been deprecated. + Please use infoblox.nios_modules.nios_mx_record instead. + nios_naptr_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_naptr_record module has been deprecated. + Please use infoblox.nios_modules.nios_naptr_record instead. + nios_network: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_network module has been deprecated. + Please use infoblox.nios_modules.nios_network instead. + nios_network_view: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_network_view module has been deprecated. + Please use infoblox.nios_modules.nios_network_view instead. + nios_nsgroup: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_nsgroup module has been deprecated. + Please use infoblox.nios_modules.nios_nsgroup instead. + nios_ptr_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_ptr_record module has been deprecated. + Please use infoblox.nios_modules.nios_ptr_record instead. + nios_srv_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_srv_record module has been deprecated. + Please use infoblox.nios_modules.nios_srv_record instead. + nios_txt_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_txt_record module has been deprecated. + Please use infoblox.nios_modules.nios_txt_record instead. + nios_zone: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_zone module has been deprecated. + Please use infoblox.nios_modules.nios_zone instead. 
ome_device_info: redirect: dellemc.openmanage.ome_device_info one_image_facts: @@ -396,7 +378,8 @@ plugin_routing: oneview_logical_interconnect_group_facts: tombstone: removal_version: 3.0.0 - warning_text: Use community.general.oneview_logical_interconnect_group_info instead. + warning_text: Use community.general.oneview_logical_interconnect_group_info + instead. oneview_network_set_facts: tombstone: removal_version: 3.0.0 @@ -553,10 +536,10 @@ plugin_routing: redirect: community.postgresql.postgresql_table postgresql_tablespace: redirect: community.postgresql.postgresql_tablespace - postgresql_user_obj_stat_info: - redirect: community.postgresql.postgresql_user_obj_stat_info postgresql_user: redirect: community.postgresql.postgresql_user + postgresql_user_obj_stat_info: + redirect: community.postgresql.postgresql_user_obj_stat_info purefa_facts: tombstone: removal_version: 3.0.0 @@ -647,7 +630,8 @@ plugin_routing: nios: deprecation: removal_version: 5.0.0 - warning_text: The community.general.nios document fragment has been deprecated. Please use infoblox.nios_modules.nios instead. + warning_text: The community.general.nios document fragment has been deprecated. + Please use infoblox.nios_modules.nios instead. postgresql: redirect: community.postgresql.postgresql module_utils: @@ -668,26 +652,30 @@ plugin_routing: net_tools.nios.api: deprecation: removal_version: 5.0.0 - warning_text: The community.general.net_tools.nios.api module_utils has been deprecated. Please use infoblox.nios_modules.api instead. + warning_text: The community.general.net_tools.nios.api module_utils has been + deprecated. Please use infoblox.nios_modules.api instead. 
+ postgresql: + redirect: community.postgresql.postgresql remote_management.dellemc.dellemc_idrac: redirect: dellemc.openmanage.dellemc_idrac remote_management.dellemc.ome: redirect: dellemc.openmanage.ome - postgresql: - redirect: community.postgresql.postgresql callback: actionable: tombstone: removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options. + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' and 'display_ok_hosts = no' options. full_skip: tombstone: removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' option. + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' option. stderr: tombstone: removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_failed_stderr = yes' option. + warning_text: Use the 'default' callback plugin with 'display_failed_stderr + = yes' option. 
inventory: docker_machine: redirect: community.docker.docker_machine diff --git a/plugins/action/__init__.py b/plugins/action/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/become/__init__.py b/plugins/become/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/cache/__init__.py b/plugins/cache/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/callback/__init__.py b/plugins/callback/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/connection/__init__.py b/plugins/connection/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/doc_fragments/__init__.py b/plugins/doc_fragments/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/filter/__init__.py b/plugins/filter/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/inventory/__init__.py b/plugins/inventory/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/lookup/__init__.py b/plugins/lookup/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/__init__.py b/plugins/module_utils/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/identity/__init__.py b/plugins/module_utils/identity/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/identity/keycloak/__init__.py b/plugins/module_utils/identity/keycloak/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/mh/__init__.py b/plugins/module_utils/mh/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/mh/mixins/__init__.py b/plugins/module_utils/mh/mixins/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/plugins/module_utils/net_tools/__init__.py b/plugins/module_utils/net_tools/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/net_tools/nios/__init__.py b/plugins/module_utils/net_tools/nios/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/net_tools/pritunl/__init__.py b/plugins/module_utils/net_tools/pritunl/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/oracle/__init__.py b/plugins/module_utils/oracle/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/remote_management/__init__.py b/plugins/module_utils/remote_management/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/remote_management/lxca/__init__.py b/plugins/module_utils/remote_management/lxca/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/source_control/__init__.py b/plugins/module_utils/source_control/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/storage/__init__.py b/plugins/module_utils/storage/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/storage/emc/__init__.py b/plugins/module_utils/storage/emc/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/storage/hpe3par/__init__.py b/plugins/module_utils/storage/hpe3par/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/modules/__init__.py b/plugins/modules/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/modules/net_tools/pritunl/__init__.py b/plugins/modules/net_tools/pritunl/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 From 285639a4f94e6000f61e8298e26589b39e5f8d8f Mon Sep 17 00:00:00 2001 From: 
christophemorio <49184206+christophemorio@users.noreply.github.com> Date: Thu, 27 May 2021 19:03:39 +0200 Subject: [PATCH 0087/2828] Terraform overwrite init (#2573) * feat: implement overwrite_init option * chore: changelog --- .../fragments/2573-terraform-overwrite-init.yml | 2 ++ plugins/modules/cloud/misc/terraform.py | 11 ++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2573-terraform-overwrite-init.yml diff --git a/changelogs/fragments/2573-terraform-overwrite-init.yml b/changelogs/fragments/2573-terraform-overwrite-init.yml new file mode 100644 index 0000000000..f2dad6a7ee --- /dev/null +++ b/changelogs/fragments/2573-terraform-overwrite-init.yml @@ -0,0 +1,2 @@ +minor_changes: + - terraform - add option ``overwrite_init`` to skip init if exists (https://github.com/ansible-collections/community.general/pull/2573). diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py index 0a4e41b5f0..9bf36c8c81 100644 --- a/plugins/modules/cloud/misc/terraform.py +++ b/plugins/modules/cloud/misc/terraform.py @@ -107,6 +107,12 @@ options: you intend to provision an entirely new Terraform deployment. default: false type: bool + overwrite_init: + description: + - Run init even if C(.terraform/terraform.tfstate) already exists in I(project_path). + default: true + type: bool + version_added: '3.2.0' backend_config: description: - A group of key-values to provide at init stage to the -backend-config parameter. 
@@ -348,6 +354,7 @@ def main(): backend_config=dict(type='dict', default=None), backend_config_files=dict(type='list', elements='path', default=None), init_reconfigure=dict(required=False, type='bool', default=False), + overwrite_init=dict(type='bool', default=True), ), required_if=[('state', 'planned', ['plan_file'])], supports_check_mode=True, @@ -367,6 +374,7 @@ def main(): backend_config = module.params.get('backend_config') backend_config_files = module.params.get('backend_config_files') init_reconfigure = module.params.get('init_reconfigure') + overwrite_init = module.params.get('overwrite_init') if bin_path is not None: command = [bin_path] @@ -383,7 +391,8 @@ def main(): APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve') if force_init: - init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths) + if overwrite_init or not os.path.isfile(os.path.join(project_path, ".terraform", "terraform.tfstate")): + init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths) workspace_ctx = get_workspace_context(command[0], project_path) if workspace_ctx["current"] != workspace: From 795125fec4d4b9875ea1c29a6ccd81c30432b4c7 Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Thu, 27 May 2021 22:34:52 +0530 Subject: [PATCH 0088/2828] xml: Add an example for absent (#2644) Element node can be deleted based upon the attribute value. 
Signed-off-by: Abhijeet Kasurde --- plugins/modules/files/xml.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/plugins/modules/files/xml.py b/plugins/modules/files/xml.py index f93c8e4dc4..e7c6ca3f1e 100644 --- a/plugins/modules/files/xml.py +++ b/plugins/modules/files/xml.py @@ -301,6 +301,23 @@ EXAMPLES = r''' - floor: Grog storage - construction_date: "1990" # Only strings are valid - building: Grog factory + +# Consider this XML for following example - +# +# +# +# part to remove +# +# +# part to keep +# +# + +- name: Delete element node based upon attribute + community.general.xml: + path: bar.xml + xpath: /config/element[@name='test1'] + state: absent ''' RETURN = r''' From 95794f31e34552628cd648d27672eadabe4154ec Mon Sep 17 00:00:00 2001 From: Merouane Atig Date: Thu, 27 May 2021 19:08:35 +0200 Subject: [PATCH 0089/2828] Fix drain example with correct wait values (#2603) --- plugins/modules/net_tools/haproxy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/net_tools/haproxy.py b/plugins/modules/net_tools/haproxy.py index 8efb59ed2e..a3320b45c5 100644 --- a/plugins/modules/net_tools/haproxy.py +++ b/plugins/modules/net_tools/haproxy.py @@ -150,7 +150,7 @@ EXAMPLES = r''' backend: www wait: yes drain: yes - wait_interval: 1 + wait_interval: 60 wait_retries: 60 - name: Disable backend server in 'www' backend pool and drop open sessions to it From 43c12b82fa9cf63d2258565f1d62d5dc0a0075ff Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Thu, 27 May 2021 22:39:26 +0530 Subject: [PATCH 0090/2828] random_string: a new lookup plugin (#2572) New lookup plugin to generate random string based upon constraints. 
Signed-off-by: Abhijeet Kasurde --- plugins/lookup/random_string.py | 220 ++++++++++++++++++ .../targets/lookup_random_string/aliases | 3 + .../targets/lookup_random_string/runme.sh | 6 + .../targets/lookup_random_string/test.yml | 48 ++++ 4 files changed, 277 insertions(+) create mode 100644 plugins/lookup/random_string.py create mode 100644 tests/integration/targets/lookup_random_string/aliases create mode 100755 tests/integration/targets/lookup_random_string/runme.sh create mode 100644 tests/integration/targets/lookup_random_string/test.yml diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py new file mode 100644 index 0000000000..6a05cfd041 --- /dev/null +++ b/plugins/lookup/random_string.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Abhijeet Kasurde +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" + name: random_string + author: + - Abhijeet Kasurde (@Akasurde) + short_description: Generates random string + version_added: '3.2.0' + description: + - Generates random string based upon the given constraints. + options: + length: + description: The length of the string. + default: 8 + type: int + upper: + description: + - Include uppercase letters in the string. + default: true + type: bool + lower: + description: + - Include lowercase letters in the string. + default: true + type: bool + numbers: + description: + - Include numbers in the string. + default: true + type: bool + special: + description: + - Include special characters in the string. + - Special characters are taken from Python standard library C(string). + See L(the documentation of string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation) + for which characters will be used. 
+ - The choice of special characters can be changed by setting I(override_special). + default: true + type: bool + min_numeric: + description: + - Minimum number of numeric characters in the string. + - If set, overrides I(numbers=false). + default: 0 + type: int + min_upper: + description: + - Minimum number of uppercase letters in the string. + - If set, overrides I(upper=false). + default: 0 + type: int + min_lower: + description: + - Minimum number of lowercase letters in the string. + - If set, overrides I(lower=false). + default: 0 + type: int + min_special: + description: + - Minimum number of special characters in the string. + default: 0 + type: int + override_special: + description: + - Override the list of special characters to use in the string. + - If set, I(min_special) should be set to a non-default value. + type: str + override_all: + description: + - Override all values of I(numbers), I(upper), I(lower), and I(special) with + the given list of characters. + type: str + base64: + description: + - Returns base64 encoded string. 
+ type: bool + default: false +""" + +EXAMPLES = r""" +- name: Generate random string + ansible.builtin.debug: + var: lookup('community.general.random_string') + # Example result: ['DeadBeeF'] + +- name: Generate random string with length 12 + ansible.builtin.debug: + var: lookup('community.general.random_string', length=12) + # Example result: ['Uan0hUiX5kVG'] + +- name: Generate base64 encoded random string + ansible.builtin.debug: + var: lookup('community.general.random_string', base64=True) + # Example result: ['NHZ6eWN5Qk0='] + +- name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (at least) + ansible.builtin.debug: + var: lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1) + # Example result: ['&Qw2|E[-'] + +- name: Generate a random string with all lower case characters + debug: + var: query('community.general.random_string', upper=false, numbers=false, special=false) + # Example result: ['exolxzyz'] + +- name: Generate random hexadecimal string + debug: + var: query('community.general.random_string', upper=false, lower=false, override_special=hex_chars, numbers=false) + vars: + hex_chars: '0123456789ABCDEF' + # Example result: ['D2A40737'] + +- name: Generate random hexadecimal string with override_all + debug: + var: query('community.general.random_string', override_all=hex_chars) + vars: + hex_chars: '0123456789ABCDEF' + # Example result: ['D2A40737'] +""" + +RETURN = r""" + _raw: + description: A one-element list containing a random string + type: list + elements: str +""" + +import base64 +import random +import string + +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase +from ansible.module_utils._text import to_bytes, to_text + + +class LookupModule(LookupBase): + @staticmethod + def get_random(random_generator, chars, length): + if not chars: + raise AnsibleLookupError( + "Available characters cannot be None, please change 
constraints" + ) + return "".join(random_generator.choice(chars) for dummy in range(length)) + + @staticmethod + def b64encode(string_value, encoding="utf-8"): + return to_text( + base64.b64encode( + to_bytes(string_value, encoding=encoding, errors="surrogate_or_strict") + ) + ) + + def run(self, terms, variables=None, **kwargs): + number_chars = string.digits + lower_chars = string.ascii_lowercase + upper_chars = string.ascii_uppercase + special_chars = string.punctuation + random_generator = random.SystemRandom() + + self.set_options(var_options=variables, direct=kwargs) + + length = self.get_option("length") + base64_flag = self.get_option("base64") + override_all = self.get_option("override_all") + values = "" + available_chars_set = "" + + if override_all: + # Override all the values + available_chars_set = override_all + else: + upper = self.get_option("upper") + lower = self.get_option("lower") + numbers = self.get_option("numbers") + special = self.get_option("special") + override_special = self.get_option("override_special") + + if override_special: + special_chars = override_special + + if upper: + available_chars_set += upper_chars + if lower: + available_chars_set += lower_chars + if numbers: + available_chars_set += number_chars + if special: + available_chars_set += special_chars + + mapping = { + "min_numeric": number_chars, + "min_lower": lower_chars, + "min_upper": upper_chars, + "min_special": special_chars, + } + + for m in mapping: + if self.get_option(m): + values += self.get_random(random_generator, mapping[m], self.get_option(m)) + + remaining_pass_len = length - len(values) + values += self.get_random(random_generator, available_chars_set, remaining_pass_len) + + # Get pseudo randomization + shuffled_values = list(values) + # Randomize the order + random.shuffle(shuffled_values) + + if base64_flag: + return [self.b64encode("".join(shuffled_values))] + + return ["".join(shuffled_values)] diff --git 
a/tests/integration/targets/lookup_random_string/aliases b/tests/integration/targets/lookup_random_string/aliases new file mode 100644 index 0000000000..bc987654d9 --- /dev/null +++ b/tests/integration/targets/lookup_random_string/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/lookup_random_string/runme.sh b/tests/integration/targets/lookup_random_string/runme.sh new file mode 100755 index 0000000000..8ed6373823 --- /dev/null +++ b/tests/integration/targets/lookup_random_string/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +set -eux + +ANSIBLE_ROLES_PATH=../ \ + ansible-playbook test.yml -v "$@" diff --git a/tests/integration/targets/lookup_random_string/test.yml b/tests/integration/targets/lookup_random_string/test.yml new file mode 100644 index 0000000000..52a572379b --- /dev/null +++ b/tests/integration/targets/lookup_random_string/test.yml @@ -0,0 +1,48 @@ +- hosts: localhost + gather_facts: no + tasks: + - name: Call plugin + set_fact: + result1: "{{ query('community.general.random_string') }}" + result2: "{{ query('community.general.random_string', length=0) }}" + result3: "{{ query('community.general.random_string', length=10) }}" + result4: "{{ query('community.general.random_string', length=-1) }}" + result5: "{{ query('community.general.random_string', override_special='_', min_special=1) }}" + result6: "{{ query('community.general.random_string', upper=false, special=false) }}" # lower case only + result7: "{{ query('community.general.random_string', lower=false) }}" # upper case only + result8: "{{ query('community.general.random_string', lower=false, upper=false, special=false) }}" # number only + result9: "{{ query('community.general.random_string', lower=false, upper=false, special=false, min_numeric=1, 
length=1) }}" # single digit only + result10: "{{ query('community.general.random_string', numbers=false, upper=false, special=false, min_lower=1, length=1) }}" # single lowercase character only + result11: "{{ query('community.general.random_string', base64=true, length=8) }}" + result12: "{{ query('community.general.random_string', upper=false, numbers=false, special=false) }}" # all lower case + result13: "{{ query('community.general.random_string', override_all='0', length=2) }}" + + - name: Raise error when impossible constraints are provided + set_fact: + impossible: "{{ query('community.general.random_string', upper=false, lower=false, special=false, numbers=false) }}" + ignore_errors: yes + register: impossible_result + + - name: Check results + assert: + that: + - result1[0] | length == 8 + - result2[0] | length == 0 + - result3[0] | length == 10 + - result4[0] | length == 0 + - result5[0] | length == 8 + - "'_' in result5[0]" + - result6[0] is lower + - result7[0] is upper + - result8[0] | regex_replace('^(\d+)$', '') == '' + - result9[0] | regex_replace('^(\d+)$', '') == '' + - result9[0] | length == 1 + - result10[0] | length == 1 + - result10[0] is lower + # if input string is not multiple of 3, base64 encoded string will be padded with = + - result11[0].endswith('=') + - result12[0] is lower + - result13[0] | length == 2 + - result13[0] == '00' + - impossible_result is failed + - "'Available characters cannot' in impossible_result.msg" From 3afcf7e75db37d4c6e24bb4ef25999d95013d4e3 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 28 May 2021 05:13:21 +1200 Subject: [PATCH 0091/2828] minor refactors on plugins/modules/cloud/misc (#2557) * minor refactors on plugins/modules/cloud/misc * added changelog fragment * removed unreachable statement * Update plugins/modules/cloud/misc/terraform.py Co-authored-by: Felix Fontein * Update plugins/modules/cloud/misc/rhevm.py Co-authored-by: Felix Fontein * adjusted 
per PR comment Co-authored-by: Felix Fontein --- .../fragments/2557-cloud-misc-refactor.yml | 7 +++++ .../cloud/misc/cloud_init_data_facts.py | 4 +-- .../modules/cloud/misc/proxmox_group_info.py | 2 +- plugins/modules/cloud/misc/proxmox_kvm.py | 31 +++++++++---------- plugins/modules/cloud/misc/rhevm.py | 4 +-- plugins/modules/cloud/misc/serverless.py | 11 +++---- plugins/modules/cloud/misc/terraform.py | 2 +- 7 files changed, 32 insertions(+), 29 deletions(-) create mode 100644 changelogs/fragments/2557-cloud-misc-refactor.yml diff --git a/changelogs/fragments/2557-cloud-misc-refactor.yml b/changelogs/fragments/2557-cloud-misc-refactor.yml new file mode 100644 index 0000000000..82e56dc942 --- /dev/null +++ b/changelogs/fragments/2557-cloud-misc-refactor.yml @@ -0,0 +1,7 @@ +minor_changes: + - cloud_init_data_facts - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). + - proxmox_group_info - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). + - proxmox_kvm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). + - rhevm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). + - serverless - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). + - terraform - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). 
diff --git a/plugins/modules/cloud/misc/cloud_init_data_facts.py b/plugins/modules/cloud/misc/cloud_init_data_facts.py index 2efb90cfeb..5774fa6f39 100644 --- a/plugins/modules/cloud/misc/cloud_init_data_facts.py +++ b/plugins/modules/cloud/misc/cloud_init_data_facts.py @@ -88,7 +88,7 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -CLOUD_INIT_PATH = "/var/lib/cloud/data/" +CLOUD_INIT_PATH = "/var/lib/cloud/data" def gather_cloud_init_data_facts(module): @@ -100,7 +100,7 @@ def gather_cloud_init_data_facts(module): filter = module.params.get('filter') if filter is None or filter == i: res['cloud_init_data_facts'][i] = dict() - json_file = CLOUD_INIT_PATH + i + '.json' + json_file = os.path.join(CLOUD_INIT_PATH, i + '.json') if os.path.exists(json_file): f = open(json_file, 'rb') diff --git a/plugins/modules/cloud/misc/proxmox_group_info.py b/plugins/modules/cloud/misc/proxmox_group_info.py index bf88659656..3d60e7e214 100644 --- a/plugins/modules/cloud/misc/proxmox_group_info.py +++ b/plugins/modules/cloud/misc/proxmox_group_info.py @@ -95,7 +95,7 @@ class ProxmoxGroup: self.group = dict() # Data representation is not the same depending on API calls for k, v in group.items(): - if k == 'users' and type(v) == str: + if k == 'users' and isinstance(v, str): self.group['users'] = v.split(',') elif k == 'members': self.group['users'] = group['members'] diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 2dcb1ab573..0ad75a45bd 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -808,23 +808,23 @@ def get_vminfo(module, proxmox, node, vmid, **kwargs): # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) - # Convert all dict in kwargs to elements. 
For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] + # Convert all dict in kwargs to elements. + # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] for k in list(kwargs.keys()): if isinstance(kwargs[k], dict): kwargs.update(kwargs[k]) del kwargs[k] # Split information by type + re_net = re.compile(r'net[0-9]') + re_dev = re.compile(r'(virtio|ide|scsi|sata)[0-9]') for k, v in kwargs.items(): - if re.match(r'net[0-9]', k) is not None: + if re_net.match(k): interface = k k = vm[k] k = re.search('=(.*?),', k).group(1) mac[interface] = k - if (re.match(r'virtio[0-9]', k) is not None or - re.match(r'ide[0-9]', k) is not None or - re.match(r'scsi[0-9]', k) is not None or - re.match(r'sata[0-9]', k) is not None): + elif re_dev.match(k): device = k k = vm[k] k = re.search('(.*?),', k).group(1) @@ -835,16 +835,13 @@ def get_vminfo(module, proxmox, node, vmid, **kwargs): results['vmid'] = int(vmid) -def settings(module, proxmox, vmid, node, name, **kwargs): +def settings(proxmox, vmid, node, **kwargs): proxmox_node = proxmox.nodes(node) # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) - if proxmox_node.qemu(vmid).config.set(**kwargs) is None: - return True - else: - return False + return proxmox_node.qemu(vmid).config.set(**kwargs) is None def wait_for_task(module, proxmox, node, taskid): @@ -915,7 +912,8 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock if 'pool' in kwargs: del kwargs['pool'] - # Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n] + # Convert all dict in kwargs to elements. 
+ # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n] for k in list(kwargs.keys()): if isinstance(kwargs[k], dict): kwargs.update(kwargs[k]) @@ -938,8 +936,9 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock # VM tags are expected to be valid and presented as a comma/semi-colon delimited string if 'tags' in kwargs: + re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$') for tag in kwargs['tags']: - if not re.match(r'^[a-z0-9_][a-z0-9_\-\+\.]*$', tag): + if not re_tag.match(tag): module.fail_json(msg='%s is not a valid tag' % tag) kwargs['tags'] = ",".join(kwargs['tags']) @@ -971,7 +970,7 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock if not wait_for_task(module, proxmox, node, taskid): module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) + proxmox_node.tasks(taskid).log.get()[:1]) return False return True @@ -1209,14 +1208,14 @@ def main(): if delete is not None: try: - settings(module, proxmox, vmid, node, name, delete=delete) + settings(proxmox, vmid, node, delete=delete) module.exit_json(changed=True, vmid=vmid, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid)) except Exception as e: module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e)) if revert is not None: try: - settings(module, proxmox, vmid, node, name, revert=revert) + settings(proxmox, vmid, node, revert=revert) module.exit_json(changed=True, vmid=vmid, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid)) except Exception as e: module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... 
'.format(name, vmid) + str(e)) diff --git a/plugins/modules/cloud/misc/rhevm.py b/plugins/modules/cloud/misc/rhevm.py index cc6c1252bf..77b40248b3 100644 --- a/plugins/modules/cloud/misc/rhevm.py +++ b/plugins/modules/cloud/misc/rhevm.py @@ -547,7 +547,7 @@ class RHEVConn(object): def set_Memory_Policy(self, name, memory_policy): VM = self.get_VM(name) - VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024) + VM.memory_policy.guaranteed = int(memory_policy) * 1024 * 1024 * 1024 try: VM.update() setMsg("The memory policy has been updated.") @@ -1260,7 +1260,7 @@ def core(module): r = RHEV(module) - state = module.params.get('state', 'present') + state = module.params.get('state') if state == 'ping': r.test() diff --git a/plugins/modules/cloud/misc/serverless.py b/plugins/modules/cloud/misc/serverless.py index 912d4226a8..1b2f8b62a6 100644 --- a/plugins/modules/cloud/misc/serverless.py +++ b/plugins/modules/cloud/misc/serverless.py @@ -139,16 +139,14 @@ from ansible.module_utils.basic import AnsibleModule def read_serverless_config(module): path = module.params.get('service_path') + full_path = os.path.join(path, 'serverless.yml') try: - with open(os.path.join(path, 'serverless.yml')) as sls_config: + with open(full_path) as sls_config: config = yaml.safe_load(sls_config.read()) return config except IOError as e: - module.fail_json(msg="Could not open serverless.yml in {0}. err: {1}".format(path, str(e))) - - module.fail_json(msg="Failed to open serverless config at {0}".format( - os.path.join(path, 'serverless.yml'))) + module.fail_json(msg="Could not open serverless.yml in {0}. 
err: {1}".format(full_path, str(e))) def get_service_name(module, stage): @@ -182,7 +180,6 @@ def main(): service_path = module.params.get('service_path') state = module.params.get('state') - functions = module.params.get('functions') region = module.params.get('region') stage = module.params.get('stage') deploy = module.params.get('deploy', True) @@ -193,7 +190,7 @@ def main(): if serverless_bin_path is not None: command = serverless_bin_path + " " else: - command = "serverless " + command = module.get_bin_path("serverless") + " " if state == 'present': command += 'deploy ' diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py index 9bf36c8c81..8a34f9699b 100644 --- a/plugins/modules/cloud/misc/terraform.py +++ b/plugins/modules/cloud/misc/terraform.py @@ -233,7 +233,7 @@ def get_version(bin_path): def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None): - if project_path in [None, ''] or '/' not in project_path: + if project_path is None or '/' not in project_path: module.fail_json(msg="Path for Terraform project can not be None or ''.") if not os.path.exists(bin_path): module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path)) From 14f13904d63dcffab2069b5be69ebe46a2945fef Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 27 May 2021 22:59:42 +0200 Subject: [PATCH 0092/2828] Add extra docs tests (#2663) * Add extra docs tests. * Linting. * Fix copy'n'paste error. 
--- tests/sanity/extra/extra-docs.json | 10 ++++++++++ tests/sanity/extra/extra-docs.py | 23 +++++++++++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 tests/sanity/extra/extra-docs.json create mode 100755 tests/sanity/extra/extra-docs.py diff --git a/tests/sanity/extra/extra-docs.json b/tests/sanity/extra/extra-docs.json new file mode 100644 index 0000000000..a62ef37e63 --- /dev/null +++ b/tests/sanity/extra/extra-docs.json @@ -0,0 +1,10 @@ +{ + "include_symlinks": false, + "prefixes": [ + "docs/docsite/" + ], + "output": "path-line-column-message", + "requirements": [ + "antsibull" + ] +} diff --git a/tests/sanity/extra/extra-docs.py b/tests/sanity/extra/extra-docs.py new file mode 100755 index 0000000000..f4b7f59d3c --- /dev/null +++ b/tests/sanity/extra/extra-docs.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Check extra collection docs with antsibull-lint.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys +import subprocess + + +def main(): + """Main entry point.""" + if not os.path.isdir(os.path.join('docs', 'docsite')): + return + p = subprocess.run(['antsibull-lint', 'collection-docs', '.'], check=False) + if p.returncode not in (0, 3): + print('{0}:0:0: unexpected return code {1}'.format(sys.argv[0], p.returncode)) + + +if __name__ == '__main__': + main() From 14813a6287af016d2b1823ce8e29bc2cc1dd10e5 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 28 May 2021 07:09:57 +0200 Subject: [PATCH 0093/2828] Stop mentioning Freenode. We're on Libera.chat. 
(#2666) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index e6e4eb880e..a874a3e929 100644 --- a/README.md +++ b/README.md @@ -60,10 +60,10 @@ See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collectio We have a dedicated Working Group for Ansible development. -You can find other people interested on the following Freenode IRC channels - +You can find other people interested on the following [Libera.chat](https://libera.chat/) IRC channels - - `#ansible` - For general use questions and support. -- `#ansible-devel` - For discussions on developer topics and code related to features or bugs. -- `#ansible-community` - For discussions on community topics and community meetings. +- `#ansible-devel` - For discussions on developer topics and code related to features or bugs in ansible-core. +- `#ansible-community` - For discussions on community topics and community meetings, and for general development questions for community collections. For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community). 
From c3cab7c68c4eb0d0f80d49356c8be52d0bb849ef Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Fri, 28 May 2021 15:19:29 +0430 Subject: [PATCH 0094/2828] composer: add composer_executable (#2650) * composer: add composer_executable * Add changelog * Improve documentation thanks to felixfontein Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2650-composer-add_composer_executable.yml | 3 +++ plugins/modules/packaging/language/composer.py | 14 ++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2650-composer-add_composer_executable.yml diff --git a/changelogs/fragments/2650-composer-add_composer_executable.yml b/changelogs/fragments/2650-composer-add_composer_executable.yml new file mode 100644 index 0000000000..b1cccc689c --- /dev/null +++ b/changelogs/fragments/2650-composer-add_composer_executable.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - composer - add ``composer_executable`` option (https://github.com/ansible-collections/community.general/issues/2649). diff --git a/plugins/modules/packaging/language/composer.py b/plugins/modules/packaging/language/composer.py index 64157cb685..86fe7bdea3 100644 --- a/plugins/modules/packaging/language/composer.py +++ b/plugins/modules/packaging/language/composer.py @@ -117,9 +117,14 @@ options: default: false type: bool aliases: [ ignore-platform-reqs ] + composer_executable: + type: path + description: + - Path to composer executable on the remote host, if composer is not in C(PATH) or a custom composer is needed. + version_added: 3.2.0 requirements: - php - - composer installed in bin path (recommended /usr/local/bin) + - composer installed in bin path (recommended /usr/local/bin) or specified in I(composer_executable) notes: - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available. - We received reports about issues on macOS if composer was installed by Homebrew. 
Please use the official install method to avoid issues. @@ -187,7 +192,11 @@ def composer_command(module, command, arguments="", options=None, global_command else: php_path = module.params['executable'] - composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) + if module.params['composer_executable'] is None: + composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) + else: + composer_path = module.params['composer_executable'] + cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments) return module.run_command(cmd) @@ -231,6 +240,7 @@ def main(): ignore_platform_reqs=dict( default=False, type="bool", aliases=["ignore-platform-reqs"], deprecated_aliases=[dict(name='ignore-platform-reqs', version='5.0.0', collection_name='community.general')]), + composer_executable=dict(type="path"), ), required_if=[('global_command', False, ['working_dir'])], supports_check_mode=True From b281d3d699433a0e0dda7d6db01d22855a2a4cd5 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sat, 29 May 2021 03:00:12 -0400 Subject: [PATCH 0095/2828] proxmox_kvm - Fixed vmid result when VM with name exists (#2648) * Fixed vmid result when VM with name exists * Adding changelog fragment --- changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml | 2 ++ plugins/modules/cloud/misc/proxmox_kvm.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml diff --git a/changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml b/changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml new file mode 100644 index 0000000000..7971fc24eb --- /dev/null +++ b/changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml @@ -0,0 +1,2 @@ +bugfixes: + - proxmox_kvm - fixed ``vmid`` return value when VM with ``name`` already exists (https://github.com/ansible-collections/community.general/issues/2648). 
diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 0ad75a45bd..a664279e57 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -1225,7 +1225,7 @@ def main(): if get_vm(proxmox, vmid) and not (update or clone): module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid) elif get_vmid(proxmox, name) and not (update or clone): - module.exit_json(changed=False, vmid=vmid, msg="VM with name <%s> already exists" % name) + module.exit_json(changed=False, vmid=get_vmid(proxmox, name)[0], msg="VM with name <%s> already exists" % name) elif not (node, name): module.fail_json(msg='node, name is mandatory for creating/updating vm') elif not node_check(proxmox, node): From f09c39b71e84eb15481a9c2b4fd08beabfb17cff Mon Sep 17 00:00:00 2001 From: quidame Date: Sat, 29 May 2021 10:50:24 +0200 Subject: [PATCH 0096/2828] iptables_state: fix broken query of `async_status` result (#2671) * use get() rather than querying the key directly * add a changelog fragment * re-enable CI tests * Update changelog fragment Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2671-fix-broken-query-of-async_status-result.yml | 6 ++++++ plugins/action/system/iptables_state.py | 2 +- tests/integration/targets/iptables_state/aliases | 1 - 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2671-fix-broken-query-of-async_status-result.yml diff --git a/changelogs/fragments/2671-fix-broken-query-of-async_status-result.yml b/changelogs/fragments/2671-fix-broken-query-of-async_status-result.yml new file mode 100644 index 0000000000..993caaa323 --- /dev/null +++ b/changelogs/fragments/2671-fix-broken-query-of-async_status-result.yml @@ -0,0 +1,6 @@ +--- +bugfixes: + - "iptables_state - fix a broken query of ``async_status`` result + with current ansible-core development version + 
(https://github.com/ansible-collections/community.general/issues/2627, + https://github.com/ansible-collections/community.general/pull/2671)." diff --git a/plugins/action/system/iptables_state.py b/plugins/action/system/iptables_state.py index 96b6dc689c..887f3f47f9 100644 --- a/plugins/action/system/iptables_state.py +++ b/plugins/action/system/iptables_state.py @@ -52,7 +52,7 @@ class ActionModule(ActionBase): module_args=module_args, task_vars=task_vars, wrap_async=False) - if async_result['finished'] == 1: + if async_result.get('finished', 0) == 1: break time.sleep(min(1, timeout)) diff --git a/tests/integration/targets/iptables_state/aliases b/tests/integration/targets/iptables_state/aliases index 12765cec47..3cac4af522 100644 --- a/tests/integration/targets/iptables_state/aliases +++ b/tests/integration/targets/iptables_state/aliases @@ -5,4 +5,3 @@ skip/freebsd # no iptables/netfilter (Linux specific) skip/osx # no iptables/netfilter (Linux specific) skip/macos # no iptables/netfilter (Linux specific) skip/aix # no iptables/netfilter (Linux specific) -disabled # FIXME From bef3c04d1c7dfaf87d00f3777dd844c1e09e2a99 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 30 May 2021 02:48:59 +1200 Subject: [PATCH 0097/2828] Fixed sanity checks for cloud/online/ modules (#2677) * fixed validation-modules for plugins/modules/cloud/online/online_server_info.py * fixed validation-modules for plugins/modules/cloud/online/online_user_info.py * sanity fix --- plugins/modules/cloud/online/online_server_info.py | 8 +++++--- plugins/modules/cloud/online/online_user_info.py | 9 ++++----- tests/sanity/ignore-2.10.txt | 2 -- tests/sanity/ignore-2.11.txt | 2 -- tests/sanity/ignore-2.12.txt | 2 -- tests/sanity/ignore-2.9.txt | 2 -- 6 files changed, 9 insertions(+), 16 deletions(-) diff --git a/plugins/modules/cloud/online/online_server_info.py b/plugins/modules/cloud/online/online_server_info.py index f0e73aea16..f33a44d30f 100644 
--- a/plugins/modules/cloud/online/online_server_info.py +++ b/plugins/modules/cloud/online/online_server_info.py @@ -32,11 +32,13 @@ EXAMPLES = r''' ''' RETURN = r''' ---- online_server_info: - description: Response from Online API + description: + - Response from Online API. + - "For more details please refer to: U(https://console.online.net/en/api/)." returned: success - type: complex + type: list + elements: dict sample: "online_server_info": [ { diff --git a/plugins/modules/cloud/online/online_user_info.py b/plugins/modules/cloud/online/online_user_info.py index 093a2c687f..4125ccb63d 100644 --- a/plugins/modules/cloud/online/online_user_info.py +++ b/plugins/modules/cloud/online/online_user_info.py @@ -7,7 +7,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = r''' ---- module: online_user_info short_description: Gather information about Online user. description: @@ -16,7 +15,6 @@ author: - "Remy Leone (@sieben)" extends_documentation_fragment: - community.general.online - ''' EXAMPLES = r''' @@ -29,11 +27,12 @@ EXAMPLES = r''' ''' RETURN = r''' ---- online_user_info: - description: Response from Online API + description: + - Response from Online API. + - "For more details please refer to: U(https://console.online.net/en/api/)." 
returned: success - type: complex + type: dict sample: "online_user_info": { "company": "foobar LLC", diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index da611904bb..16c94a2c09 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -4,8 +4,6 @@ plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index a7d85904ae..db731736c0 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -3,8 +3,6 @@ plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 
cf5d588e9a..de3634ae40 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -3,8 +3,6 @@ plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 32e13b1a1e..9cb31a442d 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -3,8 +3,6 @@ plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen -plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error plugins/modules/cloud/rackspace/rax.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values From b6c0cc0b610e8a23d4b8c7353475fce0f4315947 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Mon, 31 May 2021 01:51:29 -0400 Subject: [PATCH 0098/2828] archive - Adding exclusion_patterns option (#2616) * Adding exclusion_patterns option * Adding changelog fragment and Python 2.6 
compatability * Minor refactoring for readability * Removing unneccessary conditional * Applying initial review suggestions * Adding missed review suggestion --- ...2616-archive-exclusion_patterns-option.yml | 2 + plugins/modules/files/archive.py | 105 ++++++++++++++---- .../targets/archive/tasks/main.yml | 13 +++ 3 files changed, 100 insertions(+), 20 deletions(-) create mode 100644 changelogs/fragments/2616-archive-exclusion_patterns-option.yml diff --git a/changelogs/fragments/2616-archive-exclusion_patterns-option.yml b/changelogs/fragments/2616-archive-exclusion_patterns-option.yml new file mode 100644 index 0000000000..86ef806b63 --- /dev/null +++ b/changelogs/fragments/2616-archive-exclusion_patterns-option.yml @@ -0,0 +1,2 @@ +minor_changes: + - archive - added ``exclusion_patterns`` option to exclude files or subdirectories from archives (https://github.com/ansible-collections/community.general/pull/2616). diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 8b8088dae1..8d4afa58a5 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -41,8 +41,16 @@ options: exclude_path: description: - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion. + - Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list. type: list elements: path + exclusion_patterns: + description: + - Glob style patterns to exclude files or directories from the resulting archive. + - This differs from I(exclude_path) which applies only to the source paths from I(path). + type: list + elements: path + version_added: 3.2.0 force_archive: description: - Allows you to force the module to treat this as an archive even if only a single file is specified. 
@@ -163,6 +171,8 @@ import re import shutil import tarfile import zipfile +from fnmatch import fnmatch +from sys import version_info from traceback import format_exc from ansible.module_utils.basic import AnsibleModule, missing_required_lib @@ -186,6 +196,8 @@ else: LZMA_IMP_ERR = format_exc() HAS_LZMA = False +PY27 = version_info[0:2] >= (2, 7) + def to_b(s): return to_bytes(s, errors='surrogate_or_strict') @@ -214,6 +226,59 @@ def expand_paths(paths): return expanded_path, is_globby +def matches_exclusion_patterns(path, exclusion_patterns): + return any(fnmatch(path, p) for p in exclusion_patterns) + + +def get_filter(exclusion_patterns, format): + def zip_filter(path): + return matches_exclusion_patterns(path, exclusion_patterns) + + def tar_filter(tarinfo): + return None if matches_exclusion_patterns(tarinfo.name, exclusion_patterns) else tarinfo + + return zip_filter if format == 'zip' or not PY27 else tar_filter + + +def get_archive_contains(format): + def archive_contains(archive, name): + try: + if format == 'zip': + archive.getinfo(name) + else: + archive.getmember(name) + except KeyError: + return False + + return True + + return archive_contains + + +def get_add_to_archive(format, filter): + def add_to_zip_archive(archive_file, path, archive_name): + try: + if not filter(path): + archive_file.write(path, archive_name) + except Exception as e: + return e + + return None + + def add_to_tar_archive(archive_file, path, archive_name): + try: + if PY27: + archive_file.add(path, archive_name, recursive=False, filter=filter) + else: + archive_file.add(path, archive_name, recursive=False, exclude=filter) + except Exception as e: + return e + + return None + + return add_to_zip_archive if format == 'zip' else add_to_tar_archive + + def main(): module = AnsibleModule( argument_spec=dict( @@ -221,6 +286,7 @@ def main(): format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), dest=dict(type='path'), exclude_path=dict(type='list', 
elements='path'), + exclusion_patterns=dict(type='list', elements='path'), force_archive=dict(type='bool', default=False), remove=dict(type='bool', default=False), ), @@ -242,6 +308,8 @@ def main(): changed = False state = 'absent' + exclusion_patterns = params['exclusion_patterns'] or [] + # Simple or archive file compression (inapplicable with 'zip' since it's always an archive) b_successes = [] @@ -262,6 +330,10 @@ def main(): # Only attempt to expand the exclude paths if it exists b_expanded_exclude_paths = expand_paths(exclude_paths)[0] if exclude_paths else [] + filter = get_filter(exclusion_patterns, fmt) + archive_contains = get_archive_contains(fmt) + add_to_archive = get_add_to_archive(fmt, filter) + # Only try to determine if we are working with an archive or not if we haven't set archive to true if not force_archive: # If we actually matched multiple files or TRIED to, then @@ -384,38 +456,31 @@ def main(): n_fullpath = to_na(b_fullpath) n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict') - try: - if fmt == 'zip': - arcfile.write(n_fullpath, n_arcname) - else: - arcfile.add(n_fullpath, n_arcname, recursive=False) - - except Exception as e: - errors.append('%s: %s' % (n_fullpath, to_native(e))) + err = add_to_archive(arcfile, n_fullpath, n_arcname) + if err: + errors.append('%s: %s' % (n_fullpath, to_native(err))) for b_filename in b_filenames: b_fullpath = b_dirpath + b_filename n_fullpath = to_na(b_fullpath) n_arcname = to_n(b_match_root.sub(b'', b_fullpath)) - try: - if fmt == 'zip': - arcfile.write(n_fullpath, n_arcname) - else: - arcfile.add(n_fullpath, n_arcname, recursive=False) + err = add_to_archive(arcfile, n_fullpath, n_arcname) + if err: + errors.append('Adding %s: %s' % (to_native(b_path), to_native(err))) + if archive_contains(arcfile, n_arcname): b_successes.append(b_fullpath) - except Exception as e: - errors.append('Adding %s: %s' % (to_native(b_path), to_native(e))) else: path = to_na(b_path) arcname = 
to_n(b_match_root.sub(b'', b_path)) - if fmt == 'zip': - arcfile.write(path, arcname) - else: - arcfile.add(path, arcname, recursive=False) - b_successes.append(b_path) + err = add_to_archive(arcfile, path, arcname) + if err: + errors.append('Adding %s: %s' % (to_native(b_path), to_native(err))) + + if archive_contains(arcfile, arcname): + b_successes.append(b_path) except Exception as e: expanded_fmt = 'zip' if fmt == 'zip' else ('tar.' + fmt) diff --git a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml index 2267268715..761f9eb7b8 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -363,6 +363,19 @@ - name: remove nonascii test file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.zip" state=absent +- name: Test exclusion_patterns option + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/test-archive-exclustion-patterns.tgz" + exclusion_patterns: b?r.* + register: exclusion_patterns_result + +- name: Assert that exclusion_patterns only archives included files + assert: + that: + - exclusion_patterns_result is changed + - "'bar.txt' not in exclusion_patterns_result.archived" + - name: Remove backports.lzma if previously installed (pip) pip: name=backports.lzma state=absent when: backports_lzma_pip is changed From 3516acf8d402de721887cd10e16293d747fbb29e Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 1 Jun 2021 19:03:07 +0200 Subject: [PATCH 0099/2828] Add filter docs (#2680) * Began with filter docs. * Add more filters. * Add time unit filters. * Add TOC and filters to create identifiers. * Add more filters. * Add documentation from ansible/ansible for json_query and random_mac. 
* Update docs/docsite/rst/filter_guide.rst Co-authored-by: Abhijeet Kasurde Co-authored-by: Abhijeet Kasurde --- docs/docsite/extra-docs.yml | 5 + docs/docsite/rst/filter_guide.rst | 753 ++++++++++++++++++++++++++++++ 2 files changed, 758 insertions(+) create mode 100644 docs/docsite/extra-docs.yml create mode 100644 docs/docsite/rst/filter_guide.rst diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml new file mode 100644 index 0000000000..22ae7b58f5 --- /dev/null +++ b/docs/docsite/extra-docs.yml @@ -0,0 +1,5 @@ +--- +sections: + - title: Guides + toctree: + - filter_guide diff --git a/docs/docsite/rst/filter_guide.rst b/docs/docsite/rst/filter_guide.rst new file mode 100644 index 0000000000..201b275aae --- /dev/null +++ b/docs/docsite/rst/filter_guide.rst @@ -0,0 +1,753 @@ +.. _ansible_collections.community.general.docsite.filter_guide: + +community.general Filter Guide +============================== + +The :ref:`community.general collection ` offers several useful filter plugins. + +.. contents:: Topics + +Paths +----- + +The ``path_join`` filter has been added in ansible-base 2.10. If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``. This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9. + +.. code-block:: yaml+jinja + + # ansible-base 2.10 or newer: + path: {{ ('/etc', path, 'subdir', file) | path_join }} + + # Also works with Ansible 2.9: + path: {{ ('/etc', path, 'subdir', file) | community.general.path_join }} + +.. versionadded:: 3.0.0 + +Abstract transformations +------------------------ + +Dictionaries +^^^^^^^^^^^^ + +You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``: + +.. 
code-block:: yaml+jinja + + - name: Create a single-entry dictionary + debug: + msg: "{{ myvar | community.general.dict_kv('thatsmyvar') }}" + vars: + myvar: myvalue + + - name: Create a list of dictionaries where the 'server' field is taken from a list + debug: + msg: >- + {{ myservers | map('community.general.dict_kv', 'server') + | map('combine', common_config) }} + vars: + common_config: + type: host + database: all + myservers: + - server1 + - server2 + +This produces: + +.. code-block:: ansible-output + + TASK [Create a single-entry dictionary] ************************************************** + ok: [localhost] => { + "msg": { + "thatsmyvar": "myvalue" + } + } + + TASK [Create a list of dictionaries where the 'server' field is taken from a list] ******* + ok: [localhost] => { + "msg": [ + { + "database": "all", + "server": "server1", + "type": "host" + }, + { + "database": "all", + "server": "server2", + "type": "host" + } + ] + } + +.. versionadded:: 2.0.0 + +If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the ``community.general.dict`` filter can be used: + +.. code-block:: yaml+jinja + + - name: Create a dictionary with the dict function + debug: + msg: "{{ dict([[1, 2], ['a', 'b']]) }}" + + - name: Create a dictionary with the community.general.dict filter + debug: + msg: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}" + + - name: Create a list of dictionaries with map and the community.general.dict filter + debug: + msg: >- + {{ values | map('zip', ['k1', 'k2', 'k3']) + | map('map', 'reverse') + | map('community.general.dict') }} + vars: + values: + - - foo + - 23 + - a + - - bar + - 42 + - b + +This produces: + +.. 
code-block:: ansible-output + + TASK [Create a dictionary with the dict function] **************************************** + ok: [localhost] => { + "msg": { + "1": 2, + "a": "b" + } + } + + TASK [Create a dictionary with the community.general.dict filter] ************************ + ok: [localhost] => { + "msg": { + "1": 2, + "a": "b" + } + } + + TASK [Create a list of dictionaries with map and the community.general.dict filter] ****** + ok: [localhost] => { + "msg": [ + { + "k1": "foo", + "k2": 23, + "k3": "a" + }, + { + "k1": "bar", + "k2": 42, + "k3": "b" + } + ] + } + +.. versionadded:: 3.0.0 + +Grouping +^^^^^^^^ + +If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be a most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary. + +One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information: + +.. code-block:: yaml+jinja + + - name: Output mount facts grouped by device name + debug: + var: ansible_facts.mounts | community.general.groupby_as_dict('device') + + - name: Output mount facts grouped by mount point + debug: + var: ansible_facts.mounts | community.general.groupby_as_dict('mount') + +This produces: + +.. 
code-block:: ansible-output + + TASK [Output mount facts grouped by device name] ****************************************** + ok: [localhost] => { + "ansible_facts.mounts | community.general.groupby_as_dict('device')": { + "/dev/sda1": { + "block_available": 2000, + "block_size": 4096, + "block_total": 2345, + "block_used": 345, + "device": "/dev/sda1", + "fstype": "ext4", + "inode_available": 500, + "inode_total": 512, + "inode_used": 12, + "mount": "/boot", + "options": "rw,relatime,data=ordered", + "size_available": 56821, + "size_total": 543210, + "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a" + }, + "/dev/sda2": { + "block_available": 1234, + "block_size": 4096, + "block_total": 12345, + "block_used": 11111, + "device": "/dev/sda2", + "fstype": "ext4", + "inode_available": 1111, + "inode_total": 1234, + "inode_used": 123, + "mount": "/", + "options": "rw,relatime", + "size_available": 42143, + "size_total": 543210, + "uuid": "abcdef01-2345-6789-0abc-def012345678" + } + } + } + + TASK [Output mount facts grouped by mount point] ****************************************** + ok: [localhost] => { + "ansible_facts.mounts | community.general.groupby_as_dict('mount')": { + "/": { + "block_available": 1234, + "block_size": 4096, + "block_total": 12345, + "block_used": 11111, + "device": "/dev/sda2", + "fstype": "ext4", + "inode_available": 1111, + "inode_total": 1234, + "inode_used": 123, + "mount": "/", + "options": "rw,relatime", + "size_available": 42143, + "size_total": 543210, + "uuid": "bdf50b7d-4859-40af-8665-c637ee7a7808" + }, + "/boot": { + "block_available": 2000, + "block_size": 4096, + "block_total": 2345, + "block_used": 345, + "device": "/dev/sda1", + "fstype": "ext4", + "inode_available": 500, + "inode_total": 512, + "inode_used": 12, + "mount": "/boot", + "options": "rw,relatime,data=ordered", + "size_available": 56821, + "size_total": 543210, + "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a" + } + } + } + +.. 
versionadded: 3.0.0 + +Merging lists of dictionaries +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you have two lists of dictionaries and want to combine them into a list of merged dictionaries, where two dictionaries are merged if they coincide in one attribute, you can use the ``lists_mergeby`` filter. + +.. code-block:: yaml+jinja + + - name: Merge two lists by common attribute 'name' + debug: + var: list1 | community.general.lists_mergeby(list2, 'name') + vars: + list1: + - name: foo + extra: true + - name: bar + extra: false + - name: meh + extra: true + list2: + - name: foo + path: /foo + - name: baz + path: /bazzz + +This produces: + +.. code-block:: ansible-output + + TASK [Merge two lists by common attribute 'name'] **************************************** + ok: [localhost] => { + "list1 | community.general.lists_mergeby(list2, 'name')": [ + { + "extra": false, + "name": "bar" + }, + { + "name": "baz", + "path": "/bazzz" + }, + { + "extra": true, + "name": "foo", + "path": "/foo" + }, + { + "extra": true, + "name": "meh" + } + ] + } + +.. versionadded: 2.0.0 + +Working with times +------------------ + +The ``to_time_unit`` filter allows to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds. + +There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used: + +.. 
list-table:: Units + :widths: 25 25 25 25 + :header-rows: 1 + + * - Unit name + - Unit value in seconds + - Unit strings for filter + - Shorthand filter + * - Millisecond + - 1/1000 second + - ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds`` + - ``to_milliseconds`` + * - Second + - 1 second + - ``s``, ``sec``, ``secs``, ``second``, ``seconds`` + - ``to_seconds`` + * - Minute + - 60 seconds + - ``m``, ``min``, ``mins``, ``minute``, ``minutes`` + - ``to_minutes`` + * - Hour + - 60*60 seconds + - ``h``, ``hour``, ``hours`` + - ``to_hours`` + * - Day + - 24*60*60 seconds + - ``d``, ``day``, ``days`` + - ``to_days`` + * - Week + - 7*24*60*60 seconds + - ``w``, ``week``, ``weeks`` + - ``to_weeks`` + * - Month + - 30*24*60*60 seconds + - ``mo``, ``month``, ``months`` + - ``to_months`` + * - Year + - 365*24*60*60 seconds + - ``y``, ``year``, ``years`` + - ``to_years`` + +Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to ``to_time_unit`` and to all shorthand filters. + +.. code-block:: yaml+jinja + + - name: Convert string to seconds + debug: + msg: "{{ '30h 20m 10s 123ms' | community.general.to_time_unit('seconds') }}" + + - name: Convert string to hours + debug: + msg: "{{ '30h 20m 10s 123ms' | community.general.to_hours }}" + + - name: Convert string to years (using 365.25 days == 1 year) + debug: + msg: "{{ '400d 15h' | community.general.to_years(year=365.25) }}" + +This produces: + +.. 
code-block:: ansible-output + + TASK [Convert string to seconds] ********************************************************** + ok: [localhost] => { + "msg": "109210.123" + } + + TASK [Convert string to hours] ************************************************************ + ok: [localhost] => { + "msg": "30.336145277778" + } + + TASK [Convert string to years (using 365.25 days == 1 year)] ****************************** + ok: [localhost] => { + "msg": "1.096851471595" + } + +.. versionadded: 0.2.0 + +Working with versions +--------------------- + +If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter: + +.. code-block:: yaml+jinja + + - name: Sort list by version number + debug: + var: ansible_versions | community.general.version_sort + vars: + ansible_versions: + - '2.8.0' + - '2.11.0' + - '2.7.0' + - '2.10.0' + - '2.9.0' + +This produces: + +.. code-block:: ansible-output + + TASK [Sort list by version number] ******************************************************** + ok: [localhost] => { + "ansible_versions | community.general.version_sort": [ + "2.7.0", + "2.8.0", + "2.9.0", + "2.10.0", + "2.11.0" + ] + } + +.. versionadded: 2.2.0 + +Creating identifiers +-------------------- + +The following filters allow to create identifiers. + +Hashids +^^^^^^^ + +`Hashids `_ allow to convert sequences of integers to short unique string identifiers. This filter needs the `hashids Python library `_ installed on the controller. + +.. code-block:: yaml+jinja + + - name: "Create hashid" + debug: + msg: "{{ [1234, 5, 6] | community.general.hashids_encode }}" + + - name: "Decode hashid" + debug: + msg: "{{ 'jm2Cytn' | community.general.hashids_decode }}" + +This produces: + +.. 
code-block:: ansible-output + + TASK [Create hashid] ********************************************************************** + ok: [localhost] => { + "msg": "jm2Cytn" + } + + TASK [Decode hashid] ********************************************************************** + ok: [localhost] => { + "msg": [ + 1234, + 5, + 6 + ] + } + +The hashids filters accept keyword arguments to allow fine-tuning the hashids generated: + +:salt: String to use as salt when hashing. +:alphabet: String of 16 or more unique characters to produce a hash. +:min_length: Minimum length of hash produced. + +.. versionadded: 3.0.0 + +Random MACs +^^^^^^^^^^^ + +You can use the ``random_mac`` filter to complete a partial `MAC address `_ to a random 6-byte MAC address. + +.. code-block:: yaml+jinja + + - name: "Create a random MAC starting with ff:" + debug: + msg: "{{ 'FF' | community.general.random_mac }}" + + - name: "Create a random MAC starting with 00:11:22:" + debug: + msg: "{{ '00:11:22' | community.general.random_mac }}" + +This produces: + +.. code-block:: ansible-output + + TASK [Create a random MAC starting with ff:] ********************************************** + ok: [localhost] => { + "msg": "ff:69:d3:78:7f:b4" + } + + TASK [Create a random MAC starting with 00:11:22:] **************************************** + ok: [localhost] => { + "msg": "00:11:22:71:5d:3b" + } + +You can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses: + +.. code-block:: yaml+jinja + + "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}" + +Conversions +----------- + +Parsing CSV files +^^^^^^^^^^^^^^^^^ + +Ansible offers the :ref:`community.general.read_csv module ` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the ``from_csv`` filter exists. + +.. 
code-block:: yaml+jinja + + - name: "Parse CSV from string" + debug: + msg: "{{ csv_string | community.general.from_csv }}" + vars: + csv_string: | + foo,bar,baz + 1,2,3 + you,this,then + +This produces: + +.. code-block:: ansible-output + + TASK [Parse CSV from string] ************************************************************** + ok: [localhost] => { + "msg": [ + { + "bar": "2", + "baz": "3", + "foo": "1" + }, + { + "bar": "this", + "baz": "then", + "foo": "you" + } + ] + } + +The ``from_csv`` filter has several keyword arguments to control its behavior: + +:dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored. +:fieldnames: A set of column names to use. If not provided, the first line of the CSV is assumed to contain the column names. +:delimiter: Sets the delimiter to use. Default depends on the dialect used. +:skipinitialspace: Set to ``true`` to ignore space directly after the delimiter. Default depends on the dialect used (usually ``false``). +:strict: Set to ``true`` to error out on invalid CSV input. + +.. versionadded: 3.0.0 + +Converting to JSON +^^^^^^^^^^^^^^^^^^ + +`JC `_ is a CLI tool and Python library which allows to interpret output of various CLI programs as JSON. It is also available as a filter in community.general. This filter needs the `jc Python library `_ installed on the controller. + +.. code-block:: yaml+jinja + + - name: Run 'ls' to list files in / + command: ls / + register: result + + - name: Parse the ls output + debug: + msg: "{{ result.stdout | community.general.jc('ls') }}" + +This produces: + +.. 
code-block:: ansible-output + + TASK [Run 'ls' to list files in /] ******************************************************** + changed: [localhost] + + TASK [Parse the ls output] **************************************************************** + ok: [localhost] => { + "msg": [ + { + "filename": "bin" + }, + { + "filename": "boot" + }, + { + "filename": "dev" + }, + { + "filename": "etc" + }, + { + "filename": "home" + }, + { + "filename": "lib" + }, + { + "filename": "proc" + }, + { + "filename": "root" + }, + { + "filename": "run" + }, + { + "filename": "tmp" + } + ] + } + +.. versionadded: 2.0.0 + +.. _ansible_collections.community.general.docsite.json_query_filter: + +Selecting JSON data: JSON queries +--------------------------------- + +To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure. + +.. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples `_. + +Consider this data structure: + +.. code-block:: yaml+jinja + + { + "domain_definition": { + "domain": { + "cluster": [ + { + "name": "cluster1" + }, + { + "name": "cluster2" + } + ], + "server": [ + { + "name": "server11", + "cluster": "cluster1", + "port": "8080" + }, + { + "name": "server12", + "cluster": "cluster1", + "port": "8090" + }, + { + "name": "server21", + "cluster": "cluster2", + "port": "9080" + }, + { + "name": "server22", + "cluster": "cluster2", + "port": "9090" + } + ], + "library": [ + { + "name": "lib1", + "target": "cluster1" + }, + { + "name": "lib2", + "target": "cluster2" + } + ] + } + } + } + +To extract all clusters from this structure, you can use the following query: + +.. 
code-block:: yaml+jinja + + - name: Display all cluster names + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}" + +To extract all server names: + +.. code-block:: yaml+jinja + + - name: Display all server names + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}" + +To extract ports from cluster1: + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" + vars: + server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port" + +.. note:: You can use a variable to make the query more readable. + +To print out the ports from cluster1 in a comma separated string: + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 as a string + ansible.builtin.debug: + msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}" + +.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability. + +You can use YAML `single quote escaping `_: + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}" + +.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote. + +To get a hash map with all ports and names of a cluster: + +.. 
code-block:: yaml+jinja + + - name: Display all server ports and names from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" + vars: + server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}" + +To extract ports from all clusters with name starting with 'server1': + +.. code-block:: yaml+jinja + + - name: Display all ports from servers with names starting with 'server1' + ansible.builtin.debug: + msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}" + vars: + server_name_query: "domain.server[?starts_with(name,'server1')].port" + +To extract ports from all clusters with name containing 'server1': + +.. code-block:: yaml+jinja + + - name: Display all ports from servers with names containing 'server1' + ansible.builtin.debug: + msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}" + vars: + server_name_query: "domain.server[?contains(name,'server1')].port" + +.. note:: While using ``starts_with`` and ``contains``, you have to use the ``to_json | from_json`` filters for correct parsing of the data structure. 
From 1ad85849afa9a4c2d89678deecb7513edbee6164 Mon Sep 17 00:00:00 2001 From: Chih-Hsuan Yen Date: Wed, 2 Jun 2021 04:04:09 +0800 Subject: [PATCH 0100/2828] nmcli: new arguments to ignore automatic dns servers and gateways (#2635) * nmcli: new arguments to ignore automatic dns servers and gateways Closes #1087 * Add changelog fragment * Address review comments --- .../2635-nmcli-add-ignore-auto-arguments.yml | 2 + plugins/modules/net_tools/nmcli.py | 42 ++++++++++++++++++- .../plugins/modules/net_tools/test_nmcli.py | 32 ++++++++++++++ 3 files changed, 75 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2635-nmcli-add-ignore-auto-arguments.yml diff --git a/changelogs/fragments/2635-nmcli-add-ignore-auto-arguments.yml b/changelogs/fragments/2635-nmcli-add-ignore-auto-arguments.yml new file mode 100644 index 0000000000..e75ceb6a1b --- /dev/null +++ b/changelogs/fragments/2635-nmcli-add-ignore-auto-arguments.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - add new options to ignore automatic DNS servers and gateways (https://github.com/ansible-collections/community.general/issues/1087). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 929d88c654..399d15267a 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -77,6 +77,12 @@ options: - Use the format C(192.0.2.1). - This parameter is mutually_exclusive with never_default4 parameter. type: str + gw4_ignore_auto: + description: + - Ignore automatically configured IPv4 routes. + type: bool + default: false + version_added: 3.2.0 routes4: description: - The list of ipv4 routes. @@ -107,6 +113,12 @@ options: - A list of DNS search domains. elements: str type: list + dns4_ignore_auto: + description: + - Ignore automatically configured IPv4 name servers. + type: bool + default: false + version_added: 3.2.0 method4: description: - Configuration method to be used for IPv4. 
@@ -125,6 +137,12 @@ options: - The IPv6 gateway for this interface. - Use the format C(2001:db8::1). type: str + gw6_ignore_auto: + description: + - Ignore automatically configured IPv6 routes. + type: bool + default: false + version_added: 3.2.0 dns6: description: - A list of up to 3 dns servers. @@ -136,6 +154,12 @@ options: - A list of DNS search domains. elements: str type: list + dns6_ignore_auto: + description: + - Ignore automatically configured IPv6 name servers. + type: bool + default: false + version_added: 3.2.0 method6: description: - Configuration method to be used for IPv6 @@ -648,16 +672,20 @@ class Nmcli(object): self.type = module.params['type'] self.ip4 = module.params['ip4'] self.gw4 = module.params['gw4'] + self.gw4_ignore_auto = module.params['gw4_ignore_auto'] self.routes4 = module.params['routes4'] self.route_metric4 = module.params['route_metric4'] self.never_default4 = module.params['never_default4'] self.dns4 = module.params['dns4'] self.dns4_search = module.params['dns4_search'] + self.dns4_ignore_auto = module.params['dns4_ignore_auto'] self.method4 = module.params['method4'] self.ip6 = module.params['ip6'] self.gw6 = module.params['gw6'] + self.gw6_ignore_auto = module.params['gw6_ignore_auto'] self.dns6 = module.params['dns6'] self.dns6_search = module.params['dns6_search'] + self.dns6_ignore_auto = module.params['dns6_ignore_auto'] self.method6 = module.params['method6'] self.mtu = module.params['mtu'] self.stp = module.params['stp'] @@ -729,7 +757,9 @@ class Nmcli(object): 'ipv4.dhcp-client-id': self.dhcp_client_id, 'ipv4.dns': self.dns4, 'ipv4.dns-search': self.dns4_search, + 'ipv4.ignore-auto-dns': self.dns4_ignore_auto, 'ipv4.gateway': self.gw4, + 'ipv4.ignore-auto-routes': self.gw4_ignore_auto, 'ipv4.routes': self.routes4, 'ipv4.route-metric': self.route_metric4, 'ipv4.never-default': self.never_default4, @@ -737,7 +767,9 @@ class Nmcli(object): 'ipv6.addresses': self.ip6, 'ipv6.dns': self.dns6, 'ipv6.dns-search': 
self.dns6_search, + 'ipv6.ignore-auto-dns': self.dns6_ignore_auto, 'ipv6.gateway': self.gw6, + 'ipv6.ignore-auto-routes': self.gw6_ignore_auto, 'ipv6.method': self.ipv6_method, }) @@ -900,7 +932,11 @@ class Nmcli(object): if setting in ('bridge.stp', 'bridge-port.hairpin-mode', 'connection.autoconnect', - 'ipv4.never-default'): + 'ipv4.never-default', + 'ipv4.ignore-auto-dns', + 'ipv4.ignore-auto-routes', + 'ipv6.ignore-auto-dns', + 'ipv6.ignore-auto-routes'): return bool elif setting in ('ipv4.dns', 'ipv4.dns-search', @@ -1116,17 +1152,21 @@ def main(): ]), ip4=dict(type='str'), gw4=dict(type='str'), + gw4_ignore_auto=dict(type='bool', default=False), routes4=dict(type='list', elements='str'), route_metric4=dict(type='int'), never_default4=dict(type='bool', default=False), dns4=dict(type='list', elements='str'), dns4_search=dict(type='list', elements='str'), + dns4_ignore_auto=dict(type='bool', default=False), method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), dhcp_client_id=dict(type='str'), ip6=dict(type='str'), gw6=dict(type='str'), + gw6_ignore_auto=dict(type='bool', default=False), dns6=dict(type='list', elements='str'), dns6_search=dict(type='list', elements='str'), + dns6_ignore_auto=dict(type='bool', default=False), method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared']), # Bond Specific vars mode=dict(type='str', default='balance-rr', diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index dceb5e5f3f..5b3f96937b 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -95,8 +95,12 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no """ 
TESTCASE_GENERIC_DNS4_SEARCH = [ @@ -120,10 +124,14 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv4.dns-search: search.redhat.com ipv6.dns-search: search6.redhat.com ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no """ TESTCASE_GENERIC_ZONE = [ @@ -147,8 +155,12 @@ connection.zone: external ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no """ TESTCASE_BOND = [ @@ -172,8 +184,12 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no bond.options: mode=active-backup,primary=non_existent_primary """ @@ -199,8 +215,12 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no bridge.mac-address: 52:54:00:AB:CD:EF bridge.stp: yes bridge.max-age: 100 @@ -252,8 +272,12 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no vlan.id: 10 """ @@ -343,8 +367,12 @@ connection.autoconnect: yes 802-3-ethernet.mtu: auto ipv4.method: auto ipv4.dhcp-client-id: 00:11:22:AA:BB:CC:DD +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no """ 
TESTCASE_ETHERNET_STATIC = [ @@ -368,9 +396,13 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv4.dns: 1.1.1.1,8.8.8.8 ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no """ From ca1506fb267d2592ebc6e1fc5df87024ac98ce80 Mon Sep 17 00:00:00 2001 From: Gene Gotimer Date: Tue, 1 Jun 2021 16:06:26 -0400 Subject: [PATCH 0101/2828] Added SHA1 option to maven_artifact (#2662) * Added SHA1 option * Add changelog fragment * Update plugins/modules/packaging/language/maven_artifact.py Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/maven_artifact.py Co-authored-by: Felix Fontein * Combined hash functions Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/maven_artifact.py Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/maven_artifact.py Co-authored-by: Felix Fontein * Removed unused functions (rolled into _local_checksum) * Update changelogs/fragments/2661-maven_artifact-add-sha1-option.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2661-maven_artifact-add-sha1-option.yml | 2 + .../packaging/language/maven_artifact.py | 72 +++++++++++-------- 2 files changed, 46 insertions(+), 28 deletions(-) create mode 100644 changelogs/fragments/2661-maven_artifact-add-sha1-option.yml diff --git a/changelogs/fragments/2661-maven_artifact-add-sha1-option.yml b/changelogs/fragments/2661-maven_artifact-add-sha1-option.yml new file mode 100644 index 0000000000..827942200b --- /dev/null +++ b/changelogs/fragments/2661-maven_artifact-add-sha1-option.yml @@ -0,0 +1,2 @@ +minor_changes: + - maven_artifact - added ``checksum_alg`` option to support SHA1 checksums in order to support FIPS systems (https://github.com/ansible-collections/community.general/pull/2662). 
diff --git a/plugins/modules/packaging/language/maven_artifact.py b/plugins/modules/packaging/language/maven_artifact.py index 50b808f57a..83833b0480 100644 --- a/plugins/modules/packaging/language/maven_artifact.py +++ b/plugins/modules/packaging/language/maven_artifact.py @@ -129,10 +129,10 @@ options: verify_checksum: type: str description: - - If C(never), the md5 checksum will never be downloaded and verified. - - If C(download), the md5 checksum will be downloaded and verified only after artifact download. This is the default. - - If C(change), the md5 checksum will be downloaded and verified if the destination already exist, - to verify if they are identical. This was the behaviour before 2.6. Since it downloads the md5 before (maybe) + - If C(never), the MD5/SHA1 checksum will never be downloaded and verified. + - If C(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default. + - If C(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exist, + to verify if they are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe) downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error if the artifact has not been cached yet, it may fail unexpectedly. If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to @@ -141,6 +141,15 @@ options: required: false default: 'download' choices: ['never', 'download', 'change', 'always'] + checksum_alg: + type: str + description: + - If C(md5), checksums will use the MD5 algorithm. This is the default. + - If C(sha1), checksums will use the SHA1 algorithm. This can be used on systems configured to use + FIPS-compliant algorithms, since MD5 will be blocked on such systems. 
+ default: 'md5' + choices: ['md5', 'sha1'] + version_added: 3.2.0 directory_mode: type: str description: @@ -507,7 +516,7 @@ class MavenDownloader: raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use) return None - def download(self, tmpdir, artifact, verify_download, filename=None): + def download(self, tmpdir, artifact, verify_download, filename=None, checksum_alg='md5'): if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest": artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None, artifact.classifier, artifact.extension) @@ -528,11 +537,11 @@ class MavenDownloader: shutil.copyfileobj(response, f) if verify_download: - invalid_md5 = self.is_invalid_md5(tempname, url) - if invalid_md5: + invalid_checksum = self.is_invalid_checksum(tempname, url, checksum_alg) + if invalid_checksum: # if verify_change was set, the previous file would be deleted os.remove(tempname) - return invalid_md5 + return invalid_checksum except Exception as e: os.remove(tempname) raise e @@ -541,40 +550,45 @@ class MavenDownloader: shutil.move(tempname, artifact.get_filename(filename)) return None - def is_invalid_md5(self, file, remote_url): + def is_invalid_checksum(self, file, remote_url, checksum_alg='md5'): if os.path.exists(file): - local_md5 = self._local_md5(file) + local_checksum = self._local_checksum(checksum_alg, file) if self.local: parsed_url = urlparse(remote_url) - remote_md5 = self._local_md5(parsed_url.path) + remote_checksum = self._local_checksum(checksum_alg, parsed_url.path) else: try: - remote_md5 = to_text(self._getContent(remote_url + '.md5', "Failed to retrieve MD5", False), errors='strict') + remote_checksum = to_text(self._getContent(remote_url + '.' 
+ checksum_alg, "Failed to retrieve checksum", False), errors='strict') except UnicodeError as e: - return "Cannot retrieve a valid md5 from %s: %s" % (remote_url, to_native(e)) - if(not remote_md5): - return "Cannot find md5 from " + remote_url + return "Cannot retrieve a valid %s checksum from %s: %s" % (checksum_alg, remote_url, to_native(e)) + if not remote_checksum: + return "Cannot find %s checksum from %s" % (checksum_alg, remote_url) try: - # Check if remote md5 only contains md5 or md5 + filename - _remote_md5 = remote_md5.split(None)[0] - remote_md5 = _remote_md5 - # remote_md5 is empty so we continue and keep original md5 string - # This should not happen since we check for remote_md5 before + # Check if remote checksum only contains md5/sha1 or md5/sha1 + filename + _remote_checksum = remote_checksum.split(None)[0] + remote_checksum = _remote_checksum + # remote_checksum is empty so we continue and keep original checksum string + # This should not happen since we check for remote_checksum before except IndexError: pass - if local_md5.lower() == remote_md5.lower(): + if local_checksum.lower() == remote_checksum.lower(): return None else: - return "Checksum does not match: we computed " + local_md5 + " but the repository states " + remote_md5 + return "Checksum does not match: we computed " + local_checksum + " but the repository states " + remote_checksum return "Path does not exist: " + file - def _local_md5(self, file): - md5 = hashlib.md5() + def _local_checksum(self, checksum_alg, file): + if checksum_alg.lower() == 'md5': + hash = hashlib.md5() + elif checksum_alg.lower() == 'sha1': + hash = hashlib.sha1() + else: + raise ValueError("Unknown checksum_alg %s" % checksum_alg) with io.open(file, 'rb') as f: for chunk in iter(lambda: f.read(8192), b''): - md5.update(chunk) - return md5.hexdigest() + hash.update(chunk) + return hash.hexdigest() def main(): @@ -599,6 +613,7 @@ def main(): client_key=dict(type="path", required=False), 
keep_name=dict(required=False, default=False, type='bool'), verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']), + checksum_alg=dict(required=False, default='md5', choices=['md5', 'sha1']), directory_mode=dict(type='str'), ), add_file_common_args=True, @@ -639,6 +654,7 @@ def main(): verify_checksum = module.params["verify_checksum"] verify_download = verify_checksum in ['download', 'always'] verify_change = verify_checksum in ['change', 'always'] + checksum_alg = module.params["checksum_alg"] downloader = MavenDownloader(module, repository_url, local, headers) @@ -683,12 +699,12 @@ def main(): b_dest = to_bytes(dest, errors='surrogate_or_strict') - if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_md5(dest, downloader.find_uri_for_artifact(artifact))): + if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_checksum(dest, downloader.find_uri_for_artifact(artifact), checksum_alg)): prev_state = "present" if prev_state == "absent": try: - download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest) + download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest, checksum_alg) if download_error is None: changed = True else: From fe5717c1aa1deab9ac487a2903c725ac2ac2cb27 Mon Sep 17 00:00:00 2001 From: Benjamin Schubert Date: Thu, 3 Jun 2021 20:42:05 +0100 Subject: [PATCH 0102/2828] keycloak_realm.py: Mark 'reset_password_allowed' as no_log=False (#2694) * keycloak_realm.py: Mark 'reset_password_allowed' as no_log=False This value is not sensitive but Ansible will complain about it otherwise * fixup! 
keycloak_realm.py: Mark 'reset_password_allowed' as no_log=False * Apply all suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/keycloak-realm-no-log-password-reset.yml | 2 ++ plugins/modules/identity/keycloak/keycloak_realm.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/keycloak-realm-no-log-password-reset.yml diff --git a/changelogs/fragments/keycloak-realm-no-log-password-reset.yml b/changelogs/fragments/keycloak-realm-no-log-password-reset.yml new file mode 100644 index 0000000000..104bf4179b --- /dev/null +++ b/changelogs/fragments/keycloak-realm-no-log-password-reset.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_realm - remove warning that ``reset_password_allowed`` needs to be marked as ``no_log`` (https://github.com/ansible-collections/community.general/pull/2694). diff --git a/plugins/modules/identity/keycloak/keycloak_realm.py b/plugins/modules/identity/keycloak/keycloak_realm.py index 7e80bd3d3d..509fcab7bc 100644 --- a/plugins/modules/identity/keycloak/keycloak_realm.py +++ b/plugins/modules/identity/keycloak/keycloak_realm.py @@ -654,7 +654,7 @@ def main(): registration_flow=dict(type='str', aliases=['registrationFlow']), remember_me=dict(type='bool', aliases=['rememberMe']), reset_credentials_flow=dict(type='str', aliases=['resetCredentialsFlow']), - reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed']), + reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False), revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), smtp_server=dict(type='dict', aliases=['smtpServer']), ssl_required=dict(type='bool', aliases=['sslRequired']), From efbda2389d02dbefd887bac505b320c100b66b1a Mon Sep 17 00:00:00 2001 From: Benjamin Schubert Date: Thu, 3 Jun 2021 20:44:54 +0100 Subject: [PATCH 0103/2828] keycloak_realm.py: Fix the `ssl_required` parameter according to the API (#2693) * 
keycloak_realm.py: Fix the `ssl_required` parameter according to the API The `ssl_required` parameter is a string and must be one of 'all', 'external' or 'none'. Passing a bool will make the server return a 500. * fixup! keycloak_realm.py: Fix the `ssl_required` parameter according to the API * Update changelogs/fragments/keycloak_realm_ssl_required.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/keycloak_realm_ssl_required.yml | 3 +++ plugins/modules/identity/keycloak/keycloak_realm.py | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/keycloak_realm_ssl_required.yml diff --git a/changelogs/fragments/keycloak_realm_ssl_required.yml b/changelogs/fragments/keycloak_realm_ssl_required.yml new file mode 100644 index 0000000000..7476612e2f --- /dev/null +++ b/changelogs/fragments/keycloak_realm_ssl_required.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - keycloak_realm - ``ssl_required`` changed from a boolean type to accept the strings ``none``, ``external`` or ``all``. This is not a breaking change since the module always failed when a boolean was supplied (https://github.com/ansible-collections/community.general/pull/2693). diff --git a/plugins/modules/identity/keycloak/keycloak_realm.py b/plugins/modules/identity/keycloak/keycloak_realm.py index 509fcab7bc..95f79704ef 100644 --- a/plugins/modules/identity/keycloak/keycloak_realm.py +++ b/plugins/modules/identity/keycloak/keycloak_realm.py @@ -439,9 +439,10 @@ options: ssl_required: description: - The realm ssl required option. + choices: ['all', 'external', 'none'] aliases: - sslRequired - type: bool + type: str sso_session_idle_timeout: description: - The realm sso session idle timeout. 
@@ -657,7 +658,7 @@ def main(): reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False), revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), smtp_server=dict(type='dict', aliases=['smtpServer']), - ssl_required=dict(type='bool', aliases=['sslRequired']), + ssl_required=dict(choices=["external", "all", "none"], aliases=['sslRequired']), sso_session_idle_timeout=dict(type='int', aliases=['ssoSessionIdleTimeout']), sso_session_idle_timeout_remember_me=dict(type='int', aliases=['ssoSessionIdleTimeoutRememberMe']), sso_session_max_lifespan=dict(type='int', aliases=['ssoSessionMaxLifespan']), From d93bc039b274d1af837fa7fe869956a3be1d878c Mon Sep 17 00:00:00 2001 From: Matthias Vogelgesang Date: Thu, 3 Jun 2021 22:54:19 +0200 Subject: [PATCH 0104/2828] BOTMETA.yml: remove myself from zypper_repository (#2701) --- .github/BOTMETA.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 994de0621f..a3fb8e1f35 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -717,8 +717,9 @@ files: labels: zypper ignore: dirtyharrycallahan robinro $modules/packaging/os/zypper_repository.py: - maintainers: $team_suse matze + maintainers: $team_suse labels: zypper + ignore: matze $modules/remote_management/cobbler/: maintainers: dagwieers $modules/remote_management/hpilo/: From 5ddf0041ecc733ed6f1f6ab938af584683c6e862 Mon Sep 17 00:00:00 2001 From: George Rawlinson Date: Fri, 4 Jun 2021 17:08:54 +1200 Subject: [PATCH 0105/2828] add module pacman_key (#778) * add module pacman_key * add symlink and fix documentation for pacman_key * documentation fix for pacman_key * improve logic around user input * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update 
plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Improve parameter checking required_one_of=[] is neat. Co-authored-by: Alexei Znamensky * Revert "Improve parameter checking" This reverts commit 044b0cbc854744480ad1e17753e33f0371c7d0eb. * Simplify a bunch of code. * fix typos pointed out by yan12125 * replaced manual checks with required-if invocation * added default keyring to documentation * some initial tests * updated metadata * refactored to make sanity tests pass * refactor to make sanity tests pass ... part deux * refactor: simplify run_command invocations * test: cover check-mode and some normal operation * docs: fix grammatical errors * rip out fingerprint code a full length (40 characters) key ID is equivalent to the fingerprint. 
* refactor tests, add a couple more * test: added testcase for method: data * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * docs: correct yaml boolean type Co-authored-by: Felix Fontein Co-authored-by: Andrew Klychkov Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- plugins/modules/packaging/os/pacman_key.py | 314 ++++++++++ plugins/modules/pacman_key.py | 1 + .../modules/packaging/os/test_pacman_key.py | 576 ++++++++++++++++++ 3 files changed, 891 insertions(+) create mode 100644 plugins/modules/packaging/os/pacman_key.py create mode 120000 plugins/modules/pacman_key.py create mode 100644 tests/unit/plugins/modules/packaging/os/test_pacman_key.py diff --git a/plugins/modules/packaging/os/pacman_key.py b/plugins/modules/packaging/os/pacman_key.py new file mode 100644 index 0000000000..85896c211d --- /dev/null +++ b/plugins/modules/packaging/os/pacman_key.py @@ -0,0 +1,314 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, George Rawlinson +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: pacman_key +author: +- George Rawlinson (@grawlinson) +version_added: "3.2.0" +short_description: Manage pacman's list of trusted keys +description: +- Add or remove gpg keys from the pacman keyring. +notes: +- Use full-length key ID (40 characters). +- Keys will be verified when using I(data), I(file), or I(url) unless I(verify) is overridden. +- Keys will be locally signed after being imported into the keyring. +- If the key ID exists in the keyring, the key will not be added unless I(force_update) is specified. +- I(data), I(file), I(url), and I(keyserver) are mutually exclusive. +- Supports C(check_mode). 
+requirements: +- gpg +- pacman-key +options: + id: + description: + - The 40 character identifier of the key. + - Including this allows check mode to correctly report the changed state. + - Do not specify a subkey ID, instead specify the primary key ID. + required: true + type: str + data: + description: + - The keyfile contents to add to the keyring. + - Must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + file: + description: + - The path to a keyfile on the remote server to add to the keyring. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: path + url: + description: + - The URL to retrieve keyfile from. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + keyserver: + description: + - The keyserver used to retrieve key from. + type: str + verify: + description: + - Whether or not to verify the keyfile's key ID against specified key ID. + type: bool + default: true + force_update: + description: + - This forces the key to be updated if it already exists in the keyring. + type: bool + default: false + keyring: + description: + - The full path to the keyring folder on the remote server. + - If not specified, module will use pacman's default (C(/etc/pacman.d/gnupg)). + - Useful if the remote system requires an alternative gnupg directory. + type: path + default: /etc/pacman.d/gnupg + state: + description: + - Ensures that the key is present (added) or absent (revoked). 
+ default: present + choices: [ absent, present ] + type: str +''' + +EXAMPLES = ''' +- name: Import a key via local file + community.general.pacman_key: + data: "{{ lookup('file', 'keyfile.asc') }}" + state: present + +- name: Import a key via remote file + community.general.pacman_key: + file: /tmp/keyfile.asc + state: present + +- name: Import a key via url + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + url: https://domain.tld/keys/keyfile.asc + state: present + +- name: Import a key via keyserver + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + keyserver: keyserver.domain.tld + +- name: Import a key into an alternative keyring + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + file: /tmp/keyfile.asc + keyring: /etc/pacman.d/gnupg-alternative + +- name: Remove a key from the keyring + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + state: absent +''' + +RETURN = r''' # ''' + +import os.path +import tempfile +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_native + + +class PacmanKey(object): + def __init__(self, module): + self.module = module + # obtain binary paths for gpg & pacman-key + self.gpg = module.get_bin_path('gpg', required=True) + self.pacman_key = module.get_bin_path('pacman-key', required=True) + + # obtain module parameters + keyid = module.params['id'] + url = module.params['url'] + data = module.params['data'] + file = module.params['file'] + keyserver = module.params['keyserver'] + verify = module.params['verify'] + force_update = module.params['force_update'] + keyring = module.params['keyring'] + state = module.params['state'] + self.keylength = 40 + + # sanitise key ID & check if key exists in the keyring + keyid = self.sanitise_keyid(keyid) + key_present = self.key_in_keyring(keyring, keyid) + + # check mode + if 
module.check_mode: + if state == "present": + changed = (key_present and force_update) or not key_present + module.exit_json(changed=changed) + elif state == "absent": + if key_present: + module.exit_json(changed=True) + module.exit_json(changed=False) + + if state == "present": + if key_present and not force_update: + module.exit_json(changed=False) + + if data: + file = self.save_key(data) + self.add_key(keyring, file, keyid, verify) + module.exit_json(changed=True) + elif file: + self.add_key(keyring, file, keyid, verify) + module.exit_json(changed=True) + elif url: + data = self.fetch_key(url) + file = self.save_key(data) + self.add_key(keyring, file, keyid, verify) + module.exit_json(changed=True) + elif keyserver: + self.recv_key(keyring, keyid, keyserver) + module.exit_json(changed=True) + elif state == "absent": + if key_present: + self.remove_key(keyring, keyid) + module.exit_json(changed=True) + module.exit_json(changed=False) + + def is_hexadecimal(self, string): + """Check if a given string is valid hexadecimal""" + try: + int(string, 16) + except ValueError: + return False + return True + + def sanitise_keyid(self, keyid): + """Sanitise given key ID. + + Strips whitespace, uppercases all characters, and strips leading `0X`. 
+ """ + sanitised_keyid = keyid.strip().upper().replace(' ', '').replace('0X', '') + if len(sanitised_keyid) != self.keylength: + self.module.fail_json(msg="key ID is not full-length: %s" % sanitised_keyid) + if not self.is_hexadecimal(sanitised_keyid): + self.module.fail_json(msg="key ID is not hexadecimal: %s" % sanitised_keyid) + return sanitised_keyid + + def fetch_key(self, url): + """Downloads a key from url""" + response, info = fetch_url(self.module, url) + if info['status'] != 200: + self.module.fail_json(msg="failed to fetch key at %s, error was %s" % (url, info['msg'])) + return to_native(response.read()) + + def recv_key(self, keyring, keyid, keyserver): + """Receives key via keyserver""" + cmd = [self.pacman_key, '--gpgdir', keyring, '--keyserver', keyserver, '--recv-keys', keyid] + self.module.run_command(cmd, check_rc=True) + self.lsign_key(keyring, keyid) + + def lsign_key(self, keyring, keyid): + """Locally sign key""" + cmd = [self.pacman_key, '--gpgdir', keyring] + self.module.run_command(cmd + ['--lsign-key', keyid], check_rc=True) + + def save_key(self, data): + "Saves key data to a temporary file" + tmpfd, tmpname = tempfile.mkstemp() + self.module.add_cleanup_file(tmpname) + tmpfile = os.fdopen(tmpfd, "w") + tmpfile.write(data) + tmpfile.close() + return tmpname + + def add_key(self, keyring, keyfile, keyid, verify): + """Add key to pacman's keyring""" + if verify: + self.verify_keyfile(keyfile, keyid) + cmd = [self.pacman_key, '--gpgdir', keyring, '--add', keyfile] + self.module.run_command(cmd, check_rc=True) + self.lsign_key(keyring, keyid) + + def remove_key(self, keyring, keyid): + """Remove key from pacman's keyring""" + cmd = [self.pacman_key, '--gpgdir', keyring, '--delete', keyid] + self.module.run_command(cmd, check_rc=True) + + def verify_keyfile(self, keyfile, keyid): + """Verify that keyfile matches the specified key ID""" + if keyfile is None: + self.module.fail_json(msg="expected a key, got none") + elif keyid is None: + 
self.module.fail_json(msg="expected a key ID, got none") + + rc, stdout, stderr = self.module.run_command( + [ + self.gpg, + '--with-colons', + '--with-fingerprint', + '--batch', + '--no-tty', + '--show-keys', + keyfile + ], + check_rc=True, + ) + + extracted_keyid = None + for line in stdout.splitlines(): + if line.startswith('fpr:'): + extracted_keyid = line.split(':')[9] + break + + if extracted_keyid != keyid: + self.module.fail_json(msg="key ID does not match. expected %s, got %s" % (keyid, extracted_keyid)) + + def key_in_keyring(self, keyring, keyid): + "Check if the key ID is in pacman's keyring" + rc, stdout, stderr = self.module.run_command( + [ + self.gpg, + '--with-colons', + '--batch', + '--no-tty', + '--no-default-keyring', + '--keyring=%s/pubring.gpg' % keyring, + '--list-keys', keyid + ], + check_rc=False, + ) + if rc != 0: + if stderr.find("No public key") >= 0: + return False + else: + self.module.fail_json(msg="gpg returned an error: %s" % stderr) + return True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + id=dict(type='str', required=True), + data=dict(type='str'), + file=dict(type='path'), + url=dict(type='str'), + keyserver=dict(type='str'), + verify=dict(type='bool', default=True), + force_update=dict(type='bool', default=False), + keyring=dict(type='path', default='/etc/pacman.d/gnupg'), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + mutually_exclusive=(('data', 'file', 'url', 'keyserver'),), + required_if=[('state', 'present', ('data', 'file', 'url', 'keyserver'), True)], + ) + PacmanKey(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pacman_key.py b/plugins/modules/pacman_key.py new file mode 120000 index 0000000000..ac0f448232 --- /dev/null +++ b/plugins/modules/pacman_key.py @@ -0,0 +1 @@ +packaging/os/pacman_key.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/packaging/os/test_pacman_key.py 
b/tests/unit/plugins/modules/packaging/os/test_pacman_key.py new file mode 100644 index 0000000000..757fee4e87 --- /dev/null +++ b/tests/unit/plugins/modules/packaging/os/test_pacman_key.py @@ -0,0 +1,576 @@ +# Copyright: (c) 2019, George Rawlinson +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.modules.packaging.os import pacman_key +import pytest +import json + +# path used for mocking get_bin_path() +MOCK_BIN_PATH = '/mocked/path' + +# Key ID used for tests +TESTING_KEYID = '14F26682D0916CDD81E37B6D61B7B526D98F0353' +TESTING_KEYFILE_PATH = '/tmp/pubkey.asc' + +# gpg --{show,list}-key output (key present) +GPG_SHOWKEY_OUTPUT = '''tru::1:1616373715:0:3:1:5 +pub:-:4096:1:61B7B526D98F0353:1437155332:::-:::scSC::::::23::0: +fpr:::::::::14F26682D0916CDD81E37B6D61B7B526D98F0353: +uid:-::::1437155332::E57D1F9BFF3B404F9F30333629369B08DF5E2161::Mozilla Software Releases ::::::::::0: +sub:e:4096:1:1C69C4E55E9905DB:1437155572:1500227572:::::s::::::23: +fpr:::::::::F2EF4E6E6AE75B95F11F1EB51C69C4E55E9905DB: +sub:e:4096:1:BBBEBDBB24C6F355:1498143157:1561215157:::::s::::::23: +fpr:::::::::DCEAC5D96135B91C4EA672ABBBBEBDBB24C6F355: +sub:e:4096:1:F1A6668FBB7D572E:1559247338:1622319338:::::s::::::23: +fpr:::::::::097B313077AE62A02F84DA4DF1A6668FBB7D572E:''' + +# gpg --{show,list}-key output (key absent) +GPG_NOKEY_OUTPUT = '''gpg: error reading key: No public key +tru::1:1616373715:0:3:1:5''' + +# pacman-key output (successful invocation) +PACMAN_KEY_SUCCESS = '''==> Updating trust database... 
+gpg: next trustdb check due at 2021-08-02''' + +# expected command for gpg --list-keys KEYID +RUN_CMD_LISTKEYS = [ + MOCK_BIN_PATH, + '--with-colons', + '--batch', + '--no-tty', + '--no-default-keyring', + '--keyring=/etc/pacman.d/gnupg/pubring.gpg', + '--list-keys', + TESTING_KEYID, +] + +# expected command for gpg --show-keys KEYFILE +RUN_CMD_SHOW_KEYFILE = [ + MOCK_BIN_PATH, + '--with-colons', + '--with-fingerprint', + '--batch', + '--no-tty', + '--show-keys', + TESTING_KEYFILE_PATH, +] + +# expected command for pacman-key --lsign-key KEYID +RUN_CMD_LSIGN_KEY = [ + MOCK_BIN_PATH, + '--gpgdir', + '/etc/pacman.d/gnupg', + '--lsign-key', + TESTING_KEYID, +] + + +TESTCASES = [ + # + # invalid user input + # + # state: present, id: absent + [ + { + 'state': 'present', + }, + { + 'id': 'param_missing_id', + 'msg': 'missing required arguments: id', + 'failed': True, + }, + ], + # state: present, required parameters: missing + [ + { + 'state': 'present', + 'id': '0xDOESNTMATTER', + }, + { + 'id': 'param_missing_method', + 'msg': 'state is present but any of the following are missing: data, file, url, keyserver', + 'failed': True, + }, + ], + # state: present, id: invalid (not full-length) + [ + { + 'id': '0xDOESNTMATTER', + 'data': 'FAKEDATA', + }, + { + 'id': 'param_id_not_full', + 'msg': 'key ID is not full-length: DOESNTMATTER', + 'failed': True, + }, + ], + # state: present, id: invalid (not hexadecimal) + [ + { + 'state': 'present', + 'id': '01234567890ABCDE01234567890ABCDE1234567M', + 'data': 'FAKEDATA', + }, + { + 'id': 'param_id_not_hex', + 'msg': 'key ID is not hexadecimal: 01234567890ABCDE01234567890ABCDE1234567M', + 'failed': True, + }, + ], + # state: absent, id: absent + [ + { + 'state': 'absent', + }, + { + 'id': 'param_absent_state_missing_id', + 'msg': 'missing required arguments: id', + 'failed': True, + }, + ], + # + # check mode + # + # state & key present + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'data': 'FAKEDATA', + 
'_ansible_check_mode': True, + }, + { + 'id': 'checkmode_state_and_key_present', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ], + 'changed': False, + }, + ], + # state present, key absent + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'data': 'FAKEDATA', + '_ansible_check_mode': True, + }, + { + 'id': 'checkmode_state_present_key_absent', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ], + 'changed': True, + }, + ], + # state & key absent + [ + { + 'state': 'absent', + 'id': TESTING_KEYID, + '_ansible_check_mode': True, + }, + { + 'id': 'checkmode_state_and_key_absent', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ], + 'changed': False, + }, + ], + # state absent, key present + [ + { + 'state': 'absent', + 'id': TESTING_KEYID, + '_ansible_check_mode': True, + }, + { + 'id': 'check_mode_state_absent_key_present', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ], + 'changed': True, + }, + ], + # + # normal operation + # + # state & key present + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'data': 'FAKEDATA', + }, + { + 'id': 'state_and_key_present', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ], + 'changed': False, + }, + ], + # state absent, key present + [ + { + 'state': 'absent', + 'id': TESTING_KEYID, + }, + { + 'id': 'state_absent_key_present', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ( + [ + MOCK_BIN_PATH, + '--gpgdir', + '/etc/pacman.d/gnupg', + '--delete', + TESTING_KEYID, + ], + {'check_rc': True}, + ( + 0, + PACMAN_KEY_SUCCESS, + '', + ), + ), + ], + 'changed': True, + }, + ], + # state & key 
absent + [ + { + 'state': 'absent', + 'id': TESTING_KEYID, + }, + { + 'id': 'state_and_key_absent', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ], + 'changed': False, + }, + ], + # state: present, key: absent, method: file + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'file': TESTING_KEYFILE_PATH, + }, + { + 'id': 'state_present_key_absent_method_file', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ( + RUN_CMD_SHOW_KEYFILE, + {'check_rc': True}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ( + [ + MOCK_BIN_PATH, + '--gpgdir', + '/etc/pacman.d/gnupg', + '--add', + '/tmp/pubkey.asc', + ], + {'check_rc': True}, + ( + 0, + PACMAN_KEY_SUCCESS, + '', + ), + ), + ( + RUN_CMD_LSIGN_KEY, + {'check_rc': True}, + ( + 0, + PACMAN_KEY_SUCCESS, + '', + ), + ), + ], + 'changed': True, + }, + ], + # state: present, key: absent, method: file + # failure: keyid & keyfile don't match + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'file': TESTING_KEYFILE_PATH, + }, + { + 'id': 'state_present_key_absent_verify_failed', + 'msg': 'key ID does not match. 
expected 14F26682D0916CDD81E37B6D61B7B526D98F0353, got 14F26682D0916CDD81E37B6D61B7B526D98F0354', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ( + RUN_CMD_SHOW_KEYFILE, + {'check_rc': True}, + ( + 0, + GPG_SHOWKEY_OUTPUT.replace('61B7B526D98F0353', '61B7B526D98F0354'), + '', + ), + ), + ], + 'failed': True, + }, + ], + # state: present, key: absent, method: keyserver + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'keyserver': 'pgp.mit.edu', + }, + { + 'id': 'state_present_key_absent_method_keyserver', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ( + [ + MOCK_BIN_PATH, + '--gpgdir', + '/etc/pacman.d/gnupg', + '--keyserver', + 'pgp.mit.edu', + '--recv-keys', + TESTING_KEYID, + ], + {'check_rc': True}, + ( + 0, + ''' +gpg: key 0x61B7B526D98F0353: 32 signatures not checked due to missing keys +gpg: key 0x61B7B526D98F0353: public key "Mozilla Software Releases " imported +gpg: marginals needed: 3 completes needed: 1 trust model: pgp +gpg: depth: 0 valid: 1 signed: 0 trust: 0-, 0q, 0n, 0m, 0f, 1u +gpg: Total number processed: 1 +gpg: imported: 1 +''', + '', + ), + ), + ( + RUN_CMD_LSIGN_KEY, + {'check_rc': True}, + ( + 0, + PACMAN_KEY_SUCCESS, + '', + ), + ), + ], + 'changed': True, + }, + ], + # state: present, key: absent, method: data + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'data': 'PGP_DATA', + }, + { + 'id': 'state_present_key_absent_method_data', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ( + RUN_CMD_SHOW_KEYFILE, + {'check_rc': True}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ( + [ + MOCK_BIN_PATH, + '--gpgdir', + '/etc/pacman.d/gnupg', + '--add', + '/tmp/pubkey.asc', + ], + {'check_rc': True}, + ( + 0, + PACMAN_KEY_SUCCESS, + '', + ), + ), + ( + RUN_CMD_LSIGN_KEY, + {'check_rc': True}, + ( + 0, + 
PACMAN_KEY_SUCCESS, + '', + ), + ), + ], + 'save_key_output': TESTING_KEYFILE_PATH, + 'changed': True, + }, + ], +] + + +@pytest.fixture +def patch_get_bin_path(mocker): + get_bin_path = mocker.patch.object( + AnsibleModule, + 'get_bin_path', + return_value=MOCK_BIN_PATH, + ) + + +@pytest.mark.parametrize( + 'patch_ansible_module, expected', + TESTCASES, + ids=[item[1]['id'] for item in TESTCASES], + indirect=['patch_ansible_module'] +) +@pytest.mark.usefixtures('patch_ansible_module') +def test_operation(mocker, capfd, patch_get_bin_path, expected): + # patch run_command invocations with mock data + if 'run_command.calls' in expected: + mock_run_command = mocker.patch.object( + AnsibleModule, + 'run_command', + side_effect=[item[2] for item in expected['run_command.calls']], + ) + + # patch save_key invocations with mock data + if 'save_key_output' in expected: + mock_save_key = mocker.patch.object( + pacman_key.PacmanKey, + 'save_key', + return_value=expected['save_key_output'], + ) + + # invoke module + with pytest.raises(SystemExit): + pacman_key.main() + + # capture std{out,err} + out, err = capfd.readouterr() + results = json.loads(out) + + # assertion time! 
+ if 'msg' in expected: + assert results['msg'] == expected['msg'] + if 'changed' in expected: + assert results['changed'] == expected['changed'] + if 'failed' in expected: + assert results['failed'] == expected['failed'] + + if 'run_command.calls' in expected: + assert AnsibleModule.run_command.call_count == len(expected['run_command.calls']) + call_args_list = [(item[0][0], item[1]) for item in AnsibleModule.run_command.call_args_list] + expected_call_args_list = [(item[0], item[1]) for item in expected['run_command.calls']] + assert call_args_list == expected_call_args_list From a4f46b881ac4596ff32e8581df4c794301dacd6e Mon Sep 17 00:00:00 2001 From: rainerleber <39616583+rainerleber@users.noreply.github.com> Date: Fri, 4 Jun 2021 07:36:35 +0200 Subject: [PATCH 0106/2828] Add module sapcar_extract to make SAP administration easier. (#2596) * add sapcar * integrate test * test integration * Revert "integrate test" This reverts commit 17cbff4f0227e4c27e1e25671d993823559d94bd. * add requiered * change test * change binary * test * add bin bath * change future * change download logic * change logic * sanity * Apply suggestions from code review Co-authored-by: Felix Fontein * add url and error handling * sanity * Apply suggestions from code review Co-authored-by: Andrew Klychkov * Apply suggestions from code review Co-authored-by: Felix Fontein * cleanup and fixes * sanity * add sec library * add description * remove blanks * sanity * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Rainer Leber Co-authored-by: Felix Fontein Co-authored-by: Andrew Klychkov --- plugins/modules/files/sapcar_extract.py | 219 ++++++++++++++++++ plugins/modules/sapcar_extract.py | 1 + tests/unit/plugins/modules/files/__init__.py | 0 .../modules/files/test_sapcar_extract.py | 53 +++++ 4 files changed, 273 insertions(+) create mode 100644 plugins/modules/files/sapcar_extract.py create mode 120000 plugins/modules/sapcar_extract.py create mode 100644 
tests/unit/plugins/modules/files/__init__.py create mode 100644 tests/unit/plugins/modules/files/test_sapcar_extract.py diff --git a/plugins/modules/files/sapcar_extract.py b/plugins/modules/files/sapcar_extract.py new file mode 100644 index 0000000000..db0f5f9ea8 --- /dev/null +++ b/plugins/modules/files/sapcar_extract.py @@ -0,0 +1,219 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Rainer Leber +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: sapcar_extract +short_description: Manages SAP SAPCAR archives +version_added: "3.2.0" +description: + - Provides support for unpacking C(sar)/C(car) files with the SAPCAR binary from SAP and pulling + information back into Ansible. +options: + path: + description: The path to the SAR/CAR file. + type: path + required: true + dest: + description: + - The destination where SAPCAR extracts the SAR file. Missing folders will be created. + If this parameter is not provided it will unpack in the same folder as the SAR file. + type: path + binary_path: + description: + - The path to the SAPCAR binary, for example, C(/home/dummy/sapcar) or C(https://myserver/SAPCAR). + If this parameter is not provided the module will look in C(PATH). + type: path + signature: + description: + - If C(true) the signature will be extracted. + default: false + type: bool + security_library: + description: + - The path to the security library, for example, C(/usr/sap/hostctrl/exe/libsapcrytp.so), for signature operations. + type: path + manifest: + description: + - The name of the manifest. + default: "SIGNATURE.SMF" + type: str + remove: + description: + - If C(true) the SAR/CAR file will be removed. B(This should be used with caution!) + default: false + type: bool +author: + - Rainer Leber (@RainerLeber) +notes: + - Always returns C(changed=true) in C(check_mode). 
+''' + +EXAMPLES = """ +- name: Extract SAR file + community.general.sapcar_extract: + path: "~/source/hana.sar" + +- name: Extract SAR file with destination + community.general.sapcar_extract: + path: "~/source/hana.sar" + dest: "~/test/" + +- name: Extract SAR file with destination and download from webserver can be a fileshare as well + community.general.sapcar_extract: + path: "~/source/hana.sar" + dest: "~/dest/" + binary_path: "https://myserver/SAPCAR" + +- name: Extract SAR file and delete SAR after extract + community.general.sapcar_extract: + path: "~/source/hana.sar" + remove: true + +- name: Extract SAR file with manifest + community.general.sapcar_extract: + path: "~/source/hana.sar" + signature: true + +- name: Extract SAR file with manifest and rename it + community.general.sapcar_extract: + path: "~/source/hana.sar" + manifest: "MyNewSignature.SMF" + signature: true +""" + +import os +from tempfile import NamedTemporaryFile +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import open_url +from ansible.module_utils._text import to_native + + +def get_list_of_files(dir_name): + # create a list of file and directories + # names in the given directory + list_of_file = os.listdir(dir_name) + allFiles = list() + # Iterate over all the entries + for entry in list_of_file: + # Create full path + fullPath = os.path.join(dir_name, entry) + # If entry is a directory then get the list of files in this directory + if os.path.isdir(fullPath): + allFiles = allFiles + [fullPath] + allFiles = allFiles + get_list_of_files(fullPath) + else: + allFiles.append(fullPath) + return allFiles + + +def download_SAPCAR(binary_path, module): + bin_path = None + # download sapcar binary if url is provided otherwise path is returned + if binary_path is not None: + if binary_path.startswith('https://') or binary_path.startswith('http://'): + random_file = NamedTemporaryFile(delete=False) + with open_url(binary_path) as response: + with 
random_file as out_file: + data = response.read() + out_file.write(data) + os.chmod(out_file.name, 0o700) + bin_path = out_file.name + module.add_cleanup_file(bin_path) + else: + bin_path = binary_path + return bin_path + + +def check_if_present(command, path, dest, signature, manifest, module): + # manipuliating output from SAR file for compare with already extracted files + iter_command = [command, '-tvf', path] + sar_out = module.run_command(iter_command)[1] + sar_raw = sar_out.split("\n")[1:] + if dest[-1] != "/": + dest = dest + "/" + sar_files = [dest + x.split(" ")[-1] for x in sar_raw if x] + # remove any SIGNATURE.SMF from list because it will not unpacked if signature is false + if not signature: + sar_files = [item for item in sar_files if '.SMF' not in item] + # if signature is renamed manipulate files in list of sar file for compare. + if manifest != "SIGNATURE.SMF": + sar_files = [item for item in sar_files if '.SMF' not in item] + sar_files = sar_files + [manifest] + # get extracted files if present + files_extracted = get_list_of_files(dest) + # compare extracted files with files in sar file + present = all(elem in files_extracted for elem in sar_files) + return present + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True), + dest=dict(type='path'), + binary_path=dict(type='path'), + signature=dict(type='bool', default=False), + security_library=dict(type='path'), + manifest=dict(type='str', default="SIGNATURE.SMF"), + remove=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + rc, out, err = [0, "", ""] + params = module.params + check_mode = module.check_mode + + path = params['path'] + dest = params['dest'] + signature = params['signature'] + security_library = params['security_library'] + manifest = params['manifest'] + remove = params['remove'] + + bin_path = download_SAPCAR(params['binary_path'], module) + + if dest is None: + dest_head_tail = os.path.split(path) + dest = 
dest_head_tail[0] + '/' + else: + if not os.path.exists(dest): + os.makedirs(dest, 0o755) + + if bin_path is not None: + command = [module.get_bin_path(bin_path, required=True)] + else: + try: + command = [module.get_bin_path('sapcar', required=True)] + except Exception as e: + module.fail_json(msg='Failed to find SAPCAR at the expected path or URL "{0}". Please check whether it is available: {1}' + .format(bin_path, to_native(e))) + + present = check_if_present(command[0], path, dest, signature, manifest, module) + + if not present: + command.extend(['-xvf', path, '-R', dest]) + if security_library: + command.extend(['-L', security_library]) + if signature: + command.extend(['-manifest', manifest]) + if not check_mode: + (rc, out, err) = module.run_command(command, check_rc=True) + changed = True + else: + changed = False + out = "allready unpacked" + + if remove: + os.remove(path) + + module.exit_json(changed=changed, message=rc, stdout=out, + stderr=err, command=' '.join(command)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sapcar_extract.py b/plugins/modules/sapcar_extract.py new file mode 120000 index 0000000000..7bb47b10c1 --- /dev/null +++ b/plugins/modules/sapcar_extract.py @@ -0,0 +1 @@ +./files/sapcar_extract.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/files/__init__.py b/tests/unit/plugins/modules/files/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/plugins/modules/files/test_sapcar_extract.py b/tests/unit/plugins/modules/files/test_sapcar_extract.py new file mode 100644 index 0000000000..05946e8217 --- /dev/null +++ b/tests/unit/plugins/modules/files/test_sapcar_extract.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Rainer Leber (@rainerleber) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from 
ansible_collections.community.general.plugins.modules.files import sapcar_extract +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible.module_utils import basic + + +def get_bin_path(*args, **kwargs): + """Function to return path of SAPCAR""" + return "/tmp/sapcar" + + +class Testsapcar_extract(ModuleTestCase): + """Main class for testing sapcar_extract module.""" + + def setUp(self): + """Setup.""" + super(Testsapcar_extract, self).setUp() + self.module = sapcar_extract + self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path) + self.mock_get_bin_path.start() + self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone' + + def tearDown(self): + """Teardown.""" + super(Testsapcar_extract, self).tearDown() + + def test_without_required_parameters(self): + """Failure must occurs when all parameters are missing.""" + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + self.module.main() + + def test_sapcar_extract(self): + """Check that result is changed.""" + set_module_args({ + 'path': "/tmp/HANA_CLIENT_REV2_00_053_00_LINUX_X86_64.SAR", + 'dest': "/tmp/test2", + 'binary_path': "/tmp/sapcar" + }) + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.return_value = 0, '', '' # successful execution, no output + with self.assertRaises(AnsibleExitJson) as result: + sapcar_extract.main() + self.assertTrue(result.exception.args[0]['changed']) + self.assertEqual(run_command.call_count, 1) From 2e8746a8aadc1af2ddc5a9e140851a9c0cf27092 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 4 Jun 2021 09:53:34 +0200 Subject: [PATCH 0107/2828] Fix spurious test errors. 
(#2709) --- tests/integration/targets/lookup_random_string/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/targets/lookup_random_string/test.yml b/tests/integration/targets/lookup_random_string/test.yml index 52a572379b..edbf9fd035 100644 --- a/tests/integration/targets/lookup_random_string/test.yml +++ b/tests/integration/targets/lookup_random_string/test.yml @@ -9,7 +9,7 @@ result4: "{{ query('community.general.random_string', length=-1) }}" result5: "{{ query('community.general.random_string', override_special='_', min_special=1) }}" result6: "{{ query('community.general.random_string', upper=false, special=false) }}" # lower case only - result7: "{{ query('community.general.random_string', lower=false) }}" # upper case only + result7: "{{ query('community.general.random_string', lower=false, special=false) }}" # upper case only result8: "{{ query('community.general.random_string', lower=false, upper=false, special=false) }}" # number only result9: "{{ query('community.general.random_string', lower=false, upper=false, special=false, min_numeric=1, length=1) }}" # single digit only result10: "{{ query('community.general.random_string', numbers=false, upper=false, special=false, min_lower=1, length=1) }}" # single lowercase character only From d49783280e9a3ba1df47ed999e3e8ec05b7206d0 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 4 Jun 2021 10:34:27 +0200 Subject: [PATCH 0108/2828] Add new module/plugin maintainers to BOTMETA. 
(#2708) --- .github/BOTMETA.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index a3fb8e1f35..a31ce91a4e 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -120,6 +120,8 @@ files: $lookups/nios: maintainers: $team_networking sganesh-infoblox labels: infoblox networking + $lookups/random_string.py: + maintainers: Akasurde $module_utils/: labels: module_utils $module_utils/gitlab.py: @@ -652,6 +654,9 @@ files: maintainers: elasticdog indrajitr tchernomax labels: pacman ignore: elasticdog + $modules/packaging/os/pacman_key.py: + maintainers: grawlinson + labels: pacman $modules/packaging/os/pkgin.py: maintainers: $team_solaris L2G jasperla szinck martinm82 labels: pkgin solaris From 4396ec9631065ad85154f272193e58d289f21876 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 4 Jun 2021 10:35:35 +0200 Subject: [PATCH 0109/2828] Fix action plugin BOTMETA entries. (#2707) --- .github/BOTMETA.yml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index a31ce91a4e..74b53db418 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -6,12 +6,9 @@ files: support: community $actions: labels: action - $actions/aireos.py: - labels: aireos cisco networking - $actions/ironware.py: - maintainers: paulquack - labels: ironware networking - $actions/shutdown.py: + $actions/system/iptables_state.py: + maintainers: quidame + $actions/system/shutdown.py: maintainers: nitzmahone samdoran aminvakil $becomes/: labels: become @@ -853,6 +850,8 @@ files: labels: interfaces_file $modules/system/iptables_state.py: maintainers: quidame + $modules/system/shutdown.py: + maintainers: nitzmahone samdoran aminvakil $modules/system/java_cert.py: maintainers: haad absynth76 $modules/system/java_keystore.py: From a343756e6f3a9ed24f1cb3c16a97dfbae2273bf3 Mon Sep 17 00:00:00 2001 From: Alex Willmer Date: Fri, 4 Jun 2021 18:11:46 +0100 Subject: [PATCH 0110/2828] Fix 
repeated word in description of fs_type (#2717) --- plugins/modules/system/parted.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/system/parted.py b/plugins/modules/system/parted.py index bbb8c1408b..3796cfc40b 100644 --- a/plugins/modules/system/parted.py +++ b/plugins/modules/system/parted.py @@ -100,7 +100,7 @@ options: fs_type: description: - If specified and the partition does not exist, will set filesystem type to given partition. - - Parameter optional, but see notes below about negative negative C(part_start) values. + - Parameter optional, but see notes below about negative C(part_start) values. type: str version_added: '0.2.0' resize: From c49a384a6522dd9d9b80fd7810df9a8b829e5127 Mon Sep 17 00:00:00 2001 From: christophemorio <49184206+christophemorio@users.noreply.github.com> Date: Fri, 4 Jun 2021 19:12:29 +0200 Subject: [PATCH 0111/2828] Terraform: ensure workspace is reset to current value (#2634) * fix: ensure workspace is reset to current value * chore: linter * chore: changelog --- changelogs/fragments/2634-terraform-switch-workspace.yml | 2 ++ plugins/modules/cloud/misc/terraform.py | 9 ++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2634-terraform-switch-workspace.yml diff --git a/changelogs/fragments/2634-terraform-switch-workspace.yml b/changelogs/fragments/2634-terraform-switch-workspace.yml new file mode 100644 index 0000000000..247447b3a8 --- /dev/null +++ b/changelogs/fragments/2634-terraform-switch-workspace.yml @@ -0,0 +1,2 @@ +bugfixes: + - terraform - ensure the workspace is set back to its previous value when the apply fails (https://github.com/ansible-collections/community.general/pull/2634). 
diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py index 8a34f9699b..86521ed264 100644 --- a/plugins/modules/cloud/misc/terraform.py +++ b/plugins/modules/cloud/misc/terraform.py @@ -447,7 +447,14 @@ def main(): command.append(plan_file) if needs_application and not module.check_mode and not state == 'planned': - rc, out, err = module.run_command(command, check_rc=True, cwd=project_path) + rc, out, err = module.run_command(command, check_rc=False, cwd=project_path) + if rc != 0: + if workspace_ctx["current"] != workspace: + select_workspace(command[0], project_path, workspace_ctx["current"]) + module.fail_json(msg=err.rstrip(), rc=rc, stdout=out, + stdout_lines=out.splitlines(), stderr=err, + stderr_lines=err.splitlines(), + cmd=' '.join(command)) # checks out to decide if changes were made during execution if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out: changed = True From 1a4af9bfc34e417d65e0eb81990d0f023a03c606 Mon Sep 17 00:00:00 2001 From: Anton Nikolaev Date: Sat, 5 Jun 2021 05:53:02 -0700 Subject: [PATCH 0112/2828] Reduce stormssh searches based on host (#2568) * Reduce stormssh searches based on host Due to the stormssh searches in the whole config values, we need to reduce the search results based on the full matching of the hosts * Removed whitespaces in the blank line * Added changelog fragment and tests for the fix. 
* Added newline at the end of the changelog fragment * Added newline at the end of the tests * Fixed bug with name in tests * Changed assertion for the existing host * Update changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml Co-authored-by: Felix Fontein * Adjusted tests * New line at the end of the tests Co-authored-by: Anton Nikolaev Co-authored-by: Felix Fontein --- ...reduce-stormssh-searches-based-on-host.yml | 2 ++ plugins/modules/system/ssh_config.py | 2 ++ .../targets/ssh_config/tasks/main.yml | 36 +++++++++++++++++++ 3 files changed, 40 insertions(+) create mode 100644 changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml diff --git a/changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml b/changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml new file mode 100644 index 0000000000..2f3e400e7e --- /dev/null +++ b/changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml @@ -0,0 +1,2 @@ +bugfixes: + - ssh_config - reduce stormssh searches based on host (https://github.com/ansible-collections/community.general/pull/2568/). 
diff --git a/plugins/modules/system/ssh_config.py b/plugins/modules/system/ssh_config.py index 943f6b44fc..be177baaaf 100644 --- a/plugins/modules/system/ssh_config.py +++ b/plugins/modules/system/ssh_config.py @@ -209,6 +209,8 @@ class SSHConfig(): hosts_removed = [] hosts_added = [] + hosts_result = [host for host in hosts_result if host['host'] == self.host] + if hosts_result: for host in hosts_result: if state == 'absent': diff --git a/tests/integration/targets/ssh_config/tasks/main.yml b/tests/integration/targets/ssh_config/tasks/main.yml index 12f277b455..bd5acc9e04 100644 --- a/tests/integration/targets/ssh_config/tasks/main.yml +++ b/tests/integration/targets/ssh_config/tasks/main.yml @@ -183,3 +183,39 @@ that: - not mut_ex.changed - "'parameters are mutually exclusive' in mut_ex.msg" + +- name: Add a full name host + community.general.ssh_config: + ssh_config_file: "{{ ssh_config_test }}" + host: "full_name" + hostname: full_name.com + identity_file: '{{ ssh_private_key }}' + port: '2223' + state: present + register: full_name + +- name: Check if changes are made + assert: + that: + - full_name is changed + - full_name.hosts_added == ["full_name"] + - full_name.hosts_changed == [] + - full_name.hosts_removed == [] + +- name: Add a host with name which is contained in full name host + community.general.ssh_config: + ssh_config_file: "{{ ssh_config_test }}" + host: "full" + hostname: full.com + identity_file: '{{ ssh_private_key }}' + port: '2223' + state: present + register: short_name + +- name: Check that short name host is added and full name host is not updated + assert: + that: + - short_name is changed + - short_name.hosts_added == ["full"] + - short_name.hosts_changed == [] + - short_name.hosts_removed == [] From 0e6d70697c57889c7af66757dd501f38422cf0b8 Mon Sep 17 00:00:00 2001 From: fkuep Date: Sat, 5 Jun 2021 22:38:42 +0200 Subject: [PATCH 0113/2828] Wire token param into consul_api #2124 (#2126) * Wire token param into consul_api #2124 * Update 
changelogs/fragments/2124-consul_kv-pass-token.yml Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * #2124 renamed release fragment to match pr, removed parse_params. * putting look back in, do some linting #2124 * try more linting * linting * try overwriting defaults in parse_params with get_option vals, instead of removing that function completely. * Revert "back to start, from 2nd approach: allow keyword arguments via parse_params for compatibility." This reverts commit 748be8e366d46b43cc63b740cb78cde519274342. * Revert " linting" This reverts commit 1d57374c3e539a2cb640bf1482496d80f654b7d8. * Revert " try more linting" This reverts commit 91c8d06e6af442bd130859a64afbf5d558528e74. * Revert "putting look back in, do some linting #2124" This reverts commit 87eeec71803929f08e2dbfc1bfa3c76c79ea55d0. * Revert " #2124 renamed release fragment to match pr, removed parse_params." This reverts commit d2869b2f22ad64d84945ed91145de5b52bff2676. * Revert "Update changelogs/fragments/2124-consul_kv-pass-token.yml" This reverts commit c50b1cf9d4a53fbbfaa8332ba3a7acca33909f09. * Revert "Wire token param into consul_api #2124" This reverts commit b60b6433a8000459b40c4fdcee1da4fe436729a9. * minimal chnages for this PR relative to current upstream. * superfluous newline in changlog fragment. 
Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- changelogs/fragments/2126-consul_kv-pass-token.yml | 4 ++++ plugins/lookup/consul_kv.py | 8 ++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2126-consul_kv-pass-token.yml diff --git a/changelogs/fragments/2126-consul_kv-pass-token.yml b/changelogs/fragments/2126-consul_kv-pass-token.yml new file mode 100644 index 0000000000..a60fd2efcd --- /dev/null +++ b/changelogs/fragments/2126-consul_kv-pass-token.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - consul_kv lookup plugin - allow to set ``recurse``, ``index``, ``datacenter`` and ``token`` as keyword arguments + (https://github.com/ansible-collections/community.general/issues/2124). diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index 7ba7e5ac90..d567b7f687 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -171,10 +171,10 @@ class LookupModule(LookupBase): paramvals = { 'key': params[0], - 'token': None, - 'recurse': False, - 'index': None, - 'datacenter': None + 'token': self.get_option('token'), + 'recurse': self.get_option('recurse'), + 'index': self.get_option('index'), + 'datacenter': self.get_option('datacenter') } # parameters specified? 
From 9d8bea9d36c1896ce7fff26bbed175ad7b96d601 Mon Sep 17 00:00:00 2001 From: The Binary Date: Sun, 6 Jun 2021 02:25:49 +0545 Subject: [PATCH 0114/2828] open_iscsi: allow same target selected portals login and override (#2684) * fix: include portal and port for logged on check * refactor: remove extra space * fix: allow None portal and port on target_loggedon test * add auto_portal_startup argument * fix: change param name for automatic_portal * add changelog fragment * refactor: Update changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml Co-authored-by: Felix Fontein * add version added info to auto_portal_startup arg * add example for auto_portal_startup * fix: remove alias for auto_portal form arg_spec as well * refactor: elaborate in fragment changelogs Elaborate change Co-authored-by: Amin Vakil * open_iscsi: elaborate changelog fragment * Update plugins/modules/system/open_iscsi.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- ...ingle-target-multiple-portal-overrides.yml | 3 + plugins/modules/system/open_iscsi.py | 67 +++++++++++++++++-- 2 files changed, 64 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml diff --git a/changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml b/changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml new file mode 100644 index 0000000000..cb14a08ba0 --- /dev/null +++ b/changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml @@ -0,0 +1,3 @@ +minor_changes: + - open_iscsi - also consider ``portal`` and ``port`` to check if already logged in or not (https://github.com/ansible-collections/community.general/issues/2683). + - open_iscsi - add ``auto_portal_startup`` parameter to allow ``node.startup`` setting per portal (https://github.com/ansible-collections/community.general/issues/2685). 
diff --git a/plugins/modules/system/open_iscsi.py b/plugins/modules/system/open_iscsi.py index 222bb82f3d..570925f6a4 100644 --- a/plugins/modules/system/open_iscsi.py +++ b/plugins/modules/system/open_iscsi.py @@ -57,6 +57,11 @@ options: - Whether the target node should be automatically connected at startup. type: bool aliases: [ automatic ] + auto_portal_startup: + description: + - Whether the target node portal should be automatically connected at startup. + type: bool + version_added: 3.2.0 discover: description: - Whether the list of target nodes on the portal should be @@ -102,10 +107,18 @@ EXAMPLES = r''' community.general.open_iscsi: login: no target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d + +- name: Override and disable automatic portal login on specific portal + community.general.open_iscsi: + login: false + portal: 10.1.1.250 + auto_portal_startup: false + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d ''' import glob import os +import re import socket import time @@ -158,12 +171,18 @@ def iscsi_discover(module, portal, port): module.fail_json(cmd=cmd, rc=rc, msg=err) -def target_loggedon(module, target): +def target_loggedon(module, target, portal=None, port=None): cmd = '%s --mode session' % iscsiadm_cmd (rc, out, err) = module.run_command(cmd) + if portal is None: + portal = "" + if port is None: + port = "" + if rc == 0: - return target in out + search_re = "%s:%s.*%s" % (re.escape(portal), port, re.escape(target)) + return re.search(search_re, out) is not None elif rc == 21: return False else: @@ -219,8 +238,14 @@ def target_device_node(module, target): return devdisks -def target_isauto(module, target): +def target_isauto(module, target, portal=None, port=None): cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target) + + if portal is not None: + if port is not None: + portal = '%s:%s' % (portal, port) + cmd = '%s --portal %s' % (cmd, portal) + (rc, out, err) = module.run_command(cmd) if rc == 0: @@ 
-233,16 +258,28 @@ def target_isauto(module, target): module.fail_json(cmd=cmd, rc=rc, msg=err) -def target_setauto(module, target): +def target_setauto(module, target, portal=None, port=None): cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target) + + if portal is not None: + if port is not None: + portal = '%s:%s' % (portal, port) + cmd = '%s --portal %s' % (cmd, portal) + (rc, out, err) = module.run_command(cmd) if rc > 0: module.fail_json(cmd=cmd, rc=rc, msg=err) -def target_setmanual(module, target): +def target_setmanual(module, target, portal=None, port=None): cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target) + + if portal is not None: + if port is not None: + portal = '%s:%s' % (portal, port) + cmd = '%s --portal %s' % (cmd, portal) + (rc, out, err) = module.run_command(cmd) if rc > 0: @@ -265,6 +302,7 @@ def main(): # actions login=dict(type='bool', aliases=['state']), auto_node_startup=dict(type='bool', aliases=['automatic']), + auto_portal_startup=dict(type='bool'), discover=dict(type='bool', default=False), show_nodes=dict(type='bool', default=False), ), @@ -288,6 +326,7 @@ def main(): port = module.params['port'] login = module.params['login'] automatic = module.params['auto_node_startup'] + automatic_portal = module.params['auto_portal_startup'] discover = module.params['discover'] show_nodes = module.params['show_nodes'] @@ -333,7 +372,7 @@ def main(): result['nodes'] = nodes if login is not None: - loggedon = target_loggedon(module, target) + loggedon = target_loggedon(module, target, portal, port) if (login and loggedon) or (not login and not loggedon): result['changed'] |= False if login: @@ -368,6 +407,22 @@ def main(): result['changed'] |= True result['automatic_changed'] = True + if automatic_portal is not None: + isauto = target_isauto(module, target, portal, port) + if (automatic_portal and isauto) or (not automatic_portal 
and not isauto): + result['changed'] |= False + result['automatic_portal_changed'] = False + elif not check: + if automatic_portal: + target_setauto(module, target, portal, port) + else: + target_setmanual(module, target, portal, port) + result['changed'] |= True + result['automatic_portal_changed'] = True + else: + result['changed'] |= True + result['automatic_portal_changed'] = True + module.exit_json(**result) From 463c576a67acdd101ecc1d181ad184742a22bcaa Mon Sep 17 00:00:00 2001 From: quidame Date: Sun, 6 Jun 2021 08:20:52 +0200 Subject: [PATCH 0115/2828] iptables_state: fix async status call (-> action plugin) (#2711) * fix call to async_status (-> action plugin) * add changelog fragment * Apply suggestions from code review Co-authored-by: Felix Fontein * rename a local variable for readability Co-authored-by: Felix Fontein --- ...-iptables_state-2700-async_status-call.yml | 4 +++ plugins/action/system/iptables_state.py | 28 +++++++++++-------- 2 files changed, 20 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/2711-fix-iptables_state-2700-async_status-call.yml diff --git a/changelogs/fragments/2711-fix-iptables_state-2700-async_status-call.yml b/changelogs/fragments/2711-fix-iptables_state-2700-async_status-call.yml new file mode 100644 index 0000000000..8f94cf5178 --- /dev/null +++ b/changelogs/fragments/2711-fix-iptables_state-2700-async_status-call.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - "iptables_state - call ``async_status`` action plugin rather than its module + (https://github.com/ansible-collections/community.general/issues/2700)." 
diff --git a/plugins/action/system/iptables_state.py b/plugins/action/system/iptables_state.py index 887f3f47f9..6884e77713 100644 --- a/plugins/action/system/iptables_state.py +++ b/plugins/action/system/iptables_state.py @@ -40,18 +40,26 @@ class ActionModule(ActionBase): "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " "'ansible_timeout' (=%s) (recommended).") - def _async_result(self, module_args, task_vars, timeout): + def _async_result(self, async_status_args, task_vars, timeout): ''' Retrieve results of the asynchonous task, and display them in place of the async wrapper results (those with the ansible_job_id key). ''' + async_status = self._task.copy() + async_status.args = async_status_args + async_status.action = 'ansible.builtin.async_status' + async_status.async_val = 0 + async_action = self._shared_loader_obj.action_loader.get( + async_status.action, task=async_status, connection=self._connection, + play_context=self._play_context, loader=self._loader, templar=self._templar, + shared_loader_obj=self._shared_loader_obj) + + if async_status.args['mode'] == 'cleanup': + return async_action.run(task_vars=task_vars) + # At least one iteration is required, even if timeout is 0. for dummy in range(max(1, timeout)): - async_result = self._execute_module( - module_name='ansible.builtin.async_status', - module_args=module_args, - task_vars=task_vars, - wrap_async=False) + async_result = async_action.run(task_vars=task_vars) if async_result.get('finished', 0) == 1: break time.sleep(min(1, timeout)) @@ -106,7 +114,7 @@ class ActionModule(ActionBase): # longer on the controller); and set a backup file path. 
module_args['_timeout'] = task_async module_args['_back'] = '%s/iptables.state' % async_dir - async_status_args = dict(_async_dir=async_dir) + async_status_args = dict(mode='status') confirm_cmd = 'rm -f %s' % module_args['_back'] starter_cmd = 'touch %s.starter' % module_args['_back'] remaining_time = max(task_async, max_timeout) @@ -168,11 +176,7 @@ class ActionModule(ActionBase): del result['invocation']['module_args'][key] async_status_args['mode'] = 'cleanup' - dummy = self._execute_module( - module_name='ansible.builtin.async_status', - module_args=async_status_args, - task_vars=task_vars, - wrap_async=False) + dummy = self._async_result(async_status_args, task_vars, 0) if not wrap_async: # remove a temporary path we created From f74b83663bcc1f6269a2bb56ba646f24f6218578 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 7 Jun 2021 17:58:26 +1200 Subject: [PATCH 0116/2828] Bugfix + sanity checks for stacki_host (#2681) * fixed validation-modules for plugins/modules/remote_management/stacki/stacki_host.py * sanity fix * added changelog fragment * extra fix to the documentation * Update plugins/modules/remote_management/stacki/stacki_host.py Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/stacki/stacki_host.py Co-authored-by: Felix Fontein * rollback params Co-authored-by: Felix Fontein --- .../fragments/2681-stacki-host-bugfix.yml | 4 + .../remote_management/stacki/stacki_host.py | 84 +++++++++++-------- tests/sanity/ignore-2.10.txt | 3 - tests/sanity/ignore-2.11.txt | 3 - tests/sanity/ignore-2.12.txt | 3 - tests/sanity/ignore-2.9.txt | 3 - 6 files changed, 54 insertions(+), 46 deletions(-) create mode 100644 changelogs/fragments/2681-stacki-host-bugfix.yml diff --git a/changelogs/fragments/2681-stacki-host-bugfix.yml b/changelogs/fragments/2681-stacki-host-bugfix.yml new file mode 100644 index 0000000000..3403bfbfbe --- /dev/null +++ b/changelogs/fragments/2681-stacki-host-bugfix.yml @@ 
-0,0 +1,4 @@ +bugfixes: + - stacki_host - when adding a new server, ``rack`` and ``rank`` must be passed, and network parameters are optional (https://github.com/ansible-collections/community.general/pull/2681). +minor_changes: + - stacki_host - minor refactoring (https://github.com/ansible-collections/community.general/pull/2681). diff --git a/plugins/modules/remote_management/stacki/stacki_host.py b/plugins/modules/remote_management/stacki/stacki_host.py index 8bdc0f82f6..fda0c5d318 100644 --- a/plugins/modules/remote_management/stacki/stacki_host.py +++ b/plugins/modules/remote_management/stacki/stacki_host.py @@ -12,46 +12,48 @@ DOCUMENTATION = ''' module: stacki_host short_description: Add or remove host to stacki front-end description: - - Use this module to add or remove hosts to a stacki front-end via API. - - U(https://github.com/StackIQ/stacki) + - Use this module to add or remove hosts to a stacki front-end via API. + - Information on stacki can be found at U(https://github.com/StackIQ/stacki). options: name: description: - - Name of the host to be added to Stacki. + - Name of the host to be added to Stacki. required: True type: str stacki_user: description: - - Username for authenticating with Stacki API, but if not - specified, the environment variable C(stacki_user) is used instead. + - Username for authenticating with Stacki API, but if not specified, the environment variable C(stacki_user) is used instead. required: True type: str stacki_password: description: - - Password for authenticating with Stacki API, but if not + - Password for authenticating with Stacki API, but if not specified, the environment variable C(stacki_password) is used instead. required: True type: str stacki_endpoint: description: - - URL for the Stacki API Endpoint. + - URL for the Stacki API Endpoint. required: True type: str prim_intf_mac: description: - - MAC Address for the primary PXE boot network interface. + - MAC Address for the primary PXE boot network interface. 
+ - Currently not used by the module. type: str prim_intf_ip: description: - - IP Address for the primary network interface. + - IP Address for the primary network interface. + - Currently not used by the module. type: str prim_intf: description: - - Name of the primary network interface. + - Name of the primary network interface. + - Currently not used by the module. type: str force_install: description: - - Set value to True to force node into install state if it already exists in stacki. + - Set value to C(true) to force node into install state if it already exists in stacki. type: bool default: no state: @@ -59,6 +61,30 @@ options: - Set value to the desired state for the specified host. type: str choices: [ absent, present ] + default: present + appliance: + description: + - Applicance to be used in host creation. + - Required if I(state) is C(present) and host does not yet exist. + type: str + default: backend + rack: + description: + - Rack to be used in host creation. + - Required if I(state) is C(present) and host does not yet exist. + type: int + rank: + description: + - Rank to be used in host creation. + - In Stacki terminology, the rank is the position of the machine in a rack. + - Required if I(state) is C(present) and host does not yet exist. + type: int + network: + description: + - Network to be configured in the host. + - Currently not used by the module. 
+ type: str + default: private author: - Hugh Ma (@bbyhuy) ''' @@ -128,7 +154,7 @@ class StackiHost(object): 'PASSWORD': module.params['stacki_password']} # Get Initial CSRF - cred_a = self.do_request(self.module, self.endpoint, method="GET") + cred_a = self.do_request(self.endpoint, method="GET") cookie_a = cred_a.headers.get('Set-Cookie').split(';') init_csrftoken = None for c in cookie_a: @@ -145,8 +171,7 @@ class StackiHost(object): login_endpoint = self.endpoint + "/login" # Get Final CSRF and Session ID - login_req = self.do_request(self.module, login_endpoint, headers=header, - payload=urlencode(auth_creds), method='POST') + login_req = self.do_request(login_endpoint, headers=header, payload=urlencode(auth_creds), method='POST') cookie_f = login_req.headers.get('Set-Cookie').split(';') csrftoken = None @@ -163,8 +188,8 @@ class StackiHost(object): 'Content-type': 'application/json', 'Cookie': login_req.headers.get('Set-Cookie')} - def do_request(self, module, url, payload=None, headers=None, method=None): - res, info = fetch_url(module, url, data=payload, headers=headers, method=method) + def do_request(self, url, payload=None, headers=None, method=None): + res, info = fetch_url(self.module, url, data=payload, headers=headers, method=method) if info['status'] != 200: self.module.fail_json(changed=False, msg=info['msg']) @@ -172,24 +197,16 @@ class StackiHost(object): return res def stack_check_host(self): - res = self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST") - - if self.hostname in res.read(): - return True - else: - return False + res = self.do_request(self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST") + return self.hostname in res.read() def stack_sync(self): - self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST") - self.do_request(self.module, self.endpoint, 
payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST") + self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST") + self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST") def stack_force_install(self, result): - data = dict() - changed = False - - data['cmd'] = "set host boot {0} action=install" \ - .format(self.hostname) - self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") + data = {'cmd': "set host boot {0} action=install".format(self.hostname)} + self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") changed = True self.stack_sync() @@ -203,7 +220,7 @@ class StackiHost(object): data['cmd'] = "add host {0} rack={1} rank={2} appliance={3}"\ .format(self.hostname, self.rack, self.rank, self.appliance) - self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") + self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") self.stack_sync() @@ -215,7 +232,7 @@ class StackiHost(object): data['cmd'] = "remove host {0}"\ .format(self.hostname) - self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") + self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") self.stack_sync() @@ -258,8 +275,7 @@ def main(): .format(module.params['name']) # Otherwise, state is present, but host doesn't exists, require more params to add host elif module.params['state'] == 'present' and not host_exists: - for param in ['appliance', 'prim_intf', - 'prim_intf_ip', 'network', 'prim_intf_mac']: + for param in ['appliance', 'rack', 'rank', 'prim_intf', 'prim_intf_ip', 'network', 'prim_intf_mac']: if not module.params[param]: missing_params.append(param) if len(missing_params) > 0: # @FIXME 
replace with required_if diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 16c94a2c09..1855fc963f 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -46,9 +46,6 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index db731736c0..4727b8d6df 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -45,9 +45,6 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/stacki/stacki_host.py 
validate-modules:doc-default-does-not-match-spec -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index de3634ae40..74b1ea16f6 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -45,9 +45,6 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 9cb31a442d..2dac082311 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -36,9 +36,6 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules 
plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/net_tools/nios/nios_a_record.py validate-modules:deprecation-mismatch plugins/modules/net_tools/nios/nios_a_record.py validate-modules:invalid-documentation plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:deprecation-mismatch From 6a41fba2f89c4b2f0d63b5b3b34b5b649101dde1 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 7 Jun 2021 23:06:23 +1200 Subject: [PATCH 0117/2828] ModuleHelper - also uses LC_ALL to force language (#2731) * also uses LC_ALL to force language * adjusted test_xfconf and test_cpanm * added changelog fragment * Update changelogs/fragments/2731-mh-cmd-locale.yml Co-authored-by: Felix Fontein * adjusted chglog frag per PR Co-authored-by: Felix Fontein --- changelogs/fragments/2731-mh-cmd-locale.yml | 5 ++++ plugins/module_utils/mh/mixins/cmd.py | 9 ++++-- .../modules/packaging/language/test_cpanm.py | 26 ++++++++-------- .../plugins/modules/system/test_xfconf.py | 30 +++++++++---------- 4 files changed, 39 insertions(+), 31 deletions(-) create mode 100644 changelogs/fragments/2731-mh-cmd-locale.yml diff --git a/changelogs/fragments/2731-mh-cmd-locale.yml b/changelogs/fragments/2731-mh-cmd-locale.yml new file mode 100644 index 0000000000..ea905cce4b --- /dev/null +++ 
b/changelogs/fragments/2731-mh-cmd-locale.yml @@ -0,0 +1,5 @@ +bugfixes: + - module_helper module utils - ``CmdMixin`` must also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). + - xfconf - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/issues/2715). + - cpanm - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). + - snap - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py index 724708868e..0367b6173c 100644 --- a/plugins/module_utils/mh/mixins/cmd.py +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -155,13 +155,16 @@ class CmdMixin(object): def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs): self.vars.cmd_args = self._calculate_args(extra_params, params) options = dict(self.run_command_fixed_options) - env_update = dict(options.get('environ_update', {})) options['check_rc'] = options.get('check_rc', self.check_rc) + options.update(kwargs) + env_update = dict(options.get('environ_update', {})) if self.force_lang: - env_update.update({'LANGUAGE': self.force_lang}) + env_update.update({ + 'LANGUAGE': self.force_lang, + 'LC_ALL': self.force_lang, + }) self.update_output(force_lang=self.force_lang) options['environ_update'] = env_update - options.update(kwargs) rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options) self.update_output(rc=rc, stdout=out, stderr=err) if process_output is None: diff --git a/tests/unit/plugins/modules/packaging/language/test_cpanm.py b/tests/unit/plugins/modules/packaging/language/test_cpanm.py index fd52fc1cc9..10a2955019 100644 --- a/tests/unit/plugins/modules/packaging/language/test_cpanm.py +++ 
b/tests/unit/plugins/modules/packaging/language/test_cpanm.py @@ -38,7 +38,7 @@ TEST_CASES = [ ), ( ['/testbin/cpanm', 'Dancer'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err ), ], @@ -65,7 +65,7 @@ TEST_CASES = [ 'id': 'install_dancer', 'run_command.calls': [( ['/testbin/cpanm', 'Dancer'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -77,7 +77,7 @@ TEST_CASES = [ 'id': 'install_distribution_file_compatibility', 'run_command.calls': [( ['/testbin/cpanm', 'MIYAGAWA/Plack-0.99_05.tar.gz'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -89,7 +89,7 @@ TEST_CASES = [ 'id': 'install_distribution_file', 'run_command.calls': [( ['/testbin/cpanm', 'MIYAGAWA/Plack-0.99_05.tar.gz'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -101,7 +101,7 @@ TEST_CASES = [ 'id': 'install_into_locallib', 'run_command.calls': [( ['/testbin/cpanm', '--local-lib', '/srv/webapps/my_app/extlib', 'Dancer'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -113,7 +113,7 @@ TEST_CASES = [ 'id': 'install_from_local_directory', 'run_command.calls': [( ['/testbin/cpanm', '/srv/webapps/my_app/src/'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -125,7 +125,7 @@ 
TEST_CASES = [ 'id': 'install_into_locallib_no_unit_testing', 'run_command.calls': [( ['/testbin/cpanm', '--notest', '--local-lib', '/srv/webapps/my_app/extlib', 'Dancer'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -137,7 +137,7 @@ TEST_CASES = [ 'id': 'install_from_mirror', 'run_command.calls': [( ['/testbin/cpanm', '--mirror', 'http://cpan.cpantesters.org/', 'Dancer'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -158,7 +158,7 @@ TEST_CASES = [ 'id': 'install_minversion_implicit', 'run_command.calls': [( ['/testbin/cpanm', 'Dancer~1.0'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -170,7 +170,7 @@ TEST_CASES = [ 'id': 'install_minversion_explicit', 'run_command.calls': [( ['/testbin/cpanm', 'Dancer~1.5'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -182,7 +182,7 @@ TEST_CASES = [ 'id': 'install_specific_version', 'run_command.calls': [( ['/testbin/cpanm', 'Dancer@1.7'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -215,7 +215,7 @@ TEST_CASES = [ 'id': 'install_specific_version_from_git_url_explicit', 'run_command.calls': [( ['/testbin/cpanm', 'git://github.com/plack/Plack.git@1.7'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # 
output rc, out, err )], 'changed': True, @@ -228,7 +228,7 @@ TEST_CASES = [ 'id': 'install_specific_version_from_git_url_implicit', 'run_command.calls': [( ['/testbin/cpanm', 'git://github.com/plack/Plack.git@2.5'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, diff --git a/tests/unit/plugins/modules/system/test_xfconf.py b/tests/unit/plugins/modules/system/test_xfconf.py index dee387bd7d..d8c9a30a9a 100644 --- a/tests/unit/plugins/modules/system/test_xfconf.py +++ b/tests/unit/plugins/modules/system/test_xfconf.py @@ -49,7 +49,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '100\n', '',), ), @@ -69,7 +69,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/i_dont_exist'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (1, '', 'Property "/general/i_dont_exist" does not exist on channel "xfwm4".\n',), ), @@ -89,7 +89,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'], # Was return code checked? 
- {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'Value is an array with 3 items:\n\nMain\nWork\nTmp\n', '',), ), @@ -109,7 +109,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/use_compositing'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'true', '',), ), @@ -129,7 +129,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/use_compositing'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'false', '',), ), @@ -155,7 +155,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '100\n', '',), ), @@ -164,7 +164,7 @@ TEST_CASES = [ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity', '--create', '--type', 'int', '--set', '90'], # Was return code checked? 
- {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '', '',), ), @@ -190,7 +190,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '90\n', '',), ), @@ -199,7 +199,7 @@ TEST_CASES = [ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity', '--create', '--type', 'int', '--set', '90'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '', '',), ), @@ -225,7 +225,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'Value is an array with 3 items:\n\nMain\nWork\nTmp\n', '',), ), @@ -235,7 +235,7 @@ TEST_CASES = [ '--create', '--force-array', '--type', 'string', '--set', 'A', '--type', 'string', '--set', 'B', '--type', 'string', '--set', 'C'], # Was return code checked? 
- {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '', '',), ), @@ -261,7 +261,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'Value is an array with 3 items:\n\nA\nB\nC\n', '',), ), @@ -271,7 +271,7 @@ TEST_CASES = [ '--create', '--force-array', '--type', 'string', '--set', 'A', '--type', 'string', '--set', 'B', '--type', 'string', '--set', 'C'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '', '',), ), @@ -295,7 +295,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'Value is an array with 3 items:\n\nA\nB\nC\n', '',), ), @@ -304,7 +304,7 @@ TEST_CASES = [ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names', '--reset'], # Was return code checked? 
- {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '', '',), ), From 1e34df7ca05c729b29dd27d45d1fa68b0bc87640 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Mon, 7 Jun 2021 17:47:57 +0430 Subject: [PATCH 0118/2828] Add aminvakil to committers (#2739) --- commit-rights.md | 1 + 1 file changed, 1 insertion(+) diff --git a/commit-rights.md b/commit-rights.md index d10bea9af7..7aae8617fb 100644 --- a/commit-rights.md +++ b/commit-rights.md @@ -67,6 +67,7 @@ Individuals who have been asked to become a part of this group have generally be | Name | GitHub ID | IRC Nick | Other | | ------------------- | -------------------- | ------------------ | -------------------- | +| Amin Vakil | aminvakil | aminvakil | | | Andrew Klychkov | andersson007 | andersson007_ | | | Felix Fontein | felixfontein | felixfontein | | | John R Barker | gundalow | gundalow | | From 7c3f2ae4af4d3d16f6c9ef58d5eab499499ee7c9 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Mon, 7 Jun 2021 18:57:51 +0430 Subject: [PATCH 0119/2828] Remove aminvakil from supershipit section as it is not needed anymore (#2743) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 74b53db418..6727373e85 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,7 +1,7 @@ automerge: true files: plugins/: - supershipit: aminvakil russoz + supershipit: russoz changelogs/fragments/: support: community $actions: From 4c50f1add7e23b231afe6b80c8536ab4427b4005 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 7 Jun 2021 21:22:21 +0200 Subject: [PATCH 0120/2828] Re-enable flatpak_remote tests (#2747) * Automate test repo creation, re-enable flatpak_remote tests. * Linting. * Another try. 
--- .../targets/flatpak_remote/aliases | 1 - .../setup_flatpak_remote/create-repo.sh | 51 ++++++++++++++++++ .../setup_flatpak_remote/files/repo.tar.xz | Bin 15496 -> 5524 bytes 3 files changed, 51 insertions(+), 1 deletion(-) create mode 100755 tests/integration/targets/setup_flatpak_remote/create-repo.sh diff --git a/tests/integration/targets/flatpak_remote/aliases b/tests/integration/targets/flatpak_remote/aliases index 3623baa5c2..39291d435b 100644 --- a/tests/integration/targets/flatpak_remote/aliases +++ b/tests/integration/targets/flatpak_remote/aliases @@ -6,4 +6,3 @@ skip/osx skip/macos skip/rhel needs/root -disabled # FIXME diff --git a/tests/integration/targets/setup_flatpak_remote/create-repo.sh b/tests/integration/targets/setup_flatpak_remote/create-repo.sh new file mode 100755 index 0000000000..1b09bb7956 --- /dev/null +++ b/tests/integration/targets/setup_flatpak_remote/create-repo.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +set -eux + +flatpak install -y --system flathub org.freedesktop.Platform//1.6 org.freedesktop.Sdk//1.6 + +echo $'#!/bin/sh\necho hello world' > hello.sh + +export NUM=1 +flatpak build-init appdir$NUM com.dummy.App$NUM org.freedesktop.Sdk org.freedesktop.Platform 1.6; +flatpak build appdir$NUM mkdir /app/bin; +flatpak build appdir$NUM install --mode=750 hello.sh /app/bin; +flatpak build-finish --command=hello.sh appdir$NUM + +flatpak build-export repo appdir$NUM stable + +mkdir -p gpg +chmod 0700 gpg +gpg --homedir gpg --batch --passphrase '' --quick-gen-key test@dummy.com future-default default 10y + +KEY_ID=$(gpg --homedir=gpg --list-keys --with-colons test@dummy.com | grep fpr: | head -1 | cut -d ':' -f 10) + +gpg --homedir=gpg --export "${KEY_ID}" > dummy-repo.gpg + +BASE64_PUBLIC_KEY=$(base64 dummy-repo.gpg | tr -d '\n') + +cat > repo/com.dummy.App1.flatpakref < repo/dummy-repo.flatpakrepo <IAKH+ooF000E$*0e?f03iVu0001VFXf})@BbB4T>v>5N@un}Hi1L#y~)Z- z{84!W3f)Esb7jq5>fDNTXAtbJtFLh#1{z_@XdBbYO#I_6ejX}{k|qPV5*F#tV*(KZ 
z&8y&v@-_@%O$~^*VQ2&(ntw~=X4{`14ln0V8gAs7B)fP}6FAt~Kb*`}cin_F`PjKM z=gDE4I1`vmQzegt`_V-)uy0DRn8>u3#F{_S@Q?#j3Pf@d^@SuYQthQu*4bx}Wxgr^ zpzQnk7-!^ooWgD}Y<{Z)25({OuU|9(2RU`Q#t(Nms*UWhQUf?8R&lYuSf-byBX0K8 z^I}P z;jsPqKwt(94>NX^omDYN6iLjF8^%Fq74AggLtsy+E4;4W#LVx|OUJ_~rkBr=I9G`S z-`lF>zkaJh8c8uPIv7!1R#=VKn4$v5FVtOAT~e-|DG}P7pu?u zg2?4<^6J^x4!h7r^PhZef;GY0o&cgx`7rh$+7fCPFWMF3s{tZp9=q<`0M;3wj#_1g zy)AF_+45$2ZeN7&sq;gF7CAoR3TC{{A3kUkG`e@pmB_i|#|gx}R%AMZmCD*tQpM$A zmFmu|SZA0^w?;w2^J#7(!b+rZ$z`H1_w}(oKbr6?5#;6I*|C;sURHR-b#HK0GgHwl zWlO4wdEbX1D|5Dcj7=Qurtcf#d@UCH2{rv6(9o2ArZcO^C#+^h0?!I+spQj?8~88n z&E4e>R8^&N7+DMiqM6PD31Wv0giL;`XN??_@^TLV4hsB_WSOq_+c>H2$Dzqflv>9< zh9V#;-2)ZRNP$CY1HLx8Cx$ODMd$i>V~Lai-_=C*H-nB$WV7w4_|_RdZxY=@`)PU9 zGU-R&Fm5PR*l=QSUedJi+q~F%qp&wRE~3tnSu#;tV>1kSB#HCAD|;^tLzOsOObfGC zxl)v7r8Kf`JlpPMyfI81Mv@JKh+8vmq_9c@{#q=-N;#Y-+4)mJ77tr#mrvm(XFmSP zG`G5J9L|fhqonikigA&LzgIIzkD0JRwG!tEUQDZ}LZ|#5Ual6Xaun6M+(HDC&+jiB zxRa*KG2+)q*k=l9wu2EZW|NN;6w^HoI>w-V*1+BB&9amec?NGnkvXq>&?V<0S79xs zE);IWKN6$<=AHej%3g*(e+_xcmwPhy$}qM#E#@F{P)$5H9+(}YyEW0ZG;nQX00+iN zlwO}I{>mXc^=^Y1-~obpGtoH%Sg8VHbMnK%bcKXdHdNlomWIl)pzNw;A8lK?zqCL$ zNtth-_+Epfbrx87EX;Qr527&3oF#4}aE}OWga@2=Ygo`4h3Y^~>!X^knst8lFS%W5 z22|-1!ABSJyfe!9gP2T9@aCFHh_E-a`8#UE-nKN8K4Lw8PgF5GHLGhpl%DwU3GWAP zb)mCUGrJ1spZEsCe>(_RS+yylRnFv0&(HZPqRJiW{T9=KHLS>_%NoQ>AjypKmy>(K zStvuAI*Zm{SABkrITglicDPv7B=GyQ>NkIV*#NS+VqdSyXmh*i{Giej9qux%*V4!v z04}f5;K6g%G(3;;FrbnT{Vlj~6@tey6YDC&(V_B1$QFNs87L<@XGuRxb z-rW@&t@xrgkZjqx%OVI!>_M>rt9_fwcs-qXQM@YqO~2H@>&AFrl^o0=9KMF&+7hz8 zm9Z0~y=WvtQFC@A4HYGmx0;`V`j zTBirBrULceJufTw9S0oQrlQkQqL4U~0>5<_BHQxuNcybgxl+KdM;RrG>B_c1s)bSoOGy?7`dzpWg+m83c_Bfe}_G zs~{p8wWo+@hwfLuI(xH@T?B%SsJ~n@;DL)@45ldz@biq=qp>{HHsq0s`InRO9bUdOPHf1S(2GBdtU}U`lXj+^OaaJ0#?cIX zz`a_1oZoGHeOnV%cz?|}Ux!?f|BWlg`mh%HY&oR|SzSM*%ToXmZz^C-H2~|!G3&`d zCzie=S>qD)_I?rZJ#|kd6V&+zn{H`5&L{S%Bod&aajrBb!)rj#Xs~5!G zp(wBu%!MMS?AkiP;grvxR|8?)kmt33Ks3^UqR*k(HGaA1apho-VQ-xRYL`*9PsVQ* zgU=Tzp-Ab0@PL%xh?tv)VOvoR%C=YDQrO`Fh~-V^Gg1Am90nf<$hXh$Mj&A`=pj>y 
zBXH6u+B1j{O~Vxn)sMPc8YQ5@K?48f!TheWcJnpl#Kdl+zNEIal0C0Xxx*ay&BGTt zvJv?!ox`mJ2UfORJJr84*kW2)s;aizuM35WS7&vHCw{<)N>KpncOw%vC$e2I!T>HE z;hHcWk1BUPTsSh_eym-mFuSvZaS;o`j@#x|qz6gkPwEso`$pi&OqTtHJNMOyPYOY38xFB-B}fwBm;0_?4Ml@bxI6QfTp>DJr-^F%|W_ z$-Wlx_;e(f1)dO8QR&45;N*QG-c)lB-2)7qR9vsd*c(}Yp@Jf2nV38Y3W=}J^`9Xj z4^|H>om2DJ(AfpN96L<`CEV)e(`iQ>DemWjW z?#hsDws>z8($(!8fOwYzV|Q^~@ixxn#8TOMTj;{`7Ldq?z2;shLNYtuY?zsJ`8tJ>aXJ5j0S z95)Rb9qy4F`v;^O_Y)*9M+qOq=lX`2v?C+SZkuxNydCv@w$`OWJH43g%4!lk& zNEo2_0s;9TWv_KVzRWhB2Ntq-Xo$j9DB{AwI_{ac@FK$0Qjp|iDZC6+DAFbO-q&!W z9JWT6;p*bW#@Cmiceiq%R)QY|FOBs#3++h4b*o)4u*L^zDzolJbHB(k0dE3?6_-O% z7-;9MqjJs?r_{nm`Ve6AxTvebCYENAxyCW){#(NeH}OZa`;g1*+bVLLFzAHB|D-Bn z!&(K|XeU~^-&Y{a#(j+58(5j~ zLjy#sI4fG4xQ^X_kbH@F#sn}1!;X)2ykHi9Jc;`A34k%vE14LeTJF{G!4Ji4?=wR- z)zjB5K8bajhY%D+T|1S3fN#zt-?FPFOh}pCVrr6 zLTI^gc?vM+d%#YhHQbeZxy-NXZF;C!_oRTgHKSK$Q_i#ex>VZ#>|sc?EPYD?LI#1E zcN(_#74PgUZ)dY`{>3M{@8q@Kp?k1FwB^2l;-~(+A!BfIC4r|?y~l9}juyA2H!gXc zTrJt&h767*t{P?16$BU?g@U+X4XU622arWnyi9bWh1h~F4bjM%yWc*SR0g^_L0|Cx zry_hh3|iWe>UOV_PGS-w!U{Qou~SgKg3phkt06Pe&Z(lGvJ0%rrEzd8*_N{U-7w?m zGE%LF={|x#?(XU5_u{|zRAPuBYDdOfd9~rXcTN2OtliPE$`jdGDLDNHbzXBzGh
  • hitUvXK$sVWTuHQ4`ZdY6WW&L~ zYrr|UGRP5;Nk&K=A2@&o4^{3P!YbP$DR$8*fgz^$Sh1*c?`S(CY4dUUb9om`0^=(hr*Ce&t{Yz$ihlP(i21%ghcqYblmS!V)?*#g4GUc%yqbWw{q1EfU96 zdoUIg(;!t*YXS&P+ETGQ7(}uXNHx5qNdp#Hx)B2o6bgEIb#`ISwH203$fl`Qb1*w8`e`gB{_gynce{ znS8HH0?~yJro9En)?SFWZJC*b%Fvhh%OB7QjMOL9w*(-bFv?tLYogb}Jp6=hzY5rX z?7M`G$X;7JH)FIV&1&Brw(oot0Q^U?K2zJyz&+smc9-F6oS63gQp=36sY*D2xWZ>F zg7$w^{|SQy1o?Y9o%?D!Zix2U>s==f$i}meb-24lJQC*VpKu^3`Q#_GFkN(MW!Ybh z;=JF10DQ1ofCk~}{axu`@Q7TW6SP*i3jw6T*A{*zTwokFZi`oMbVj<_)}Xk36vi|x zKnsnf&pCxuvj;dc=59GwcagEJ9;;#uXs%TpKp+N?lF=RGED=EWet5#4$fiA|z_#p* zDDs0^=ISWtn=}DH7UbDn&`)$%x{t08%}FrW$b4G@9?{mEu~b~p4^r>ZL4+O<^Q zmWlBiQw+J1h|j%WUg;L-5wgiJ=nm)iEM>W06V*1yVI7rY^~0x~&!d=3nUl{gI4{MV zz{aoZ3`dP7q7{j+#B8hwedfY5r-n7`N8l4_12d1GeXQ!~GIK_)wTC0@6?g-b`B|lq2bG-zEloEM#*Y&L z>iQ{;vHn+)Q5IkmswrfxFf>0Hcf5c;#lq`U5DK5@QZ$xL_P)KksR+k449)S}RcT`3 zJ55QKuecH|_`LmTb$1nrdBjsKYJ>X?1=~Ldqaj@_k1A{!`qcnM9DxOzRjRZ$Gue$^ zKz)<LSF(d)Q0N`7h)2otJhEf+5ejV9Whz*;U%qDS zt!lPdMwT3{&pgHhh*}cnO$VoLYDLG;dkBFazn;HN^G=pPX|&& z`V(cM@h`Phy4B$m(G{apx$P@({_U4>E8Y>2+M)c?uO87t{wifMT1UloIoWeD)hB^= zFO2o1-z8`^O=PW1lyG$kt+EBo`AjbCs_N9QhRkW7K73AO-T6XVHPpVHeb~JhW0qKr zRkaP0XpQac_8d48VgHO4$-X_c9oM4h=WMmwCp|UYRHi0LK4F8HR3BA*>6ZsKZEFaZ zI7@6sqGB1x`6Wk^m6RgWy?l5DIu#0YJL&LVd*Ek_H_O;4Duo=^hH*OpHLstavsX5} za)|yUF{mUbR2do&rEK2-YWR4}6Mrn_q{3&RJ6g4PG_!pp#H3S$7x^y%nWeZw;Mwxy z2>Wa#Q((X*uxZKtlj?bxGlD>XMSo{^(n(q=)p#hSiN?stSFuvg!%px*vF-_oWE7_& z=^fo<7l_9=OvK$LGdGZO=<4tiGJ}tGYA4JSz{JLVGcm%62fajWhb+`0Ezbw+2uDZ_ z=`6RLzWT#dX3gKoYE$EXm_JK_!mwVwBN=!?H?4++G9*&csKyne*_VwXSq9#E1~s~F zMIi85_ukPN(T+_}kR2-9k#W*;r8n|SyDzP+b{?0njVMSb!wV%(ZkHTv2`vCv>5N@un}Hi1L#v;q3XE)^E@g)ZgXNF&=H-HakA{z?!OsX)+L=0K`5`#q1FO)| z@QO`e$oR3lw&+4zR2Fe{GDco>H`7iqBNOnV7teHb6Cx<1 z>&?~pmcoQ>3F1}g!1ago(bX6G(FrG%P8{pcIOF-xo_l?yTiKNM7as!Z8&PN*zkoZq zRfv_0BOT6xNqO-DT)?XgX3Hr_afav-8UCX=pDeZvpS|y&_CbTSHU3^Mqu{K#a#T6B zv6qsHt`nNV3v_vf;%pW}b!CM?r~+(D)v~F9oL?c;AfhZla*q}_51k=3T8I~`bTg%q zB8Kw8JEF&~hK1yEGLS)MA$U>hBkH(#$p@zHr(tIo4f4p(AUZr!-1T8dJ5)$~AOI4r 
zW9^M9Qbgeb_ObTL4>f^sT0QmORdL|eGO$f(AFS?hN}lrs!|di&@vdDFno_+LqVea_ zr24t2x`|&>OC*)UIpfkJ%B`zTXWW9(VXi_gpeU}!?f}HsU$nngar$hOm6pVc%&!J1 z9vRkQ8Reqem-8mN(8FcoeDU5o(r?#OxjYBWacRXpj@BCAm0`_+0xhih(lbAY0_>FrNoa!%Df448ZQtUTI1G zXP&^XUT(}}D4Bg&%o!w<3O;RX+K1vM*q>JNIkO#s>fbT0hOL>_+VvLdb@pRvfC~c0?)kd59 zQ73a#H&@Rp|1;IYrLP=M82GE~c3EK9#S)~Djz>cMMeKc_ti8U@N`4(bt+(`0K#$KfJ|F$RC*=F+UB9Zsl z0n2|(7&YcQrLmkoNj`IOMofpTqLLL4=%Az;>ZQ( zYe8(VMEZua0oT@ihvqxv9xyr1zV)G|ky;0D02OPp@;hNt(h2xA?o02Z7rP(F*SH>% zQ4AwTZU^hAa_i{ah31bJ#IXysyY zX@>R#q*el}uUGBnRDsxzPW?fn2*H}VhLGt6bVL!~I;<#U={?)k!3XVQJM2QTJ}>j_ zU%cL^u9hB{=(Nu{sfQ3Vk`+RE6-9m1AgK`Q&;#%X?R_k|IiFKkp2=h9J*e2GiBvbB8?K2D z1614QF6@zGK^exO1Tx?MKzw2)qZU{dbRUsctI>`t6_y;-k#zlzXs^VLmU9O^kCOT` z-djjy<~z1`sgr|vL*Q`}@RL3t6pJeLi1Et0Pt8?(v+gRg~a7DHw^sxW&V8sffy zM$03@D=5FP#{27Qs|dj39B1$2C!}G$BW^pqpqE<}xDax$wJ>%Wti4kahv0}u(G=Z5 z^AEH}i*F0OMCq7Q@M$xVocH%0m@ zlQA0~IcF16`Rz33DTpycuj+>*Z8!%dL&H207h ziRTl7xfSN?D!?6fGiDIJ`-}M{Z^=QF0hwtxuJbswYImvEb`^O>0`Ln(TJW#Szp>#T z3@RwV6K|SdpsdGYcJF&`E6<2$%UVqPmA<^&dF!$(@R=b-ZsH&VkBs^5Bn1_rmmf2Y zGE|JT>%^v~DwTc_v!MMm5r@b&8sc-fpE^nQssvZ$+NL_QK&d0XQ;^>Lc1$gki???{YOC z0+m_yqN54p5G3sQ=@5DU^K>FH7^x!+5m;DUzVyj`LDv*Me_97$P8k>! z_fI2iVgXe?5!Jt=c9*>zG0s~cU)BP+mc*D7@jX~u)tzlC3||n)E!*&Ez8rob2Te*+ zMUhZ2*}+Clxnwl`bj6z5q*=5^pIa;9dG{Yp%uLG)>~utH!dI5NFq#o5*|5n-%w38{<5cU$J;*F_3vz*s{x%%g zh{gF|46k^2Gu1!qdfT>lCrhhYk%Qn{rc>MQBp}SL*5iw0yeBwKKM1ffNYh~*l5Svl z#+4J76h@8_k|X75=IL31->W}0-v!th*Jb7wE;~B{r?N=7dwesU1p6|*^>Rr&0b7c^ zs8Dt}1edPcwzfkL>9*Q(mc)r_f39f#-xqn%F$iHaHJle3df?^cj!;4;!Z|(Z)PHN! 
zSXOMsvjI+t5|f?w99Xi{`N+4$t4ikr*2d5JvAmQB1+TNRhFXH|b_vdf233{!jb;sx zBD4x3>pWRQ&5LtkXMUDmk7EG@HDxJ42d1IB{In~H4l5}I&ssd`l7!Q9I4$dc znM}Q*I%DQ``Ipnj_Y^h!|7{|iAyy=5VI(4yL+_ldy)pnbZO5GXxlqdTeV-|!?mKBM z0f+AK*G$x!h4eipvk-r4gsl|F*vHsEiu&N0i6wtCJl@ik{Dh=N3FfNSA2 zGg4mmpM)X$DDQ>%gEY4gB+B&YG)sK#d4hS*av}5!ApJ;H7C6V|cuSs_rOgWCaV=t~ z#22B9k(a93vaA@QmD~A@#|MaPNWS30<=>Yn$(hzu=kF$qvNm?NO399rdp&anluXtO z0oSFyYz!ynhAWIFV{z+sFaB$6RGj)xJ0>$S6=A9}ND&-9--(;xbL~XD|I^G05Nssn z0QG)?#18%gRglqJL8E_3djb~Ky$3&*t_o#MXBu2`m^IQ_v7?bN14_Pe;@Lp+I-E;- z;~nMJIDSFn91WL4@-Gd6hAk}W2J&tZ>Qys-uN-U!%E8j&fN7>ynONqb>~4V4BdZ5* z%@5HDs*E~b(m0p zD#0A`ulh&`IY$E?eit<9_{-hKV-`gWEbs0@E33^#NTLugR9>mQxxFM_7^)J%$^R6o z?_M7A9D&i(c*+u-oH*as}OxEpu}Re90Jm~Ik4JF<*b`- zVlV*e;$-7?))>>%=0Q@5h7peKoa^z2rz!5h=D2K2X2%@*EypQByNl*b6o}4hFo9N9 z1(hR5dqsE^*oO<8Yt^iwucNLIVL=m_$Y>1{r-XM9lt%iE^hi4OTa(@`&8+BXnA|D~ zhB6J%@*=84I+t3&jh7o!|`Q&T0@`EfAdckB_L(B8u-yB}BsoHlL5GKDH z&It#hf9aAai%sp`w<#P zINaytsen+?N^`hHyw7d3giPZ#%%9<^_J{ho7PO zM?(FZ+_embuWRqUpcFzkYpf?CI2g~pmoWhBUNH)f8~+5&KUKM0t9fhh!ng{yRpnKS z)Nz)bH6VnJnaF&#BZ9@_8CK?dcgM~#54aTIV<%C!D7{}JbERouYhYR3iX-oj9j@Z%y+HBZ3$Y;*~Df0Yw8Aa6YQKkWmFgLv^1C|SD z9*CR^4RDxiL)u1&Q9p0kfgbzwJmZjbnRfzowtMOUzR6ry?KbJSYf2NbVL!GWtD5>x~R<VX`x9O(VS|DT;|;T-J-4VbSzW$d4%2+tblhJV>%q&@7ndFS%9kf7l^8L;Hxt3iiv}O^%^Mb}N8svo19=&%%hy*Scb+mZhrZNPRR|1Z!%=)a8TZo5M$l+$OIk>DNg5@T8(>jL-a& zRWsg<&V6B1arj+9$M|xX4Ac>ThMWVT>NAqjcs7QTBoP{5`A{TCEh2$+F4_?_@`NjxeEoWj36_{ z7)7U%pqrDd!)7cTTSbY9+z!WW=Lp-Gk3XnlIH6odYS0H5ynLZIlW*>DID?TkavR)x zfomB9Qk^my_G1bZ_eND*R1Jy}OOL-?m^1y&c1{bB%eE=9(>B1f6j~lrhTN119bAQc z*PS!l@AHTsM|;Gq^t9if5}2$cms|_KQCJNtMHHx=@smY0R$cs!rhT3bt8$b+Vz`4M zSmvX2cKl|j2a(@t{?+FHW?HU^OVO1H-Z$FM+ZX%>=XeX4h!t8@7czO&DQ$4sr`dy1bGh!c4Mh4?OcHbf?syRI4Ppw~&EH|{ z)L)gyiI{8OuGd0RRFBo{z7=w$9R;hjY)H;Mk^Y!!>Z=Ze{DZGNmy+_dLv4MUm5OTe z7)Md_54xnp?e%GEll{izT4EM#op(^O%njvp?=?M(Bi4sjpt&w3ZBxy=28GU!y;GKlUZIIwz zJ;6CSrhzKA2CfEd*Ft;jDM#%GIwgEoZXI=c*xo@^{^WtmKwg985;Bm7#kTw=ZG4r*?9S6n>^PHuU{?<1+pHq=jq#O&TcSVmdJ5vyG(7V5 
zi$?tNiVn`M8lF2mn~pl+Elyl}>z=0fc7T|I@5{l64n_(W2B~~Irt%n`#F?|7{*y*+ z^Tgh8j-bcK4Z8@hcdnC?mF0r_w92T6KR zokNjEwz=)NVfqg%unEx(%Tc;Yl=Ofi*5QW9D9qU_6q_P^&Ilh06hhtLZKTKSYw@SP zu`K>&se;@?$nnG_z#HA8~>H(n`s47H;26MS^_Yw6@e!gt&VP!FJQe9K7gS zmKP6Z^(jpotq!kI;#wXHgpRdtVF< zaix0OMBhHsrg9cxwSy~KdAlSpJ^^~8>c`(X?cg;1L+;2Yk`aT07RnytdaOO(S7)mdxKAu(L9H_)Ma|#|5Rsf8mdt)z` z=txb^Oo{BPs-Vv+>MS1e$}%0M zoYF3N;~EWaak@txO$PqrnXD1frwP821m+Z*faKMqK>rI#^pd z=2RVDC(iVqPJq;(xF=jm!T@6T$(n&`yw@t zQwA-skbI56J;@Ec0wrle`ChI$e34)QcDPXW4tp#3eJ~uZ|JqkWK3{|Wxe*NRkIJ|i z-K!BceOoix%Hcn1kUTOL+v*c{INnNhkEg`XKJoCR&PWS?UeU2(pU-oq!6Fck+LN8)x;2} zK*g@NE1D)$SH^uj_}8%%@}{`FCIMmCA0h1%CF6e_??=UxhO+&pY6_0msRfF}QV(xA z)X2mzy!j^%h=+Gy8SrKJt&k9#GLhEYd$f)_p%3|RQ^83~*9!Jyo7?T+;*a4k;_^P9 z_rdQcu&M~2tD$uM>(P6EGlA zYQDF}xaw4@zdjXR6n~4>lGxXr=jgFm9q0jpsrGSlOK}p{G4q5QLm2JMnf0=K<>~tc zbTY_%<+HNA!_43b)aI^T^Fg9X1krde3Lf(oW#Kyq+v{$r%tZr;X>f}URnUE( zOm9G~9>KBkQG@JVV0fClr%wiBNX6-L2a#s1kh4mz(T^e%rOuXTi))yBHhFOPF~eb@ zs%kt}OTwxnQdHeTK75RrvC&mMU7>7d4!GZ)28ou(6)FbymfiSGEQoNO(Ff7qRPsAY z_v5b!UQDG$qLra!>+D=NMk@<+y=ceh+!VFMulvk04OY|RIl5`W`^Y0RgJ@VX0i>sRFR=pKe- zdg$v5PnXNux99@B34^w3V9Frhc%zv|2&6LksL*WeUN?Om2-G}St~9A4h%-$Ybr(On z6k3?jxF9W5JuOd;!dxArme&F&0kJJcLUR;jtVYhnl~Sm*9&J4Bpy<{C^d5!On^@BX zmLo7$#M$_3v!6phsm$`pe`1)J{=_0t`U3hF+9j@sHH|FD;YC-A83IarOh zPCz@FfjaP?&JdnfyX>%8o?;b4C7y-lE*u%!`y?4$Mg?nt7PRuy+9nRSs`pu=H6&FN z^^JT@J2ls$8*7k&td0oVtA-04v_PA*cRxL|uUVQ>A`5a+I2%j*d}5Za*+{OC9SPO`X_pJ>N8ZWct7vq}I>>CY`U|Uke2RcwkmhHUCQSsTdP# zvINmvtaxmP{8ONPiEkoDtvB`sQHY_HUNi_16)--TnKFG~t$|UABtM(0W4WRB69yv2 zrEg!zNfLG|tJH^S0A+|lg>UIG`=tKx`Hmg9X+y5d?(FbkC)=TUH z*si4x2w?f|1xzXqX^rg#hfu=%w37Nq@`wI?p~GxVYgk`}9JA6;TVFJSKPj2NANt>s zXQf4rSOqrdUsyq%U!R$CzQqN~`E<2Ruey-??(Dux1mw!a$^=dcSms7RFkr4~qK5q( zEELR+tUvbVxxL68=Q9h7yCCqYU63%UJJp8w8QBa0TM(vVkz{0SP0~45cY6uC8U|FU zm{(!!;8HT}6jd7d)JaQIjjD3L(0W!6hCT`G$3IO@^>-gC$nmh-WaPgxaSefP%v7(T zk(!L-YlONJ`R#7EwChKIWgQYJ+$wsqSWLU-%H5yViyGF;A zypPobv0+&($Wv=C;j$}2uQ`qK%a-EdsY7lWB3)FFo=_m~5>YYwxKh>!kTJH=6t<*y 
zQnlYTd7%*}>>WM3?LP>&x32pNteIP86QHv=2R>Ha^ZrdjgQa5{Jdqksgx+uez1bvCq~xV^#& zPnSuI`tfh4+kua!++u?5X-=mDUmL0 zY};2K$O&WeC5$G>A}D-QGv3f3oS++uXVlGGI9+Nu?d9eCXcfyy9BWytT*kdz3wfBIzEF>Ip4lho zlzjX?2)2zhephxrn82>esM-D25H)_h%%y(&W|YP~`>9S~gKt2$r<`$pgx)!`oUA~` zryMFD+Uu>ORffTW;Ah|#Db9}DZ>3t$JyKboSjUj}8~^2on!SEvj>rSIFd?$bt>zDP zeX1f%=HS9xvU%Tlu0<eVdz3{liS zn8le9xs!cRx|1EJX3bLGp}Y4?*!`9ErL= z#%D1|dtfuAx6E$nrvtM6)kIc#sXmnnN?Qd=1XX*r*aD#b2{04)4O$fmR4&3=$^k50 zS|_oK}~3Lhzb-^|_TG>;i3i zw#-w1Avf&Jd9GM1MtSy)`_!-aSGPE27hi@PX|_-(V`5L^`cglM{OsCGKTinEXRAv< z#ef98ReAd9RpwSUGpHXCdmz9a*rk?7$!GMCfK{esQ0^Dmd0*0YAhR+=EgsGT2`skT zbEeB6q>e=CFLK7WnhdDpIWhJq(tgDH{Nu!ZIQUa9~+8W1fNeWDl;pJEz zVRD3X(MUrp2^j6pfOss4@b&D&3IKAZCTB-XL z+n0N?ppOQ{uV+>n3am3;NA~B#RQkI&aMGT;s4b@+_-R@1F+(8iC(bl>LVY4+xho=u^AAxbxlUzNI za3dblAmnBdjh=`?)a4eHEM7YBK3tO41^6$mShn`UH(S#rj!>YyNl z4O&+Bgy~IBW`fbF4Py$|IfVxa}_kAvH#2kmk|ZBsI3Fn1LL|M zGr~9EA>eCJFq#3xtKV@#Dd%F-LDb)EzDrJOf>v2k<;DQxfZD8Xk;izi^91u5njzh! zM+C449$8v^eL-pDP+5Yau#fd>kD$na{WbC$b&Ksv`B0CA2qCS6wVCt3+}#@H_J7g_;X^8ggqQV?uwY z?!vx~v(DxE09vXAqOou_LuYf-cP6bH7-t1QUDJPrzii~jKJakfb|Iyj$T8k~!Mea^)G>Am*rK0$TH3`AZ4cY3`l*DP#^}0p&TG0FFj-QodOA^FRePlyHs#9 z6}jy8!~4eun16|-b7>(b1k);v=RU#o5_kUVn=u1S#F6Mr7jX*%tjmu7+*@gvbJYbgw+twD~?<@_Xu43rN0SH`dKZ7s&9$9GryQIK?REUdMD92B;h6JJl+Zv6Y`>iA?l6liH zn2Qj9ED0DfA+yO25FicFEu>c$An@LWCzLP2Gd;Bis`W;l$W``!_DQ>FVi=g`mK(($ zx@~ADf5hj6b!QcMf8}3mi9ac4MZ3;LlQfYd<~@M_Pt*Vw;oxJwXW62p%XTa)?)rFk zad77g5h|=nfWd$h9KBU?_>l%k%ebdKmJs3$?>LA>Mi8Z#{-kN*e4x^F{Kn2j3t$a< zevNkjt!}m;L#%BJsX8I+3y}nfLV`pxNRF-)h0lKs5GUvOA?lylO%V;$mb6 z1pdt-E#{j#RxLlmIG9Z#WiO^}^KkbLo3JB7(WQACux2y<(GP3JO0aaeK(7c0tk0Ey z!e;Gs^pt(Fgiiv~4euikh%K2K4vTdXKvAr#a!kLpcxVr8I^e*k>wg4wSFYEb5)!g|1QSt30y-}AFB+-AW12f@S0oU4Q=N>BI3Wp+REKWNxaWC#@|>5(OiRM$YJ z+5X+bE8cS@(24P*-U<_6T;?usBxZ;WrFkVk2 zUR^=W21!OszOd^GR51(nFL@9|vLJgJ-pk%|40Rn`H(LX7GSV|IQrqrN_xbAsg-@Zl z2+-tUiHCNG)w#*vtF#Qs?E)hxOi?xAovpCR|USuysIS1>igYxGG zQIz7u(Xt9y^0KhiiL6g1%#$BvuweNgKV?yS5{njxYYgfk57ZdEkLPFjJDL}T4EH+& za{Hl`+6P=F7ZFawI|spOZj9T=WD*_Md-9Q# 
z_BZ{1#aZ-R4qvwb)VXMPRT#1{!(C~HzgI+WiS)u|{kq40RB2UZ%ZnDQ4Kl<<0ks}o3>B1?pr=sju zg8~b8anYIol87Rh8G_V&zPHpM3|rhE zbevddhxmi{uF8IO`bflW)lMT!ZzvW@8-lAzh)bMU_w+JYEE6jUQH20znRs?2A;N?) zz~Y>9Q)_@6KtYT>GDdn2QfD`rA!VLKr463EXjB!D-}^b6y&t}8{!knko~Y7icCwt3 zSdsnk@v`56$D|eZzOx|5i6Uy89a2MSf4eJFC9wV#rGJkI2iG6Vub zRg)>$Gx)3#0iMSu`AaB|RP%E2+#o{oyU@R}EnjpVaNmEN$>c^3u4X>jGLk20m)WNa zoB(g;C2O3jlEy*vqU6>3GINj4&8xPIrt8V<$v=IVp0tv+?Q}-t{iF46>mD5EA6z8P8-$(Vf&DwWers=`4Ly<0xEDUnV?cD45E#4Yt*V5kpxL) zW+k|J!PZ99Ww;01(R;n{MqbiTpb-1a5d^DFo)lbqI?lEm-H#Mt?L|^sy<5RvUFnCn zI^9yU~`A z!C{&IYk@oj2Z?_b`yc_5a3kQT--(l-EH~sqNM9NiriyCIBPxn3$=MZIV!P6LBT)J7 zuE!c0VAb9_iquO-w!lwoW3e)kUg1IjIWeaLxm{pm!a&Qq!gDuKg}%VQS}oS&^s$_+ zWhJDx>PThuE$LyLx8mQ*{TsL1o4569O;+H15w9xdAhsOa6q{plu}MY4U#;+e%^ef9 za7NCH%G*h_1Jo^d$ri7tq%7#+ZnZEl0f2q3kPs}9e0u2=iT`Sq11++KDuh6`QtLav)MTv-s;ADAkIa;;Jv0MPJOy@CAjS z;j5-TMqbgaojyu4l!!r5nL_^aE3C?Ow2 zgF8s6zs$}bm(u?b!iuwj_#PHFuuDuU6fJbTRHZJ);)~ivr=Z==xax$cWr?MWjjMC2 z=p$D5BvJbg6|5xX`G$UsT5NQs1Z@Lhwuno_SMDZtAAX6; z462%$#v=)j*u~^Ne~*>CwwR~Sh?rt(HVk6z)BM?QKBSuP<%`oc3y{y(T&$eRB>`qW z7QFD>GTW7okA8D)Fw}uD*{IV=eWvCK1efE zlP5cFN7y$WGV$pj)lzI{a@V~ZT<`}Ni9KD?dF}SzB;*O=iJt+`D_vO{=w$aRHtGk$ zoeeYiEC}XMAV$8uh)EmlB)#V9RypW0uJdGEwlPLC3B9Xj0=$B)dM6l4ebmFD^X-lM zBkvA0!=kB{rLe2b3@!Lwy74Dn(?aip(i)?w0U~j9QSe2yg;{PT1{nkmv|BmWi%)U`QamteRv=9(IaNbei~uZ*R$MZ1B}r z^BGWkduxU}k(|7*#4^QT8Pn@;o{Av;=dwK5O_=g9#9My6z9UD41@*`X#l8ITw%Soe z$lZgSV9ZDFsZ}kSqaelMnI< z@F}HoP{69v(?(Yyc#hT(di154uTj#~k_)8vgN@pxR@2Qh77mylz4>vkvDdIHNbijc zgZxL~R=JTv2|lzdEWp@a53Ys^#c&bfb`}}02S$?ndv7q{>hZnYgaMS z$NxERzV^&-d*kMYH=vo=BqUeO9E{|JgR>1TteV9V#e>(WnCmf~Mz}Ff_^#N4dZn0l zkAoBul)*~nQAns7IinCyBaSkEV%RIeF(`MMe<>TcN_7%=!it|D;8IIy38_nH?#L;M ztG^w$9YqeP(;1Q&L$ucXg2$s|!I=@DhV}P9JxuBhpy2_Np`@2bD5GEX)VX%F%G=k&@V(#e0^FEtpd7gx3}6CQS%6^SSFeB}bK#&< zxhffwFQ9=}f2#6|sl|*LBv;>Ly{jMzZeeln@s5L>&s?o=@>|^zGL-NmxR#mDW^S>w z2@lx5$RZ9agL|9FoA0a$3!Fu&x!w+1SOC?=*pbFAhBppZsytTW#WPy6NJ|c6`bfcE zURH*>4>jvKNXl^x3^HDCoXySzAY*ucZ=+D<@vN`n)Y 
zA;6{4p?+=h05#Qq+RgzX)vH2ZZnFPV5!-=71{l4vlkr~_!a&FQu_KE(Y$?Z#jMw^9Yx}xoDoJ#IjtJt}6Kt$m2_t>9 zejNxZs2Q;dIrJ-y+NVrHkY+5PrN(Bnl!{mZ2DPsI)$_mpc(i%U`0GfB0iUaungn($ z0*Sly7hY`AwMN5W27}}&cRGjVdSp!7^^wl5auMIyDpr00^F8IVcCH(Q$Gb0rA$=(@ z|KXAXS8C!_;%*;Fq0J*2#hldLPyA&V0NRH}cH@O;>3(Bz@yJyp-?{X6m~^q$VYoH? zsuD+J`p(&PA{(+7Gk3bRvKp~UYh#$deJcL4Pt_=^NXE0j;1w~D5iG1tkISO*(s(0I zj|?zX1oOjNFp2oN@Vi#^6R#)s_s3Wx1$@eadQ&BI=4|Es25_~MNB~{!{<0|5|GYaV zDZ|NYlcj9xdfD`|L;<7c?CKYbNxbHB3!q`%)+q!*el&NeK#WEn(P0C;HXcTfEF}as zwhFBji7z4rgSfKPmf>INuf+whMu%o~W{DSejj36&2cuJ>-;n_wwtTG!=tu$$TiL;> zv^V-Bj~1;@t72AFj(eb;_CD-PvKmTN2rRD}G_W)A;TbK9v&U97=a8SGlz#%Ka1j5V z!y@dktdDSxxQW$%ld(A&fiPP`33gMXsovDZLBo=#aQrZqf_t@N)_GJ=Ys^Khh-K7R z&+luI=q58mn)j!w#Mq4$Fr9scxVT7yeRWn!AISNSAq~i z4g4T|oFK)Dv@?!nx1bcmUU<{#7>G?}bX0=ag*1|XyzmW$kC?W25c|V{>OtI~l+vVo zgRt>6t8$krCp_pXve_W4!aomrr{4b7svzfH5ec#>Vgu&RC`}GhaVdKbo|edVmTi}* zWBjdcw?nBE=4VvD)LPWYDRK+@O=~N0A$@1poib0-U#mQtnG6D>!YlD)CNS|Ar5<>S z8g7y)y(PR7cyM@rlYtp4R)Og-Kpt0qia1eR?(%v|cw}eP)b=GOn+c*G!kR8FZHZY~ zVF?!)PrybPU*4rWb^ahS)4;XJz2Hhf?sKc?`W?Z{DwN|_4-o;nfE_Vdz&zgj_CFVP zeI2~^kgioiws9!p$s!UGB*Jka0J_%jhte!q{Qx{wk`GB@-;(Kn`Js+~2#RI&#hBJ4 zX-Ex%y1Ghx?I!$sWtmu29zh2AL{tq$O;OpNQ8)aKgr-0`(jpvtx&tw*x*#F_wv9jQ z`Bz%D;A-c{IS??GaTR20j7J{VKXE!#0000}Y7lFp&~bbK0pfUos0aYD^^l{n#Ao{g K000001X)^a`4u++ From 94a53adff1da71b253536e843bbe20b6d7a16c76 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Tue, 8 Jun 2021 10:53:32 +0430 Subject: [PATCH 0121/2828] zypper_repository: fix idempotency on adding repo with releasever and basearch variables (#2722) * zypper_repository: Check idempotency on adding repo with releasever * Name required when adding non-repo files. 
* Initial try to fix releasever * Replace re.sub with .replace * name releaseverrepo releaseverrepo * Change to ansible_distribution_version for removing repo * improve asserts format * add changelog * Fix changelog formatting Co-authored-by: Felix Fontein * improve command used for retrieving releasever variable Co-authored-by: Felix Fontein * add basearch replace * Add basearch to changelog fragment * Check for releasever and basearch only when they are there Co-authored-by: Felix Fontein --- ...potency_on_adding_repo_with_releasever.yml | 5 ++ .../modules/packaging/os/zypper_repository.py | 14 ++++- .../tasks/zypper_repository.yml | 58 +++++++++++++++++++ 3 files changed, 75 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml diff --git a/changelogs/fragments/2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml b/changelogs/fragments/2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml new file mode 100644 index 0000000000..faada2e9bf --- /dev/null +++ b/changelogs/fragments/2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml @@ -0,0 +1,5 @@ +--- +bugfixes: + - zypper_repository - fix idempotency on adding repository with + ``$releasever`` and ``$basearch`` variables + (https://github.com/ansible-collections/community.general/issues/1985). diff --git a/plugins/modules/packaging/os/zypper_repository.py b/plugins/modules/packaging/os/zypper_repository.py index f1d85376f5..608675528d 100644 --- a/plugins/modules/packaging/os/zypper_repository.py +++ b/plugins/modules/packaging/os/zypper_repository.py @@ -175,7 +175,7 @@ def _parse_repos(module): module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr) -def _repo_changes(realrepo, repocmp): +def _repo_changes(module, realrepo, repocmp): "Check whether the 2 given repos have different settings." 
for k in repocmp: if repocmp[k] and k not in realrepo: @@ -186,6 +186,16 @@ def _repo_changes(realrepo, repocmp): valold = str(repocmp[k] or "") valnew = v or "" if k == "url": + if '$releasever' in valold or '$releasever' in valnew: + cmd = ['rpm', '-q', '--qf', '%{version}', '-f', '/etc/os-release'] + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + valnew = valnew.replace('$releasever', stdout) + valold = valold.replace('$releasever', stdout) + if '$basearch' in valold or '$basearch' in valnew: + cmd = ['rpm', '-q', '--qf', '%{arch}', '-f', '/etc/os-release'] + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + valnew = valnew.replace('$basearch', stdout) + valold = valold.replace('$basearch', stdout) valold, valnew = valold.rstrip("/"), valnew.rstrip("/") if valold != valnew: return True @@ -215,7 +225,7 @@ def repo_exists(module, repodata, overwrite_multiple): return (False, False, None) elif len(repos) == 1: # Found an existing repo, look for changes - has_changes = _repo_changes(repos[0], repodata) + has_changes = _repo_changes(module, repos[0], repodata) return (True, has_changes, repos) elif len(repos) >= 2: if overwrite_multiple: diff --git a/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml b/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml index 0290fa4da2..4490ddca7d 100644 --- a/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml +++ b/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml @@ -125,3 +125,61 @@ priority: 100 auto_import_keys: true state: "present" + +- name: add a repo by releasever + community.general.zypper_repository: + name: releaseverrepo + repo: http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_$releasever/ + state: present + register: add_repo + +- name: add a repo by releasever again + community.general.zypper_repository: + name: releaseverrepo + repo: 
http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_$releasever/ + state: present + register: add_repo_again + +- assert: + that: + - add_repo is changed + - add_repo_again is not changed + +- name: remove added repo + community.general.zypper_repository: + repo: http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_{{ ansible_distribution_version }}/ + state: absent + register: remove_repo + +- assert: + that: + - remove_repo is changed + +- name: add a repo by basearch + community.general.zypper_repository: + name: basearchrepo + repo: https://packagecloud.io/netdata/netdata/opensuse/13.2/$basearch + state: present + register: add_repo + +- name: add a repo by basearch again + community.general.zypper_repository: + name: basearchrepo + repo: https://packagecloud.io/netdata/netdata/opensuse/13.2/$basearch + state: present + register: add_repo_again + +- assert: + that: + - add_repo is changed + - add_repo_again is not changed + +- name: remove added repo + community.general.zypper_repository: + repo: https://packagecloud.io/netdata/netdata/opensuse/13.2/x86_64 + state: absent + register: remove_repo + +- assert: + that: + - remove_repo is changed From bb37b67166a8c80efca92e608f397e4cd820eb5e Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 8 Jun 2021 08:46:20 +0200 Subject: [PATCH 0122/2828] flatpak: add tests in CI, add no_dependencies parameter (#2751) * Similar version restrictions than flatpak_remote tests. * ... * Try to work around missing dependencies. * Revert "Try to work around missing dependencies." This reverts commit 66a4e385668d0212e1150dcfb743478cf5aa042e. * Add changelog. * App8 -> App2; make sure that there are two apps App1 and App2. * Fix forgotten variabe. * Remove test notices. * Seems like flatpak no longer supports file:// URLs. The tests would need to be rewritten to offer the URL via http:// instead. * Try local HTTP server for URL tests. * ... * Lint, add status check. 
* Add boilerplate. * Add 'ps aux'. * Surrender to -f. * Work around apparent flatpak bug. * Fix YAML. * Improve condition. * Make sure test reruns behave better. --- .../2751-flatpak-no_dependencies.yml | 2 + plugins/modules/packaging/os/flatpak.py | 55 ++++++------- .../modules/packaging/os/flatpak_remote.py | 21 ----- tests/integration/targets/flatpak/aliases | 3 +- .../targets/flatpak/files/serve.py | 65 +++++++++++++++ .../integration/targets/flatpak/meta/main.yml | 1 + .../targets/flatpak/tasks/check_mode.yml | 39 +++++---- .../targets/flatpak/tasks/main.yml | 21 ++++- .../targets/flatpak/tasks/setup.yml | 44 +++++++--- .../targets/flatpak/tasks/test.yml | 76 ++++++++++++------ .../setup_flatpak_remote/create-repo.sh | 68 +++++++++------- .../setup_flatpak_remote/files/repo.tar.xz | Bin 5524 -> 6436 bytes 12 files changed, 255 insertions(+), 140 deletions(-) create mode 100644 changelogs/fragments/2751-flatpak-no_dependencies.yml create mode 100644 tests/integration/targets/flatpak/files/serve.py diff --git a/changelogs/fragments/2751-flatpak-no_dependencies.yml b/changelogs/fragments/2751-flatpak-no_dependencies.yml new file mode 100644 index 0000000000..a07ead96da --- /dev/null +++ b/changelogs/fragments/2751-flatpak-no_dependencies.yml @@ -0,0 +1,2 @@ +minor_changes: +- "flatpak - add ``no_dependencies`` parameter (https://github.com/ansible/ansible/pull/55452, https://github.com/ansible-collections/community.general/pull/2751)." diff --git a/plugins/modules/packaging/os/flatpak.py b/plugins/modules/packaging/os/flatpak.py index 1be1a72243..4a9e214fde 100644 --- a/plugins/modules/packaging/os/flatpak.py +++ b/plugins/modules/packaging/os/flatpak.py @@ -6,27 +6,6 @@ # Copyright: (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# ATTENTION CONTRIBUTORS! 
-# -# TL;DR: Run this module's integration tests manually before opening a pull request -# -# Long explanation: -# The integration tests for this module are currently NOT run on the Ansible project's continuous -# delivery pipeline. So please: When you make changes to this module, make sure that you run the -# included integration tests manually for both Python 2 and Python 3: -# -# Python 2: -# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak -# Python 3: -# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak -# -# Because of external dependencies, the current integration tests are somewhat too slow and brittle -# to be included right now. I have plans to rewrite the integration tests based on a local flatpak -# repository so that they can be included into the normal CI pipeline. -# //oolongbrothers - - from __future__ import (absolute_import, division, print_function) __metaclass__ = type @@ -60,18 +39,28 @@ options: name: description: - The name of the flatpak to manage. - - When used with I(state=present), I(name) can be specified as an C(http(s)) URL to a + - When used with I(state=present), I(name) can be specified as a URL to a C(flatpakref) file or the unique reverse DNS name that identifies a flatpak. + - Both C(https://) and C(http://) URLs are supported. - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit). - When used with I(state=absent), it is recommended to specify the name in the reverse DNS format. - - When supplying an C(http(s)) URL with I(state=absent), the module will try to match the + - When supplying a URL with I(state=absent), the module will try to match the installed flatpak based on the name of the flatpakref to remove it. 
However, there is no guarantee that the names of the flatpakref file and the reverse DNS name of the installed flatpak do match. type: str required: true + no_dependencies: + description: + - If installing runtime dependencies should be omitted or not + - This parameter is primarily implemented for integration testing this module. + There might however be some use cases where you would want to have this, like when you are + packaging your own flatpaks. + type: bool + default: false + version_added: 3.2.0 remote: description: - The flatpak remote (repository) to install the flatpak from. @@ -94,10 +83,11 @@ EXAMPLES = r''' name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref state: present -- name: Install the gedit flatpak package +- name: Install the gedit flatpak package without dependencies (not recommended) community.general.flatpak: name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref state: present + no_dependencies: true - name: Install the gedit package from flathub for current user community.general.flatpak: @@ -153,18 +143,21 @@ from ansible.module_utils.basic import AnsibleModule OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application" -def install_flat(module, binary, remote, name, method): +def install_flat(module, binary, remote, name, method, no_dependencies): """Add a new flatpak.""" global result flatpak_version = _flatpak_version(module, binary) + command = [binary, "install", "--{0}".format(method)] if StrictVersion(flatpak_version) < StrictVersion('1.1.3'): - noninteractive_arg = "-y" + command += ["-y"] else: - noninteractive_arg = "--noninteractive" + command += ["--noninteractive"] + if no_dependencies: + command += ["--no-deps"] if name.startswith('http://') or name.startswith('https://'): - command = [binary, "install", "--{0}".format(method), noninteractive_arg, name] + command += [name] else: - command = [binary, "install", "--{0}".format(method), noninteractive_arg, 
remote, name] + command += [remote, name] _flatpak_command(module, module.check_mode, command) result['changed'] = True @@ -279,6 +272,7 @@ def main(): choices=['user', 'system']), state=dict(type='str', default='present', choices=['absent', 'present']), + no_dependencies=dict(type='bool', default=False), executable=dict(type='path', default='flatpak') ), supports_check_mode=True, @@ -287,6 +281,7 @@ def main(): name = module.params['name'] state = module.params['state'] remote = module.params['remote'] + no_dependencies = module.params['no_dependencies'] method = module.params['method'] executable = module.params['executable'] binary = module.get_bin_path(executable, None) @@ -301,7 +296,7 @@ def main(): module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result) if state == 'present' and not flatpak_exists(module, binary, name, method): - install_flat(module, binary, remote, name, method) + install_flat(module, binary, remote, name, method, no_dependencies) elif state == 'absent' and flatpak_exists(module, binary, name, method): uninstall_flat(module, binary, name, method) diff --git a/plugins/modules/packaging/os/flatpak_remote.py b/plugins/modules/packaging/os/flatpak_remote.py index dbb211c2fb..a7767621d7 100644 --- a/plugins/modules/packaging/os/flatpak_remote.py +++ b/plugins/modules/packaging/os/flatpak_remote.py @@ -6,27 +6,6 @@ # Copyright: (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# ATTENTION CONTRIBUTORS! -# -# TL;DR: Run this module's integration tests manually before opening a pull request -# -# Long explanation: -# The integration tests for this module are currently NOT run on the Ansible project's continuous -# delivery pipeline. 
So please: When you make changes to this module, make sure that you run the -# included integration tests manually for both Python 2 and Python 3: -# -# Python 2: -# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak_remote -# Python 3: -# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak_remote -# -# Because of external dependencies, the current integration tests are somewhat too slow and brittle -# to be included right now. I have plans to rewrite the integration tests based on a local flatpak -# repository so that they can be included into the normal CI pipeline. -# //oolongbrothers - - from __future__ import (absolute_import, division, print_function) __metaclass__ = type diff --git a/tests/integration/targets/flatpak/aliases b/tests/integration/targets/flatpak/aliases index 59e306f8b4..39291d435b 100644 --- a/tests/integration/targets/flatpak/aliases +++ b/tests/integration/targets/flatpak/aliases @@ -1,4 +1,4 @@ -unsupported +shippable/posix/group3 destructive skip/aix skip/freebsd @@ -6,4 +6,3 @@ skip/osx skip/macos skip/rhel needs/root -needs/privileged diff --git a/tests/integration/targets/flatpak/files/serve.py b/tests/integration/targets/flatpak/files/serve.py new file mode 100644 index 0000000000..d9ca2d17a5 --- /dev/null +++ b/tests/integration/targets/flatpak/files/serve.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import posixpath +import sys + +try: + from http.server import SimpleHTTPRequestHandler, HTTPServer + from urllib.parse import unquote +except ImportError: + from SimpleHTTPServer import SimpleHTTPRequestHandler + from BaseHTTPServer import HTTPServer + from urllib import unquote + + +# Argument parsing +if len(sys.argv) != 4: + print('Syntax: {0} '.format(sys.argv[0])) + sys.exit(-1) + +HOST, PORT, PATH 
= sys.argv[1:4] +PORT = int(PORT) + + +# The HTTP request handler +class Handler(SimpleHTTPRequestHandler): + def translate_path(self, path): + # Modified from Python 3.6's version of SimpleHTTPRequestHandler + # to support using another base directory than CWD. + + # abandon query parameters + path = path.split('?', 1)[0] + path = path.split('#', 1)[0] + # Don't forget explicit trailing slash when normalizing. Issue17324 + trailing_slash = path.rstrip().endswith('/') + try: + path = unquote(path, errors='surrogatepass') + except (UnicodeDecodeError, TypeError) as exc: + path = unquote(path) + path = posixpath.normpath(path) + words = path.split('/') + words = filter(None, words) + path = PATH + for word in words: + if os.path.dirname(word) or word in (os.curdir, os.pardir): + # Ignore components that are not a simple file/directory name + continue + path = os.path.join(path, word) + if trailing_slash: + path += '/' + return path + + +# Run simple HTTP server +httpd = HTTPServer((HOST, PORT), Handler) + +try: + httpd.serve_forever() +except KeyboardInterrupt: + pass + +httpd.server_close() diff --git a/tests/integration/targets/flatpak/meta/main.yml b/tests/integration/targets/flatpak/meta/main.yml index 07faa21776..314f77eba9 100644 --- a/tests/integration/targets/flatpak/meta/main.yml +++ b/tests/integration/targets/flatpak/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - prepare_tests + - setup_flatpak_remote diff --git a/tests/integration/targets/flatpak/tasks/check_mode.yml b/tests/integration/targets/flatpak/tasks/check_mode.yml index 3186fd2830..2270e0a9be 100644 --- a/tests/integration/targets/flatpak/tasks/check_mode.yml +++ b/tests/integration/targets/flatpak/tasks/check_mode.yml @@ -4,8 +4,8 @@ - name: Test addition of absent flatpak (check mode) flatpak: - name: org.gnome.Characters - remote: flathub + name: com.dummy.App1 + remote: dummy-remote state: present register: addition_result check_mode: true @@ -18,8 +18,8 @@ - name: Test non-existent 
idempotency of addition of absent flatpak (check mode) flatpak: - name: org.gnome.Characters - remote: flathub + name: com.dummy.App1 + remote: dummy-remote state: present register: double_addition_result check_mode: true @@ -36,7 +36,7 @@ - name: Test removal of absent flatpak check mode flatpak: - name: org.gnome.Characters + name: com.dummy.App1 state: absent register: removal_result check_mode: true @@ -51,8 +51,8 @@ - name: Test addition of absent flatpak with url (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote state: present register: url_addition_result check_mode: true @@ -65,8 +65,8 @@ - name: Test non-existent idempotency of addition of absent flatpak with url (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote state: present register: double_url_addition_result check_mode: true @@ -85,7 +85,7 @@ - name: Test removal of absent flatpak with url not doing anything (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref state: absent register: url_removal_result check_mode: true @@ -96,15 +96,14 @@ - url_removal_result is not changed msg: "Removing an absent flatpak shall mark module execution as not changed" - # - Tests with present flatpak ------------------------------------------------- # state=present on present flatpak - name: Test addition of present flatpak (check mode) flatpak: - name: org.gnome.Calculator - remote: flathub + name: com.dummy.App2 + remote: dummy-remote state: present register: addition_present_result check_mode: true @@ -119,7 +118,7 @@ - name: Test removal of present flatpak (check mode) flatpak: - name: org.gnome.Calculator + name: 
com.dummy.App2 state: absent register: removal_present_result check_mode: true @@ -132,7 +131,7 @@ - name: Test non-existent idempotency of removal (check mode) flatpak: - name: org.gnome.Calculator + name: com.dummy.App2 state: absent register: double_removal_present_result check_mode: true @@ -149,8 +148,8 @@ - name: Test addition with url of present flatpak (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Calculator.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + remote: dummy-remote state: present register: url_addition_present_result check_mode: true @@ -165,7 +164,7 @@ - name: Test removal with url of present flatpak (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Calculator.flatpakref + name: http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref state: absent register: url_removal_present_result check_mode: true @@ -178,8 +177,8 @@ - name: Test non-existent idempotency of removal with url of present flatpak (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Calculator.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + remote: dummy-remote state: absent register: double_url_removal_present_result check_mode: true diff --git a/tests/integration/targets/flatpak/tasks/main.yml b/tests/integration/targets/flatpak/tasks/main.yml index a1d1bda8a4..68d41d2efe 100644 --- a/tests/integration/targets/flatpak/tasks/main.yml +++ b/tests/integration/targets/flatpak/tasks/main.yml @@ -30,8 +30,8 @@ - name: Test executable override flatpak: - name: org.gnome.Characters - remote: flathub + name: com.dummy.App1 + remote: dummy-remote state: present executable: nothing-that-exists ignore_errors: true @@ -57,5 +57,20 @@ vars: method: system + always: + + - name: Check HTTP server status + async_status: + jid: "{{ webserver_status.ansible_job_id }}" + ignore_errors: true + + - name: List processes + command: ps aux + + - 
name: Stop HTTP server + command: >- + pkill -f -- '{{ remote_tmp_dir }}/serve.py' + when: | - ansible_distribution in ('Fedora', 'Ubuntu') + ansible_distribution == 'Fedora' or + ansible_distribution == 'Ubuntu' and not ansible_distribution_major_version | int < 16 diff --git a/tests/integration/targets/flatpak/tasks/setup.yml b/tests/integration/targets/flatpak/tasks/setup.yml index 2dfa33a0b1..98b07cd480 100644 --- a/tests/integration/targets/flatpak/tasks/setup.yml +++ b/tests/integration/targets/flatpak/tasks/setup.yml @@ -4,32 +4,58 @@ state: present become: true when: ansible_distribution == 'Fedora' + - block: - name: Activate flatpak ppa on Ubuntu apt_repository: repo: ppa:alexlarsson/flatpak state: present mode: '0644' + when: ansible_lsb.major_release | int < 18 + - name: Install flatpak package on Ubuntu apt: name: flatpak state: present - become: true + when: ansible_distribution == 'Ubuntu' -- name: Enable flathub for user + +- name: Install dummy remote for user flatpak_remote: - name: flathub + name: dummy-remote state: present - flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo + flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo method: user -- name: Enable flathub for system + +- name: Install dummy remote for system flatpak_remote: - name: flathub + name: dummy-remote state: present - flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo + flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo method: system + +- name: Remove (if necessary) flatpak for testing check mode on absent flatpak + flatpak: + name: com.dummy.App1 + remote: dummy-remote + state: absent + no_dependencies: true + - name: Add flatpak for testing check mode on present flatpak flatpak: - name: org.gnome.Calculator - remote: flathub + name: com.dummy.App2 + remote: dummy-remote state: present + no_dependencies: true + +- name: Copy HTTP server + copy: + src: serve.py + dest: '{{ remote_tmp_dir }}/serve.py' + mode: '0755' + +- name: Start 
HTTP server + command: '{{ remote_tmp_dir }}/serve.py 127.0.0.1 8000 /tmp/flatpak/' + async: 120 + poll: 0 + register: webserver_status diff --git a/tests/integration/targets/flatpak/tasks/test.yml b/tests/integration/targets/flatpak/tasks/test.yml index 1e7d888bb5..7442e4b468 100644 --- a/tests/integration/targets/flatpak/tasks/test.yml +++ b/tests/integration/targets/flatpak/tasks/test.yml @@ -2,10 +2,11 @@ - name: Test addition - {{ method }} flatpak: - name: org.gnome.Characters - remote: flathub + name: com.dummy.App1 + remote: dummy-remote state: present method: "{{ method }}" + no_dependencies: true register: addition_result - name: Verify addition test result - {{ method }} @@ -16,10 +17,11 @@ - name: Test idempotency of addition - {{ method }} flatpak: - name: org.gnome.Characters - remote: flathub + name: com.dummy.App1 + remote: dummy-remote state: present method: "{{ method }}" + no_dependencies: true register: double_addition_result - name: Verify idempotency of addition test result - {{ method }} @@ -32,9 +34,10 @@ - name: Test removal - {{ method }} flatpak: - name: org.gnome.Characters + name: com.dummy.App1 state: absent method: "{{ method }}" + no_dependencies: true register: removal_result - name: Verify removal test result - {{ method }} @@ -45,9 +48,10 @@ - name: Test idempotency of removal - {{ method }} flatpak: - name: org.gnome.Characters + name: com.dummy.App1 state: absent method: "{{ method }}" + no_dependencies: true register: double_removal_result - name: Verify idempotency of removal test result - {{ method }} @@ -60,10 +64,11 @@ - name: Test addition with url - {{ method }} flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote state: present method: "{{ method }}" + no_dependencies: true register: url_addition_result - name: Verify addition test result - {{ method }} @@ -74,10 +79,11 @@ - name: Test 
idempotency of addition with url - {{ method }} flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote state: present method: "{{ method }}" + no_dependencies: true register: double_url_addition_result - name: Verify idempotency of addition with url test result - {{ method }} @@ -90,26 +96,46 @@ - name: Test removal with url - {{ method }} flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref state: absent method: "{{ method }}" + no_dependencies: true register: url_removal_result + ignore_errors: true -- name: Verify removal test result - {{ method }} +- name: Verify removal test result failed - {{ method }} + # It looks like flatpak has a bug when the hostname contains a port. If this is the case, it emits + # the following message, which we check for. If another error happens, we fail. + # Upstream issue: https://github.com/flatpak/flatpak/issues/4307 + # (The second message happens with Ubuntu 18.04.) 
assert: that: - - url_removal_result is changed - msg: "state=absent with url as name shall remove flatpak when present" + - >- + url_removal_result.msg in [ + "error: Invalid branch 127.0.0.1:8000: Branch can't contain :", + "error: Invalid id http:: Name can't contain :", + ] + when: url_removal_result is failed -- name: Test idempotency of removal with url - {{ method }} - flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref - state: absent - method: "{{ method }}" - register: double_url_removal_result +- when: url_removal_result is not failed + block: -- name: Verify idempotency of removal with url test result - {{ method }} - assert: - that: - - double_url_removal_result is not changed - msg: "state=absent with url as name shall not do anything when flatpak is not present" + - name: Verify removal test result - {{ method }} + assert: + that: + - url_removal_result is changed + msg: "state=absent with url as name shall remove flatpak when present" + + - name: Test idempotency of removal with url - {{ method }} + flatpak: + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + state: absent + method: "{{ method }}" + no_dependencies: true + register: double_url_removal_result + + - name: Verify idempotency of removal with url test result - {{ method }} + assert: + that: + - double_url_removal_result is not changed + msg: "state=absent with url as name shall not do anything when flatpak is not present" diff --git a/tests/integration/targets/setup_flatpak_remote/create-repo.sh b/tests/integration/targets/setup_flatpak_remote/create-repo.sh index 1b09bb7956..4ece76ccfc 100755 --- a/tests/integration/targets/setup_flatpak_remote/create-repo.sh +++ b/tests/integration/targets/setup_flatpak_remote/create-repo.sh @@ -1,51 +1,59 @@ #!/usr/bin/env bash set -eux -flatpak install -y --system flathub org.freedesktop.Platform//1.6 org.freedesktop.Sdk//1.6 - -echo $'#!/bin/sh\necho hello world' > hello.sh - -export NUM=1 -flatpak 
build-init appdir$NUM com.dummy.App$NUM org.freedesktop.Sdk org.freedesktop.Platform 1.6; -flatpak build appdir$NUM mkdir /app/bin; -flatpak build appdir$NUM install --mode=750 hello.sh /app/bin; -flatpak build-finish --command=hello.sh appdir$NUM - -flatpak build-export repo appdir$NUM stable +# Delete traces from last run +rm -rf appdir* dummy-repo.gpg gpg hello.sh repo +# Create GPG key mkdir -p gpg chmod 0700 gpg gpg --homedir gpg --batch --passphrase '' --quick-gen-key test@dummy.com future-default default 10y - KEY_ID=$(gpg --homedir=gpg --list-keys --with-colons test@dummy.com | grep fpr: | head -1 | cut -d ':' -f 10) - gpg --homedir=gpg --export "${KEY_ID}" > dummy-repo.gpg - BASE64_PUBLIC_KEY=$(base64 dummy-repo.gpg | tr -d '\n') -cat > repo/com.dummy.App1.flatpakref < hello.sh + +for NUM in 1 2; do + flatpak build-init appdir${NUM} com.dummy.App${NUM} org.freedesktop.Sdk org.freedesktop.Platform 1.6; + flatpak build appdir${NUM} mkdir /app/bin; + flatpak build appdir${NUM} install --mode=750 hello.sh /app/bin; + flatpak build-finish --command=hello.sh appdir${NUM} + + flatpak build-export repo appdir${NUM} stable + + cat > repo/com.dummy.App${NUM}.flatpakref < repo/dummy-repo.flatpakrepo <v>5N@un}Hi1L#y~)Z- z{84!W3f)Esb7jq8xj(H~uV-ILw_hEK-tS2ml%=3W(9ZbK%*zGuyzw% zx|CRvE$)Z8hP(q}r5_|!pL6c&F%PC`(i85LvmX4tby$`<){6<0vLZ&gXzNHVZ&Xf~SL^u8sOHdo})6*qX{X~g@Rb%r$B zquEDB>&GQIE73m2JY`xc#+vdGzyT>afBQP{JpJd%k^#05fTyu|mQGF%YV6zMjsMHtkdFZgCC3Hu1hT#!3sP zlX%{M1-qoEJrl#7R+_{(He}QOx3|*uAuy=7BSbaUQ8?+uC3n_im4s73NG;MPAh=aV zsr!5fZ9@geCLM}Jn>G|r+q1urn&NgkY4kCkgSRk&Nj)-6mJ}r({(ADFv?Rj1Qi%V= z#?W3PY}yyhh)TAJTmea{WQz!$!kLBs#hrb!Oe&|_EO+32=p~I7Sqw@dyF6|0q@M8hmxVyd6gGOW|A*fgO zCjkQ-W<>L7srkC{ZDbazsy?xXWtl6Tn^-^s5r^=XB~c~PZoyai;ogn=Y?otN+g6?B zWT2F(AKl^h>@zuadxsdm1EXN(hxfSJYH8;e zz;C?|(!_t_gVJQO-YhxZ#DIbf^JItaMJEvG3=GNsV|Pda>WptsL5Ak0>x>89MY&QR zvxJw5)nrELLCY}g=4zkb23M+VcZJ$kRf>*B)7NH^5*~YC1&?h)!NDqJ#4h)y^7lSF zi0y|>Mb(4Bi+hVEtJNPzDGi5ePhs41$l@VJ6rX;uYD1>|^@C@*E|^CE22`*L6)9O( 
z?oddJ%D4`7)w*+JE5dme9)yRz6QUBv7seZjaaPl+=5^@LXrU2)SW!puqXN*k~< zxJ=^x0|xbva8VGz2sP|0OQ49+NDlq!uPRo^kf`w}9bO}}YYnnZfrnFS6OoqH0DR0Z z)13cHH?2TM5eIPv98c!~DoZUl=tf4~Ez+DZpFz9!hD8{t@ygq-u-`2F@8>dIx~3Pp z_~>w^QGN4mH_|yzkiIl29x4}JQ`?SIiDy8OIc$Rthg#B&sK;_kF(Hm(+xpjo$9{3} zf@XWNwLx*Vd_dj*OANsrRiQ-b!<{tKhioy&{7kMaBAL9ZYqlDm`$jG_&t~4py9Ya*Ge+y~Pv>HtJcFY8 zihplQ2s3yH>rf+Gfp*T`P}GI@BX&K=2Culc>vvXu$qsx^^i!;(p07mgthO5Vt3XwW z-S*i?CrwF}cE4bC?{sxVTM0iKA}878XK!_j}n zlK6Acn`k-ISyDh<>V?YujFd0=*QcORor3$IxiL^c^H>cZN#Ti~LF>Z*guR2b@FiEP zi%z<6o<4R@1mf4(89Y@fr2_iLU2H;6^ScvM6>{p5qy#U3#`+2b5+>I~Z1{Qcp68UB>zR!!-prXQw5y4( zgDKV1lMkGqQl!qMQYo**XhY34KJT4ex%fejcv zZtoQ=*~j`gYdvg;tQHVS^TBB^88ATLx!w{}b>CjXr6-Gk!0=mAp!C5o2jJHp%{A6Q z);jVS6UGe2k_eN$vMlN9BEzj$P+lss#h9gtB0|Slj-IbZAjy0vp+wvw`|l=$>$Db{ z-zX7JIZcLr+t_(b6|;jMi!T zolTrC91#&H^S!VZL5FS!4n!;Ov1TVn?=$9+44O#ga`2q`7k`0GT9BdFrCf(5QkIcBh1g_v`9#z=X}^u zEX3sA`gUnR{hP2ok2#U)b{&^yKRK|m35I1K2YzRc@F<7ISA)5&68BU@T`2SaIh0{Q zR+4P&P?>nXe-5Tu4ADqcQ8IXzze|bQdAZbeumC-v_asSQn; zcsa{!jf~)!@ApoXJ|eoTn#@sI%n_!eZYFJ?wwYK}&b4lJ3xGVH*ed!&CYPC&HO*{2 zx!&?sSwm+%<^c6=(?~CB>I7PY3&9n2}!JRmQS?&N4(`>5bCe#G>_!l-U zV=&BmtaM!@pCI6<7{y&03<(BUBPfg=}#=V1QjNh9r^D3ZQ~$ z-+d4(XSuVNC#N0n>`4rUY=>51W00YMceM5OJr_HgTTfSLEz{8iti&SNn>(EV$o9JW z!m(K@?NUx=&_SExn*qX~{m)clg3E3uI!yy-DPLdxN1DU*tpvKk!8glk*Hf;dQar=R z7on>L@+5L?e(v{&A8(jHcO7kAloZWS&3%0pW#C-Auw_GzDtBS?LYD*}^H6l(nT4DYJ9ve?9;u&7=<;D*FhwV`{oy33F!QH)y+h(Q_{^S__4O*`fbWV(A7 zWjUxL4a#hZr!puK*C~5l45>P9E67TlKz$7iUb0YsC|Yb zCnipl&)YZ90KN)Bxo#d<(iou(K{&J$GWS^<<=I+@__=Xt(A)>w+Yi}dKp8?N2KRwN z^gr}Vwq&VE4@VV%9@O>z>ERVLH36<>+mQ?%=aXkN*odx#Kq2}t^_A!}w#ln1zKPps zedEUYxuo@>=iu>96C9}yB zCbMidGlvUKENqLxrW2cg``-*3sZf3a$US)1zpgLentlC+dxuXcb#e0pH6eYbJF<(` z_(Kj?UXp$c-l`|EQjCV;n|z?PW2`l{DIOv3&U`!(tFpw!5fx^g(=@fBZq$tCk;a}L z?6uR0bKlS#MZXT{8{XUTd)j)acoP7%fg`4QNBm-F=w;VM(4%AF&^DR|Guo7@3417* z1~o&vueD=BWCKjHpR(`EZpGc4HGP_xU-J_pCsD>lqJCrA#QFIhAjjF}&De*Ukji|& zwq=!tF1#YjM2Dln$(9PGgQxvK<_3#XaRKjv3FymIvD{+O|B~>3*5T`aNVtD;A^}9v 
zPqTh15!MoN?6?KmUg5QY4u|}W6V^A$t!@H*8YtK*JFgf5r2_F4+dd-;oDv7lRl?)A zjlAgqldfV%)8HzC&Ph+uhd^-yLWe^l=?8a&12-Ume96LIJV&0ayQ zEn?2vlqGF3$U=z(-(fZ$tm|>X99kReTLQ8t)u5ltKiMhYq8cL7?Ik@{Sl;Br$a^m& zFD{ZgAv<5POi51>jj>s4sY#-jfDD-Va9FS>^It5_)V8d1)gZs&3zY4)KNF@3d2tn_ zbmXJ*`qQy!S2LEe(bB>G+Y1`&U>rh%dhH3#{MJds9$SwyNYE0n8;s&C=F3DAF85K` zyz~>{k4>dfFy$ks6~q}?sE$8PqQ%BnWg}@qS&AEVvnB5oYxlSxFU|)8D}Re8y3`ur zUlS`Jr|{LMxcP;BwzbI=?!2m`=czdR!Gm*bZEG<;C*@u4)R_SOdzt}!#;0QU?GrWx z(Lv@(8um}*&n6Nas;|`H<~RFk{-`kFbxRuUy$aZ&=aIgMt*6Kx-_=&v6KPfrN&YcP z(ONj_*Q`9$^OcNv!)XRPW5)@JGiz>U2@9tp%aOVi5tfG;a(IT#T5aPIfsmM*BGw{3 z&>-cf;$BH$4La$#zC^ue#8eHZR}=l#%%=4|27{Roc&molVxkR1#f3crno(y59e7uY z;wGqG6(iv~UZT5Ck`J5l8bJv_bG{!(#C5rQyMgXIyVxxf)|HK zoh#K!_$D}6sygpkNZEj5Yy29c$dn03ZPXxSymbD0d@bNhq$4rvQ7=E@=g*}lpY3Ta z6DUhSvmpHVX~4A(|18}-FcAbQq=Et>e7pA(_V9tnXWQ#I4j{VFdF+cVXmQ#RE2bhV z;BUI6f#|8(?6$dxAB!|kOk}CvDP9uw{7OPr79RE6sfkBIqxl$>HUYW|X%H*0n>Z+x z$JeHb2AYZcPy9l7m|1V1i+YKu=rfEwX#rzJfs6&|WJZ!{1m&it#Y%K{3HW3b^2BAu zDxJD3_N=RGd-#Bcw9<(A4TluQX}beZ&4r?ZQ&f~b&ajHscRaFabd#BM5-vR$G{Tnm zakJ|iprJoO7nSuf*Jz4G7Ajx?kstaIqc^RXq=3k!8`QM>WD~YZUs_(<4iGAK0Gcp1 z*FsNFE#=b)cy+7F1D$*S57pxj4-|)vDN*O@8{UaYm9oJZO+;)GzF-U_wmGNGM`)4x zVC{Cq2QVh6mp}>&c>KRZym}qc{$QmdbC7MmVj~b34u%DbOycuRMi~Sz{I$=rNQv$y zDo5s_sUl8K2jV6fnXewZ(7Yeui~8!5peW@sCh5WYXG(eA@g)mpJK1vin?qY($3{n& zdyv10S$%^5I7L8{v6v@p&bt7X@7D#zHePiI_Hp(dIRfD^cR}4cY~IRq>>^{lVl|E zUt+k`p7s9=R+ji-(kp16*(WHvE>e0Oi{E5VRphpC?mL3_t*M5M1eiCBJ%Nk%cvO@7 z-UgmF$$xoxz94TH_m)K- zW>Nq`uil4P5J{Ir2+cL1V}w&1@BzO7B4L<@9JnPDz|u@4bb1C`^-ydjrW>}Kc8trq-Suw90+K&UIaW>GLwj@XbzXWmCp zn;kGaR8DThc(5iNBB^tN$`mHytB#qUu)ktV(B}87t6C%zrvid%=;)B_+f#Bkhed0k zb@OPvh^2Yp!h@4+C9s2zGobnbcGKYp5?!OH805IoPKw>nwhg;x4NJJ0e~`qumk|irf*8I#Ao{g000001X)@i7;SF= literal 5524 zcmV;F6>IAKH+ooF000E$*0e?f03iVu0001VFXf})@BbB4T>v>5N@un}Hi1L#y~)Z- z{84!W3f)Esb7jq5>fDNTXAtbJtFLh#1{z_@XdBbYO#I_6ejX}{k|qPV5*F#tV*(KZ z&8y&v@-_@%O$~^*VQ2&(ntw~=X4{`14ln0V8gAs7B)fP}6FAt~Kb*`}cin_F`PjKM z=gDE4I1`vmQzegt`_V-)uy0DRn8>u3#F{_S@Q?#j3Pf@d^@SuYQthQu*4bx}Wxgr^ 
zpzQnk7-!^ooWgD}Y<{Z)25({OuU|9(2RU`Q#t(Nms*UWhQUf?8R&lYuSf-byBX0K8 z^I}P z;jsPqKwt(94>NX^omDYN6iLjF8^%Fq74AggLtsy+E4;4W#LVx|OUJ_~rkBr=I9G`S z-`lF>zkaJh8c8uPIv7!1R#=VKn4$v5FVtOAT~e-|DG}P7pu?u zg2?4<^6J^x4!h7r^PhZef;GY0o&cgx`7rh$+7fCPFWMF3s{tZp9=q<`0M;3wj#_1g zy)AF_+45$2ZeN7&sq;gF7CAoR3TC{{A3kUkG`e@pmB_i|#|gx}R%AMZmCD*tQpM$A zmFmu|SZA0^w?;w2^J#7(!b+rZ$z`H1_w}(oKbr6?5#;6I*|C;sURHR-b#HK0GgHwl zWlO4wdEbX1D|5Dcj7=Qurtcf#d@UCH2{rv6(9o2ArZcO^C#+^h0?!I+spQj?8~88n z&E4e>R8^&N7+DMiqM6PD31Wv0giL;`XN??_@^TLV4hsB_WSOq_+c>H2$Dzqflv>9< zh9V#;-2)ZRNP$CY1HLx8Cx$ODMd$i>V~Lai-_=C*H-nB$WV7w4_|_RdZxY=@`)PU9 zGU-R&Fm5PR*l=QSUedJi+q~F%qp&wRE~3tnSu#;tV>1kSB#HCAD|;^tLzOsOObfGC zxl)v7r8Kf`JlpPMyfI81Mv@JKh+8vmq_9c@{#q=-N;#Y-+4)mJ77tr#mrvm(XFmSP zG`G5J9L|fhqonikigA&LzgIIzkD0JRwG!tEUQDZ}LZ|#5Ual6Xaun6M+(HDC&+jiB zxRa*KG2+)q*k=l9wu2EZW|NN;6w^HoI>w-V*1+BB&9amec?NGnkvXq>&?V<0S79xs zE);IWKN6$<=AHej%3g*(e+_xcmwPhy$}qM#E#@F{P)$5H9+(}YyEW0ZG;nQX00+iN zlwO}I{>mXc^=^Y1-~obpGtoH%Sg8VHbMnK%bcKXdHdNlomWIl)pzNw;A8lK?zqCL$ zNtth-_+Epfbrx87EX;Qr527&3oF#4}aE}OWga@2=Ygo`4h3Y^~>!X^knst8lFS%W5 z22|-1!ABSJyfe!9gP2T9@aCFHh_E-a`8#UE-nKN8K4Lw8PgF5GHLGhpl%DwU3GWAP zb)mCUGrJ1spZEsCe>(_RS+yylRnFv0&(HZPqRJiW{T9=KHLS>_%NoQ>AjypKmy>(K zStvuAI*Zm{SABkrITglicDPv7B=GyQ>NkIV*#NS+VqdSyXmh*i{Giej9qux%*V4!v z04}f5;K6g%G(3;;FrbnT{Vlj~6@tey6YDC&(V_B1$QFNs87L<@XGuRxb z-rW@&t@xrgkZjqx%OVI!>_M>rt9_fwcs-qXQM@YqO~2H@>&AFrl^o0=9KMF&+7hz8 zm9Z0~y=WvtQFC@A4HYGmx0;`V`j zTBirBrULceJufTw9S0oQrlQkQqL4U~0>5<_BHQxuNcybgxl+KdM;RrG>B_c1s)bSoOGy?7`dzpWg+m83c_Bfe}_G zs~{p8wWo+@hwfLuI(xH@T?B%SsJ~n@;DL)@45ldz@biq=qp>{HHsq0s`InRO9bUdOPHf1S(2GBdtU}U`lXj+^OaaJ0#?cIX zz`a_1oZoGHeOnV%cz?|}Ux!?f|BWlg`mh%HY&oR|SzSM*%ToXmZz^C-H2~|!G3&`d zCzie=S>qD)_I?rZJ#|kd6V&+zn{H`5&L{S%Bod&aajrBb!)rj#Xs~5!G zp(wBu%!MMS?AkiP;grvxR|8?)kmt33Ks3^UqR*k(HGaA1apho-VQ-xRYL`*9PsVQ* zgU=Tzp-Ab0@PL%xh?tv)VOvoR%C=YDQrO`Fh~-V^Gg1Am90nf<$hXh$Mj&A`=pj>y zBXH6u+B1j{O~Vxn)sMPc8YQ5@K?48f!TheWcJnpl#Kdl+zNEIal0C0Xxx*ay&BGTt zvJv?!ox`mJ2UfORJJr84*kW2)s;aizuM35WS7&vHCw{<)N>KpncOw%vC$e2I!T>HE 
z;hHcWk1BUPTsSh_eym-mFuSvZaS;o`j@#x|qz6gkPwEso`$pi&OqTtHJNMOyPYOY38xFB-B}fwBm;0_?4Ml@bxI6QfTp>DJr-^F%|W_ z$-Wlx_;e(f1)dO8QR&45;N*QG-c)lB-2)7qR9vsd*c(}Yp@Jf2nV38Y3W=}J^`9Xj z4^|H>om2DJ(AfpN96L<`CEV)e(`iQ>DemWjW z?#hsDws>z8($(!8fOwYzV|Q^~@ixxn#8TOMTj;{`7Ldq?z2;shLNYtuY?zsJ`8tJ>aXJ5j0S z95)Rb9qy4F`v;^O_Y)*9M+qOq=lX`2v?C+SZkuxNydCv@w$`OWJH43g%4!lk& zNEo2_0s;9TWv_KVzRWhB2Ntq-Xo$j9DB{AwI_{ac@FK$0Qjp|iDZC6+DAFbO-q&!W z9JWT6;p*bW#@Cmiceiq%R)QY|FOBs#3++h4b*o)4u*L^zDzolJbHB(k0dE3?6_-O% z7-;9MqjJs?r_{nm`Ve6AxTvebCYENAxyCW){#(NeH}OZa`;g1*+bVLLFzAHB|D-Bn z!&(K|XeU~^-&Y{a#(j+58(5j~ zLjy#sI4fG4xQ^X_kbH@F#sn}1!;X)2ykHi9Jc;`A34k%vE14LeTJF{G!4Ji4?=wR- z)zjB5K8bajhY%D+T|1S3fN#zt-?FPFOh}pCVrr6 zLTI^gc?vM+d%#YhHQbeZxy-NXZF;C!_oRTgHKSK$Q_i#ex>VZ#>|sc?EPYD?LI#1E zcN(_#74PgUZ)dY`{>3M{@8q@Kp?k1FwB^2l;-~(+A!BfIC4r|?y~l9}juyA2H!gXc zTrJt&h767*t{P?16$BU?g@U+X4XU622arWnyi9bWh1h~F4bjM%yWc*SR0g^_L0|Cx zry_hh3|iWe>UOV_PGS-w!U{Qou~SgKg3phkt06Pe&Z(lGvJ0%rrEzd8*_N{U-7w?m zGE%LF={|x#?(XU5_u{|zRAPuBYDdOfd9~rXcTN2OtliPE$`jdGDLDNHbzXBzGh
  • hitUvXK$sVWTuHQ4`ZdY6WW&L~ zYrr|UGRP5;Nk&K=A2@&o4^{3P!YbP$DR$8*fgz^$Sh1*c?`S(CY4dUUb9om`0^=(hr*Ce&t{Yz$ihlP(i21%ghcqYblmS!V)?*#g4GUc%yqbWw{q1EfU96 zdoUIg(;!t*YXS&P+ETGQ7(}uXNHx5qNdp#Hx)B2o6bgEIb#`ISwH203$fl`Qb1*w8`e`gB{_gynce{ znS8HH0?~yJro9En)?SFWZJC*b%Fvhh%OB7QjMOL9w*(-bFv?tLYogb}Jp6=hzY5rX z?7M`G$X;7JH)FIV&1&Brw(oot0Q^U?K2zJyz&+smc9-F6oS63gQp=36sY*D2xWZ>F zg7$w^{|SQy1o?Y9o%?D!Zix2U>s==f$i}meb-24lJQC*VpKu^3`Q#_GFkN(MW!Ybh z;=JF10DQ1ofCk~}{axu`@Q7TW6SP*i3jw6T*A{*zTwokFZi`oMbVj<_)}Xk36vi|x zKnsnf&pCxuvj;dc=59GwcagEJ9;;#uXs%TpKp+N?lF=RGED=EWet5#4$fiA|z_#p* zDDs0^=ISWtn=}DH7UbDn&`)$%x{t08%}FrW$b4G@9?{mEu~b~p4^r>ZL4+O<^Q zmWlBiQw+J1h|j%WUg;L-5wgiJ=nm)iEM>W06V*1yVI7rY^~0x~&!d=3nUl{gI4{MV zz{aoZ3`dP7q7{j+#B8hwedfY5r-n7`N8l4_12d1GeXQ!~GIK_)wTC0@6?g-b`B|lq2bG-zEloEM#*Y&L z>iQ{;vHn+)Q5IkmswrfxFf>0Hcf5c;#lq`U5DK5@QZ$xL_P)KksR+k449)S}RcT`3 zJ55QKuecH|_`LmTb$1nrdBjsKYJ>X?1=~Ldqaj@_k1A{!`qcnM9DxOzRjRZ$Gue$^ zKz)<LSF(d)Q0N`7h)2otJhEf+5ejV9Whz*;U%qDS zt!lPdMwT3{&pgHhh*}cnO$VoLYDLG;dkBFazn;HN^G=pPX|&& z`V(cM@h`Phy4B$m(G{apx$P@({_U4>E8Y>2+M)c?uO87t{wifMT1UloIoWeD)hB^= zFO2o1-z8`^O=PW1lyG$kt+EBo`AjbCs_N9QhRkW7K73AO-T6XVHPpVHeb~JhW0qKr zRkaP0XpQac_8d48VgHO4$-X_c9oM4h=WMmwCp|UYRHi0LK4F8HR3BA*>6ZsKZEFaZ zI7@6sqGB1x`6Wk^m6RgWy?l5DIu#0YJL&LVd*Ek_H_O;4Duo=^hH*OpHLstavsX5} za)|yUF{mUbR2do&rEK2-YWR4}6Mrn_q{3&RJ6g4PG_!pp#H3S$7x^y%nWeZw;Mwxy z2>Wa#Q((X*uxZKtlj?bxGlD>XMSo{^(n(q=)p#hSiN?stSFuvg!%px*vF-_oWE7_& z=^fo<7l_9=OvK$LGdGZO=<4tiGJ}tGYA4JSz{JLVGcm%62fajWhb+`0Ezbw+2uDZ_ z=`6RLzWT#dX3gKoYE$EXm_JK_!mwVwBN=!?H?4++G9*&csKyne*_VwXSq9#E1~s~F zMIi85_ukPN(T+_}kR2-9k#W*;r8n|SyDzP+b{?0njVMSb!wV%(ZkHTv2`vC Date: Tue, 8 Jun 2021 20:33:57 +1200 Subject: [PATCH 0123/2828] Fixed sanity checks for cloud/scaleway/ modules (#2678) * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_image_info.py * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_ip_info.py * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_security_group_info.py * fixed 
validation-modules for plugins/modules/cloud/scaleway/scaleway_server_info.py * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_snapshot_info.py * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_volume_info.py * sanity fix --- plugins/modules/cloud/scaleway/scaleway_image_info.py | 9 +++++---- plugins/modules/cloud/scaleway/scaleway_ip_info.py | 7 +++++-- .../cloud/scaleway/scaleway_security_group_info.py | 7 +++++-- plugins/modules/cloud/scaleway/scaleway_server_info.py | 7 +++++-- plugins/modules/cloud/scaleway/scaleway_snapshot_info.py | 7 +++++-- plugins/modules/cloud/scaleway/scaleway_volume_info.py | 7 +++++-- tests/sanity/ignore-2.10.txt | 6 ------ tests/sanity/ignore-2.11.txt | 6 ------ tests/sanity/ignore-2.12.txt | 6 ------ tests/sanity/ignore-2.9.txt | 6 ------ 10 files changed, 30 insertions(+), 38 deletions(-) diff --git a/plugins/modules/cloud/scaleway/scaleway_image_info.py b/plugins/modules/cloud/scaleway/scaleway_image_info.py index 3fad216ee5..609ba3d1e8 100644 --- a/plugins/modules/cloud/scaleway/scaleway_image_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_image_info.py @@ -19,9 +19,7 @@ author: extends_documentation_fragment: - community.general.scaleway - options: - region: type: str description: @@ -51,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_image_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
returned: success - type: complex + type: list + elements: dict sample: "scaleway_image_info": [ { diff --git a/plugins/modules/cloud/scaleway/scaleway_ip_info.py b/plugins/modules/cloud/scaleway/scaleway_ip_info.py index 145fb20338..e2e49557cc 100644 --- a/plugins/modules/cloud/scaleway/scaleway_ip_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_ip_info.py @@ -49,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_ip_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." returned: success - type: complex + type: list + elements: dict sample: "scaleway_ip_info": [ { diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_info.py b/plugins/modules/cloud/scaleway/scaleway_security_group_info.py index d3488f0c8b..1f5af7da53 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_security_group_info.py @@ -49,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_security_group_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." returned: success - type: complex + type: list + elements: dict sample: "scaleway_security_group_info": [ { diff --git a/plugins/modules/cloud/scaleway/scaleway_server_info.py b/plugins/modules/cloud/scaleway/scaleway_server_info.py index 43b0badc14..61bd9de41b 100644 --- a/plugins/modules/cloud/scaleway/scaleway_server_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_server_info.py @@ -49,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_server_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
returned: success - type: complex + type: list + elements: dict sample: "scaleway_server_info": [ { diff --git a/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py b/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py index f31b74b00e..95ec04d16f 100644 --- a/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py @@ -49,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_snapshot_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." returned: success - type: complex + type: list + elements: dict sample: "scaleway_snapshot_info": [ { diff --git a/plugins/modules/cloud/scaleway/scaleway_volume_info.py b/plugins/modules/cloud/scaleway/scaleway_volume_info.py index ff6093e830..0042146795 100644 --- a/plugins/modules/cloud/scaleway/scaleway_volume_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_volume_info.py @@ -49,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_volume_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
returned: success - type: complex + type: list + elements: dict sample: "scaleway_volume_info": [ { diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 1855fc963f..7a9c723337 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -9,13 +9,7 @@ plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-in plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 4727b8d6df..28fae579c9 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -8,13 +8,7 @@ plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-in plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path 
plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 74b1ea16f6..708845dae9 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -8,13 +8,7 @@ plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-in plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py 
validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 2dac082311..f7c8945c56 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -6,13 +6,7 @@ plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not- plugins/modules/cloud/rackspace/rax.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py 
validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc From eef645c3f7c94d5086532feb29184f71e72ab994 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 8 Jun 2021 20:36:14 +1200 Subject: [PATCH 0124/2828] with great powers come great responsibility (#2755) --- .github/BOTMETA.yml | 2 -- commit-rights.md | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 6727373e85..d9f99c60dc 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,7 +1,5 @@ automerge: true files: - plugins/: - supershipit: russoz changelogs/fragments/: support: community $actions: diff --git a/commit-rights.md b/commit-rights.md index 7aae8617fb..9b39d47b2c 100644 --- a/commit-rights.md +++ b/commit-rights.md @@ -67,6 +67,7 @@ Individuals who have been asked to become a part of this group have generally be | Name | GitHub ID | IRC Nick | Other | | ------------------- | -------------------- | ------------------ | -------------------- | +| Alexei Znamensky | russoz | russoz | | | Amin Vakil | aminvakil | aminvakil | | | Andrew Klychkov | andersson007 | andersson007_ | | | Felix Fontein | felixfontein | felixfontein | | From dab5d941e6ecc6401f97075eb9a5dabef984178e Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Tue, 8 Jun 2021 14:11:21 +0430 Subject: [PATCH 0125/2828] Add domain option to onepassword lookup (#2735) * Add domain to onepassword lookup * Add changelog * Add default to domain documentation * Improve format * Fix sanity issue * Add option type to documentation Co-authored-by: Felix Fontein * Add domain to init Co-authored-by: Felix Fontein --- .../fragments/2735-onepassword-add_domain_option.yml | 3 +++ plugins/lookup/onepassword.py | 9 ++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2735-onepassword-add_domain_option.yml diff --git 
a/changelogs/fragments/2735-onepassword-add_domain_option.yml b/changelogs/fragments/2735-onepassword-add_domain_option.yml new file mode 100644 index 0000000000..eef74439ce --- /dev/null +++ b/changelogs/fragments/2735-onepassword-add_domain_option.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - onepassword lookup plugin - add ``domain`` option (https://github.com/ansible-collections/community.general/issues/2734). diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index a2346ed072..715c337ffd 100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -30,6 +30,11 @@ DOCUMENTATION = ''' aliases: ['vault_password'] section: description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. + domain: + description: Domain of 1Password. Default is U(1password.com). + version_added: 3.2.0 + default: '1password.com' + type: str subdomain: description: The 1Password subdomain to authenticate against. username: @@ -109,6 +114,7 @@ class OnePass(object): self.logged_in = False self.token = None self.subdomain = None + self.domain = None self.username = None self.secret_key = None self.master_password = None @@ -168,7 +174,7 @@ class OnePass(object): args = [ 'signin', - '{0}.1password.com'.format(self.subdomain), + '{0}.{1}'.format(self.subdomain, self.domain), to_bytes(self.username), to_bytes(self.secret_key), '--output=raw', @@ -265,6 +271,7 @@ class LookupModule(LookupBase): section = kwargs.get('section') vault = kwargs.get('vault') op.subdomain = kwargs.get('subdomain') + op.domain = kwargs.get('domain', '1password.com') op.username = kwargs.get('username') op.secret_key = kwargs.get('secret_key') op.master_password = kwargs.get('master_password', kwargs.get('vault_password')) From 1e968bce27f8cab0b6decd89c53a2475f63ca6e3 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 8 Jun 2021 14:47:51 +0200 Subject: [PATCH 0126/2828] Next expected release is 3.3.0. 
--- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index ba1969d712..c559415eb2 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.2.0 +version: 3.3.0 readme: README.md authors: - Ansible (https://github.com/ansible) From f44300cec5fc002903139e8d5ee2f88f9d540262 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20Keil?= Date: Thu, 10 Jun 2021 22:05:04 +0200 Subject: [PATCH 0127/2828] add inventory plugin unit test `test_verify_file` (#2773) * add inventory plugin unit test `test_verify_file` * fix typos in `test_verify_file_bad_config` unit test --- tests/unit/plugins/inventory/test_cobbler.py | 8 +++++++- tests/unit/plugins/inventory/test_linode.py | 8 +++++++- tests/unit/plugins/inventory/test_lxd.py | 6 ++++++ tests/unit/plugins/inventory/test_proxmox.py | 6 ++++++ tests/unit/plugins/inventory/test_stackpath_compute.py | 6 ++++++ 5 files changed, 32 insertions(+), 2 deletions(-) diff --git a/tests/unit/plugins/inventory/test_cobbler.py b/tests/unit/plugins/inventory/test_cobbler.py index 477a3039f7..e184d166dc 100644 --- a/tests/unit/plugins/inventory/test_cobbler.py +++ b/tests/unit/plugins/inventory/test_cobbler.py @@ -37,5 +37,11 @@ def test_init_cache(inventory): assert inventory._cache[inventory.cache_key] == {} +def test_verify_file(tmp_path, inventory): + file = tmp_path / "foobar.cobbler.yml" + file.touch() + assert inventory.verify_file(str(file)) is True + + def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.cobber.yml') is False + assert inventory.verify_file('foobar.cobbler.yml') is False diff --git a/tests/unit/plugins/inventory/test_linode.py b/tests/unit/plugins/inventory/test_linode.py index 427a7c69b3..ab75c6c9fc 100644 --- a/tests/unit/plugins/inventory/test_linode.py +++ b/tests/unit/plugins/inventory/test_linode.py @@ -74,5 +74,11 @@ def test_conig_query_options(inventory): assert tags == ['web-server'] 
+def test_verify_file(tmp_path, inventory): + file = tmp_path / "foobar.linode.yml" + file.touch() + assert inventory.verify_file(str(file)) is True + + def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.linde.yml') is False + assert inventory.verify_file('foobar.linode.yml') is False diff --git a/tests/unit/plugins/inventory/test_lxd.py b/tests/unit/plugins/inventory/test_lxd.py index 8a98af6e71..04cea0af71 100644 --- a/tests/unit/plugins/inventory/test_lxd.py +++ b/tests/unit/plugins/inventory/test_lxd.py @@ -51,6 +51,12 @@ def inventory(): return inv +def test_verify_file(tmp_path, inventory): + file = tmp_path / "foobar.lxd.yml" + file.touch() + assert inventory.verify_file(str(file)) is True + + def test_verify_file_bad_config(inventory): assert inventory.verify_file('foobar.lxd.yml') is False diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index e248fb05e3..c2b0408138 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ b/tests/unit/plugins/inventory/test_proxmox.py @@ -21,6 +21,12 @@ def inventory(): return r +def test_verify_file(tmp_path, inventory): + file = tmp_path / "foobar.proxmox.yml" + file.touch() + assert inventory.verify_file(str(file)) is True + + def test_verify_file_bad_config(inventory): assert inventory.verify_file('foobar.proxmox.yml') is False diff --git a/tests/unit/plugins/inventory/test_stackpath_compute.py b/tests/unit/plugins/inventory/test_stackpath_compute.py index 9359cd680f..8a409becd6 100644 --- a/tests/unit/plugins/inventory/test_stackpath_compute.py +++ b/tests/unit/plugins/inventory/test_stackpath_compute.py @@ -66,6 +66,12 @@ def test_get_stack_slugs(inventory): ] +def test_verify_file(tmp_path, inventory): + file = tmp_path / "foobar.stackpath_compute.yml" + file.touch() + assert inventory.verify_file(str(file)) is True + + def test_verify_file_bad_config(inventory): assert inventory.verify_file('foobar.stackpath_compute.yml') 
is False From c2ce7a0752f84d7e4ca5b373ec7a6103e3ddd6a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc?= Date: Fri, 11 Jun 2021 13:05:29 +0200 Subject: [PATCH 0128/2828] [scaleway inventory] Fix JSON object must be str, not 'bytes' (#2771) * Fix JSON object decoding * Code improvement : python 3.5 fix * Add changelog fragment * Update changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2771-scaleway_inventory_json_accept_byte_array.yml | 3 +++ plugins/inventory/scaleway.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml diff --git a/changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml b/changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml new file mode 100644 index 0000000000..8a6bfd1603 --- /dev/null +++ b/changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml @@ -0,0 +1,3 @@ +bugfixes: + - scaleway plugin inventory - fix ``JSON object must be str, not 'bytes'`` with Python 3.5 + (https://github.com/ansible-collections/community.general/issues/2769). 
diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index ae557e2239..ad0a2321ae 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -89,7 +89,7 @@ from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link from ansible.module_utils.urls import open_url -from ansible.module_utils._text import to_native +from ansible.module_utils._text import to_native, to_text import ansible.module_utils.six.moves.urllib.parse as urllib_parse @@ -105,7 +105,7 @@ def _fetch_information(token, url): except Exception as e: raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e))) try: - raw_json = json.loads(response.read()) + raw_json = json.loads(to_text(response.read())) except ValueError: raise AnsibleError("Incorrect JSON payload") From 19549058ce9695b264e36701f2297e9d92b17bb5 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Fri, 11 Jun 2021 15:42:01 +0430 Subject: [PATCH 0129/2828] yum_versionlock: enable fedora34 integration test (#2543) * Re-enable Fedora 34 * Update procps-ng before anything in yum_versionlock integration test * Move procps-ng installation to block * Revert "Move procps-ng installation to block" This reverts commit 3aa873a110f629d83d393bac648917f3302d8c93. 
* Update procps-ng only on Fedora 34 --- tests/integration/targets/yum_versionlock/tasks/main.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/integration/targets/yum_versionlock/tasks/main.yml b/tests/integration/targets/yum_versionlock/tasks/main.yml index 4084bdcb91..d1a1522087 100644 --- a/tests/integration/targets/yum_versionlock/tasks/main.yml +++ b/tests/integration/targets/yum_versionlock/tasks/main.yml @@ -4,6 +4,12 @@ # and should not be used as examples of how to write Ansible roles # #################################################################### +- name: Update procps-ng temporary until issue (#2539) is fixed + yum: + name: procps-ng + state: latest + when: ansible_distribution == 'Fedora' and ansible_distribution_major_version == '34' + - block: - name: Install necessary packages to test yum_versionlock yum: @@ -60,4 +66,4 @@ state: absent when: yum_versionlock_install is changed when: (ansible_distribution in ['CentOS', 'RedHat'] and ansible_distribution_major_version is version('7', '>=')) or - (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('33', '<=')) + (ansible_distribution == 'Fedora') From 4b37b1bca630d240a9d0a1b7b5344100b1041a6f Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Fri, 11 Jun 2021 16:54:11 +0530 Subject: [PATCH 0130/2828] scaleway: Misc doc changes (#2776) * Updated example section for ``variables`` * Added link about token generation * Misc changes in doc Fixes: #467 Signed-off-by: Abhijeet Kasurde --- plugins/inventory/scaleway.py | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index ad0a2321ae..843a006738 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -1,24 +1,24 @@ -# Copyright (c) 2017 Ansible Project +# Copyright: (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' name: scaleway author: - Remy Leone (@sieben) short_description: Scaleway inventory source description: - - Get inventory hosts from Scaleway + - Get inventory hosts from Scaleway. options: plugin: - description: token that ensures this is a source file for the 'scaleway' plugin. + description: Token that ensures this is a source file for the 'scaleway' plugin. required: True choices: ['scaleway', 'community.general.scaleway'] regions: - description: Filter results on a specific Scaleway region + description: Filter results on a specific Scaleway region. type: list default: - ams1 @@ -26,11 +26,13 @@ DOCUMENTATION = ''' - par2 - waw1 tags: - description: Filter results on a specific tag + description: Filter results on a specific tag. type: list oauth_token: required: True - description: Scaleway OAuth token. + description: + - Scaleway OAuth token. + - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). env: # in order of precedence - name: SCW_TOKEN @@ -48,14 +50,14 @@ DOCUMENTATION = ''' - hostname - id variables: - description: 'set individual variables: keys are variable names and + description: 'Set individual variables: keys are variable names and values are templates. Any value returned by the L(Scaleway API, https://developer.scaleway.com/#servers-server-get) can be used.' 
type: dict ''' -EXAMPLES = ''' +EXAMPLES = r''' # scaleway_inventory.yml file in YAML format # Example command line: ansible-inventory --list -i scaleway_inventory.yml @@ -81,6 +83,15 @@ regions: - par1 variables: ansible_host: public_ip.address + +# Using static strings as variables +plugin: community.general.scaleway +hostnames: + - hostname +variables: + ansible_host: public_ip.address + ansible_connection: "'ssh'" + ansible_user: "'admin'" ''' import json @@ -230,8 +241,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): if not matching_tags: return set() - else: - return matching_tags.union((server_zone,)) + return matching_tags.union((server_zone,)) def _filter_host(self, host_infos, hostname_preferences): From 343339655ded19ec35c113643f1d777ceadf84f7 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Sun, 13 Jun 2021 08:25:50 +0200 Subject: [PATCH 0131/2828] Documentation fix for access_level parameter of gitlab_runner (#2788) * * Documentation fix for access_level parameter of gitlab_runner Signed-off-by: Alina Buzachis * Address reviewer's comments Signed-off-by: Alina Buzachis --- plugins/modules/source_control/gitlab/gitlab_runner.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py index 8803990f22..d38b4819a6 100644 --- a/plugins/modules/source_control/gitlab/gitlab_runner.py +++ b/plugins/modules/source_control/gitlab/gitlab_runner.py @@ -77,7 +77,9 @@ options: type: bool access_level: description: - - Determines if a runner can pick up jobs from protected branches. + - Determines if a runner can pick up jobs only from protected branches. + - If set to C(ref_protected), runner can pick up jobs only from protected branches. + - If set to C(not_protected), runner can pick up jobs from both protected and unprotected branches. 
required: False default: ref_protected choices: ["ref_protected", "not_protected"] From d4c4d00ad1f08a538bbb3f0868745b721e65c59c Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 13 Jun 2021 23:01:46 +0200 Subject: [PATCH 0132/2828] CI: Remove scripts that are no longer needed (#2793) * Remove scripts that are no longer needed. ci_complete * Remove sanity ignores. --- tests/sanity/ignore-2.10.txt | 2 - tests/sanity/ignore-2.11.txt | 2 - tests/sanity/ignore-2.12.txt | 2 - tests/sanity/ignore-2.9.txt | 2 - tests/utils/shippable/check_matrix.py | 120 -------------------------- tests/utils/shippable/timing.py | 16 ---- tests/utils/shippable/timing.sh | 5 -- 7 files changed, 149 deletions(-) delete mode 100755 tests/utils/shippable/check_matrix.py delete mode 100755 tests/utils/shippable/timing.py delete mode 100755 tests/utils/shippable/timing.sh diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 7a9c723337..c9d750f417 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -56,5 +56,3 @@ plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code -tests/utils/shippable/check_matrix.py replace-urlopen -tests/utils/shippable/timing.py shebang diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 28fae579c9..1311638dbc 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -55,5 +55,3 @@ plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated 
code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code -tests/utils/shippable/check_matrix.py replace-urlopen -tests/utils/shippable/timing.py shebang diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 708845dae9..f5b7d772fc 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -53,5 +53,3 @@ plugins/modules/system/ssh_config.py use-argspec-type-path # Required since modu plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path -tests/utils/shippable/check_matrix.py replace-urlopen -tests/utils/shippable/timing.py shebang diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index f7c8945c56..c8c5ff0d25 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -73,5 +73,3 @@ plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code -tests/utils/shippable/check_matrix.py replace-urlopen -tests/utils/shippable/timing.py shebang diff --git a/tests/utils/shippable/check_matrix.py b/tests/utils/shippable/check_matrix.py deleted file mode 100755 index ca56c4db3d..0000000000 --- a/tests/utils/shippable/check_matrix.py +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env python -"""Verify the currently executing Shippable test matrix matches the one defined in the "shippable.yml" file.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import datetime -import json -import os -import re 
-import sys -import time - -try: - from typing import NoReturn -except ImportError: - NoReturn = None - -try: - # noinspection PyCompatibility - from urllib2 import urlopen # pylint: disable=ansible-bad-import-from -except ImportError: - # noinspection PyCompatibility - from urllib.request import urlopen - - -def main(): # type: () -> None - """Main entry point.""" - repo_full_name = os.environ['REPO_FULL_NAME'] - required_repo_full_name = 'ansible-collections/community.general' - - if repo_full_name != required_repo_full_name: - sys.stderr.write('Skipping matrix check on repo "%s" which is not "%s".\n' % (repo_full_name, required_repo_full_name)) - return - - with open('shippable.yml', 'rb') as yaml_file: - yaml = yaml_file.read().decode('utf-8').splitlines() - - defined_matrix = [match.group(1) for match in [re.search(r'^ *- env: T=(.*)$', line) for line in yaml] if match and match.group(1) != 'none'] - - if not defined_matrix: - fail('No matrix entries found in the "shippable.yml" file.', - 'Did you modify the "shippable.yml" file?') - - run_id = os.environ['SHIPPABLE_BUILD_ID'] - sleep = 1 - jobs = [] - - for attempts_remaining in range(4, -1, -1): - try: - jobs = json.loads(urlopen('https://api.shippable.com/jobs?runIds=%s' % run_id).read()) - - if not isinstance(jobs, list): - raise Exception('Shippable run %s data is not a list.' % run_id) - - break - except Exception as ex: - if not attempts_remaining: - fail('Unable to retrieve Shippable run %s matrix.' % run_id, - str(ex)) - - sys.stderr.write('Unable to retrieve Shippable run %s matrix: %s\n' % (run_id, ex)) - sys.stderr.write('Trying again in %d seconds...\n' % sleep) - time.sleep(sleep) - sleep *= 2 - - if len(jobs) != len(defined_matrix): - if len(jobs) == 1: - hint = '\n\nMake sure you do not use the "Rebuild with SSH" option.' - else: - hint = '' - - fail('Shippable run %s has %d jobs instead of the expected %d jobs.' 
% (run_id, len(jobs), len(defined_matrix)), - 'Try re-running the entire matrix.%s' % hint) - - actual_matrix = dict((job.get('jobNumber'), dict(tuple(line.split('=', 1)) for line in job.get('env', [])).get('T', '')) for job in jobs) - errors = [(job_number, test, actual_matrix.get(job_number)) for job_number, test in enumerate(defined_matrix, 1) if actual_matrix.get(job_number) != test] - - if len(errors): - error_summary = '\n'.join('Job %s expected "%s" but found "%s" instead.' % (job_number, expected, actual) for job_number, expected, actual in errors) - - fail('Shippable run %s has a job matrix mismatch.' % run_id, - 'Try re-running the entire matrix.\n\n%s' % error_summary) - - -def fail(message, output): # type: (str, str) -> NoReturn - # Include a leading newline to improve readability on Shippable "Tests" tab. - # Without this, the first line becomes indented. - output = '\n' + output.strip() - - timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat() - - # hack to avoid requiring junit-xml, which isn't pre-installed on Shippable outside our test containers - xml = ''' - - -\t -\t\t -\t\t\t%s -\t\t -\t - -''' % (timestamp, message, output) - - path = 'shippable/testresults/check-matrix.xml' - dir_path = os.path.dirname(path) - - if not os.path.exists(dir_path): - os.makedirs(dir_path) - - with open(path, 'w') as junit_fd: - junit_fd.write(xml.lstrip()) - - sys.stderr.write(message + '\n') - sys.stderr.write(output + '\n') - - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/tests/utils/shippable/timing.py b/tests/utils/shippable/timing.py deleted file mode 100755 index fb538271b8..0000000000 --- a/tests/utils/shippable/timing.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python3.7 -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import time - -start = time.time() - -sys.stdin.reconfigure(errors='surrogateescape') -sys.stdout.reconfigure(errors='surrogateescape') 
- -for line in sys.stdin: - seconds = time.time() - start - sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line)) - sys.stdout.flush() diff --git a/tests/utils/shippable/timing.sh b/tests/utils/shippable/timing.sh deleted file mode 100755 index 77e2578304..0000000000 --- a/tests/utils/shippable/timing.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail -eu - -"$@" 2>&1 | "$(dirname "$0")/timing.py" From a55c96d5c14661be3257cf3b465cbf8b301435eb Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 14 Jun 2021 07:25:46 +0200 Subject: [PATCH 0133/2828] Make extra sanity test runner produce ansibullbot and JUnit output. (#2794) --- tests/utils/shippable/sanity.sh | 2 +- tests/utils/shippable/shippable.sh | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/utils/shippable/sanity.sh b/tests/utils/shippable/sanity.sh index 187105409c..eacbd81609 100755 --- a/tests/utils/shippable/sanity.sh +++ b/tests/utils/shippable/sanity.sh @@ -14,7 +14,7 @@ else fi if [ "${group}" == "extra" ]; then - ../internal_test_tools/tools/run.py --color + ../internal_test_tools/tools/run.py --color --bot --junit exit fi diff --git a/tests/utils/shippable/shippable.sh b/tests/utils/shippable/shippable.sh index f70aa11380..472bfca1ca 100755 --- a/tests/utils/shippable/shippable.sh +++ b/tests/utils/shippable/shippable.sh @@ -73,6 +73,10 @@ else export ANSIBLE_COLLECTIONS_PATHS="${PWD}/../../../" fi +if [ "${test}" == "sanity/extra" ]; then + retry pip install junit-xml --disable-pip-version-check +fi + # START: HACK install dependencies if [ "${script}" != "sanity" ] || [ "${test}" == "sanity/extra" ]; then # Nothing further should be added to this list. 
From 0bd345bfb04d0a98e5f53e979234502d2c4c495b Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Mon, 14 Jun 2021 21:52:01 +0530 Subject: [PATCH 0134/2828] timezone: change warning to debug (#2789) * timezone: change warning to debug Convert warning message to debug when timedatectl found but not usable. Fixes: #1942 Signed-off-by: Abhijeet Kasurde * add changelog entry Signed-off-by: Abhijeet Kasurde --- changelogs/fragments/1942_timezone.yml | 3 +++ plugins/modules/system/timezone.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1942_timezone.yml diff --git a/changelogs/fragments/1942_timezone.yml b/changelogs/fragments/1942_timezone.yml new file mode 100644 index 0000000000..349c263298 --- /dev/null +++ b/changelogs/fragments/1942_timezone.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- timezone - print error message to debug instead of warning when timedatectl fails (https://github.com/ansible-collections/community.general/issues/1942). diff --git a/plugins/modules/system/timezone.py b/plugins/modules/system/timezone.py index 3cb7601441..27dfc9a98d 100644 --- a/plugins/modules/system/timezone.py +++ b/plugins/modules/system/timezone.py @@ -107,7 +107,7 @@ class Timezone(object): if rc == 0: return super(Timezone, SystemdTimezone).__new__(SystemdTimezone) else: - module.warn('timedatectl command was found but not usable: %s. using other method.' % stderr) + module.debug('timedatectl command was found but not usable: %s. using other method.' 
% stderr) return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) else: return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) From bccf317814f959c23d5b0a61b4c65afb3ab55310 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Mon, 14 Jun 2021 21:38:28 +0300 Subject: [PATCH 0135/2828] BOTMETA.yml: supershipit to quidame (#2801) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index d9f99c60dc..199a2f2c3c 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,5 +1,7 @@ automerge: true files: + plugins/: + supershipit: quidame changelogs/fragments/: support: community $actions: From b1b34ee12ea2d404232578a63336e02d38fa6e11 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 15 Jun 2021 16:11:48 +0300 Subject: [PATCH 0136/2828] BOTMETA.yml: grant supershipit (#2807) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 199a2f2c3c..8df7297720 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,7 +1,7 @@ automerge: true files: plugins/: - supershipit: quidame + supershipit: quidame Ajpantuso changelogs/fragments/: support: community $actions: From adf50b106aa956ed1e0a9481965c9dc22a46993a Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Tue, 15 Jun 2021 21:49:18 +0430 Subject: [PATCH 0137/2828] Add Test PRs locally section to CONTRIBUTING.md (#2738) * Add Test PRs locally section to CONTRIBUTING.md * fix formatting Co-authored-by: Felix Fontein * Adjust PR now that ansible-collections/community-docs#16 has been merged * improve sentence Co-authored-by: Andrew Klychkov Co-authored-by: Felix Fontein Co-authored-by: Andrew Klychkov --- CONTRIBUTING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 959d363236..5a068f9414 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -29,4 +29,8 @@ Also, consider taking up a valuable, 
reviewed, but abandoned pull request which You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst). +## Test pull requests + +If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_quick_start_guide.rst) for instructions on how do it quickly. + If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it. From 2f2f384b4e7f9455631f9143dcb2c5f76817ed67 Mon Sep 17 00:00:00 2001 From: Tong He <68936428+unnecessary-username@users.noreply.github.com> Date: Wed, 16 Jun 2021 03:01:54 -0400 Subject: [PATCH 0138/2828] redhat_subscription: Add server_prefix and server_port as supported arguments (#2779) * Add server_prefix and server_port as supported arguments for the redhat_subscription module. * Adjust the argument sequence in the test case to be consistent with the original code in line 364 in redhat_subscription.py and add the changelog fragment. * Grammatical changes such as adding full stops and using 'an HTTP' instead of 'a HTTP'. * Commit the suggested changelog update. Co-authored-by: Amin Vakil * Fix typo. 
Co-authored-by: Amin Vakil Co-authored-by: Amin Vakil --- ...tion-add_server_prefix_and_server_port.yml | 2 + .../packaging/os/redhat_subscription.py | 20 +++++++-- .../packaging/os/test_redhat_subscription.py | 41 +++++++++++++++++++ 3 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2779_redhat_subscription-add_server_prefix_and_server_port.yml diff --git a/changelogs/fragments/2779_redhat_subscription-add_server_prefix_and_server_port.yml b/changelogs/fragments/2779_redhat_subscription-add_server_prefix_and_server_port.yml new file mode 100644 index 0000000000..d484874ee9 --- /dev/null +++ b/changelogs/fragments/2779_redhat_subscription-add_server_prefix_and_server_port.yml @@ -0,0 +1,2 @@ +minor_changes: + - redhat_subscription - add ``server_prefix`` and ``server_port`` parameters (https://github.com/ansible-collections/community.general/pull/2779). diff --git a/plugins/modules/packaging/os/redhat_subscription.py b/plugins/modules/packaging/os/redhat_subscription.py index b62a7f391c..c8b5e991a0 100644 --- a/plugins/modules/packaging/os/redhat_subscription.py +++ b/plugins/modules/packaging/os/redhat_subscription.py @@ -32,7 +32,7 @@ options: type: str username: description: - - access.redhat.com or Sat6 username + - access.redhat.com or Sat6 username type: str password: description: @@ -46,6 +46,16 @@ options: description: - Enable or disable https server certificate verification when connecting to C(server_hostname) type: str + server_prefix: + description: + - Specify the prefix when registering to the Red Hat Subscription Management or Sat6 server. + type: str + version_added: 3.3.0 + server_port: + description: + - Specify the port when registering to the Red Hat Subscription Management or Sat6 server. 
+ type: str + version_added: 3.3.0 rhsm_baseurl: description: - Specify CDN baseurl @@ -56,11 +66,11 @@ options: type: str server_proxy_hostname: description: - - Specify a HTTP proxy hostname + - Specify an HTTP proxy hostname. type: str server_proxy_port: description: - - Specify a HTTP proxy port + - Specify an HTTP proxy port. type: str server_proxy_user: description: @@ -782,6 +792,8 @@ def main(): 'password': {'no_log': True}, 'server_hostname': {}, 'server_insecure': {}, + 'server_prefix': {}, + 'server_port': {}, 'rhsm_baseurl': {}, 'rhsm_repo_ca_cert': {}, 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'}, @@ -827,6 +839,8 @@ def main(): password = module.params['password'] server_hostname = module.params['server_hostname'] server_insecure = module.params['server_insecure'] + server_prefix = module.params['server_prefix'] + server_port = module.params['server_port'] rhsm_baseurl = module.params['rhsm_baseurl'] rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert'] auto_attach = module.params['auto_attach'] diff --git a/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py b/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py index ef6f28b812..7f430ee72c 100644 --- a/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py +++ b/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py @@ -258,6 +258,47 @@ TEST_CASES = [ 'msg': "System successfully registered to 'None'." 
} ], + # Test of registration with arguments that are not part of register options but needs to be configured + [ + { + 'state': 'present', + 'username': 'admin', + 'password': 'admin', + 'org_id': 'admin', + 'force_register': 'true', + 'server_prefix': '/rhsm', + 'server_port': '443' + }, + { + 'id': 'test_arguments_not_in_register_options', + 'run_command.calls': [ + ( + ['/testbin/subscription-manager', 'identity'], + {'check_rc': False}, + (0, 'This system already registered.', '') + ), + ( + ['/testbin/subscription-manager', 'config', + '--server.port=443', + '--server.prefix=/rhsm' + ], + {'check_rc': True}, + (0, '', '') + ), + ( + ['/testbin/subscription-manager', 'register', + '--force', + '--org', 'admin', + '--username', 'admin', + '--password', 'admin'], + {'check_rc': True, 'expand_user_and_vars': False}, + (0, '', '') + ) + ], + 'changed': True, + 'msg': "System successfully registered to 'None'." + } + ], # Test of registration using username, password and proxy options [ { From 3ca98c2edd2cd878b635b347c44b7de30b3522b7 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Wed, 16 Jun 2021 13:58:09 -0400 Subject: [PATCH 0139/2828] callback_splunk - Add user-configurable event correlation id (#2790) * Initial commit * Adding changelog fragment * Updating batch description * Update plugins/callback/splunk.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2790-callback_splunk-batch-option.yml | 3 +++ plugins/callback/splunk.py | 22 ++++++++++++++++++- tests/unit/plugins/callback/test_splunk.py | 10 +++++++-- 3 files changed, 32 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2790-callback_splunk-batch-option.yml diff --git a/changelogs/fragments/2790-callback_splunk-batch-option.yml b/changelogs/fragments/2790-callback_splunk-batch-option.yml new file mode 100644 index 0000000000..70ee61ed64 --- /dev/null +++ b/changelogs/fragments/2790-callback_splunk-batch-option.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - splunk 
callback plugin - add ``batch`` option for user-configurable correlation ID's (https://github.com/ansible-collections/community.general/issues/2790). diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py index f782161765..cb63d3b23f 100644 --- a/plugins/callback/splunk.py +++ b/plugins/callback/splunk.py @@ -68,6 +68,16 @@ DOCUMENTATION = ''' type: bool default: false version_added: 2.0.0 + batch: + description: + - Correlation ID which can be set across multiple playbook executions. + env: + - name: SPLUNK_BATCH + ini: + - section: callback_splunk + key: batch + type: str + version_added: 3.3.0 ''' EXAMPLES = ''' @@ -107,7 +117,7 @@ class SplunkHTTPCollectorSource(object): self.ip_address = socket.gethostbyname(socket.gethostname()) self.user = getpass.getuser() - def send_event(self, url, authtoken, validate_certs, include_milliseconds, state, result, runtime): + def send_event(self, url, authtoken, validate_certs, include_milliseconds, batch, state, result, runtime): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True @@ -126,6 +136,8 @@ class SplunkHTTPCollectorSource(object): data = {} data['uuid'] = result._task._uuid data['session'] = self.session + if batch is not None: + data['batch'] = batch data['status'] = state if include_milliseconds: @@ -175,6 +187,7 @@ class CallbackModule(CallbackBase): self.authtoken = None self.validate_certs = None self.include_milliseconds = None + self.batch = None self.splunk = SplunkHTTPCollectorSource() def _runtime(self, result): @@ -212,6 +225,8 @@ class CallbackModule(CallbackBase): self.include_milliseconds = self.get_option('include_milliseconds') + self.batch = self.get_option('batch') + def v2_playbook_on_start(self, playbook): self.splunk.ansible_playbook = basename(playbook._file_name) @@ -227,6 +242,7 @@ class CallbackModule(CallbackBase): self.authtoken, self.validate_certs, self.include_milliseconds, + self.batch, 'OK', result, self._runtime(result) @@ 
-238,6 +254,7 @@ class CallbackModule(CallbackBase): self.authtoken, self.validate_certs, self.include_milliseconds, + self.batch, 'SKIPPED', result, self._runtime(result) @@ -249,6 +266,7 @@ class CallbackModule(CallbackBase): self.authtoken, self.validate_certs, self.include_milliseconds, + self.batch, 'FAILED', result, self._runtime(result) @@ -260,6 +278,7 @@ class CallbackModule(CallbackBase): self.authtoken, self.validate_certs, self.include_milliseconds, + self.batch, 'FAILED', result, self._runtime(result) @@ -271,6 +290,7 @@ class CallbackModule(CallbackBase): self.authtoken, self.validate_certs, self.include_milliseconds, + self.batch, 'UNREACHABLE', result, self._runtime(result) diff --git a/tests/unit/plugins/callback/test_splunk.py b/tests/unit/plugins/callback/test_splunk.py index df4db38d56..3230228da1 100644 --- a/tests/unit/plugins/callback/test_splunk.py +++ b/tests/unit/plugins/callback/test_splunk.py @@ -43,7 +43,10 @@ class TestSplunkClient(unittest.TestCase): mock_datetime.utcnow.return_value = datetime(2020, 12, 1) result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) - self.splunk.send_event(url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=True, state='OK', result=result, runtime=100) + self.splunk.send_event( + url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=True, + batch="abcefghi-1234-5678-9012-abcdefghijkl", state='OK', result=result, runtime=100 + ) args, kwargs = open_url_mock.call_args sent_data = json.loads(args[1]) @@ -58,7 +61,10 @@ class TestSplunkClient(unittest.TestCase): mock_datetime.utcnow.return_value = datetime(2020, 12, 1) result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) - self.splunk.send_event(url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=False, state='OK', result=result, runtime=100) + self.splunk.send_event( + 
url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=False, + batch="abcefghi-1234-5678-9012-abcdefghijkl", state='OK', result=result, runtime=100 + ) args, kwargs = open_url_mock.call_args sent_data = json.loads(args[1]) From 4a47d121aa135f246b0ec6c0072f5eaaa532cad5 Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Thu, 17 Jun 2021 00:53:54 +0530 Subject: [PATCH 0140/2828] pamd: Add a note in docs about authselect profiles (#2815) pamd module does not handle or modify authselect profiles which are basically template files for authselect. The autheselect generates pam.d files from these profiles. Fixes: #1954 Signed-off-by: Abhijeet Kasurde --- plugins/modules/system/pamd.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/modules/system/pamd.py b/plugins/modules/system/pamd.py index 45f0082693..39b3f32e44 100644 --- a/plugins/modules/system/pamd.py +++ b/plugins/modules/system/pamd.py @@ -16,7 +16,9 @@ short_description: Manage PAM Modules description: - Edit PAM service's type, control, module path and module arguments. - In order for a PAM rule to be modified, the type, control and - module_path must match an existing rule. See man(5) pam.d for details. + module_path must match an existing rule. See man(5) pam.d for details. +notes: + - This module does not handle authselect profiles. options: name: description: From 3997d5fcc8a548e96ca1d6f2a6bddf7f8b8fd655 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 17 Jun 2021 18:00:49 +1200 Subject: [PATCH 0141/2828] flatpak - allow to add/remove multiple flatpaks at once (#2521) * reviving flatpack PR * added changelog fragment * adjusted integration tests per PR * adjusted examples to use the full name of the module * Use new local artifacts. * Re-add StrictVersion import. * Try to clean up PR. * ... * Use original name in installed/not installed list. * More fixes. * Work around flatpak bug. 
* Fix bug I introduced. Co-authored-by: Felix Fontein --- changelogs/fragments/2521-flatpak-list.yml | 2 + plugins/modules/packaging/os/flatpak.py | 107 ++++++++----- .../targets/flatpak/tasks/setup.yml | 4 +- .../targets/flatpak/tasks/test.yml | 143 ++++++++++++++++++ .../setup_flatpak_remote/create-repo.sh | 2 +- .../setup_flatpak_remote/files/repo.tar.xz | Bin 6436 -> 7352 bytes 6 files changed, 220 insertions(+), 38 deletions(-) create mode 100644 changelogs/fragments/2521-flatpak-list.yml diff --git a/changelogs/fragments/2521-flatpak-list.yml b/changelogs/fragments/2521-flatpak-list.yml new file mode 100644 index 0000000000..e30607b306 --- /dev/null +++ b/changelogs/fragments/2521-flatpak-list.yml @@ -0,0 +1,2 @@ +minor_changes: +- flatpak - allows installing or uninstalling a list of packages (https://github.com/ansible-collections/community.general/pull/2521). diff --git a/plugins/modules/packaging/os/flatpak.py b/plugins/modules/packaging/os/flatpak.py index 4a9e214fde..7f3963ad3e 100644 --- a/plugins/modules/packaging/os/flatpak.py +++ b/plugins/modules/packaging/os/flatpak.py @@ -38,7 +38,8 @@ options: default: system name: description: - - The name of the flatpak to manage. + - The name of the flatpak to manage. To operate on several packages this + can accept a list of packages. - When used with I(state=present), I(name) can be specified as a URL to a C(flatpakref) file or the unique reverse DNS name that identifies a flatpak. - Both C(https://) and C(http://) URLs are supported. @@ -50,7 +51,8 @@ options: installed flatpak based on the name of the flatpakref to remove it. However, there is no guarantee that the names of the flatpakref file and the reverse DNS name of the installed flatpak do match. 
- type: str + type: list + elements: str required: true no_dependencies: description: @@ -101,10 +103,25 @@ EXAMPLES = r''' state: present remote: gnome +- name: Install multiple packages + community.general.flatpak: + name: + - org.gimp.GIMP + - org.inkscape.Inkscape + - org.mozilla.firefox + - name: Remove the gedit flatpak community.general.flatpak: name: org.gnome.gedit state: absent + +- name: Remove multiple packages + community.general.flatpak: + name: + - org.gimp.GIMP + - org.inkscape.Inkscape + - org.mozilla.firefox + state: absent ''' RETURN = r''' @@ -143,47 +160,64 @@ from ansible.module_utils.basic import AnsibleModule OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application" -def install_flat(module, binary, remote, name, method, no_dependencies): - """Add a new flatpak.""" +def install_flat(module, binary, remote, names, method, no_dependencies): + """Add new flatpaks.""" global result + uri_names = [] + id_names = [] + for name in names: + if name.startswith('http://') or name.startswith('https://'): + uri_names.append(name) + else: + id_names.append(name) + base_command = [binary, "install", "--{0}".format(method)] + flatpak_version = _flatpak_version(module, binary) + if StrictVersion(flatpak_version) < StrictVersion('1.1.3'): + base_command += ["-y"] + else: + base_command += ["--noninteractive"] + if no_dependencies: + base_command += ["--no-deps"] + if uri_names: + command = base_command + uri_names + _flatpak_command(module, module.check_mode, command) + if id_names: + command = base_command + [remote] + id_names + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def uninstall_flat(module, binary, names, method): + """Remove existing flatpaks.""" + global result + installed_flat_names = [ + _match_installed_flat_name(module, binary, name, method) + for name in names + ] + command = [binary, "uninstall"] flatpak_version = _flatpak_version(module, binary) - command = [binary, "install", 
"--{0}".format(method)] if StrictVersion(flatpak_version) < StrictVersion('1.1.3'): command += ["-y"] else: command += ["--noninteractive"] - if no_dependencies: - command += ["--no-deps"] - if name.startswith('http://') or name.startswith('https://'): - command += [name] - else: - command += [remote, name] + command += ["--{0}".format(method)] + installed_flat_names _flatpak_command(module, module.check_mode, command) result['changed'] = True -def uninstall_flat(module, binary, name, method): - """Remove an existing flatpak.""" - global result - flatpak_version = _flatpak_version(module, binary) - if StrictVersion(flatpak_version) < StrictVersion('1.1.3'): - noninteractive_arg = "-y" - else: - noninteractive_arg = "--noninteractive" - installed_flat_name = _match_installed_flat_name(module, binary, name, method) - command = [binary, "uninstall", "--{0}".format(method), noninteractive_arg, name] - _flatpak_command(module, module.check_mode, command) - result['changed'] = True - - -def flatpak_exists(module, binary, name, method): - """Check if the flatpak is installed.""" +def flatpak_exists(module, binary, names, method): + """Check if the flatpaks are installed.""" command = [binary, "list", "--{0}".format(method), "--app"] output = _flatpak_command(module, False, command) - name = _parse_flatpak_name(name).lower() - if name in output.lower(): - return True - return False + installed = [] + not_installed = [] + for name in names: + parsed_name = _parse_flatpak_name(name).lower() + if parsed_name in output.lower(): + installed.append(name) + else: + not_installed.append(name) + return installed, not_installed def _match_installed_flat_name(module, binary, name, method): @@ -266,7 +300,7 @@ def main(): # This module supports check mode module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), + name=dict(type='list', elements='str', required=True), remote=dict(type='str', default='flathub'), method=dict(type='str', default='system', 
choices=['user', 'system']), @@ -295,10 +329,11 @@ def main(): if not binary: module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result) - if state == 'present' and not flatpak_exists(module, binary, name, method): - install_flat(module, binary, remote, name, method, no_dependencies) - elif state == 'absent' and flatpak_exists(module, binary, name, method): - uninstall_flat(module, binary, name, method) + installed, not_installed = flatpak_exists(module, binary, name, method) + if state == 'present' and not_installed: + install_flat(module, binary, remote, not_installed, method, no_dependencies) + elif state == 'absent' and installed: + uninstall_flat(module, binary, installed, method) module.exit_json(**result) diff --git a/tests/integration/targets/flatpak/tasks/setup.yml b/tests/integration/targets/flatpak/tasks/setup.yml index 98b07cd480..8fc0a23566 100644 --- a/tests/integration/targets/flatpak/tasks/setup.yml +++ b/tests/integration/targets/flatpak/tasks/setup.yml @@ -36,7 +36,9 @@ - name: Remove (if necessary) flatpak for testing check mode on absent flatpak flatpak: - name: com.dummy.App1 + name: + - com.dummy.App1 + - com.dummy.App3 remote: dummy-remote state: absent no_dependencies: true diff --git a/tests/integration/targets/flatpak/tasks/test.yml b/tests/integration/targets/flatpak/tasks/test.yml index 7442e4b468..e1bfdbee09 100644 --- a/tests/integration/targets/flatpak/tasks/test.yml +++ b/tests/integration/targets/flatpak/tasks/test.yml @@ -139,3 +139,146 @@ that: - double_url_removal_result is not changed msg: "state=absent with url as name shall not do anything when flatpak is not present" + +- name: Make sure flatpak is really gone - {{ method }} + flatpak: + name: com.dummy.App1 + state: absent + method: "{{ method }}" + no_dependencies: true + +# state=present with list of packages + +- name: Test addition with list - {{ method }} + flatpak: + name: + - com.dummy.App1 + - 
http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + remote: dummy-remote + state: present + method: "{{ method }}" + no_dependencies: true + register: addition_result + +- name: Verify addition with list test result - {{ method }} + assert: + that: + - addition_result is changed + msg: "state=present shall add flatpak when absent" + +- name: Test idempotency of addition with list - {{ method }} + flatpak: + name: + - com.dummy.App1 + - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + remote: dummy-remote + state: present + method: "{{ method }}" + no_dependencies: true + register: double_addition_result + +- name: Verify idempotency of addition with list test result - {{ method }} + assert: + that: + - double_addition_result is not changed + msg: "state=present shall not do anything when flatpak is already present" + +- name: Test addition with list partially installed - {{ method }} + flatpak: + name: + - com.dummy.App1 + - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + - com.dummy.App3 + remote: dummy-remote + state: present + method: "{{ method }}" + no_dependencies: true + register: addition_result + +- name: Verify addition with list partially installed test result - {{ method }} + assert: + that: + - addition_result is changed + msg: "state=present shall add flatpak when absent" + +- name: Test idempotency of addition with list partially installed - {{ method }} + flatpak: + name: + - com.dummy.App1 + - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + - com.dummy.App3 + remote: dummy-remote + state: present + method: "{{ method }}" + no_dependencies: true + register: double_addition_result + +- name: Verify idempotency of addition with list partially installed test result - {{ method }} + assert: + that: + - double_addition_result is not changed + msg: "state=present shall not do anything when flatpak is already present" + +# state=absent with list of packages + +- name: Test removal with list - {{ method }} + flatpak: + name: + - 
com.dummy.App1 + - com.dummy.App2 + state: absent + method: "{{ method }}" + register: removal_result + +- name: Verify removal with list test result - {{ method }} + assert: + that: + - removal_result is changed + msg: "state=absent shall remove flatpak when present" + +- name: Test idempotency of removal with list - {{ method }} + flatpak: + name: + - com.dummy.App1 + - com.dummy.App2 + state: absent + method: "{{ method }}" + register: double_removal_result + +- name: Verify idempotency of removal with list test result - {{ method }} + assert: + that: + - double_removal_result is not changed + msg: "state=absent shall not do anything when flatpak is not present" + +- name: Test removal with list partially removed - {{ method }} + flatpak: + name: + - com.dummy.App1 + - com.dummy.App2 + - com.dummy.App3 + state: absent + method: "{{ method }}" + register: removal_result + +- name: Verify removal with list partially removed test result - {{ method }} + assert: + that: + - removal_result is changed + msg: "state=absent shall remove flatpak when present" + +- name: Test idempotency of removal with list partially removed - {{ method }} + flatpak: + name: + - com.dummy.App1 + - com.dummy.App2 + - com.dummy.App3 + state: absent + method: "{{ method }}" + register: double_removal_result + +- name: Verify idempotency of removal with list partially removed test result - {{ method }} + assert: + that: + - double_removal_result is not changed + msg: "state=absent shall not do anything when flatpak is not present" diff --git a/tests/integration/targets/setup_flatpak_remote/create-repo.sh b/tests/integration/targets/setup_flatpak_remote/create-repo.sh index 4ece76ccfc..3f44fe96f2 100755 --- a/tests/integration/targets/setup_flatpak_remote/create-repo.sh +++ b/tests/integration/targets/setup_flatpak_remote/create-repo.sh @@ -18,7 +18,7 @@ flatpak install -y --system flathub org.freedesktop.Platform//1.6 org.freedeskto # Add individual flatpaks echo $'#!/bin/sh\necho hello 
world' > hello.sh -for NUM in 1 2; do +for NUM in 1 2 3; do flatpak build-init appdir${NUM} com.dummy.App${NUM} org.freedesktop.Sdk org.freedesktop.Platform 1.6; flatpak build appdir${NUM} mkdir /app/bin; flatpak build appdir${NUM} install --mode=750 hello.sh /app/bin; diff --git a/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz b/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz index bed20ff713f57730bc667b75d72a8560df22b636..609acaad7ba44e1b9b836ce80e528b9c5cab9a2c 100644 GIT binary patch literal 7352 zcmV;p97p5*H+ooF000E$*0e?f03iVu0001VFXf}*Xa5{_T>v>5N@un}Hi1L#y~)Z- z{84!W3f)Esb7jq5>g{oDum66E2a4MLZkl)>lqx^in)Ih^T8CUfhB1~dt?%8H8XP^b z9h}A(U~MNWX1tu~C*!)MKp3W*&!WAT31)mB@{V*6JF>PF)YRv6O>LqTQv3!Z!el2{ zaQkzYAXd|8_KjmqEG}{O=#c`t67-10;#q~d!y&%ubUW3y|2=?y&56W_MAv|yDz;Rd z^%k|Lh4klnN^;MO=Po#xICgfHurk}B)Lf6}XPjtoXwS>Va1iDxkoR>a6dFJ#zBWRZ(R8 zP;Aq?st;i6a8HF5&}{*B52sEo1Eu=TKeOGJwO$u*ICnD7y>I<|%6NLMp+{FTvMW$D zwWISRfH54$H3pm6!B$OSVYDigbV3bWcbe&>-y~3+K^RQl(D0H@N-z&czE;DX$*cS@ z^{IqzR8itz-ipygilAjij!S2`joYHBL$@YZv~YNqHGiYehh0`Rhi1kWeGs#Sjo_Sm z&JM#^1fh}nmtI%TRc`fL;$HOFFo^^XaLf_tG6>-0?9_V?M3quPrV;wd<@TmBsS>w3 ztO=oa&B{1E-0V^OXG?y{&=M%sz8kxmJL`*<1mA;dCrCY6mvkKy1P^udVdYMI_p~GI zFNkn-SZh7USmfN~LvytE)l~u|%ZrX#!F-iUcb8WD&HD`)6C`0=XMlo6t%~az0)X+Jn_l zR+7HyiTq#;OEHk!$oCX{Gyu;-2_e|j0`0OKHvc!$USU^}Dzp$!Qu=jC1I(<$TLG5m zr``M+6Y`1Y^`NrND-%wxp|Ll*?m3#zh&~IfKelv}oNm-Dq6F2Wkz0(z=j;tLhLffL z!%bD-JOZz8;KKbH(ylcvA@Vg}XR$xLK)L155Vvn|gW{5E@KT{qW5|G?iOLvmz8-?( zRSV{0BF$1fAf_amI2uL;hw$yT#z(>1*Q4}$tnv}YwonwogkEK+`hIQFEC7W5+IC4D zj#oD4+bW)ZAY>fGHzMf-MR_FReC8CRJ5*u!rEG6b&3UXqbq#|ujRi21P1X7W$M zt#|5Y0?&Uhj;=fRYb7Q{EPVt|r#Hgc_xat1nYavN!tUiYJB&XjzlY#mku=y;Ns}Y% z8H`zm3cHW)E{P<0gTW+(h+c$79;i9Kl`7%AZPpeL8Vu;V?X9_;5l7kxRikoBoKIc1 zyT%CI^BLHFFEp?!giDr zWP!ua64y_HS02||S{&Y28hV$Ugm>P3rw0;tvT28JdIpON8YlNhSPFS5muY<>%t(O3 z&@3Ku`!_gna+Tp|xG%zW9HWTz@YVMi-IfI4MSk-U%Fl=Q8iS0C#q@ot_w)TZp3B)L z?bGB@Y%?n_Ert!~$<7#dAxY=VAVhEzQjnQjJmM%PnN2xgwJdnmgt)h*qvLPaYlSb| 
z$M89;(bo~;@^duZ9^h(eZ0>XL=+R3O)7FzxcD`k?Q1&YQc>bB*yI^ zm2=|p$h@I^LRDM zG}Y`F(MBjtbuvLz?E?^ZzVo_52!@K`pz-qPc~37H^c;BoF23%>AnqJuK<6 zs2i(<=?QkvLPb|?Y2i%KSbD~}45DwwSfpVtrrV3vfJ6?b+qOklP}`yJ3=}RiwLOa% z>G$sfhLqkCg*2cbV1hTux0p52CKuY5hYB+pDzTrOM2qws(-DyDMIISPt1bV6s5lEQ z;rZUFjm9M+#mtuir0*6Sf4-?xH*8y`vP@r2dtZT=<*;qdZDtFu)3diJSkrv!L<^uqdi7|_6o%p4tG zXJh+2oqVjLQ$5T%3KzzGo37vQoUD4wL>K;QmXDSTrpn3Y=-y?-pOwU$tEbF{gV05I zR1Wj@*^O@V!^O_=XD8ac#O%u^t66|v{4-VY_I*Tlx{J~zeDq;ku~GP%M+q;JozdiJ zN^oFM{^#Sg1Cc`}*A&u;_CasTqlvpdyKLyI5DziPsHlm=o5b4u_;)Vn01!U@wj8p5 zW0iL8<|_xDPb{dc&C-^NP$Fz^mQe$qmd^&ALohcR@!@)sv@ii*FJx{s$Lk8qhA0n= zD}X3&{Sf-^Lnp`|S)urYM0dB6dQqN`JmaKxf2Z2e^@06}?Sp^2Iz$&gcMxfu(1;B* zz%`7yYcbZoQ7UndwM++qqM}8Z18C{NtayqFhpC4+(99?&XAGkuuXgk4z)5L+4T$UQU`4GNZT7Cm)1#hn_AH(_>z zR1A7gV2kgO!u#=U!xmeXSwke8qBK|!1R%Oz@utXGg^$^kbTE1EQ`id2OTX|M-#=t& z54ZB2fnpN&*~~^aU()sUefbr29Yar2F`8}z<5}6^VGNu;nI9;t zyG#QCL09o%6&}<0M@Rj9YU1y2ftoFyC)Z-jUsqM~+(dBt-yXA5JOnxp>3*bvEp%xl zfnv4?)?64Wb7S>hD{MXU@^5dDjfoEN;5V-n%6cio@P(q-PDxdbT_uzmGGe-(Dn5dz zHp-t@54vP=l&fv4CuJw}1E!0(wtfSpUKdWJ1@DPK3TedNk>p3*HuO=Y9RBTXX}_#X z3y{-CQL+eueNusB%niu{aN@yhH;k*1BQpxJ+$1}LfoN5&oWI{GHK4R(+!6@};p{M> zRk1Lg*fusH4$e%Eud?U78%zNn#|HxGvihEY9ZL%I2R#xy52=V$61B%knsM%jX@|XP z&`oLe$}5=Fwsqre)Cbu=+b9mW6H^ob_9j_#l)bE3%_Ca_IT5y@Gasf#HZ(2D*QbDr zL@q2NM~_YL(uq>QSd%i{Ive^$^{&+;CePN2)|(8M)ar136t7d^5Qh#-?0}Q*=RpB{ zm8w{{6wa@u|LjJooulS{$1a`;QYhY!fVmJNq&|ajGv||qSF4-F+%#XSvVn*P{8tX3 zZdxD3I98d#+1|zMe1TJ=1q>GN=ahFOyJLvZLwk7t!6i{ohTu!io)pFb+>$a;f%aS$ z2Xgp+=yK7=K56D|j8LDWj#OPS@i-oEm}IqFd6^tCumMDRoQ0co=pTG@BmXh{-K11} zjFtyrVNh)e;ip{HBTmb@y1My4g+=p3l{q`&|H>wrc#mM>f~%Y>oaDP4P(M8NmAPwI z0#3@TgP63QQ|tI~E5$q7XN22qkAYec!tVmA#*XR)HTYV>y?Y)?Mtk+_S^ZU%$8}GX z=@{b*lPRj#*p!v6gDPze2v2d)KKztATB1sjOr1R_ z_2OHQ^=SgojOGQunM3P1uuK{9lK{i$q=O_y{c8_F3Yq0~Gx^hR@?A-k_Hh1&b8Cmy zx6zqp zV+q&<&tIV&;;6CMOl*+Nj34MJj5F-;CO~B--HiD43tSUm`?P-=MFNDZMM+Uq;CgPP zkn6}R@Cs$huSnNOhC<;0-DexjSz4Cv`Ub0kOkkNs_e;ui{Wuh4u^Yv 
z2ui{Y6wT@s7)D{Cw?M1GgIK3%U_nqWdSLFZn49q+FlSl*1%zY(OE3s`UdA;s0ai;oW(%q6o zb(VE#CuPo^JQuJB4N9sxTjgpgT}kmukm*XX{;rHSH4^*+?ob_ianb88N{;8$idHql z4D#|3=$E>zVY-W3UFde4&g|a+(oKgG&5_sE&zlIT-${^k-J4CXTE$Pd9IJ+H@~Ffr ztYhPv@n=3gNjFjdR`@~X8IZyym z>sd_Q?4~47xja`V7)l}m#XGQ<^CE_1I95bp53}e&pucRM?RgQ)?oqjwk zYPKMT8j}~4lS;ML4n;OcGtt`ivImCTfg3E1m>8nL)M(DzU_*w^!S|L2wsA2R_3|;e zrh+x&v-AdEnx9MekDIX4=uoUO*Uw3hkd=G7QI{`SOx3k#0Z43(oAbpi_`AR}+`F`Z zd?mJhBQH^UBc@yk5)7uOXB)3v-UFZQTZUTcEDD-oss`|BJs*N~w=%+gz0xiD=L@;q zRwjLSw`4rs5>Ua(n@<=%2^OcM`0BD)2^tNVm__+jv%zNlg6;IaQi2yN_ZQmL6saLn z8G}q;)Ut8q?eB~U*X}otar0`PUpDN#T@g<@OWs>>$Kd-N^!Q)EQ&c85ST8~JC2}M= zWkltj0dbI3gbFim4Pe$-rx_M+bC8M+F~IbaFHfIK*hi7BxbS2YWEqPhnb365u> zC!><30uTdnDXFK{cYu}>)3n3GB<)BV&l;ML1PHqpvepQU14Sdy-Y?XP>g0--lUFt# z1=l(9Sql3CfO@Brr~VV<9oGl=@wGuRE1XTT7M>%>LVfot66Tgi%RFU|5t;BNA2@_ zeiTUGAd1IC?{59S)mjeKV@0VFU1}%z6N@7j z5l_5%ZRuhQDccN51Ithv*Nb`$SbVecdDj;)Y)7?wnV(q(&Bixjz_ewV385F`&T8Of z_q}A}y_Gzth~k6d{Cf=w&2NFW>D~ahPgffNS5{<8sSVyGwLtiArx6`8_j7%h-A4xt zd{bTZGROi|@dC+0mm7d_CVeH^(Qg5zV1}bPEF3aR?P2klvk}19JRJmXH@-+d>)%H-xG%HSL{xK5GI{D$=UR`=J z<}VDd_B7Qyt@=|cR9i1*sMyfp!^z)cU}5qdc^{C~^{r6`C-<&pN_<#AN_LfaC5WJ{ zcS&D0yymz|cr$3f&rp4U1XZVYW-VrR3&fWpfnI9T(z~NY?oJRzX^FjOEKEfa$z7%> zq?T*cD}*yT$XIneqnTN$r z@bX^_c51Lb;FVM|XeBOnlL;jgecM0?_T*_#s%)TAtMm#LG|Tvr}-2(KS<6RC{7d@wJfoq>Iq zM)hAg%_|WzO{76llRz;uUU=dTuyXUA;mW?(1@n(5V;Q|T9bB}lkUz^7zslE**gFYL z{ehFt6(%z)#T=wyKbJ%kdu9hIko@S|1%G2W@`f}Q`)Me2OzI%Mg`b9U>Z3BdpA%7q?8XA%3pNc4r`X$yxEV zb85bx@UyR89#lqtRC_ zx5&)wpP$r z%XWSpJy85W{hg*`b|KL$Vt3ru)ibv;Yz3TUImNp~-;C1B2Tjuf)32lgbUt*yvjIM( zv$?;Fo-;!jCT;zd8-!e?;ajO;;wfCuD5gyu@PA0vl%G90JDye%@hfv_k4P2yFJEQL zd#0Xr6Qbb4#h%2t93n&5&U{kFq+Nm~>r%l=YuL!#WK zPY;xyAKVjO)~&dU0x~4jR>k3y1@2(jlD`gA#Mvn`Upr#SWpAj7w#iK>fkWsBq)Zk# zRW?WCc;i>XU>TBzO>AGA;8KXHX=Eeme(Zcp#D2<1a#7n(DD>KKt>Q^R zaXPYUH->=xo?{bckp&}#hm_q^W-V#Ofbl}V(Cj3DlzXOk-*tVf3nhljF}A|DPm?%6 zb3VXmXDd2r(z&Oa=t&HzRO|)sk4Cl;a{Q&^0dmk{6E&D@sKG}K3yW2&IUEKd$3*jG 
z{*Y?(x_kaCcu|u$zRbQxtc&VJmp|&IL{2$R_@wSr28Q6--awtv>5N@un}Hi1L#y~)Z- z{84!W3f)Esb7jq8xj(H~uV-ILw_hEK-tS2ml%=3W(9ZbK%*zGuyzw% zx|CRvE$)Z8hP(q}r5_|!pL6c&F%PC`(i85LvmX4tby$`<){6<0vLZ&gXzNHVZ&Xf~SL^u8sOHdo})6*qX{X~g@Rb%r$B zquEDB>&GQIE73m2JY`xc#+vdGzyT>afBQP{JpJd%k^#05fTyu|mQGF%YV6zMjsMHtkdFZgCC3Hu1hT#!3sP zlX%{M1-qoEJrl#7R+_{(He}QOx3|*uAuy=7BSbaUQ8?+uC3n_im4s73NG;MPAh=aV zsr!5fZ9@geCLM}Jn>G|r+q1urn&NgkY4kCkgSRk&Nj)-6mJ}r({(ADFv?Rj1Qi%V= z#?W3PY}yyhh)TAJTmea{WQz!$!kLBs#hrb!Oe&|_EO+32=p~I7Sqw@dyF6|0q@M8hmxVyd6gGOW|A*fgO zCjkQ-W<>L7srkC{ZDbazsy?xXWtl6Tn^-^s5r^=XB~c~PZoyai;ogn=Y?otN+g6?B zWT2F(AKl^h>@zuadxsdm1EXN(hxfSJYH8;e zz;C?|(!_t_gVJQO-YhxZ#DIbf^JItaMJEvG3=GNsV|Pda>WptsL5Ak0>x>89MY&QR zvxJw5)nrELLCY}g=4zkb23M+VcZJ$kRf>*B)7NH^5*~YC1&?h)!NDqJ#4h)y^7lSF zi0y|>Mb(4Bi+hVEtJNPzDGi5ePhs41$l@VJ6rX;uYD1>|^@C@*E|^CE22`*L6)9O( z?oddJ%D4`7)w*+JE5dme9)yRz6QUBv7seZjaaPl+=5^@LXrU2)SW!puqXN*k~< zxJ=^x0|xbva8VGz2sP|0OQ49+NDlq!uPRo^kf`w}9bO}}YYnnZfrnFS6OoqH0DR0Z z)13cHH?2TM5eIPv98c!~DoZUl=tf4~Ez+DZpFz9!hD8{t@ygq-u-`2F@8>dIx~3Pp z_~>w^QGN4mH_|yzkiIl29x4}JQ`?SIiDy8OIc$Rthg#B&sK;_kF(Hm(+xpjo$9{3} zf@XWNwLx*Vd_dj*OANsrRiQ-b!<{tKhioy&{7kMaBAL9ZYqlDm`$jG_&t~4py9Ya*Ge+y~Pv>HtJcFY8 zihplQ2s3yH>rf+Gfp*T`P}GI@BX&K=2Culc>vvXu$qsx^^i!;(p07mgthO5Vt3XwW z-S*i?CrwF}cE4bC?{sxVTM0iKA}878XK!_j}n zlK6Acn`k-ISyDh<>V?YujFd0=*QcORor3$IxiL^c^H>cZN#Ti~LF>Z*guR2b@FiEP zi%z<6o<4R@1mf4(89Y@fr2_iLU2H;6^ScvM6>{p5qy#U3#`+2b5+>I~Z1{Qcp68UB>zR!!-prXQw5y4( zgDKV1lMkGqQl!qMQYo**XhY34KJT4ex%fejcv zZtoQ=*~j`gYdvg;tQHVS^TBB^88ATLx!w{}b>CjXr6-Gk!0=mAp!C5o2jJHp%{A6Q z);jVS6UGe2k_eN$vMlN9BEzj$P+lss#h9gtB0|Slj-IbZAjy0vp+wvw`|l=$>$Db{ z-zX7JIZcLr+t_(b6|;jMi!T zolTrC91#&H^S!VZL5FS!4n!;Ov1TVn?=$9+44O#ga`2q`7k`0GT9BdFrCf(5QkIcBh1g_v`9#z=X}^u zEX3sA`gUnR{hP2ok2#U)b{&^yKRK|m35I1K2YzRc@F<7ISA)5&68BU@T`2SaIh0{Q zR+4P&P?>nXe-5Tu4ADqcQ8IXzze|bQdAZbeumC-v_asSQn; zcsa{!jf~)!@ApoXJ|eoTn#@sI%n_!eZYFJ?wwYK}&b4lJ3xGVH*ed!&CYPC&HO*{2 zx!&?sSwm+%<^c6=(?~CB>I7PY3&9n2}!JRmQS?&N4(`>5bCe#G>_!l-U zV=&BmtaM!@pCI6<7{y&03<(BUBPfg=}#=V1QjNh9r^D3ZQ~$ 
z-+d4(XSuVNC#N0n>`4rUY=>51W00YMceM5OJr_HgTTfSLEz{8iti&SNn>(EV$o9JW z!m(K@?NUx=&_SExn*qX~{m)clg3E3uI!yy-DPLdxN1DU*tpvKk!8glk*Hf;dQar=R z7on>L@+5L?e(v{&A8(jHcO7kAloZWS&3%0pW#C-Auw_GzDtBS?LYD*}^H6l(nT4DYJ9ve?9;u&7=<;D*FhwV`{oy33F!QH)y+h(Q_{^S__4O*`fbWV(A7 zWjUxL4a#hZr!puK*C~5l45>P9E67TlKz$7iUb0YsC|Yb zCnipl&)YZ90KN)Bxo#d<(iou(K{&J$GWS^<<=I+@__=Xt(A)>w+Yi}dKp8?N2KRwN z^gr}Vwq&VE4@VV%9@O>z>ERVLH36<>+mQ?%=aXkN*odx#Kq2}t^_A!}w#ln1zKPps zedEUYxuo@>=iu>96C9}yB zCbMidGlvUKENqLxrW2cg``-*3sZf3a$US)1zpgLentlC+dxuXcb#e0pH6eYbJF<(` z_(Kj?UXp$c-l`|EQjCV;n|z?PW2`l{DIOv3&U`!(tFpw!5fx^g(=@fBZq$tCk;a}L z?6uR0bKlS#MZXT{8{XUTd)j)acoP7%fg`4QNBm-F=w;VM(4%AF&^DR|Guo7@3417* z1~o&vueD=BWCKjHpR(`EZpGc4HGP_xU-J_pCsD>lqJCrA#QFIhAjjF}&De*Ukji|& zwq=!tF1#YjM2Dln$(9PGgQxvK<_3#XaRKjv3FymIvD{+O|B~>3*5T`aNVtD;A^}9v zPqTh15!MoN?6?KmUg5QY4u|}W6V^A$t!@H*8YtK*JFgf5r2_F4+dd-;oDv7lRl?)A zjlAgqldfV%)8HzC&Ph+uhd^-yLWe^l=?8a&12-Ume96LIJV&0ayQ zEn?2vlqGF3$U=z(-(fZ$tm|>X99kReTLQ8t)u5ltKiMhYq8cL7?Ik@{Sl;Br$a^m& zFD{ZgAv<5POi51>jj>s4sY#-jfDD-Va9FS>^It5_)V8d1)gZs&3zY4)KNF@3d2tn_ zbmXJ*`qQy!S2LEe(bB>G+Y1`&U>rh%dhH3#{MJds9$SwyNYE0n8;s&C=F3DAF85K` zyz~>{k4>dfFy$ks6~q}?sE$8PqQ%BnWg}@qS&AEVvnB5oYxlSxFU|)8D}Re8y3`ur zUlS`Jr|{LMxcP;BwzbI=?!2m`=czdR!Gm*bZEG<;C*@u4)R_SOdzt}!#;0QU?GrWx z(Lv@(8um}*&n6Nas;|`H<~RFk{-`kFbxRuUy$aZ&=aIgMt*6Kx-_=&v6KPfrN&YcP z(ONj_*Q`9$^OcNv!)XRPW5)@JGiz>U2@9tp%aOVi5tfG;a(IT#T5aPIfsmM*BGw{3 z&>-cf;$BH$4La$#zC^ue#8eHZR}=l#%%=4|27{Roc&molVxkR1#f3crno(y59e7uY z;wGqG6(iv~UZT5Ck`J5l8bJv_bG{!(#C5rQyMgXIyVxxf)|HK zoh#K!_$D}6sygpkNZEj5Yy29c$dn03ZPXxSymbD0d@bNhq$4rvQ7=E@=g*}lpY3Ta z6DUhSvmpHVX~4A(|18}-FcAbQq=Et>e7pA(_V9tnXWQ#I4j{VFdF+cVXmQ#RE2bhV z;BUI6f#|8(?6$dxAB!|kOk}CvDP9uw{7OPr79RE6sfkBIqxl$>HUYW|X%H*0n>Z+x z$JeHb2AYZcPy9l7m|1V1i+YKu=rfEwX#rzJfs6&|WJZ!{1m&it#Y%K{3HW3b^2BAu zDxJD3_N=RGd-#Bcw9<(A4TluQX}beZ&4r?ZQ&f~b&ajHscRaFabd#BM5-vR$G{Tnm zakJ|iprJoO7nSuf*Jz4G7Ajx?kstaIqc^RXq=3k!8`QM>WD~YZUs_(<4iGAK0Gcp1 z*FsNFE#=b)cy+7F1D$*S57pxj4-|)vDN*O@8{UaYm9oJZO+;)GzF-U_wmGNGM`)4x 
zVC{Cq2QVh6mp}>&c>KRZym}qc{$QmdbC7MmVj~b34u%DbOycuRMi~Sz{I$=rNQv$y zDo5s_sUl8K2jV6fnXewZ(7Yeui~8!5peW@sCh5WYXG(eA@g)mpJK1vin?qY($3{n& zdyv10S$%^5I7L8{v6v@p&bt7X@7D#zHePiI_Hp(dIRfD^cR}4cY~IRq>>^{lVl|E zUt+k`p7s9=R+ji-(kp16*(WHvE>e0Oi{E5VRphpC?mL3_t*M5M1eiCBJ%Nk%cvO@7 z-UgmF$$xoxz94TH_m)K- zW>Nq`uil4P5J{Ir2+cL1V}w&1@BzO7B4L<@9JnPDz|u@4bb1C`^-ydjrW>}Kc8trq-Suw90+K&UIaW>GLwj@XbzXWmCp zn;kGaR8DThc(5iNBB^tN$`mHytB#qUu)ktV(B}87t6C%zrvid%=;)B_+f#Bkhed0k zb@OPvh^2Yp!h@4+C9s2zGobnbcGKYp5?!OH805IoPKw>nwhg;x4NJJ0e~`qumk|irf*8I#Ao{g000001X)@i7;SF= From 13ab8f412df668f9163f2c7c1d3a41e788b9ce33 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Thu, 17 Jun 2021 14:17:28 +0430 Subject: [PATCH 0142/2828] Rename test_pr_quick_start_guide.rst to test_pr_locally_guide.rst (#2823) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5a068f9414..4dfde91fca 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -31,6 +31,6 @@ You can also read [our Quick-start development guide](https://github.com/ansible ## Test pull requests -If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_quick_start_guide.rst) for instructions on how do it quickly. +If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how do it quickly. If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it. 
From e9f3455b623cac390ae56c31fe0c623bb0178f7c Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Thu, 17 Jun 2021 12:48:39 +0300 Subject: [PATCH 0143/2828] Update README (#2802) * Update README * Update README.md Co-authored-by: Amin Vakil * Change * Fix * Update README.md Co-authored-by: Felix Fontein * Update README.md Co-authored-by: Felix Fontein * Fix * Fix * Fix * Fix * Fix * Fix * Fix * Update README.md Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein --- README.md | 47 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index a874a3e929..6f13fe150c 100644 --- a/README.md +++ b/README.md @@ -3,12 +3,18 @@ [![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general) -This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by Ansible community which are not part of more specialized community collections. +This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections. You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so. 
+## Code of Conduct + +We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project. + +If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint. + ## Tested with Ansible Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported. @@ -23,7 +29,9 @@ Please check the included content on the [Ansible Galaxy page for this collectio ## Using this collection -Before using the General community collection, you need to install the collection with the `ansible-galaxy` CLI: +This collection is shipped with the Ansible package. So if you have it installed, no more action is required. + +If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool: ansible-galaxy collection install community.general @@ -34,19 +42,29 @@ collections: - name: community.general ``` +Note that if you install the collection manually, it will not be upgraded automatically when you upgrade the Ansible package. 
To upgrade the collection to the latest available version, run the following command: + +```bash +ansible-galaxy collection install community.general --upgrade +``` + +You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general): + +```bash +ansible-galaxy collection install community.general:==X.Y.Z +``` + See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details. ## Contributing to this collection -If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there. +The content of this collection is made by good people like you, a community of individuals collaborating on making the world better through developing automation software. -For example, if you are working in the `~/dev` directory: +All types of contributions are very welcome. -``` -cd ~/dev -git clone git@github.com:ansible-collections/community.general.git collections/ansible_collections/community/general -export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH -``` +You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md)! + +The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals. 
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html). @@ -58,16 +76,15 @@ See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collectio ### Communication -We have a dedicated Working Group for Ansible development. +We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed. -You can find other people interested on the following [Libera.chat](https://libera.chat/) IRC channels - -- `#ansible` - For general use questions and support. -- `#ansible-devel` - For discussions on developer topics and code related to features or bugs in ansible-core. -- `#ansible-community` - For discussions on community topics and community meetings, and for general development questions for community collections. +Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat). + +We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us. For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community). -For more information about [communication](https://docs.ansible.com/ansible/latest/community/communication.html) +For more information about communication, refer to the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). 
### Publishing New Version From c9cf641188bad51cc214598e1816da880ee90d8b Mon Sep 17 00:00:00 2001 From: Anas Date: Thu, 17 Jun 2021 19:05:35 +0200 Subject: [PATCH 0144/2828] datadog_event : Adding api_host as an optional parameter (#2775) * 2774 Module datadog_event _ Adding api_host as an optional parameter * Update changelogs/fragments/2774-datadog_event_api_parameter.yml Co-authored-by: Felix Fontein * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Felix Fontein * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Felix Fontein * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Felix Fontein * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Felix Fontein * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Felix Fontein * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Amin Vakil * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Amin Vakil Co-authored-by: Anas Hamadeh Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- .../2774-datadog_event_api_parameter.yml | 2 ++ .../monitoring/datadog/datadog_event.py | 23 ++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2774-datadog_event_api_parameter.yml diff --git a/changelogs/fragments/2774-datadog_event_api_parameter.yml b/changelogs/fragments/2774-datadog_event_api_parameter.yml new file mode 100644 index 0000000000..6144b89400 --- /dev/null +++ b/changelogs/fragments/2774-datadog_event_api_parameter.yml @@ -0,0 +1,2 @@ +minor_changes: +- "datadog_event - adding parameter ``api_host`` to allow selecting a datadog API endpoint instead of using the default one 
(https://github.com/ansible-collections/community.general/issues/2774, https://github.com/ansible-collections/community.general/pull/2775)." diff --git a/plugins/modules/monitoring/datadog/datadog_event.py b/plugins/modules/monitoring/datadog/datadog_event.py index c3a3920aee..3f6500f11f 100644 --- a/plugins/modules/monitoring/datadog/datadog_event.py +++ b/plugins/modules/monitoring/datadog/datadog_event.py @@ -54,6 +54,11 @@ options: description: - Host name to associate with the event. - If not specified, it defaults to the remote system's hostname. + api_host: + type: str + description: + - DataDog API endpoint URL. + version_added: '3.3.0' tags: type: list elements: str @@ -90,6 +95,19 @@ EXAMPLES = ''' api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN tags: 'aa,bb,#host:{{ inventory_hostname }}' + +- name: Post an event with several tags to another endpoint + community.general.datadog_event: + title: Testing from ansible + text: Test + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN + api_host: 'https://example.datadoghq.eu' + tags: + - aa + - b + - '#host:{{ inventory_hostname }}' + ''' import platform @@ -113,6 +131,7 @@ def main(): argument_spec=dict( api_key=dict(required=True, no_log=True), app_key=dict(required=True, no_log=True), + api_host=dict(type='str'), title=dict(required=True), text=dict(required=True), date_happened=dict(type='int'), @@ -131,8 +150,10 @@ def main(): options = { 'api_key': module.params['api_key'], - 'app_key': module.params['app_key'] + 'app_key': module.params['app_key'], } + if module.params['api_host'] is not None: + options['api_host'] = module.params['api_host'] initialize(**options) From ee23c26150d1215ea315d689d9e4e5624fa6c8b9 Mon Sep 17 00:00:00 2001 From: TizeN85 Date: Thu, 17 Jun 2021 19:08:42 +0200 Subject: [PATCH 0145/2828] fix sudorule_add_allow_command_group (#2821) * fix sudorule_add_allow_command_group fix 
sudorule_add_allow_command_group is not working on freeIPA 4.8.7 at least, sudorule_add_allow_command should be used instead with item sudocmdgroup * Added changelog fragment --- changelogs/fragments/2821-ipa_sudorule.yml | 4 ++++ plugins/modules/identity/ipa/ipa_sudorule.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2821-ipa_sudorule.yml diff --git a/changelogs/fragments/2821-ipa_sudorule.yml b/changelogs/fragments/2821-ipa_sudorule.yml new file mode 100644 index 0000000000..5e1197da95 --- /dev/null +++ b/changelogs/fragments/2821-ipa_sudorule.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - "ipa_sudorule - call ``sudorule_add_allow_command`` method instead of ``sudorule_add_allow_command_group`` + (https://github.com/ansible-collections/community.general/issues/2442)." diff --git a/plugins/modules/identity/ipa/ipa_sudorule.py b/plugins/modules/identity/ipa/ipa_sudorule.py index 15abef8f17..4494122e8d 100644 --- a/plugins/modules/identity/ipa/ipa_sudorule.py +++ b/plugins/modules/identity/ipa/ipa_sudorule.py @@ -237,7 +237,7 @@ class SudoRuleIPAClient(IPAClient): return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item}) def sudorule_add_allow_command_group(self, name, item): - return self._post_json(method='sudorule_add_allow_command_group', name=name, item={'sudocmdgroup': item}) + return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmdgroup': item}) def sudorule_remove_allow_command(self, name, item): return self._post_json(method='sudorule_remove_allow_command', name=name, item=item) From 1ed4394c5ed4e3e9e31165b7979e5b38e16def05 Mon Sep 17 00:00:00 2001 From: Shahar Mor Date: Fri, 18 Jun 2021 23:08:46 +0300 Subject: [PATCH 0146/2828] npm - fix updating version specific modules (#2830) * npm - fix updating version specific modules if a version specific module is used, the comparison will be used with the version and not only by name * Update 
plugins/modules/packaging/language/npm.py Co-authored-by: Ajpantuso * Update changelogs/fragments/2830-npm-version-update.yml Co-authored-by: Ajpantuso * Update changelogs/fragments/2830-npm-version-update.yml Co-authored-by: Amin Vakil * Update changelogs/fragments/2830-npm-version-update.yml Co-authored-by: Amin Vakil Co-authored-by: Ajpantuso Co-authored-by: Amin Vakil --- .../fragments/2830-npm-version-update.yml | 4 + plugins/modules/packaging/language/npm.py | 13 ++- .../modules/packaging/language/test_npm.py | 103 +++++++++++++++++- 3 files changed, 114 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/2830-npm-version-update.yml diff --git a/changelogs/fragments/2830-npm-version-update.yml b/changelogs/fragments/2830-npm-version-update.yml new file mode 100644 index 0000000000..ab05258e2c --- /dev/null +++ b/changelogs/fragments/2830-npm-version-update.yml @@ -0,0 +1,4 @@ +bugfixes: + - "npm - when the ``version`` option is used the comparison of installed vs missing will + use name@version instead of just name, allowing version specific updates + (https://github.com/ansible-collections/community.general/issues/2021)." 
diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/packaging/language/npm.py index 62121297d7..5a48468970 100644 --- a/plugins/modules/packaging/language/npm.py +++ b/plugins/modules/packaging/language/npm.py @@ -181,7 +181,7 @@ class Npm(object): cmd.append('--ignore-scripts') if self.unsafe_perm: cmd.append('--unsafe-perm') - if self.name and add_package_name: + if self.name_version and add_package_name: cmd.append(self.name_version) if self.registry: cmd.append('--registry') @@ -215,14 +215,17 @@ class Npm(object): except (getattr(json, 'JSONDecodeError', ValueError)) as e: self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e)) if 'dependencies' in data: - for dep in data['dependencies']: - if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']: + for dep, props in data['dependencies'].items(): + dep_version = dep + '@' + str(props['version']) + + if 'missing' in props and props['missing']: missing.append(dep) - elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']: + elif 'invalid' in props and props['invalid']: missing.append(dep) else: installed.append(dep) - if self.name and self.name not in installed: + installed.append(dep_version) + if self.name_version and self.name_version not in installed: missing.append(self.name) # Named dependency not installed else: diff --git a/tests/unit/plugins/modules/packaging/language/test_npm.py b/tests/unit/plugins/modules/packaging/language/test_npm.py index 849bfac1a6..abdacc6aef 100644 --- a/tests/unit/plugins/modules/packaging/language/test_npm.py +++ b/tests/unit/plugins/modules/packaging/language/test_npm.py @@ -47,6 +47,66 @@ class NPMModuleTestCase(ModuleTestCase): result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 
'install', '--global', 'coffee-script'], check_rc=True, cwd=None), + ]) + + def test_present_version(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'present', + 'version': '2.5.1' + }) + self.module_main_command.side_effect = [ + (0, '{}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 'install', '--global', 'coffee-script@2.5.1'], check_rc=True, cwd=None), + ]) + + def test_present_version_update(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'present', + 'version': '2.5.1' + }) + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 'install', '--global', 'coffee-script@2.5.1'], check_rc=True, cwd=None), + ]) + + def test_present_version_exists(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'present', + 'version': '2.5.1' + }) + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertFalse(result['changed']) self.module_main_command.assert_has_calls([ call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), ]) @@ -58,7 +118,7 @@ class NPMModuleTestCase(ModuleTestCase): 'state': 'absent' }) self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {}}}', ''), + (0, '{"dependencies": {"coffee-script": {"version" 
: "2.5.1"}}}', ''), (0, '{}', ''), ] @@ -66,5 +126,46 @@ class NPMModuleTestCase(ModuleTestCase): self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None), + ]) + + def test_absent_version(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'absent', + 'version': '2.5.1' + }) + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None), + ]) + + def test_absent_version_different(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'absent', + 'version': '2.5.1' + }) + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None), ]) From 67cabcb2aa858b5c1195b605468374748c509895 Mon Sep 17 00:00:00 2001 From: omula Date: Sat, 19 Jun 2021 14:42:05 +0200 Subject: [PATCH 0147/2828] Nmcli add options (#2732) * [nmcli] add new network configuration options * [nmcli_add_options] add documentation for new parameters nad add disabled method for IPv6 * [nmcli] fix and version adding. 
Add changelog fragment * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update changelogs/fragments/2732-nmcli_add_options.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/2732-nmcli_add_options.yml Co-authored-by: Felix Fontein * [nmcli_add_options] fix testing * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update changelogs/fragments/2732-nmcli_add_options.yml Co-authored-by: Amin Vakil * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Amin Vakil * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Amin Vakil * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Amin Vakil Co-authored-by: Oriol MULA VALLS Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- .../fragments/2732-nmcli_add_options.yml | 3 +++ plugins/modules/net_tools/nmcli.py | 23 +++++++++++++++++-- .../plugins/modules/net_tools/test_nmcli.py | 8 +++++++ 3 files changed, 32 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2732-nmcli_add_options.yml diff --git a/changelogs/fragments/2732-nmcli_add_options.yml b/changelogs/fragments/2732-nmcli_add_options.yml new file mode 100644 index 0000000000..58ed2d2ee4 --- /dev/null +++ b/changelogs/fragments/2732-nmcli_add_options.yml @@ -0,0 +1,3 @@ +minor_changes: + - nmcli - add ``routing_rules4`` and ``may_fail4`` options (https://github.com/ansible-collections/community.general/issues/2730). + - nmcli - add ``disabled`` value to ``method6`` option (https://github.com/ansible-collections/community.general/issues/2730). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 399d15267a..30f0537e70 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -95,6 +95,11 @@ options: - Set metric level of ipv4 routes configured on interface. 
type: int version_added: 2.0.0 + routing_rules4: + description: + - Is the same as in an C(ip route add) command, except always requires specifying a priority. + type: str + version_added: 3.3.0 never_default4: description: - Set as default route. @@ -126,6 +131,12 @@ options: type: str choices: [auto, link-local, manual, shared, disabled] version_added: 2.2.0 + may_fail4: + description: + - If you need I(ip4) configured before C(network-online.target) is reached, set this option to C(false). + type: bool + default: true + version_added: 3.3.0 ip6: description: - The IPv6 address to this interface. @@ -164,8 +175,9 @@ options: description: - Configuration method to be used for IPv6 - If I(ip6) is set, C(ipv6.method) is automatically set to C(manual) and this parameter is not needed. + - C(disabled) was added in community.general 3.3.0. type: str - choices: [ignore, auto, dhcp, link-local, manual, shared] + choices: [ignore, auto, dhcp, link-local, manual, shared, disabled] version_added: 2.2.0 mtu: description: @@ -675,11 +687,13 @@ class Nmcli(object): self.gw4_ignore_auto = module.params['gw4_ignore_auto'] self.routes4 = module.params['routes4'] self.route_metric4 = module.params['route_metric4'] + self.routing_rules4 = module.params['routing_rules4'] self.never_default4 = module.params['never_default4'] self.dns4 = module.params['dns4'] self.dns4_search = module.params['dns4_search'] self.dns4_ignore_auto = module.params['dns4_ignore_auto'] self.method4 = module.params['method4'] + self.may_fail4 = module.params['may_fail4'] self.ip6 = module.params['ip6'] self.gw6 = module.params['gw6'] self.gw6_ignore_auto = module.params['gw6_ignore_auto'] @@ -762,8 +776,10 @@ class Nmcli(object): 'ipv4.ignore-auto-routes': self.gw4_ignore_auto, 'ipv4.routes': self.routes4, 'ipv4.route-metric': self.route_metric4, + 'ipv4.routing-rules': self.routing_rules4, 'ipv4.never-default': self.never_default4, 'ipv4.method': self.ipv4_method, + 'ipv4.may-fail': self.may_fail4, 
'ipv6.addresses': self.ip6, 'ipv6.dns': self.dns6, 'ipv6.dns-search': self.dns6_search, @@ -935,6 +951,7 @@ class Nmcli(object): 'ipv4.never-default', 'ipv4.ignore-auto-dns', 'ipv4.ignore-auto-routes', + 'ipv4.may-fail', 'ipv6.ignore-auto-dns', 'ipv6.ignore-auto-routes'): return bool @@ -1155,11 +1172,13 @@ def main(): gw4_ignore_auto=dict(type='bool', default=False), routes4=dict(type='list', elements='str'), route_metric4=dict(type='int'), + routing_rules4=dict(type='str'), never_default4=dict(type='bool', default=False), dns4=dict(type='list', elements='str'), dns4_search=dict(type='list', elements='str'), dns4_ignore_auto=dict(type='bool', default=False), method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), + may_fail4=dict(type='bool', default=True), dhcp_client_id=dict(type='str'), ip6=dict(type='str'), gw6=dict(type='str'), @@ -1167,7 +1186,7 @@ def main(): dns6=dict(type='list', elements='str'), dns6_search=dict(type='list', elements='str'), dns6_ignore_auto=dict(type='bool', default=False), - method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared']), + method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']), # Bond Specific vars mode=dict(type='str', default='balance-rr', choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 5b3f96937b..8724bd4f60 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -98,6 +98,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -128,6 +129,7 @@ ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no 
ipv4.never-default: no ipv4.dns-search: search.redhat.com +ipv4.may-fail: yes ipv6.dns-search: search6.redhat.com ipv6.method: auto ipv6.ignore-auto-dns: no @@ -158,6 +160,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -187,6 +190,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -218,6 +222,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -275,6 +280,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -370,6 +376,7 @@ ipv4.dhcp-client-id: 00:11:22:AA:BB:CC:DD ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -399,6 +406,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv4.dns: 1.1.1.1,8.8.8.8 ipv6.method: auto ipv6.ignore-auto-dns: no From 08f7ad06bea1fc71c675550db0f0dc46b1f45224 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 19 Jun 2021 15:06:58 +0200 Subject: [PATCH 0148/2828] Remove inventory and vault scripts (#2696) * Remove inventory and vault scripts. * Remove foreman inventory script tests. 
--- changelogs/fragments/remove-scripts.yml | 2 + scripts/inventory/__init__.py | 0 scripts/inventory/abiquo.ini | 48 - scripts/inventory/abiquo.py | 224 ---- scripts/inventory/apache-libcloud.py | 336 ------ scripts/inventory/apstra_aos.ini | 20 - scripts/inventory/apstra_aos.py | 580 ----------- scripts/inventory/azure_rm.ini | 23 - scripts/inventory/azure_rm.py | 962 ------------------ scripts/inventory/brook.ini | 39 - scripts/inventory/brook.py | 248 ----- scripts/inventory/cloudforms.ini | 40 - scripts/inventory/cloudforms.py | 499 --------- scripts/inventory/cobbler.ini | 24 - scripts/inventory/cobbler.py | 305 ------ scripts/inventory/collins.ini | 57 -- scripts/inventory/collins.py | 429 -------- scripts/inventory/consul_io.ini | 54 - scripts/inventory/consul_io.py | 553 ---------- scripts/inventory/docker.py | 892 ---------------- scripts/inventory/docker.yml | 74 -- scripts/inventory/fleet.py | 99 -- scripts/inventory/foreman.ini | 200 ---- scripts/inventory/foreman.py | 651 ------------ scripts/inventory/freeipa.py | 126 --- scripts/inventory/infoblox.py | 129 --- scripts/inventory/infoblox.yaml | 24 - scripts/inventory/jail.py | 27 - scripts/inventory/landscape.py | 117 --- scripts/inventory/libcloud.ini | 15 - scripts/inventory/linode.ini | 18 - scripts/inventory/linode.py | 338 ------ scripts/inventory/lxc_inventory.py | 60 -- scripts/inventory/lxd.ini | 13 - scripts/inventory/lxd.py | 93 -- scripts/inventory/mdt.ini | 17 - scripts/inventory/mdt_dynamic_inventory.py | 122 --- scripts/inventory/nagios_livestatus.ini | 41 - scripts/inventory/nagios_livestatus.py | 163 --- scripts/inventory/nagios_ndo.ini | 10 - scripts/inventory/nagios_ndo.py | 95 -- scripts/inventory/nsot.py | 346 ------- scripts/inventory/nsot.yaml | 22 - scripts/inventory/openshift.py | 89 -- scripts/inventory/openvz.py | 74 -- scripts/inventory/ovirt.ini | 35 - scripts/inventory/ovirt.py | 279 ----- scripts/inventory/ovirt4.py | 258 ----- scripts/inventory/packet_net.ini | 53 - 
scripts/inventory/packet_net.py | 496 --------- scripts/inventory/proxmox.py | 240 ----- scripts/inventory/rackhd.py | 86 -- scripts/inventory/rax.ini | 66 -- scripts/inventory/rax.py | 460 --------- scripts/inventory/rhv.py | 1 - scripts/inventory/rudder.ini | 35 - scripts/inventory/rudder.py | 286 ------ scripts/inventory/scaleway.ini | 37 - scripts/inventory/scaleway.py | 220 ---- scripts/inventory/serf.py | 101 -- scripts/inventory/softlayer.py | 196 ---- scripts/inventory/spacewalk.ini | 16 - scripts/inventory/spacewalk.py | 226 ---- scripts/inventory/ssh_config.py | 121 --- scripts/inventory/stacki.py | 180 ---- scripts/inventory/stacki.yml | 7 - scripts/inventory/vagrant.py | 123 --- scripts/inventory/vbox.py | 107 -- scripts/inventory/zone.py | 33 - scripts/vault/__init__.py | 0 scripts/vault/azure_vault.ini | 10 - scripts/vault/azure_vault.py | 595 ----------- scripts/vault/vault-keyring-client.py | 134 --- scripts/vault/vault-keyring.py | 87 -- .../targets/script_inventory_foreman/aliases | 3 - .../script_inventory_foreman/foreman.sh | 10 - .../targets/script_inventory_foreman/runme.sh | 50 - .../test_foreman_inventory.yml | 7 - 78 files changed, 2 insertions(+), 12854 deletions(-) create mode 100644 changelogs/fragments/remove-scripts.yml delete mode 100644 scripts/inventory/__init__.py delete mode 100644 scripts/inventory/abiquo.ini delete mode 100755 scripts/inventory/abiquo.py delete mode 100755 scripts/inventory/apache-libcloud.py delete mode 100644 scripts/inventory/apstra_aos.ini delete mode 100755 scripts/inventory/apstra_aos.py delete mode 100644 scripts/inventory/azure_rm.ini delete mode 100755 scripts/inventory/azure_rm.py delete mode 100644 scripts/inventory/brook.ini delete mode 100755 scripts/inventory/brook.py delete mode 100644 scripts/inventory/cloudforms.ini delete mode 100755 scripts/inventory/cloudforms.py delete mode 100644 scripts/inventory/cobbler.ini delete mode 100755 scripts/inventory/cobbler.py delete mode 100644 
scripts/inventory/collins.ini delete mode 100755 scripts/inventory/collins.py delete mode 100644 scripts/inventory/consul_io.ini delete mode 100755 scripts/inventory/consul_io.py delete mode 100755 scripts/inventory/docker.py delete mode 100644 scripts/inventory/docker.yml delete mode 100755 scripts/inventory/fleet.py delete mode 100644 scripts/inventory/foreman.ini delete mode 100755 scripts/inventory/foreman.py delete mode 100755 scripts/inventory/freeipa.py delete mode 100755 scripts/inventory/infoblox.py delete mode 100644 scripts/inventory/infoblox.yaml delete mode 100755 scripts/inventory/jail.py delete mode 100755 scripts/inventory/landscape.py delete mode 100644 scripts/inventory/libcloud.ini delete mode 100644 scripts/inventory/linode.ini delete mode 100755 scripts/inventory/linode.py delete mode 100755 scripts/inventory/lxc_inventory.py delete mode 100644 scripts/inventory/lxd.ini delete mode 100755 scripts/inventory/lxd.py delete mode 100644 scripts/inventory/mdt.ini delete mode 100755 scripts/inventory/mdt_dynamic_inventory.py delete mode 100644 scripts/inventory/nagios_livestatus.ini delete mode 100755 scripts/inventory/nagios_livestatus.py delete mode 100644 scripts/inventory/nagios_ndo.ini delete mode 100755 scripts/inventory/nagios_ndo.py delete mode 100755 scripts/inventory/nsot.py delete mode 100644 scripts/inventory/nsot.yaml delete mode 100755 scripts/inventory/openshift.py delete mode 100755 scripts/inventory/openvz.py delete mode 100644 scripts/inventory/ovirt.ini delete mode 100755 scripts/inventory/ovirt.py delete mode 100755 scripts/inventory/ovirt4.py delete mode 100644 scripts/inventory/packet_net.ini delete mode 100755 scripts/inventory/packet_net.py delete mode 100755 scripts/inventory/proxmox.py delete mode 100755 scripts/inventory/rackhd.py delete mode 100644 scripts/inventory/rax.ini delete mode 100755 scripts/inventory/rax.py delete mode 120000 scripts/inventory/rhv.py delete mode 100644 scripts/inventory/rudder.ini delete mode 
100755 scripts/inventory/rudder.py delete mode 100644 scripts/inventory/scaleway.ini delete mode 100755 scripts/inventory/scaleway.py delete mode 100755 scripts/inventory/serf.py delete mode 100755 scripts/inventory/softlayer.py delete mode 100644 scripts/inventory/spacewalk.ini delete mode 100755 scripts/inventory/spacewalk.py delete mode 100755 scripts/inventory/ssh_config.py delete mode 100755 scripts/inventory/stacki.py delete mode 100644 scripts/inventory/stacki.yml delete mode 100755 scripts/inventory/vagrant.py delete mode 100755 scripts/inventory/vbox.py delete mode 100755 scripts/inventory/zone.py delete mode 100644 scripts/vault/__init__.py delete mode 100644 scripts/vault/azure_vault.ini delete mode 100755 scripts/vault/azure_vault.py delete mode 100755 scripts/vault/vault-keyring-client.py delete mode 100755 scripts/vault/vault-keyring.py delete mode 100644 tests/integration/targets/script_inventory_foreman/aliases delete mode 100755 tests/integration/targets/script_inventory_foreman/foreman.sh delete mode 100755 tests/integration/targets/script_inventory_foreman/runme.sh delete mode 100644 tests/integration/targets/script_inventory_foreman/test_foreman_inventory.yml diff --git a/changelogs/fragments/remove-scripts.yml b/changelogs/fragments/remove-scripts.yml new file mode 100644 index 0000000000..72cee7dee5 --- /dev/null +++ b/changelogs/fragments/remove-scripts.yml @@ -0,0 +1,2 @@ +removed_features: +- "All inventory and vault scripts contained in community.general were moved to the `contrib-scripts GitHub repository `_ (https://github.com/ansible-collections/community.general/pull/2696)." 
diff --git a/scripts/inventory/__init__.py b/scripts/inventory/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/scripts/inventory/abiquo.ini b/scripts/inventory/abiquo.ini deleted file mode 100644 index 991a2ed803..0000000000 --- a/scripts/inventory/abiquo.ini +++ /dev/null @@ -1,48 +0,0 @@ -# Ansible external inventory script settings for Abiquo -# - -# Define an Abiquo user with access to Abiquo API which will be used to -# perform required queries to obtain information to generate the Ansible -# inventory output. -# -[auth] -apiuser = admin -apipass = xabiquo - - -# Specify Abiquo API version in major.minor format and the access URI to -# API endpoint. Tested versions are: 2.6 , 3.0 and 3.1 -# To confirm that your box haves access to Abiquo API you can perform a -# curl command, replacing with suitable values, similar to this: -# curl -X GET https://192.168.2.100/api/login -u admin:xabiquo -# -[api] -version = 3.0 -uri = https://192.168.2.100/api -# You probably won't need to modify login preferences, but just in case -login_path = /login -login_type = application/vnd.abiquo.user+json - - -# To avoid performing excessive calls to Abiquo API you can define a -# cache for the plugin output. Within the time defined in seconds, latest -# output will be reused. After that time, the cache will be refreshed. -# -[cache] -cache_max_age = 30 -cache_dir = /tmp - - -[defaults] -# Depending in your Abiquo environment, you may want to use only public IP -# addresses (if using public cloud providers) or also private IP addresses. -# You can set this with public_ip_only configuration. -public_ip_only = false -# default_net_interface only is used if public_ip_only = false -# If public_ip_only is set to false, you can choose default nic to obtain -# IP address to define the host. -default_net_interface = nic0 -# Only deployed VM are displayed in the plugin output. -deployed_only = true -# Define if VM metadata is obtained from Abiquo API. 
-get_metadata = false diff --git a/scripts/inventory/abiquo.py b/scripts/inventory/abiquo.py deleted file mode 100755 index 7602a1d2cb..0000000000 --- a/scripts/inventory/abiquo.py +++ /dev/null @@ -1,224 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -''' -External inventory script for Abiquo -==================================== - -Shamelessly copied from an existing inventory script. - -This script generates an inventory that Ansible can understand by making API requests to Abiquo API -Requires some python libraries, ensure to have them installed when using this script. - -This script has been tested in Abiquo 3.0 but it may work also for Abiquo 2.6. - -Before using this script you may want to modify abiquo.ini config file. - -This script generates an Ansible hosts file with these host groups: - -ABQ_xxx: Defines a hosts itself by Abiquo VM name label -all: Contains all hosts defined in Abiquo user's enterprise -virtualdatecenter: Creates a host group for each virtualdatacenter containing all hosts defined on it -virtualappliance: Creates a host group for each virtualappliance containing all hosts defined on it -imagetemplate: Creates a host group for each image template containing all hosts using it - -''' - -# (c) 2014, Daniel Beneyto -# -# This file is part of Ansible, -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -import sys -import time - -import json - -from ansible.module_utils.six.moves import configparser as ConfigParser -from ansible.module_utils.urls import open_url - - -def api_get(link, config): - try: - if link is None: - url = config.get('api', 'uri') + config.get('api', 'login_path') - headers = {"Accept": config.get('api', 'login_type')} - else: - url = link['href'] + '?limit=0' - headers = {"Accept": link['type']} - result = open_url(url, headers=headers, 
url_username=config.get('auth', 'apiuser').replace('\n', ''), - url_password=config.get('auth', 'apipass').replace('\n', '')) - return json.loads(result.read()) - except Exception: - return None - - -def save_cache(data, config): - ''' saves item to cache ''' - dpath = config.get('cache', 'cache_dir') - try: - cache = open('/'.join([dpath, 'inventory']), 'w') - cache.write(json.dumps(data)) - cache.close() - except IOError as e: - pass # not really sure what to do here - - -def get_cache(cache_item, config): - ''' returns cached item ''' - dpath = config.get('cache', 'cache_dir') - inv = {} - try: - cache = open('/'.join([dpath, 'inventory']), 'r') - inv = cache.read() - cache.close() - except IOError as e: - pass # not really sure what to do here - - return inv - - -def cache_available(config): - ''' checks if we have a 'fresh' cache available for item requested ''' - - if config.has_option('cache', 'cache_dir'): - dpath = config.get('cache', 'cache_dir') - - try: - existing = os.stat('/'.join([dpath, 'inventory'])) - except Exception: - # cache doesn't exist or isn't accessible - return False - - if config.has_option('cache', 'cache_max_age'): - maxage = config.get('cache', 'cache_max_age') - if (int(time.time()) - int(existing.st_mtime)) <= int(maxage): - return True - - return False - - -def generate_inv_from_api(enterprise_entity, config): - try: - inventory['all'] = {} - inventory['all']['children'] = [] - inventory['all']['hosts'] = [] - inventory['_meta'] = {} - inventory['_meta']['hostvars'] = {} - - enterprise = api_get(enterprise_entity, config) - vms_entity = next(link for link in enterprise['links'] if link['rel'] == 'virtualmachines') - vms = api_get(vms_entity, config) - for vmcollection in vms['collection']: - for link in vmcollection['links']: - if link['rel'] == 'virtualappliance': - vm_vapp = link['title'].replace('[', '').replace(']', '').replace(' ', '_') - elif link['rel'] == 'virtualdatacenter': - vm_vdc = link['title'].replace('[', 
'').replace(']', '').replace(' ', '_') - elif link['rel'] == 'virtualmachinetemplate': - vm_template = link['title'].replace('[', '').replace(']', '').replace(' ', '_') - - # From abiquo.ini: Only adding to inventory VMs with public IP - if config.getboolean('defaults', 'public_ip_only') is True: - for link in vmcollection['links']: - if link['type'] == 'application/vnd.abiquo.publicip+json' and link['rel'] == 'ip': - vm_nic = link['title'] - break - else: - vm_nic = None - # Otherwise, assigning defined network interface IP address - else: - for link in vmcollection['links']: - if link['rel'] == config.get('defaults', 'default_net_interface'): - vm_nic = link['title'] - break - else: - vm_nic = None - - vm_state = True - # From abiquo.ini: Only adding to inventory VMs deployed - if config.getboolean('defaults', 'deployed_only') is True and vmcollection['state'] == 'NOT_ALLOCATED': - vm_state = False - - if vm_nic is not None and vm_state: - if vm_vapp not in inventory: - inventory[vm_vapp] = {} - inventory[vm_vapp]['children'] = [] - inventory[vm_vapp]['hosts'] = [] - if vm_vdc not in inventory: - inventory[vm_vdc] = {} - inventory[vm_vdc]['hosts'] = [] - inventory[vm_vdc]['children'] = [] - if vm_template not in inventory: - inventory[vm_template] = {} - inventory[vm_template]['children'] = [] - inventory[vm_template]['hosts'] = [] - if config.getboolean('defaults', 'get_metadata') is True: - meta_entity = next(link for link in vmcollection['links'] if link['rel'] == 'metadata') - try: - metadata = api_get(meta_entity, config) - if (config.getfloat("api", "version") >= 3.0): - vm_metadata = metadata['metadata'] - else: - vm_metadata = metadata['metadata']['metadata'] - inventory['_meta']['hostvars'][vm_nic] = vm_metadata - except Exception as e: - pass - - inventory[vm_vapp]['children'].append(vmcollection['name']) - inventory[vm_vdc]['children'].append(vmcollection['name']) - inventory[vm_template]['children'].append(vmcollection['name']) - 
inventory['all']['children'].append(vmcollection['name']) - inventory[vmcollection['name']] = [] - inventory[vmcollection['name']].append(vm_nic) - - return inventory - except Exception as e: - # Return empty hosts output - return {'all': {'hosts': []}, '_meta': {'hostvars': {}}} - - -def get_inventory(enterprise, config): - ''' Reads the inventory from cache or Abiquo api ''' - - if cache_available(config): - inv = get_cache('inventory', config) - else: - default_group = os.path.basename(sys.argv[0]).rstrip('.py') - # MAKE ABIQUO API CALLS # - inv = generate_inv_from_api(enterprise, config) - - save_cache(inv, config) - return json.dumps(inv) - - -if __name__ == '__main__': - inventory = {} - enterprise = {} - - # Read config - config = ConfigParser.SafeConfigParser() - for configfilename in [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'abiquo.ini']: - if os.path.exists(configfilename): - config.read(configfilename) - break - - try: - login = api_get(None, config) - enterprise = next(link for link in login['links'] if link['rel'] == 'enterprise') - except Exception as e: - enterprise = None - - if cache_available(config): - inventory = get_cache('inventory', config) - else: - inventory = get_inventory(enterprise, config) - - # return to ansible - sys.stdout.write(str(inventory)) - sys.stdout.flush() diff --git a/scripts/inventory/apache-libcloud.py b/scripts/inventory/apache-libcloud.py deleted file mode 100755 index b05752352f..0000000000 --- a/scripts/inventory/apache-libcloud.py +++ /dev/null @@ -1,336 +0,0 @@ -#!/usr/bin/env python - -# (c) 2013, Sebastien Goasguen -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -''' -Apache Libcloud generic external inventory script -================================= - -Generates inventory that Ansible 
can understand by making API request to -Cloud providers using the Apache libcloud library. - -This script also assumes there is a libcloud.ini file alongside it - -''' - -import sys -import os -import argparse -import re -from time import time - -from ansible.module_utils.six import iteritems, string_types -from ansible.module_utils.six.moves import configparser as ConfigParser -from libcloud.compute.types import Provider -from libcloud.compute.providers import get_driver -import libcloud.security as sec - -import json - - -class LibcloudInventory(object): - def __init__(self): - ''' Main execution path ''' - - # Inventory grouped by instance IDs, tags, security groups, regions, - # and availability zones - self.inventory = {} - - # Index of hostname (address) to instance ID - self.index = {} - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - # Cache - if self.args.refresh_cache: - self.do_api_calls_update_cache() - elif not self.is_cache_valid(): - self.do_api_calls_update_cache() - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of instances for inventory - if len(self.inventory) == 0: - data_to_print = self.get_inventory_from_cache() - else: - data_to_print = self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_index): - return True - - return False - - def read_settings(self): - ''' Reads the settings from the libcloud.ini file ''' - - config = ConfigParser.SafeConfigParser() - libcloud_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libcloud.ini') - libcloud_ini_path = 
os.environ.get('LIBCLOUD_INI_PATH', libcloud_default_ini_path) - config.read(libcloud_ini_path) - - if not config.has_section('driver'): - raise ValueError('libcloud.ini file must contain a [driver] section') - - if config.has_option('driver', 'provider'): - self.provider = config.get('driver', 'provider') - else: - raise ValueError('libcloud.ini does not have a provider defined') - - if config.has_option('driver', 'key'): - self.key = config.get('driver', 'key') - else: - raise ValueError('libcloud.ini does not have a key defined') - - if config.has_option('driver', 'secret'): - self.secret = config.get('driver', 'secret') - else: - raise ValueError('libcloud.ini does not have a secret defined') - - if config.has_option('driver', 'host'): - self.host = config.get('driver', 'host') - if config.has_option('driver', 'secure'): - self.secure = config.get('driver', 'secure') - if config.has_option('driver', 'verify_ssl_cert'): - self.verify_ssl_cert = config.get('driver', 'verify_ssl_cert') - if config.has_option('driver', 'port'): - self.port = config.get('driver', 'port') - if config.has_option('driver', 'path'): - self.path = config.get('driver', 'path') - if config.has_option('driver', 'api_version'): - self.api_version = config.get('driver', 'api_version') - - Driver = get_driver(getattr(Provider, self.provider)) - - self.conn = Driver(key=self.key, secret=self.secret, secure=self.secure, - host=self.host, path=self.path) - - # Cache related - cache_path = config.get('cache', 'cache_path') - self.cache_path_cache = cache_path + "/ansible-libcloud.cache" - self.cache_path_index = cache_path + "/ansible-libcloud.index" - self.cache_max_age = config.getint('cache', 'cache_max_age') - - def parse_cli_args(self): - ''' - Command line argument processing - ''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers') - parser.add_argument('--list', action='store_true', default=True, - help='List instances 
(default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)') - self.args = parser.parse_args() - - def do_api_calls_update_cache(self): - ''' - Do API calls to a location, and save data in cache files - ''' - - self.get_nodes() - - self.write_to_cache(self.inventory, self.cache_path_cache) - self.write_to_cache(self.index, self.cache_path_index) - - def get_nodes(self): - ''' - Gets the list of all nodes - ''' - - for node in self.conn.list_nodes(): - self.add_node(node) - - def get_node(self, node_id): - ''' - Gets details about a specific node - ''' - - return [node for node in self.conn.list_nodes() if node.id == node_id][0] - - def add_node(self, node): - ''' - Adds a node to the inventory and index, as long as it is - addressable - ''' - - # Only want running instances - if node.state != 0: - return - - # Select the best destination address - if not node.public_ips == []: - dest = node.public_ips[0] - if not dest: - # Skip instances we cannot address (e.g. 
private VPC subnet) - return - - # Add to index - self.index[dest] = node.name - - # Inventory: Group by instance ID (always a group of 1) - self.inventory[node.name] = [dest] - ''' - # Inventory: Group by region - self.push(self.inventory, region, dest) - - # Inventory: Group by availability zone - self.push(self.inventory, node.placement, dest) - - # Inventory: Group by instance type - self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest) - ''' - # Inventory: Group by key pair - if node.extra['key_name']: - self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest) - - # Inventory: Group by security group, quick thing to handle single sg - if node.extra['security_group']: - self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest) - - # Inventory: Group by tag - if node.extra['tags']: - for tagkey in node.extra['tags'].keys(): - self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest) - - def get_host_info(self): - ''' - Get variables about a specific host - ''' - - if len(self.index) == 0: - # Need to load index from cache - self.load_index_from_cache() - - if self.args.host not in self.index: - # try updating the cache - self.do_api_calls_update_cache() - if self.args.host not in self.index: - # host migh not exist anymore - return self.json_format_dict({}, True) - - node_id = self.index[self.args.host] - - node = self.get_node(node_id) - instance_vars = {} - for key, value in vars(node).items(): - key = self.to_safe('ec2_' + key) - - # Handle complex types - if isinstance(value, (int, bool)): - instance_vars[key] = value - elif isinstance(value, string_types): - instance_vars[key] = value.strip() - elif value is None: - instance_vars[key] = '' - elif key == 'ec2_region': - instance_vars[key] = value.name - elif key == 'ec2_tags': - for k, v in iteritems(value): - key = self.to_safe('ec2_tag_' + k) - instance_vars[key] = v - elif key == 'ec2_groups': 
- group_ids = [] - group_names = [] - for group in value: - group_ids.append(group.id) - group_names.append(group.name) - instance_vars["ec2_security_group_ids"] = ','.join(group_ids) - instance_vars["ec2_security_group_names"] = ','.join(group_names) - else: - pass - # TODO Product codes if someone finds them useful - # print(key) - # print(type(value)) - # print(value) - - return self.json_format_dict(instance_vars, True) - - def push(self, my_dict, key, element): - ''' - Pushed an element onto an array that may not have been defined in - the dict - ''' - - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def get_inventory_from_cache(self): - ''' - Reads the inventory from the cache file and returns it as a JSON - object - ''' - - cache = open(self.cache_path_cache, 'r') - json_inventory = cache.read() - return json_inventory - - def load_index_from_cache(self): - ''' - Reads the index from the cache file sets self.index - ''' - - cache = open(self.cache_path_index, 'r') - json_index = cache.read() - self.index = json.loads(json_index) - - def write_to_cache(self, data, filename): - ''' - Writes data in JSON format to a file - ''' - - json_data = self.json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def to_safe(self, word): - ''' - Converts 'bad' characters in a string to underscores so they can be - used as Ansible groups - ''' - - return re.sub(r"[^A-Za-z0-9\-]", "_", word) - - def json_format_dict(self, data, pretty=False): - ''' - Converts a dict to a JSON object and dumps it as a formatted - string - ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -def main(): - LibcloudInventory() - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/apstra_aos.ini b/scripts/inventory/apstra_aos.ini deleted file mode 100644 index 1ec1255c9c..0000000000 --- a/scripts/inventory/apstra_aos.ini +++ 
/dev/null @@ -1,20 +0,0 @@ -# Ansible Apstra AOS external inventory script settings -# Dynamic Inventory script parameter can be provided using this file -# Or by using Environment Variables: -# - AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT -# -# This file takes precedence over the Environment Variables -# - -[aos] - -# aos_server = 172.20.62.3 -# port = 8888 -# username = admin -# password = admin - -## Blueprint Mode -# to use the inventory in mode Blueprint, you need to define the blueprint name you want to use - -# blueprint = my-blueprint-l2 -# blueprint_interface = true diff --git a/scripts/inventory/apstra_aos.py b/scripts/inventory/apstra_aos.py deleted file mode 100755 index ce2eb3def7..0000000000 --- a/scripts/inventory/apstra_aos.py +++ /dev/null @@ -1,580 +0,0 @@ -#!/usr/bin/env python -# -# (c) 2017 Apstra Inc, -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -Apstra AOS external inventory script -==================================== - -Ansible has a feature where instead of reading from /etc/ansible/hosts -as a text file, it can query external programs to obtain the list -of hosts, groups the hosts are in, and even variables to assign to each host. - -To use this: - - copy this file over /etc/ansible/hosts and chmod +x the file. 
- - Copy both files (.py and .ini) in your preferred directory - -More information about Ansible Dynamic Inventory here -http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname - -2 modes are currently, supported: **device based** or **blueprint based**: - - For **Device based**, the list of device is taken from the global device list - the serial ID will be used as the inventory_hostname - - For **Blueprint based**, the list of device is taken from the given blueprint - the Node name will be used as the inventory_hostname - -Input parameters parameter can be provided using either with the ini file or by using Environment Variables: -The following list of Environment Variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT -The config file takes precedence over the Environment Variables - -Tested with Apstra AOS 1.1 - -This script has been inspired by the cobbler.py inventory. thanks - -Author: Damien Garros (@dgarros) -Version: 0.2.0 -""" -import json -import os -import re -import sys - -try: - import argparse - HAS_ARGPARSE = True -except ImportError: - HAS_ARGPARSE = False - -try: - from apstra.aosom.session import Session - HAS_AOS_PYEZ = True -except ImportError: - HAS_AOS_PYEZ = False - -from ansible.module_utils.six.moves import configparser - - -""" -## -Expected output format in Device mode -{ - "Cumulus": { - "hosts": [ - "52540073956E", - "52540022211A" - ], - "vars": {} - }, - "EOS": { - "hosts": [ - "5254001CAFD8", - "525400DDDF72" - ], - "vars": {} - }, - "Generic Model": { - "hosts": [ - "525400E5486D" - ], - "vars": {} - }, - "Ubuntu GNU/Linux": { - "hosts": [ - "525400E5486D" - ], - "vars": {} - }, - "VX": { - "hosts": [ - "52540073956E", - "52540022211A" - ], - "vars": {} - }, - "_meta": { - "hostvars": { - "5254001CAFD8": { - "agent_start_time": "2017-02-03T00:49:16.000000Z", - "ansible_ssh_host": "172.20.52.6", - "aos_hcl_model": "Arista_vEOS", - 
"aos_server": "", - "aos_version": "AOS_1.1.1_OB.5", - "comm_state": "on", - "device_start_time": "2017-02-03T00:47:58.454480Z", - "domain_name": "", - "error_message": "", - "fqdn": "localhost", - "hostname": "localhost", - "hw_model": "vEOS", - "hw_version": "", - "is_acknowledged": false, - "mgmt_ifname": "Management1", - "mgmt_ipaddr": "172.20.52.6", - "mgmt_macaddr": "52:54:00:1C:AF:D8", - "os_arch": "x86_64", - "os_family": "EOS", - "os_version": "4.16.6M", - "os_version_info": { - "build": "6M", - "major": "4", - "minor": "16" - }, - "serial_number": "5254001CAFD8", - "state": "OOS-QUARANTINED", - "vendor": "Arista" - }, - "52540022211A": { - "agent_start_time": "2017-02-03T00:45:22.000000Z", - "ansible_ssh_host": "172.20.52.7", - "aos_hcl_model": "Cumulus_VX", - "aos_server": "172.20.52.3", - "aos_version": "AOS_1.1.1_OB.5", - "comm_state": "on", - "device_start_time": "2017-02-03T00:45:11.019189Z", - "domain_name": "", - "error_message": "", - "fqdn": "cumulus", - "hostname": "cumulus", - "hw_model": "VX", - "hw_version": "", - "is_acknowledged": false, - "mgmt_ifname": "eth0", - "mgmt_ipaddr": "172.20.52.7", - "mgmt_macaddr": "52:54:00:22:21:1a", - "os_arch": "x86_64", - "os_family": "Cumulus", - "os_version": "3.1.1", - "os_version_info": { - "build": "1", - "major": "3", - "minor": "1" - }, - "serial_number": "52540022211A", - "state": "OOS-QUARANTINED", - "vendor": "Cumulus" - }, - "52540073956E": { - "agent_start_time": "2017-02-03T00:45:19.000000Z", - "ansible_ssh_host": "172.20.52.8", - "aos_hcl_model": "Cumulus_VX", - "aos_server": "172.20.52.3", - "aos_version": "AOS_1.1.1_OB.5", - "comm_state": "on", - "device_start_time": "2017-02-03T00:45:11.030113Z", - "domain_name": "", - "error_message": "", - "fqdn": "cumulus", - "hostname": "cumulus", - "hw_model": "VX", - "hw_version": "", - "is_acknowledged": false, - "mgmt_ifname": "eth0", - "mgmt_ipaddr": "172.20.52.8", - "mgmt_macaddr": "52:54:00:73:95:6e", - "os_arch": "x86_64", - "os_family": 
"Cumulus", - "os_version": "3.1.1", - "os_version_info": { - "build": "1", - "major": "3", - "minor": "1" - }, - "serial_number": "52540073956E", - "state": "OOS-QUARANTINED", - "vendor": "Cumulus" - }, - "525400DDDF72": { - "agent_start_time": "2017-02-03T00:49:07.000000Z", - "ansible_ssh_host": "172.20.52.5", - "aos_hcl_model": "Arista_vEOS", - "aos_server": "", - "aos_version": "AOS_1.1.1_OB.5", - "comm_state": "on", - "device_start_time": "2017-02-03T00:47:46.929921Z", - "domain_name": "", - "error_message": "", - "fqdn": "localhost", - "hostname": "localhost", - "hw_model": "vEOS", - "hw_version": "", - "is_acknowledged": false, - "mgmt_ifname": "Management1", - "mgmt_ipaddr": "172.20.52.5", - "mgmt_macaddr": "52:54:00:DD:DF:72", - "os_arch": "x86_64", - "os_family": "EOS", - "os_version": "4.16.6M", - "os_version_info": { - "build": "6M", - "major": "4", - "minor": "16" - }, - "serial_number": "525400DDDF72", - "state": "OOS-QUARANTINED", - "vendor": "Arista" - }, - "525400E5486D": { - "agent_start_time": "2017-02-02T18:44:42.000000Z", - "ansible_ssh_host": "172.20.52.4", - "aos_hcl_model": "Generic_Server_1RU_1x10G", - "aos_server": "172.20.52.3", - "aos_version": "AOS_1.1.1_OB.5", - "comm_state": "on", - "device_start_time": "2017-02-02T21:11:25.188734Z", - "domain_name": "", - "error_message": "", - "fqdn": "localhost", - "hostname": "localhost", - "hw_model": "Generic Model", - "hw_version": "pc-i440fx-trusty", - "is_acknowledged": false, - "mgmt_ifname": "eth0", - "mgmt_ipaddr": "172.20.52.4", - "mgmt_macaddr": "52:54:00:e5:48:6d", - "os_arch": "x86_64", - "os_family": "Ubuntu GNU/Linux", - "os_version": "14.04 LTS", - "os_version_info": { - "build": "", - "major": "14", - "minor": "04" - }, - "serial_number": "525400E5486D", - "state": "OOS-QUARANTINED", - "vendor": "Generic Manufacturer" - } - } - }, - "all": { - "hosts": [ - "5254001CAFD8", - "52540073956E", - "525400DDDF72", - "525400E5486D", - "52540022211A" - ], - "vars": {} - }, - "vEOS": { - 
"hosts": [ - "5254001CAFD8", - "525400DDDF72" - ], - "vars": {} - } -} -""" - - -def fail(msg): - sys.stderr.write("%s\n" % msg) - sys.exit(1) - - -class AosInventory(object): - - def __init__(self): - - """ Main execution path """ - - if not HAS_AOS_PYEZ: - raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez') - if not HAS_ARGPARSE: - raise Exception('argparse is not installed. Please install the argparse library or upgrade to python-2.7') - - # Initialize inventory - self.inventory = dict() # A list of groups and the hosts in that group - self.inventory['_meta'] = dict() - self.inventory['_meta']['hostvars'] = dict() - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - # ---------------------------------------------------- - # Open session to AOS - # ---------------------------------------------------- - aos = Session(server=self.aos_server, - port=self.aos_server_port, - user=self.aos_username, - passwd=self.aos_password) - - aos.login() - - # Save session information in variables of group all - self.add_var_to_group('all', 'aos_session', aos.session) - - # Add the AOS server itself in the inventory - self.add_host_to_group("all", 'aos') - self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server) - self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password) - self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username) - - # ---------------------------------------------------- - # Build the inventory - # 2 modes are supported: device based or blueprint based - # - For device based, the list of device is taken from the global device list - # the serial ID will be used as the inventory_hostname - # - For Blueprint based, the list of device is taken from the given blueprint - # the Node name will be used as the inventory_hostname - # ---------------------------------------------------- - if self.aos_blueprint: - - bp = aos.Blueprints[self.aos_blueprint] 
- if bp.exists is False: - fail("Unable to find the Blueprint: %s" % self.aos_blueprint) - - for dev_name, dev_id in bp.params['devices'].value.items(): - - self.add_host_to_group('all', dev_name) - device = aos.Devices.find(uid=dev_id) - - if 'facts' in device.value.keys(): - self.add_device_facts_to_var(dev_name, device) - - # Define admin State and Status - if 'user_config' in device.value.keys(): - if 'admin_state' in device.value['user_config'].keys(): - self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state']) - - self.add_device_status_to_var(dev_name, device) - - # Go over the contents data structure - for node in bp.contents['system']['nodes']: - if node['display_name'] == dev_name: - self.add_host_to_group(node['role'], dev_name) - - # Check for additional attribute to import - attributes_to_import = [ - 'loopback_ip', - 'asn', - 'role', - 'position', - ] - for attr in attributes_to_import: - if attr in node.keys(): - self.add_var_to_host(dev_name, attr, node[attr]) - - # if blueprint_interface is enabled in the configuration - # Collect links information - if self.aos_blueprint_int: - interfaces = dict() - - for link in bp.contents['system']['links']: - # each link has 2 sides [0,1], and it's unknown which one match this device - # at first we assume, first side match(0) and peer is (1) - peer_id = 1 - - for side in link['endpoints']: - if side['display_name'] == dev_name: - - # import local information first - int_name = side['interface'] - - # init dict - interfaces[int_name] = dict() - if 'ip' in side.keys(): - interfaces[int_name]['ip'] = side['ip'] - - if 'interface' in side.keys(): - interfaces[int_name]['name'] = side['interface'] - - if 'display_name' in link['endpoints'][peer_id].keys(): - interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name'] - - if 'ip' in link['endpoints'][peer_id].keys(): - interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip'] - - if 'type' in 
link['endpoints'][peer_id].keys(): - interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type'] - - else: - # if we haven't match the first time, prepare the peer_id - # for the second loop iteration - peer_id = 0 - - self.add_var_to_host(dev_name, 'interfaces', interfaces) - - else: - for device in aos.Devices: - # If not reacheable, create by key and - # If reacheable, create by hostname - - self.add_host_to_group('all', device.name) - - # populate information for this host - self.add_device_status_to_var(device.name, device) - - if 'user_config' in device.value.keys(): - for key, value in device.value['user_config'].items(): - self.add_var_to_host(device.name, key, value) - - # Based on device status online|offline, collect facts as well - if device.value['status']['comm_state'] == 'on': - - if 'facts' in device.value.keys(): - self.add_device_facts_to_var(device.name, device) - - # Check if device is associated with a blueprint - # if it's create a new group - if 'blueprint_active' in device.value['status'].keys(): - if 'blueprint_id' in device.value['status'].keys(): - bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id']) - - if bp: - self.add_host_to_group(bp.name, device.name) - - # ---------------------------------------------------- - # Convert the inventory and return a JSON String - # ---------------------------------------------------- - data_to_print = "" - data_to_print += self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def read_settings(self): - """ Reads the settings from the apstra_aos.ini file """ - - config = configparser.ConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini') - - # Default Values - self.aos_blueprint = False - self.aos_blueprint_int = True - self.aos_username = 'admin' - self.aos_password = 'admin' - self.aos_server_port = 8888 - - # Try to reach all parameters from File, if not available try from ENV - try: - self.aos_server = 
config.get('aos', 'aos_server') - except Exception: - if 'AOS_SERVER' in os.environ.keys(): - self.aos_server = os.environ['AOS_SERVER'] - - try: - self.aos_server_port = config.get('aos', 'port') - except Exception: - if 'AOS_PORT' in os.environ.keys(): - self.aos_server_port = os.environ['AOS_PORT'] - - try: - self.aos_username = config.get('aos', 'username') - except Exception: - if 'AOS_USERNAME' in os.environ.keys(): - self.aos_username = os.environ['AOS_USERNAME'] - - try: - self.aos_password = config.get('aos', 'password') - except Exception: - if 'AOS_PASSWORD' in os.environ.keys(): - self.aos_password = os.environ['AOS_PASSWORD'] - - try: - self.aos_blueprint = config.get('aos', 'blueprint') - except Exception: - if 'AOS_BLUEPRINT' in os.environ.keys(): - self.aos_blueprint = os.environ['AOS_BLUEPRINT'] - - try: - if config.get('aos', 'blueprint_interface') in ['false', 'no']: - self.aos_blueprint_int = False - except Exception: - pass - - def parse_cli_args(self): - """ Command line argument processing """ - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - self.args = parser.parse_args() - - def json_format_dict(self, data, pretty=False): - """ Converts a dict to a JSON object and dumps it as a formatted string """ - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - def add_host_to_group(self, group, host): - - # Cleanup group name first - clean_group = self.cleanup_group_name(group) - - # Check if the group exist, if not initialize it - if clean_group not in self.inventory.keys(): - self.inventory[clean_group] = {} - self.inventory[clean_group]['hosts'] = [] - self.inventory[clean_group]['vars'] = {} - - 
self.inventory[clean_group]['hosts'].append(host) - - def add_var_to_host(self, host, var, value): - - # Check if the host exist, if not initialize it - if host not in self.inventory['_meta']['hostvars'].keys(): - self.inventory['_meta']['hostvars'][host] = {} - - self.inventory['_meta']['hostvars'][host][var] = value - - def add_var_to_group(self, group, var, value): - - # Cleanup group name first - clean_group = self.cleanup_group_name(group) - - # Check if the group exist, if not initialize it - if clean_group not in self.inventory.keys(): - self.inventory[clean_group] = {} - self.inventory[clean_group]['hosts'] = [] - self.inventory[clean_group]['vars'] = {} - - self.inventory[clean_group]['vars'][var] = value - - def add_device_facts_to_var(self, device_name, device): - - # Populate variables for this host - self.add_var_to_host(device_name, - 'ansible_ssh_host', - device.value['facts']['mgmt_ipaddr']) - - self.add_var_to_host(device_name, 'id', device.id) - - # self.add_host_to_group('all', device.name) - for key, value in device.value['facts'].items(): - self.add_var_to_host(device_name, key, value) - - if key == 'os_family': - self.add_host_to_group(value, device_name) - elif key == 'hw_model': - self.add_host_to_group(value, device_name) - - def cleanup_group_name(self, group_name): - """ - Clean up group name by : - - Replacing all non-alphanumeric caracter by underscore - - Converting to lowercase - """ - - rx = re.compile(r'\W+') - clean_group = rx.sub('_', group_name).lower() - - return clean_group - - def add_device_status_to_var(self, device_name, device): - - if 'status' in device.value.keys(): - for key, value in device.value['status'].items(): - self.add_var_to_host(device.name, key, value) - - -# Run the script -if __name__ == '__main__': - AosInventory() diff --git a/scripts/inventory/azure_rm.ini b/scripts/inventory/azure_rm.ini deleted file mode 100644 index 6edd9b981b..0000000000 --- a/scripts/inventory/azure_rm.ini +++ /dev/null @@ -1,23 
+0,0 @@ -# -# Configuration file for azure_rm.py -# -[azure] -# Control which resource groups are included. By default all resources groups are included. -# Set resource_groups to a comma separated list of resource groups names. -#resource_groups= - -# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs -#tags= - -# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus) -#locations= - -# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance. -include_powerstate=yes - -# Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1. -group_by_resource_group=yes -group_by_location=yes -group_by_security_group=yes -group_by_os_family=yes -group_by_tag=yes diff --git a/scripts/inventory/azure_rm.py b/scripts/inventory/azure_rm.py deleted file mode 100755 index ef9e7b1da4..0000000000 --- a/scripts/inventory/azure_rm.py +++ /dev/null @@ -1,962 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2016 Matt Davis, -# Chris Houseknecht, -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Important note (2018/10) -======================== -This inventory script is in maintenance mode: only critical bug fixes but no new features. -There's new Azure external inventory script at https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/inventory/azure_rm.py, -with better performance and latest new features. Please go to the link to get latest Azure inventory. - -Azure External Inventory Script -=============================== -Generates dynamic inventory by making API requests to the Azure Resource -Manager using the Azure Python SDK. 
For instruction on installing the -Azure Python SDK see https://azure-sdk-for-python.readthedocs.io/ - -Authentication --------------- -The order of precedence is command line arguments, environment variables, -and finally the [default] profile found in ~/.azure/credentials. - -If using a credentials file, it should be an ini formatted file with one or -more sections, which we refer to as profiles. The script looks for a -[default] section, if a profile is not specified either on the command line -or with an environment variable. The keys in a profile will match the -list of command line arguments below. - -For command line arguments and environment variables specify a profile found -in your ~/.azure/credentials file, or a service principal or Active Directory -user. - -Command line arguments: - - profile - - client_id - - secret - - subscription_id - - tenant - - ad_user - - password - - cloud_environment - - adfs_authority_url - -Environment variables: - - AZURE_PROFILE - - AZURE_CLIENT_ID - - AZURE_SECRET - - AZURE_SUBSCRIPTION_ID - - AZURE_TENANT - - AZURE_AD_USER - - AZURE_PASSWORD - - AZURE_CLOUD_ENVIRONMENT - - AZURE_ADFS_AUTHORITY_URL - -Run for Specific Host ------------------------ -When run for a specific host using the --host option, a resource group is -required. 
For a specific host, this script returns the following variables: - -{ - "ansible_host": "XXX.XXX.XXX.XXX", - "computer_name": "computer_name2", - "fqdn": null, - "id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name", - "image": { - "offer": "CentOS", - "publisher": "OpenLogic", - "sku": "7.1", - "version": "latest" - }, - "location": "westus", - "mac_address": "00-00-5E-00-53-FE", - "name": "object-name", - "network_interface": "interface-name", - "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1", - "network_security_group": null, - "network_security_group_id": null, - "os_disk": { - "name": "object-name", - "operating_system_type": "Linux" - }, - "plan": null, - "powerstate": "running", - "private_ip": "172.26.3.6", - "private_ip_alloc_method": "Static", - "provisioning_state": "Succeeded", - "public_ip": "XXX.XXX.XXX.XXX", - "public_ip_alloc_method": "Static", - "public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name", - "public_ip_name": "object-name", - "resource_group": "galaxy-production", - "security_group": "object-name", - "security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name", - "tags": { - "db": "database" - }, - "type": "Microsoft.Compute/virtualMachines", - "virtual_machine_size": "Standard_DS4" -} - -Groups ------- -When run in --list mode, instances are grouped by the following categories: - - azure - - location - - resource_group - - security_group - - tag key - - tag key_value - -Control groups using azure_rm.ini or set environment variables: - -AZURE_GROUP_BY_RESOURCE_GROUP=yes -AZURE_GROUP_BY_LOCATION=yes -AZURE_GROUP_BY_SECURITY_GROUP=yes -AZURE_GROUP_BY_TAG=yes - -Select hosts within specific 
resource groups by assigning a comma separated list to: - -AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b - -Select hosts for specific tag key by assigning a comma separated list of tag keys to: - -AZURE_TAGS=key1,key2,key3 - -Select hosts for specific locations: - -AZURE_LOCATIONS=eastus,westus,eastus2 - -Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to: - -AZURE_TAGS=key1:value1,key2:value2 - -If you don't need the powerstate, you can improve performance by turning off powerstate fetching: -AZURE_INCLUDE_POWERSTATE=no - -azure_rm.ini ------------- -As mentioned above, you can control execution using environment variables or a .ini file. A sample -azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case -'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify -a different path for the .ini file, define the AZURE_INI_PATH environment variable: - - export AZURE_INI_PATH=/path/to/custom.ini - -Powerstate: ------------ -The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is -up. If the value is anything other than 'running', the machine is down, and will be unreachable. 
- -Examples: ---------- - Execute /bin/uname on all instances in the galaxy-qa resource group - $ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a" - - Use the inventory script to print instance specific information - $ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty - - Use with a playbook - $ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa - - -Insecure Platform Warning -------------------------- -If you receive InsecurePlatformWarning from urllib3, install the -requests security packages: - - pip install requests[security] - - -author: - - Chris Houseknecht (@chouseknecht) - - Matt Davis (@nitzmahone) - -Company: Ansible by Red Hat - -Version: 1.0.0 -''' - -import argparse -import json -import os -import re -import sys -import inspect - -from os.path import expanduser -from ansible.module_utils.six.moves import configparser as cp -import ansible.module_utils.six.moves.urllib.parse as urlparse - -HAS_AZURE = True -HAS_AZURE_EXC = None -HAS_AZURE_CLI_CORE = True -CLIError = None - -try: - from msrestazure.azure_active_directory import AADTokenCredentials - from msrestazure.azure_exceptions import CloudError - from msrestazure.azure_active_directory import MSIAuthentication - from msrestazure import azure_cloud - from azure.mgmt.compute import __version__ as azure_compute_version - from azure.common import AzureMissingResourceHttpError, AzureHttpError - from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials - from azure.mgmt.network import NetworkManagementClient - from azure.mgmt.resource.resources import ResourceManagementClient - from azure.mgmt.resource.subscriptions import SubscriptionClient - from azure.mgmt.compute import ComputeManagementClient - from adal.authentication_context import AuthenticationContext -except ImportError as exc: - HAS_AZURE_EXC = exc - HAS_AZURE = False - -try: - from azure.cli.core.util import CLIError - from azure.common.credentials 
import get_azure_cli_credentials, get_cli_profile - from azure.common.cloud import get_cli_active_cloud -except ImportError: - HAS_AZURE_CLI_CORE = False - CLIError = Exception - -try: - from ansible.release import __version__ as ansible_version -except ImportError: - ansible_version = 'unknown' - -AZURE_CREDENTIAL_ENV_MAPPING = dict( - profile='AZURE_PROFILE', - subscription_id='AZURE_SUBSCRIPTION_ID', - client_id='AZURE_CLIENT_ID', - secret='AZURE_SECRET', - tenant='AZURE_TENANT', - ad_user='AZURE_AD_USER', - password='AZURE_PASSWORD', - cloud_environment='AZURE_CLOUD_ENVIRONMENT', - adfs_authority_url='AZURE_ADFS_AUTHORITY_URL' -) - -AZURE_CONFIG_SETTINGS = dict( - resource_groups='AZURE_RESOURCE_GROUPS', - tags='AZURE_TAGS', - locations='AZURE_LOCATIONS', - include_powerstate='AZURE_INCLUDE_POWERSTATE', - group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP', - group_by_location='AZURE_GROUP_BY_LOCATION', - group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP', - group_by_tag='AZURE_GROUP_BY_TAG', - group_by_os_family='AZURE_GROUP_BY_OS_FAMILY', - use_private_ip='AZURE_USE_PRIVATE_IP' -) - -AZURE_MIN_VERSION = "2.0.0" -ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version) - - -def azure_id_to_dict(id): - pieces = re.sub(r'^\/', '', id).split('/') - result = {} - index = 0 - while index < len(pieces) - 1: - result[pieces[index]] = pieces[index + 1] - index += 1 - return result - - -class AzureRM(object): - - def __init__(self, args): - self._args = args - self._cloud_environment = None - self._compute_client = None - self._resource_client = None - self._network_client = None - self._adfs_authority_url = None - self._resource = None - - self.debug = False - if args.debug: - self.debug = True - - self.credentials = self._get_credentials(args) - if not self.credentials: - self.fail("Failed to get credentials. 
Either pass as parameters, set environment variables, " - "or define a profile in ~/.azure/credentials.") - - # if cloud_environment specified, look up/build Cloud object - raw_cloud_env = self.credentials.get('cloud_environment') - if not raw_cloud_env: - self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default - else: - # try to look up "well-known" values via the name attribute on azure_cloud members - all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)] - matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env] - if len(matched_clouds) == 1: - self._cloud_environment = matched_clouds[0] - elif len(matched_clouds) > 1: - self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env)) - else: - if not urlparse.urlparse(raw_cloud_env).scheme: - self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds])) - try: - self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env) - except Exception as e: - self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message)) - - if self.credentials.get('subscription_id', None) is None: - self.fail("Credentials did not include a subscription_id value.") - self.log("setting subscription_id") - self.subscription_id = self.credentials['subscription_id'] - - # get authentication authority - # for adfs, user could pass in authority or not. 
- # for others, use default authority from cloud environment - if self.credentials.get('adfs_authority_url'): - self._adfs_authority_url = self.credentials.get('adfs_authority_url') - else: - self._adfs_authority_url = self._cloud_environment.endpoints.active_directory - - # get resource from cloud environment - self._resource = self._cloud_environment.endpoints.active_directory_resource_id - - if self.credentials.get('credentials'): - self.azure_credentials = self.credentials.get('credentials') - elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'): - self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'], - secret=self.credentials['secret'], - tenant=self.credentials['tenant'], - cloud_environment=self._cloud_environment) - - elif self.credentials.get('ad_user') is not None and \ - self.credentials.get('password') is not None and \ - self.credentials.get('client_id') is not None and \ - self.credentials.get('tenant') is not None: - - self.azure_credentials = self.acquire_token_with_username_password( - self._adfs_authority_url, - self._resource, - self.credentials['ad_user'], - self.credentials['password'], - self.credentials['client_id'], - self.credentials['tenant']) - - elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None: - tenant = self.credentials.get('tenant') - if not tenant: - tenant = 'common' - self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], - self.credentials['password'], - tenant=tenant, - cloud_environment=self._cloud_environment) - - else: - self.fail("Failed to authenticate with provided credentials. Some attributes were missing. 
" - "Credentials must include client_id, secret and tenant or ad_user and password, or " - "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or " - "be logged in using AzureCLI.") - - def log(self, msg): - if self.debug: - print(msg + u'\n') - - def fail(self, msg): - raise Exception(msg) - - def _get_profile(self, profile="default"): - path = expanduser("~") - path += "/.azure/credentials" - try: - config = cp.ConfigParser() - config.read(path) - except Exception as exc: - self.fail("Failed to access {0}. Check that the file exists and you have read " - "access. {1}".format(path, str(exc))) - credentials = dict() - for key in AZURE_CREDENTIAL_ENV_MAPPING: - try: - credentials[key] = config.get(profile, key, raw=True) - except Exception: - pass - - if credentials.get('client_id') is not None or credentials.get('ad_user') is not None: - return credentials - - return None - - def _get_env_credentials(self): - env_credentials = dict() - for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): - env_credentials[attribute] = os.environ.get(env_variable, None) - - if env_credentials['profile'] is not None: - credentials = self._get_profile(env_credentials['profile']) - return credentials - - if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None: - return env_credentials - - return None - - def _get_azure_cli_credentials(self): - credentials, subscription_id = get_azure_cli_credentials() - cloud_environment = get_cli_active_cloud() - - cli_credentials = { - 'credentials': credentials, - 'subscription_id': subscription_id, - 'cloud_environment': cloud_environment - } - return cli_credentials - - def _get_msi_credentials(self, subscription_id_param=None): - credentials = MSIAuthentication() - subscription_id_param = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None) - try: - # try to get the subscription in MSI to test whether MSI is enabled 
- subscription_client = SubscriptionClient(credentials) - subscription = next(subscription_client.subscriptions.list()) - subscription_id = str(subscription.subscription_id) - return { - 'credentials': credentials, - 'subscription_id': subscription_id_param or subscription_id - } - except Exception as exc: - return None - - def _get_credentials(self, params): - # Get authentication credentials. - # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials. - - self.log('Getting credentials') - - arg_credentials = dict() - for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): - arg_credentials[attribute] = getattr(params, attribute) - - # try module params - if arg_credentials['profile'] is not None: - self.log('Retrieving credentials with profile parameter.') - credentials = self._get_profile(arg_credentials['profile']) - return credentials - - if arg_credentials['client_id'] is not None: - self.log('Received credentials from parameters.') - return arg_credentials - - if arg_credentials['ad_user'] is not None: - self.log('Received credentials from parameters.') - return arg_credentials - - # try environment - env_credentials = self._get_env_credentials() - if env_credentials: - self.log('Received credentials from env.') - return env_credentials - - # try default profile from ~./azure/credentials - default_credentials = self._get_profile() - if default_credentials: - self.log('Retrieved default profile credentials from ~/.azure/credentials.') - return default_credentials - - msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id')) - if msi_credentials: - self.log('Retrieved credentials from MSI.') - return msi_credentials - - try: - if HAS_AZURE_CLI_CORE: - self.log('Retrieving credentials from AzureCLI profile') - cli_credentials = self._get_azure_cli_credentials() - return cli_credentials - except CLIError as ce: - self.log('Error getting AzureCLI profile credentials - 
{0}'.format(ce)) - - return None - - def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant): - authority_uri = authority - - if tenant is not None: - authority_uri = authority + '/' + tenant - - context = AuthenticationContext(authority_uri) - token_response = context.acquire_token_with_username_password(resource, username, password, client_id) - return AADTokenCredentials(token_response) - - def _register(self, key): - try: - # We have to perform the one-time registration here. Otherwise, we receive an error the first - # time we attempt to use the requested client. - resource_client = self.rm_client - resource_client.providers.register(key) - except Exception as exc: - self.log("One-time registration of {0} failed - {1}".format(key, str(exc))) - self.log("You might need to register {0} using an admin account".format(key)) - self.log(("To register a provider using the Python CLI: " - "https://docs.microsoft.com/azure/azure-resource-manager/" - "resource-manager-common-deployment-errors#noregisteredproviderfound")) - - def get_mgmt_svc_client(self, client_type, base_url, api_version): - client = client_type(self.azure_credentials, - self.subscription_id, - base_url=base_url, - api_version=api_version) - client.config.add_user_agent(ANSIBLE_USER_AGENT) - return client - - @property - def network_client(self): - self.log('Getting network client') - if not self._network_client: - self._network_client = self.get_mgmt_svc_client(NetworkManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-06-01') - self._register('Microsoft.Network') - return self._network_client - - @property - def rm_client(self): - self.log('Getting resource manager client') - if not self._resource_client: - self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-05-10') - return self._resource_client - - @property - def compute_client(self): - 
self.log('Getting compute client') - if not self._compute_client: - self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-03-30') - self._register('Microsoft.Compute') - return self._compute_client - - -class AzureInventory(object): - - def __init__(self): - - self._args = self._parse_cli_args() - - try: - rm = AzureRM(self._args) - except Exception as e: - sys.exit("{0}".format(str(e))) - - self._compute_client = rm.compute_client - self._network_client = rm.network_client - self._resource_client = rm.rm_client - self._security_groups = None - - self.resource_groups = [] - self.tags = None - self.locations = None - self.replace_dash_in_groups = False - self.group_by_resource_group = True - self.group_by_location = True - self.group_by_os_family = True - self.group_by_security_group = True - self.group_by_tag = True - self.include_powerstate = True - self.use_private_ip = False - - self._inventory = dict( - _meta=dict( - hostvars=dict() - ), - azure=[] - ) - - self._get_settings() - - if self._args.resource_groups: - self.resource_groups = self._args.resource_groups.split(',') - - if self._args.tags: - self.tags = self._args.tags.split(',') - - if self._args.locations: - self.locations = self._args.locations.split(',') - - if self._args.no_powerstate: - self.include_powerstate = False - - self.get_inventory() - print(self._json_format_dict(pretty=self._args.pretty)) - sys.exit(0) - - def _parse_cli_args(self): - # Parse command line arguments - parser = argparse.ArgumentParser( - description='Produce an Ansible Inventory file for an Azure subscription') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--debug', action='store_true', default=False, - help='Send debug messages to STDOUT') - parser.add_argument('--host', action='store', - help='Get all information about an instance') - 
parser.add_argument('--pretty', action='store_true', default=False, - help='Pretty print JSON output(default: False)') - parser.add_argument('--profile', action='store', - help='Azure profile contained in ~/.azure/credentials') - parser.add_argument('--subscription_id', action='store', - help='Azure Subscription Id') - parser.add_argument('--client_id', action='store', - help='Azure Client Id ') - parser.add_argument('--secret', action='store', - help='Azure Client Secret') - parser.add_argument('--tenant', action='store', - help='Azure Tenant Id') - parser.add_argument('--ad_user', action='store', - help='Active Directory User') - parser.add_argument('--password', action='store', - help='password') - parser.add_argument('--adfs_authority_url', action='store', - help='Azure ADFS authority url') - parser.add_argument('--cloud_environment', action='store', - help='Azure Cloud Environment name or metadata discovery URL') - parser.add_argument('--resource-groups', action='store', - help='Return inventory for comma separated list of resource group names') - parser.add_argument('--tags', action='store', - help='Return inventory for comma separated list of tag key:value pairs') - parser.add_argument('--locations', action='store', - help='Return inventory for comma separated list of locations') - parser.add_argument('--no-powerstate', action='store_true', default=False, - help='Do not include the power state of each virtual host') - return parser.parse_args() - - def get_inventory(self): - if len(self.resource_groups) > 0: - # get VMs for requested resource groups - for resource_group in self.resource_groups: - try: - virtual_machines = self._compute_client.virtual_machines.list(resource_group.lower()) - except Exception as exc: - sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc))) - if self._args.host or self.tags: - selected_machines = self._selected_machines(virtual_machines) - self._load_machines(selected_machines) 
- else: - self._load_machines(virtual_machines) - else: - # get all VMs within the subscription - try: - virtual_machines = self._compute_client.virtual_machines.list_all() - except Exception as exc: - sys.exit("Error: fetching virtual machines - {0}".format(str(exc))) - - if self._args.host or self.tags or self.locations: - selected_machines = self._selected_machines(virtual_machines) - self._load_machines(selected_machines) - else: - self._load_machines(virtual_machines) - - def _load_machines(self, machines): - for machine in machines: - id_dict = azure_id_to_dict(machine.id) - - # TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets - # fixed, we should remove the .lower(). Opened Issue - # #574: https://github.com/Azure/azure-sdk-for-python/issues/574 - resource_group = id_dict['resourceGroups'].lower() - - if self.group_by_security_group: - self._get_security_groups(resource_group) - - host_vars = dict( - ansible_host=None, - private_ip=None, - private_ip_alloc_method=None, - public_ip=None, - public_ip_name=None, - public_ip_id=None, - public_ip_alloc_method=None, - fqdn=None, - location=machine.location, - name=machine.name, - type=machine.type, - id=machine.id, - tags=machine.tags, - network_interface_id=None, - network_interface=None, - resource_group=resource_group, - mac_address=None, - plan=(machine.plan.name if machine.plan else None), - virtual_machine_size=machine.hardware_profile.vm_size, - computer_name=(machine.os_profile.computer_name if machine.os_profile else None), - provisioning_state=machine.provisioning_state, - ) - - host_vars['os_disk'] = dict( - name=machine.storage_profile.os_disk.name, - operating_system_type=machine.storage_profile.os_disk.os_type.value.lower() - ) - - if self.include_powerstate: - host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name) - - if machine.storage_profile.image_reference: - host_vars['image'] = dict( - 
offer=machine.storage_profile.image_reference.offer, - publisher=machine.storage_profile.image_reference.publisher, - sku=machine.storage_profile.image_reference.sku, - version=machine.storage_profile.image_reference.version - ) - - # Add windows details - if machine.os_profile is not None and machine.os_profile.windows_configuration is not None: - host_vars['ansible_connection'] = 'winrm' - host_vars['windows_auto_updates_enabled'] = \ - machine.os_profile.windows_configuration.enable_automatic_updates - host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone - host_vars['windows_rm'] = None - if machine.os_profile.windows_configuration.win_rm is not None: - host_vars['windows_rm'] = dict(listeners=None) - if machine.os_profile.windows_configuration.win_rm.listeners is not None: - host_vars['windows_rm']['listeners'] = [] - for listener in machine.os_profile.windows_configuration.win_rm.listeners: - host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol.name, - certificate_url=listener.certificate_url)) - - for interface in machine.network_profile.network_interfaces: - interface_reference = self._parse_ref_id(interface.id) - network_interface = self._network_client.network_interfaces.get( - interface_reference['resourceGroups'], - interface_reference['networkInterfaces']) - if network_interface.primary: - if self.group_by_security_group and \ - self._security_groups[resource_group].get(network_interface.id, None): - host_vars['security_group'] = \ - self._security_groups[resource_group][network_interface.id]['name'] - host_vars['security_group_id'] = \ - self._security_groups[resource_group][network_interface.id]['id'] - host_vars['network_interface'] = network_interface.name - host_vars['network_interface_id'] = network_interface.id - host_vars['mac_address'] = network_interface.mac_address - for ip_config in network_interface.ip_configurations: - host_vars['private_ip'] = ip_config.private_ip_address - 
host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method - if self.use_private_ip: - host_vars['ansible_host'] = ip_config.private_ip_address - if ip_config.public_ip_address: - public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id) - public_ip_address = self._network_client.public_ip_addresses.get( - public_ip_reference['resourceGroups'], - public_ip_reference['publicIPAddresses']) - if not self.use_private_ip: - host_vars['ansible_host'] = public_ip_address.ip_address - host_vars['public_ip'] = public_ip_address.ip_address - host_vars['public_ip_name'] = public_ip_address.name - host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method - host_vars['public_ip_id'] = public_ip_address.id - if public_ip_address.dns_settings: - host_vars['fqdn'] = public_ip_address.dns_settings.fqdn - - self._add_host(host_vars) - - def _selected_machines(self, virtual_machines): - selected_machines = [] - for machine in virtual_machines: - if self._args.host and self._args.host == machine.name: - selected_machines.append(machine) - if self.tags and self._tags_match(machine.tags, self.tags): - selected_machines.append(machine) - if self.locations and machine.location in self.locations: - selected_machines.append(machine) - return selected_machines - - def _get_security_groups(self, resource_group): - ''' For a given resource_group build a mapping of network_interface.id to security_group name ''' - if not self._security_groups: - self._security_groups = dict() - if not self._security_groups.get(resource_group): - self._security_groups[resource_group] = dict() - for group in self._network_client.network_security_groups.list(resource_group): - if group.network_interfaces: - for interface in group.network_interfaces: - self._security_groups[resource_group][interface.id] = dict( - name=group.name, - id=group.id - ) - - def _get_powerstate(self, resource_group, name): - try: - vm = 
self._compute_client.virtual_machines.get(resource_group, - name, - expand='instanceview') - except Exception as exc: - sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc))) - - return next((s.code.replace('PowerState/', '') - for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None) - - def _add_host(self, vars): - - host_name = self._to_safe(vars['name']) - resource_group = self._to_safe(vars['resource_group']) - operating_system_type = self._to_safe(vars['os_disk']['operating_system_type'].lower()) - security_group = None - if vars.get('security_group'): - security_group = self._to_safe(vars['security_group']) - - if self.group_by_os_family: - if not self._inventory.get(operating_system_type): - self._inventory[operating_system_type] = [] - self._inventory[operating_system_type].append(host_name) - - if self.group_by_resource_group: - if not self._inventory.get(resource_group): - self._inventory[resource_group] = [] - self._inventory[resource_group].append(host_name) - - if self.group_by_location: - if not self._inventory.get(vars['location']): - self._inventory[vars['location']] = [] - self._inventory[vars['location']].append(host_name) - - if self.group_by_security_group and security_group: - if not self._inventory.get(security_group): - self._inventory[security_group] = [] - self._inventory[security_group].append(host_name) - - self._inventory['_meta']['hostvars'][host_name] = vars - self._inventory['azure'].append(host_name) - - if self.group_by_tag and vars.get('tags'): - for key, value in vars['tags'].items(): - safe_key = self._to_safe(key) - safe_value = safe_key + '_' + self._to_safe(value) - if not self._inventory.get(safe_key): - self._inventory[safe_key] = [] - if not self._inventory.get(safe_value): - self._inventory[safe_value] = [] - self._inventory[safe_key].append(host_name) - self._inventory[safe_value].append(host_name) - - def _json_format_dict(self, pretty=False): - # convert inventory to json 
- if pretty: - return json.dumps(self._inventory, sort_keys=True, indent=2) - else: - return json.dumps(self._inventory) - - def _get_settings(self): - # Load settings from the .ini, if it exists. Otherwise, - # look for environment values. - file_settings = self._load_settings() - if file_settings: - for key in AZURE_CONFIG_SETTINGS: - if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key): - values = file_settings.get(key).split(',') - if len(values) > 0: - setattr(self, key, values) - elif file_settings.get(key): - val = self._to_boolean(file_settings[key]) - setattr(self, key, val) - else: - env_settings = self._get_env_settings() - for key in AZURE_CONFIG_SETTINGS: - if key in ('resource_groups', 'tags', 'locations') and env_settings.get(key): - values = env_settings.get(key).split(',') - if len(values) > 0: - setattr(self, key, values) - elif env_settings.get(key, None) is not None: - val = self._to_boolean(env_settings[key]) - setattr(self, key, val) - - def _parse_ref_id(self, reference): - response = {} - keys = reference.strip('/').split('/') - for index in range(len(keys)): - if index < len(keys) - 1 and index % 2 == 0: - response[keys[index]] = keys[index + 1] - return response - - def _to_boolean(self, value): - if value in ['Yes', 'yes', 1, 'True', 'true', True]: - result = True - elif value in ['No', 'no', 0, 'False', 'false', False]: - result = False - else: - result = True - return result - - def _get_env_settings(self): - env_settings = dict() - for attribute, env_variable in AZURE_CONFIG_SETTINGS.items(): - env_settings[attribute] = os.environ.get(env_variable, None) - return env_settings - - def _load_settings(self): - basename = os.path.splitext(os.path.basename(__file__))[0] - default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini')) - path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path))) - config = None - settings = None - try: - config = cp.ConfigParser() - 
config.read(path) - except Exception: - pass - - if config is not None: - settings = dict() - for key in AZURE_CONFIG_SETTINGS: - try: - settings[key] = config.get('azure', key, raw=True) - except Exception: - pass - - return settings - - def _tags_match(self, tag_obj, tag_args): - ''' - Return True if the tags object from a VM contains the requested tag values. - - :param tag_obj: Dictionary of string:string pairs - :param tag_args: List of strings in the form key=value - :return: boolean - ''' - - if not tag_obj: - return False - - matches = 0 - for arg in tag_args: - arg_key = arg - arg_value = None - if re.search(r':', arg): - arg_key, arg_value = arg.split(':') - if arg_value and tag_obj.get(arg_key, None) == arg_value: - matches += 1 - elif not arg_value and tag_obj.get(arg_key, None) is not None: - matches += 1 - if matches == len(tag_args): - return True - return False - - def _to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - regex = r"[^A-Za-z0-9\_" - if not self.replace_dash_in_groups: - regex += r"\-" - return re.sub(regex + "]", "_", word) - - -def main(): - if not HAS_AZURE: - sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC)) - - AzureInventory() - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/brook.ini b/scripts/inventory/brook.ini deleted file mode 100644 index e88c363150..0000000000 --- a/scripts/inventory/brook.ini +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2016 Doalitic. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# The Brook.io inventory script has the following dependencies: -# 1. A working Brook.io account -# See https://brook.io -# 2. A valid token generated through the 'API token' panel of Brook.io -# 3. The libbrook python libray. -# See https://github.com/doalitic/libbrook -# -# Author: Francisco Ros - -[brook] -# Valid API token (required). -# E.g. 'Aed342a12A60433697281FeEe1a4037C' -# -api_token = - -# Project id within Brook.io, as obtained from the project settings (optional). If provided, the -# generated inventory will just include the hosts that belong to such project. Otherwise, it will -# include all hosts in projects the requesting user has access to. The response includes groups -# 'project_x', being 'x' the project name. -# E.g. '2e8e099e1bc34cc0979d97ac34e9577b' -# -project_id = diff --git a/scripts/inventory/brook.py b/scripts/inventory/brook.py deleted file mode 100755 index 1acd370ec3..0000000000 --- a/scripts/inventory/brook.py +++ /dev/null @@ -1,248 +0,0 @@ -#!/usr/bin/env python -# Copyright 2016 Doalitic. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -Brook.io external inventory script -================================== - -Generates inventory that Ansible can understand by making API requests to Brook.io via the libbrook -library. Hence, such dependency must be installed in the system to run this script. - -The default configuration file is named 'brook.ini' and is located alongside this script. 
You can -choose any other file by setting the BROOK_INI_PATH environment variable. - -If param 'project_id' is left blank in 'brook.ini', the inventory includes all the instances in -projects where the requesting user belongs. Otherwise, only instances from the given project are -included, provided the requesting user belongs to it. - -The following variables are established for every host. They can be retrieved from the hostvars -dictionary. - - brook_pid: str - - brook_name: str - - brook_description: str - - brook_project: str - - brook_template: str - - brook_region: str - - brook_zone: str - - brook_status: str - - brook_tags: list(str) - - brook_internal_ips: list(str) - - brook_external_ips: list(str) - - brook_created_at - - brook_updated_at - - ansible_ssh_host - -Instances are grouped by the following categories: - - tag: - A group is created for each tag. E.g. groups 'tag_foo' and 'tag_bar' are created if there exist - instances with tags 'foo' and/or 'bar'. - - project: - A group is created for each project. E.g. group 'project_test' is created if a project named - 'test' exist. - - status: - A group is created for each instance state. E.g. groups 'status_RUNNING' and 'status_PENDING' - are created if there are instances in running and pending state. - -Examples: - Execute uname on all instances in project 'test' - $ ansible -i brook.py project_test -m shell -a "/bin/uname -a" - - Install nginx on all debian web servers tagged with 'www' - $ ansible -i brook.py tag_www -m apt -a "name=nginx state=present" - - Run site.yml playbook on web servers - $ ansible-playbook -i brook.py site.yml -l tag_www - -Support: - This script is tested on Python 2.7 and 3.4. It may work on other versions though. 
- -Author: Francisco Ros -Version: 0.2 -""" - - -import sys -import os - -from ansible.module_utils.six.moves.configparser import SafeConfigParser as ConfigParser - -import json - -try: - import libbrook -except Exception: - sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook') - - -class BrookInventory: - - _API_ENDPOINT = 'https://api.brook.io' - - def __init__(self): - self._configure_from_file() - self.client = self.get_api_client() - self.inventory = self.get_inventory() - - def _configure_from_file(self): - """Initialize from .ini file. - - Configuration file is assumed to be named 'brook.ini' and to be located on the same - directory than this file, unless the environment variable BROOK_INI_PATH says otherwise. - """ - - brook_ini_default_path = \ - os.path.join(os.path.dirname(os.path.realpath(__file__)), 'brook.ini') - brook_ini_path = os.environ.get('BROOK_INI_PATH', brook_ini_default_path) - - config = ConfigParser(defaults={ - 'api_token': '', - 'project_id': '' - }) - config.read(brook_ini_path) - self.api_token = config.get('brook', 'api_token') - self.project_id = config.get('brook', 'project_id') - - if not self.api_token: - sys.exit('You must provide (at least) your Brook.io API token to generate the dynamic ' - 'inventory.') - - def get_api_client(self): - """Authenticate user via the provided credentials and return the corresponding API client. - """ - - # Get JWT token from API token - # - unauthenticated_client = libbrook.ApiClient(host=self._API_ENDPOINT) - auth_api = libbrook.AuthApi(unauthenticated_client) - api_token = libbrook.AuthTokenRequest() - api_token.token = self.api_token - jwt = auth_api.auth_token(token=api_token) - - # Create authenticated API client - # - return libbrook.ApiClient(host=self._API_ENDPOINT, - header_name='Authorization', - header_value='Bearer %s' % jwt.token) - - def get_inventory(self): - """Generate Ansible inventory. 
- """ - - groups = dict() - meta = dict() - meta['hostvars'] = dict() - - instances_api = libbrook.InstancesApi(self.client) - projects_api = libbrook.ProjectsApi(self.client) - templates_api = libbrook.TemplatesApi(self.client) - - # If no project is given, get all projects the requesting user has access to - # - if not self.project_id: - projects = [project.id for project in projects_api.index_projects()] - else: - projects = [self.project_id] - - # Build inventory from instances in all projects - # - for project_id in projects: - project = projects_api.show_project(project_id=project_id) - for instance in instances_api.index_instances(project_id=project_id): - # Get template used for this instance if known - template = templates_api.show_template(template_id=instance.template) if instance.template else None - - # Update hostvars - try: - meta['hostvars'][instance.name] = \ - self.hostvars(project, instance, template, instances_api) - except libbrook.rest.ApiException: - continue - - # Group by project - project_group = 'project_%s' % project.name - if project_group in groups: - groups[project_group].append(instance.name) - else: - groups[project_group] = [instance.name] - - # Group by status - status_group = 'status_%s' % meta['hostvars'][instance.name]['brook_status'] - if status_group in groups: - groups[status_group].append(instance.name) - else: - groups[status_group] = [instance.name] - - # Group by tags - tags = meta['hostvars'][instance.name]['brook_tags'] - for tag in tags: - tag_group = 'tag_%s' % tag - if tag_group in groups: - groups[tag_group].append(instance.name) - else: - groups[tag_group] = [instance.name] - - groups['_meta'] = meta - return groups - - def hostvars(self, project, instance, template, api): - """Return the hostvars dictionary for the given instance. - - Raise libbrook.rest.ApiException if it cannot retrieve all required information from the - Brook.io API. 
- """ - - hostvars = instance.to_dict() - hostvars['brook_pid'] = hostvars.pop('pid') - hostvars['brook_name'] = hostvars.pop('name') - hostvars['brook_description'] = hostvars.pop('description') - hostvars['brook_project'] = hostvars.pop('project') - hostvars['brook_template'] = hostvars.pop('template') - hostvars['brook_region'] = hostvars.pop('region') - hostvars['brook_zone'] = hostvars.pop('zone') - hostvars['brook_created_at'] = hostvars.pop('created_at') - hostvars['brook_updated_at'] = hostvars.pop('updated_at') - del hostvars['id'] - del hostvars['key'] - del hostvars['provider'] - del hostvars['image'] - - # Substitute identifiers for names - # - hostvars['brook_project'] = project.name - hostvars['brook_template'] = template.name if template else None - - # Retrieve instance state - # - status = api.status_instance(project_id=project.id, instance_id=instance.id) - hostvars.update({'brook_status': status.state}) - - # Retrieve instance tags - # - tags = api.instance_tags(project_id=project.id, instance_id=instance.id) - hostvars.update({'brook_tags': tags}) - - # Retrieve instance addresses - # - addresses = api.instance_addresses(project_id=project.id, instance_id=instance.id) - internal_ips = [address.address for address in addresses if address.scope == 'internal'] - external_ips = [address.address for address in addresses - if address.address and address.scope == 'external'] - hostvars.update({'brook_internal_ips': internal_ips}) - hostvars.update({'brook_external_ips': external_ips}) - try: - hostvars.update({'ansible_ssh_host': external_ips[0]}) - except IndexError: - raise libbrook.rest.ApiException(status='502', reason='Instance without public IP') - - return hostvars - - -# Run the script -# -brook = BrookInventory() -print(json.dumps(brook.inventory)) diff --git a/scripts/inventory/cloudforms.ini b/scripts/inventory/cloudforms.ini deleted file mode 100644 index 30b9aa609e..0000000000 --- a/scripts/inventory/cloudforms.ini +++ /dev/null @@ -1,40 
+0,0 @@ -[cloudforms] - -# the version of CloudForms ; currently not used, but tested with -version = 4.1 - -# This should be the hostname of the CloudForms server -url = https://cfme.example.com - -# This will more than likely need to be a local CloudForms username -username = - -# The password for said username -password = - -# True = verify SSL certificate / False = trust anything -ssl_verify = True - -# limit the number of vms returned per request -limit = 100 - -# purge the CloudForms actions from hosts -purge_actions = True - -# Clean up group names (from tags and other groupings so Ansible doesn't complain) -clean_group_keys = True - -# Explode tags into nested groups / subgroups -nest_tags = False - -# If set, ensure host name are suffixed with this value -# Note: This suffix *must* include the leading '.' as it is appended to the hostname as is -# suffix = .example.org - -# If true, will try and use an IPv4 address for the ansible_ssh_host rather than just the first IP address in the list -prefer_ipv4 = False - -[cache] - -# Maximum time to trust the cache in seconds -max_age = 600 diff --git a/scripts/inventory/cloudforms.py b/scripts/inventory/cloudforms.py deleted file mode 100755 index 3514698d59..0000000000 --- a/scripts/inventory/cloudforms.py +++ /dev/null @@ -1,499 +0,0 @@ -#!/usr/bin/env python -# vim: set fileencoding=utf-8 : -# -# Copyright (C) 2016 Guido Günther -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import argparse -from ansible.module_utils.six.moves import configparser as ConfigParser -import os -import re -from time import time -import requests -from requests.auth import HTTPBasicAuth -import warnings -from ansible.errors import AnsibleError - -import json - - -class CloudFormsInventory(object): - def __init__(self): - """ - Main execution path - """ - self.inventory = dict() # A list of 
groups and the hosts in that group - self.hosts = dict() # Details about hosts in the inventory - - # Parse CLI arguments - self.parse_cli_args() - - # Read settings - self.read_settings() - - # Cache - if self.args.refresh_cache or not self.is_cache_valid(): - self.update_cache() - else: - self.load_inventory_from_cache() - self.load_hosts_from_cache() - - data_to_print = "" - - # Data to print - if self.args.host: - if self.args.debug: - print("Fetching host [%s]" % self.args.host) - data_to_print += self.get_host_info(self.args.host) - else: - self.inventory['_meta'] = {'hostvars': {}} - for hostname in self.hosts: - self.inventory['_meta']['hostvars'][hostname] = { - 'cloudforms': self.hosts[hostname], - } - # include the ansible_ssh_host in the top level - if 'ansible_ssh_host' in self.hosts[hostname]: - self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.hosts[hostname]['ansible_ssh_host'] - - data_to_print += self.json_format_dict(self.inventory, self.args.pretty) - - print(data_to_print) - - def is_cache_valid(self): - """ - Determines if the cache files have expired, or if it is still valid - """ - if self.args.debug: - print("Determining if cache [%s] is still valid (< %s seconds old)" % (self.cache_path_hosts, self.cache_max_age)) - - if os.path.isfile(self.cache_path_hosts): - mod_time = os.path.getmtime(self.cache_path_hosts) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_inventory): - if self.args.debug: - print("Cache is still valid!") - return True - - if self.args.debug: - print("Cache is stale or does not exist.") - - return False - - def read_settings(self): - """ - Reads the settings from the cloudforms.ini file - """ - config = ConfigParser.SafeConfigParser() - config_paths = [ - os.path.dirname(os.path.realpath(__file__)) + '/cloudforms.ini', - "/etc/ansible/cloudforms.ini", - ] - - env_value = os.environ.get('CLOUDFORMS_INI_PATH') - if env_value is not 
None: - config_paths.append(os.path.expanduser(os.path.expandvars(env_value))) - - if self.args.debug: - for config_path in config_paths: - print("Reading from configuration file [%s]" % config_path) - - config.read(config_paths) - - # CloudForms API related - if config.has_option('cloudforms', 'url'): - self.cloudforms_url = config.get('cloudforms', 'url') - else: - self.cloudforms_url = None - - if not self.cloudforms_url: - warnings.warn("No url specified, expected something like 'https://cfme.example.com'") - - if config.has_option('cloudforms', 'username'): - self.cloudforms_username = config.get('cloudforms', 'username') - else: - self.cloudforms_username = None - - if not self.cloudforms_username: - warnings.warn("No username specified, you need to specify a CloudForms username.") - - if config.has_option('cloudforms', 'password'): - self.cloudforms_pw = config.get('cloudforms', 'password', raw=True) - else: - self.cloudforms_pw = None - - if not self.cloudforms_pw: - warnings.warn("No password specified, you need to specify a password for the CloudForms user.") - - if config.has_option('cloudforms', 'ssl_verify'): - self.cloudforms_ssl_verify = config.getboolean('cloudforms', 'ssl_verify') - else: - self.cloudforms_ssl_verify = True - - if config.has_option('cloudforms', 'version'): - self.cloudforms_version = config.get('cloudforms', 'version') - else: - self.cloudforms_version = None - - if config.has_option('cloudforms', 'limit'): - self.cloudforms_limit = config.getint('cloudforms', 'limit') - else: - self.cloudforms_limit = 100 - - if config.has_option('cloudforms', 'purge_actions'): - self.cloudforms_purge_actions = config.getboolean('cloudforms', 'purge_actions') - else: - self.cloudforms_purge_actions = True - - if config.has_option('cloudforms', 'clean_group_keys'): - self.cloudforms_clean_group_keys = config.getboolean('cloudforms', 'clean_group_keys') - else: - self.cloudforms_clean_group_keys = True - - if config.has_option('cloudforms', 
'nest_tags'): - self.cloudforms_nest_tags = config.getboolean('cloudforms', 'nest_tags') - else: - self.cloudforms_nest_tags = False - - if config.has_option('cloudforms', 'suffix'): - self.cloudforms_suffix = config.get('cloudforms', 'suffix') - if self.cloudforms_suffix[0] != '.': - raise AnsibleError('Leading fullstop is required for Cloudforms suffix') - else: - self.cloudforms_suffix = None - - if config.has_option('cloudforms', 'prefer_ipv4'): - self.cloudforms_prefer_ipv4 = config.getboolean('cloudforms', 'prefer_ipv4') - else: - self.cloudforms_prefer_ipv4 = False - - # Ansible related - try: - group_patterns = config.get('ansible', 'group_patterns') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - group_patterns = "[]" - - self.group_patterns = eval(group_patterns) - - # Cache related - try: - cache_path = os.path.expanduser(config.get('cache', 'path')) - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - cache_path = '.' - (script, ext) = os.path.splitext(os.path.basename(__file__)) - self.cache_path_hosts = cache_path + "/%s.hosts" % script - self.cache_path_inventory = cache_path + "/%s.inventory" % script - self.cache_max_age = config.getint('cache', 'max_age') - - if self.args.debug: - print("CloudForms settings:") - print("cloudforms_url = %s" % self.cloudforms_url) - print("cloudforms_username = %s" % self.cloudforms_username) - print("cloudforms_pw = %s" % self.cloudforms_pw) - print("cloudforms_ssl_verify = %s" % self.cloudforms_ssl_verify) - print("cloudforms_version = %s" % self.cloudforms_version) - print("cloudforms_limit = %s" % self.cloudforms_limit) - print("cloudforms_purge_actions = %s" % self.cloudforms_purge_actions) - print("Cache settings:") - print("cache_max_age = %s" % self.cache_max_age) - print("cache_path_hosts = %s" % self.cache_path_hosts) - print("cache_path_inventory = %s" % self.cache_path_inventory) - - def parse_cli_args(self): - """ - Command line argument processing - """ - parser = 
argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms managed VMs') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to CloudForms (default: False - use cache files)') - parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)') - self.args = parser.parse_args() - - def _http_request(self, url): - """ - Make a request and return the result converted from JSON - """ - results = [] - - ret = requests.get(url, - auth=HTTPBasicAuth(self.cloudforms_username, self.cloudforms_pw), - verify=self.cloudforms_ssl_verify) - - ret.raise_for_status() - - try: - results = json.loads(ret.text) - except ValueError: - warnings.warn( - "Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason)) - results = {} - - if self.args.debug: - print("=======================================================================") - print("=======================================================================") - print("=======================================================================") - print(ret.text) - print("=======================================================================") - print("=======================================================================") - print("=======================================================================") - - return results - - def _get_json(self, endpoint, url_suffix): - """ - Make a request by given url, split request by configured limit, - go through all sub-requests and return the aggregated data 
received - by cloudforms - - :param endpoint: api endpoint to access - :param url_suffix: additional api parameters - - """ - - limit = int(self.cloudforms_limit) - - page = 0 - last_page = False - - results = [] - - while not last_page: - offset = page * limit - url = "%s%s?offset=%s&limit=%s%s" % ( - self.cloudforms_url, endpoint, offset, limit, url_suffix) - - if self.args.debug: - print("Connecting to url '%s'" % url) - - ret = self._http_request(url) - results += [ret] - - if 'subcount' in ret: - if ret['subcount'] < limit: - last_page = True - page += 1 - else: - last_page = True - - return results - - def _get_hosts(self): - """ - Get all hosts - """ - endpoint = "/api/vms" - url_suffix = "&expand=resources,tags,hosts,&attributes=active,ipaddresses&filter[]=active=true" - results = self._get_json(endpoint, url_suffix) - resources = [item for sublist in results for item in sublist['resources']] - - return resources - - def update_cache(self): - """ - Make calls to cloudforms and save the output in a cache - """ - self.groups = dict() - self.hosts = dict() - - if self.args.debug: - print("Updating cache...") - - for host in self._get_hosts(): - if self.cloudforms_suffix is not None and not host['name'].endswith(self.cloudforms_suffix): - host['name'] = host['name'] + self.cloudforms_suffix - - # Ignore VMs that are not powered on - if host['power_state'] != 'on': - if self.args.debug: - print("Skipping %s because power_state = %s" % (host['name'], host['power_state'])) - continue - - # purge actions - if self.cloudforms_purge_actions and 'actions' in host: - del host['actions'] - - # Create ansible groups for tags - if 'tags' in host: - - # Create top-level group - if 'tags' not in self.inventory: - self.inventory['tags'] = dict(children=[], vars={}, hosts=[]) - - if not self.cloudforms_nest_tags: - # don't expand tags, just use them in a safe way - for group in host['tags']: - # Add sub-group, as a child of top-level - safe_key = self.to_safe(group['name']) - 
if safe_key: - if self.args.debug: - print("Adding sub-group '%s' to parent 'tags'" % safe_key) - - if safe_key not in self.inventory['tags']['children']: - self.push(self.inventory['tags'], 'children', safe_key) - - self.push(self.inventory, safe_key, host['name']) - - if self.args.debug: - print("Found tag [%s] for host which will be mapped to [%s]" % (group['name'], safe_key)) - else: - # expand the tags into nested groups / sub-groups - # Create nested groups for tags - safe_parent_tag_name = 'tags' - for tag in host['tags']: - tag_hierarchy = tag['name'][1:].split('/') - - if self.args.debug: - print("Working on list %s" % tag_hierarchy) - - for tag_name in tag_hierarchy: - if self.args.debug: - print("Working on tag_name = %s" % tag_name) - - safe_tag_name = self.to_safe(tag_name) - if self.args.debug: - print("Using sanitized name %s" % safe_tag_name) - - # Create sub-group - if safe_tag_name not in self.inventory: - self.inventory[safe_tag_name] = dict(children=[], vars={}, hosts=[]) - - # Add sub-group, as a child of top-level - if safe_parent_tag_name: - if self.args.debug: - print("Adding sub-group '%s' to parent '%s'" % (safe_tag_name, safe_parent_tag_name)) - - if safe_tag_name not in self.inventory[safe_parent_tag_name]['children']: - self.push(self.inventory[safe_parent_tag_name], 'children', safe_tag_name) - - # Make sure the next one uses this one as it's parent - safe_parent_tag_name = safe_tag_name - - # Add the host to the last tag - self.push(self.inventory[safe_parent_tag_name], 'hosts', host['name']) - - # Set ansible_ssh_host to the first available ip address - if 'ipaddresses' in host and host['ipaddresses'] and isinstance(host['ipaddresses'], list): - # If no preference for IPv4, just use the first entry - if not self.cloudforms_prefer_ipv4: - host['ansible_ssh_host'] = host['ipaddresses'][0] - else: - # Before we search for an IPv4 address, set using the first entry in case we don't find any - host['ansible_ssh_host'] = 
host['ipaddresses'][0] - for currenthost in host['ipaddresses']: - if '.' in currenthost: - host['ansible_ssh_host'] = currenthost - - # Create additional groups - for key in ('location', 'type', 'vendor'): - safe_key = self.to_safe(host[key]) - - # Create top-level group - if key not in self.inventory: - self.inventory[key] = dict(children=[], vars={}, hosts=[]) - - # Create sub-group - if safe_key not in self.inventory: - self.inventory[safe_key] = dict(children=[], vars={}, hosts=[]) - - # Add sub-group, as a child of top-level - if safe_key not in self.inventory[key]['children']: - self.push(self.inventory[key], 'children', safe_key) - - if key in host: - # Add host to sub-group - self.push(self.inventory[safe_key], 'hosts', host['name']) - - self.hosts[host['name']] = host - self.push(self.inventory, 'all', host['name']) - - if self.args.debug: - print("Saving cached data") - - self.write_to_cache(self.hosts, self.cache_path_hosts) - self.write_to_cache(self.inventory, self.cache_path_inventory) - - def get_host_info(self, host): - """ - Get variables about a specific host - """ - if not self.hosts or len(self.hosts) == 0: - # Need to load cache from cache - self.load_hosts_from_cache() - - if host not in self.hosts: - if self.args.debug: - print("[%s] not found in cache." % host) - - # try updating the cache - self.update_cache() - - if host not in self.hosts: - if self.args.debug: - print("[%s] does not exist after cache update." % host) - # host might not exist anymore - return self.json_format_dict({}, self.args.pretty) - - return self.json_format_dict(self.hosts[host], self.args.pretty) - - def push(self, d, k, v): - """ - Safely puts a new entry onto an array. 
- """ - if k in d: - d[k].append(v) - else: - d[k] = [v] - - def load_inventory_from_cache(self): - """ - Reads the inventory from the cache file sets self.inventory - """ - cache = open(self.cache_path_inventory, 'r') - json_inventory = cache.read() - self.inventory = json.loads(json_inventory) - - def load_hosts_from_cache(self): - """ - Reads the cache from the cache file sets self.hosts - """ - cache = open(self.cache_path_hosts, 'r') - json_cache = cache.read() - self.hosts = json.loads(json_cache) - - def write_to_cache(self, data, filename): - """ - Writes data in JSON format to a file - """ - json_data = self.json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def to_safe(self, word): - """ - Converts 'bad' characters in a string to underscores so they can be used as Ansible groups - """ - if self.cloudforms_clean_group_keys: - regex = r"[^A-Za-z0-9\_]" - return re.sub(regex, "_", word.replace(" ", "")) - else: - return word - - def json_format_dict(self, data, pretty=False): - """ - Converts a dict to a JSON object and dumps it as a formatted string - """ - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -CloudFormsInventory() diff --git a/scripts/inventory/cobbler.ini b/scripts/inventory/cobbler.ini deleted file mode 100644 index 2dc8cd3379..0000000000 --- a/scripts/inventory/cobbler.ini +++ /dev/null @@ -1,24 +0,0 @@ -# Ansible Cobbler external inventory script settings -# - -[cobbler] - -host = http://PATH_TO_COBBLER_SERVER/cobbler_api - -# If API needs authentication add 'username' and 'password' options here. -#username = foo -#password = bar - -# API calls to Cobbler can be slow. For this reason, we cache the results of an API -# call. Set this to the path you want cache files to be written to. 
Two files -# will be written to this directory: -# - ansible-cobbler.cache -# - ansible-cobbler.index -cache_path = /tmp - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. -cache_max_age = 900 - - - diff --git a/scripts/inventory/cobbler.py b/scripts/inventory/cobbler.py deleted file mode 100755 index eeb8f58286..0000000000 --- a/scripts/inventory/cobbler.py +++ /dev/null @@ -1,305 +0,0 @@ -#!/usr/bin/env python - -""" -Cobbler external inventory script -================================= - -Ansible has a feature where instead of reading from /etc/ansible/hosts -as a text file, it can query external programs to obtain the list -of hosts, groups the hosts are in, and even variables to assign to each host. - -To use this, copy this file over /etc/ansible/hosts and chmod +x the file. -This, more or less, allows you to keep one central database containing -info about all of your managed instances. - -This script is an example of sourcing that data from Cobbler -(https://cobbler.github.io). With cobbler each --mgmt-class in cobbler -will correspond to a group in Ansible, and --ks-meta variables will be -passed down for use in templates or even in argument lines. - -NOTE: The cobbler system names will not be used. Make sure a -cobbler --dns-name is set for each cobbler system. If a system -appears with two DNS names we do not add it twice because we don't want -ansible talking to it twice. The first one found will be used. If no ---dns-name is set the system will NOT be visible to ansible. We do -not add cobbler system names because there is no requirement in cobbler -that those correspond to addresses. - -Tested with Cobbler 2.0.11. - -Changelog: - - 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in - higher performance at ansible startup. Groups are determined by owner rather than - default mgmt_classes. DNS name determined from hostname. 
cobbler values are written - to a 'cobbler' fact namespace - - - 2013-09-01 pgehres: Refactored implementation to make use of caching and to - limit the number of connections to external cobbler server for performance. - Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0 - -""" - -# (c) 2012, Michael DeHaan -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -import argparse -import os -import re -from time import time -try: # Python 3 - from xmlrpc.client import Server -except ImportError: # Python 2 - from xmlrpclib import Server - -import json - -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import configparser as ConfigParser - -# NOTE -- this file assumes Ansible is being accessed FROM the cobbler -# server, so it does not attempt to login with a username and password. -# this will be addressed in a future version of this script. - -orderby_keyname = 'owners' # alternatively 'mgmt_classes' - - -class CobblerInventory(object): - - def __init__(self): - - """ Main execution path """ - self.conn = None - - self.inventory = dict() # A list of groups and the hosts in that group - self.cache = dict() # Details about hosts in the inventory - self.ignore_settings = False # used to only look at env vars for settings. 
- - # Read env vars, read settings, and parse CLI arguments - self.parse_env_vars() - self.read_settings() - self.parse_cli_args() - - # Cache - if self.args.refresh_cache: - self.update_cache() - elif not self.is_cache_valid(): - self.update_cache() - else: - self.load_inventory_from_cache() - self.load_cache_from_cache() - - data_to_print = "" - - # Data to print - if self.args.host: - data_to_print += self.get_host_info() - else: - self.inventory['_meta'] = {'hostvars': {}} - for hostname in self.cache: - self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname]} - data_to_print += self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def _connect(self): - if not self.conn: - self.conn = Server(self.cobbler_host, allow_none=True) - self.token = None - if self.cobbler_username is not None: - self.token = self.conn.login(self.cobbler_username, self.cobbler_password) - - def is_cache_valid(self): - """ Determines if the cache files have expired, or if it is still valid """ - - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_inventory): - return True - - return False - - def read_settings(self): - """ Reads the settings from the cobbler.ini file """ - - if(self.ignore_settings): - return - - config = ConfigParser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini') - - self.cobbler_host = config.get('cobbler', 'host') - self.cobbler_username = None - self.cobbler_password = None - if config.has_option('cobbler', 'username'): - self.cobbler_username = config.get('cobbler', 'username') - if config.has_option('cobbler', 'password'): - self.cobbler_password = config.get('cobbler', 'password') - - # Cache related - cache_path = config.get('cobbler', 'cache_path') - self.cache_path_cache = cache_path + "/ansible-cobbler.cache" - 
self.cache_path_inventory = cache_path + "/ansible-cobbler.index" - self.cache_max_age = config.getint('cobbler', 'cache_max_age') - - def parse_env_vars(self): - """ Reads the settings from the environment """ - - # Env. Vars: - # COBBLER_host - # COBBLER_username - # COBBLER_password - # COBBLER_cache_path - # COBBLER_cache_max_age - # COBBLER_ignore_settings - - self.cobbler_host = os.getenv('COBBLER_host', None) - self.cobbler_username = os.getenv('COBBLER_username', None) - self.cobbler_password = os.getenv('COBBLER_password', None) - - # Cache related - cache_path = os.getenv('COBBLER_cache_path', None) - if(cache_path is not None): - self.cache_path_cache = cache_path + "/ansible-cobbler.cache" - self.cache_path_inventory = cache_path + "/ansible-cobbler.index" - - self.cache_max_age = int(os.getenv('COBBLER_cache_max_age', "30")) - - # ignore_settings is used to ignore the settings file, for use in Ansible - # Tower (or AWX inventory scripts and not throw python exceptions.) - if(os.getenv('COBBLER_ignore_settings', False) == "True"): - self.ignore_settings = True - - def parse_cli_args(self): - """ Command line argument processing """ - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)') - self.args = parser.parse_args() - - def update_cache(self): - """ Make calls to cobbler and save the output in a cache """ - - self._connect() - self.groups = dict() - self.hosts = dict() - if self.token is not None: - data = self.conn.get_systems(self.token) - else: - data = self.conn.get_systems() - - for host in data: - # Get the FQDN for 
the host and add it to the right groups - dns_name = host['hostname'] # None - ksmeta = None - interfaces = host['interfaces'] - # hostname is often empty for non-static IP hosts - if dns_name == '': - for (iname, ivalue) in iteritems(interfaces): - if ivalue['management'] or not ivalue['static']: - this_dns_name = ivalue.get('dns_name', None) - dns_name = this_dns_name if this_dns_name else '' - - if dns_name == '' or dns_name is None: - continue - - status = host['status'] - profile = host['profile'] - classes = host[orderby_keyname] - - if status not in self.inventory: - self.inventory[status] = [] - self.inventory[status].append(dns_name) - - if profile not in self.inventory: - self.inventory[profile] = [] - self.inventory[profile].append(dns_name) - - for cls in classes: - if cls not in self.inventory: - self.inventory[cls] = [] - self.inventory[cls].append(dns_name) - - # Since we already have all of the data for the host, update the host details as well - - # The old way was ksmeta only -- provide backwards compatibility - - self.cache[dns_name] = host - if "ks_meta" in host: - for key, value in iteritems(host["ks_meta"]): - self.cache[dns_name][key] = value - - self.write_to_cache(self.cache, self.cache_path_cache) - self.write_to_cache(self.inventory, self.cache_path_inventory) - - def get_host_info(self): - """ Get variables about a specific host """ - - if not self.cache or len(self.cache) == 0: - # Need to load index from cache - self.load_cache_from_cache() - - if self.args.host not in self.cache: - # try updating the cache - self.update_cache() - - if self.args.host not in self.cache: - # host might not exist anymore - return self.json_format_dict({}, True) - - return self.json_format_dict(self.cache[self.args.host], True) - - def push(self, my_dict, key, element): - """ Pushed an element onto an array that may not have been defined in the dict """ - - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def 
load_inventory_from_cache(self): - """ Reads the index from the cache file sets self.index """ - - cache = open(self.cache_path_inventory, 'r') - json_inventory = cache.read() - self.inventory = json.loads(json_inventory) - - def load_cache_from_cache(self): - """ Reads the cache from the cache file sets self.cache """ - - cache = open(self.cache_path_cache, 'r') - json_cache = cache.read() - self.cache = json.loads(json_cache) - - def write_to_cache(self, data, filename): - """ Writes data in JSON format to a file """ - json_data = self.json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def to_safe(self, word): - """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """ - - return re.sub(r"[^A-Za-z0-9\-]", "_", word) - - def json_format_dict(self, data, pretty=False): - """ Converts a dict to a JSON object and dumps it as a formatted string """ - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -CobblerInventory() diff --git a/scripts/inventory/collins.ini b/scripts/inventory/collins.ini deleted file mode 100644 index 0ce0c2acbd..0000000000 --- a/scripts/inventory/collins.ini +++ /dev/null @@ -1,57 +0,0 @@ -# Ansible Collins external inventory script settings -# - -[collins] - -# You should not have a trailing slash or collins -# will not properly match the URI -host = http://localhost:9000 - -username = blake -password = admin:first - -# Specifies a timeout for all HTTP requests to Collins. -timeout_secs = 120 - -# Specifies a maximum number of retries per Collins request. 
-max_retries = 5 - -# Specifies the number of results to return per paginated query as specified in -# the Pagination section of the Collins API docs: -# http://tumblr.github.io/collins/api.html -results_per_query = 100 - -# Specifies the Collins asset type which will be queried for; most typically -# you'll want to leave this at the default of SERVER_NODE. -asset_type = SERVER_NODE - -# Collins assets can optionally be assigned hostnames; this option will preference -# the selection of an asset's hostname over an IP address as the primary identifier -# in the Ansible inventory. Typically, this value should be set to true if assets -# are assigned hostnames. -prefer_hostnames = true - -# Within Collins, assets can be granted multiple IP addresses; this configuration -# value specifies the index within the 'ADDRESSES' array as returned by the -# following API endpoint: -# http://tumblr.github.io/collins/api.html#api-ipam-asset-addresses-section -ip_address_index = 0 - -# Sets whether Collins instances in multiple datacenters will be queried. -query_remote_dcs = false - -# API calls to Collins can involve large, substantial queries. For this reason, -# we cache the results of an API call. Set this to the path you want cache files -# to be written to. Two files will be written to this directory: -# - ansible-collins.cache -# - ansible-collins.index -cache_path = /tmp - -# If errors occur while querying inventory, logging messages will be written -# to a logfile in the specified directory: -# - ansible-collins.log -log_path = /tmp - -# The number of seconds that a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. 
-cache_max_age = 600 diff --git a/scripts/inventory/collins.py b/scripts/inventory/collins.py deleted file mode 100755 index f481649eeb..0000000000 --- a/scripts/inventory/collins.py +++ /dev/null @@ -1,429 +0,0 @@ -#!/usr/bin/env python - -""" -Collins external inventory script -================================= - -Ansible has a feature where instead of reading from /etc/ansible/hosts -as a text file, it can query external programs to obtain the list -of hosts, groups the hosts are in, and even variables to assign to each host. - -Collins is a hardware asset management system originally developed by -Tumblr for tracking new hardware as it built out its own datacenters. It -exposes a rich API for manipulating and querying one's hardware inventory, -which makes it an ideal 'single point of truth' for driving systems -automation like Ansible. Extensive documentation on Collins, including a quickstart, -API docs, and a full reference manual, can be found here: - -http://tumblr.github.io/collins - -This script adds support to Ansible for obtaining a dynamic inventory of -assets in your infrastructure, grouping them in Ansible by their useful attributes, -and binding all facts provided by Collins to each host so that they can be used to -drive automation. Some parts of this script were cribbed shamelessly from mdehaan's -Cobbler inventory script. - -To use it, copy it to your repo and pass -i to the ansible or -ansible-playbook command; if you'd like to use it by default, simply copy collins.ini -to /etc/ansible and this script to /etc/ansible/hosts. 
- -Alongside the options set in collins.ini, there are several environment variables -that will be used instead of the configured values if they are set: - - - COLLINS_USERNAME - specifies a username to use for Collins authentication - - COLLINS_PASSWORD - specifies a password to use for Collins authentication - - COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying; - this can be used to run Ansible automation against different asset classes than - server nodes, such as network switches and PDUs - - COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to - /collins.ini - -If errors are encountered during operation, this script will return an exit code of -255; otherwise, it will return an exit code of 0. - -Collins attributes are accessible as variables in ansible via the COLLINS['attribute_name']. - -Tested against Ansible 1.8.2 and Collins 1.3.0. -""" - -# (c) 2014, Steve Salevan -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - - -import argparse -import logging -import os -import re -import sys -from time import time -import traceback - -import json - -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import configparser as ConfigParser -from ansible.module_utils.six.moves.urllib.parse import urlencode - -from ansible.module_utils.urls import open_url - - -class CollinsDefaults(object): - ASSETS_API_ENDPOINT = '%s/api/assets' - SPECIAL_ATTRIBUTES = set([ - 'CREATED', - 'DELETED', - 'UPDATED', - 'STATE', - ]) - LOG_FORMAT = '%(asctime)-15s %(message)s' - - -class Error(Exception): - pass - - -class MaxRetriesError(Error): - pass - - -class CollinsInventory(object): - - def __init__(self): - """ Constructs CollinsInventory object and reads all configuration. 
""" - - self.inventory = dict() # A list of groups and the hosts in that group - self.cache = dict() # Details about hosts in the inventory - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - logging.basicConfig(format=CollinsDefaults.LOG_FORMAT, - filename=self.log_location) - self.log = logging.getLogger('CollinsInventory') - - def _asset_get_attribute(self, asset, attrib): - """ Returns a user-defined attribute from an asset if it exists; otherwise, - returns None. """ - - if 'ATTRIBS' in asset: - for attrib_block in asset['ATTRIBS'].keys(): - if attrib in asset['ATTRIBS'][attrib_block]: - return asset['ATTRIBS'][attrib_block][attrib] - return None - - def _asset_has_attribute(self, asset, attrib): - """ Returns whether a user-defined attribute is present on an asset. """ - - if 'ATTRIBS' in asset: - for attrib_block in asset['ATTRIBS'].keys(): - if attrib in asset['ATTRIBS'][attrib_block]: - return True - return False - - def run(self): - """ Main execution path """ - - # Updates cache if cache is not present or has expired. - successful = True - if self.args.refresh_cache: - successful = self.update_cache() - elif not self.is_cache_valid(): - successful = self.update_cache() - else: - successful = self.load_inventory_from_cache() - successful &= self.load_cache_from_cache() - - data_to_print = "" - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of instances for inventory - data_to_print = self.json_format_dict(self.inventory, self.args.pretty) - - else: # default action with no options - data_to_print = self.json_format_dict(self.inventory, self.args.pretty) - - print(data_to_print) - return successful - - def find_assets(self, attributes=None, operation='AND'): - """ Obtains Collins assets matching the provided attributes. 
""" - attributes = {} if attributes is None else attributes - - # Formats asset search query to locate assets matching attributes, using - # the CQL search feature as described here: - # http://tumblr.github.io/collins/recipes.html - attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)] - query_parameters = { - 'details': ['True'], - 'operation': [operation], - 'query': attributes_query, - 'remoteLookup': [str(self.query_remote_dcs)], - 'size': [self.results_per_query], - 'type': [self.collins_asset_type], - } - assets = [] - cur_page = 0 - num_retries = 0 - # Locates all assets matching the provided query, exhausting pagination. - while True: - if num_retries == self.collins_max_retries: - raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries) - query_parameters['page'] = cur_page - query_url = "%s?%s" % ( - (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host), - urlencode(query_parameters, doseq=True) - ) - try: - response = open_url(query_url, - timeout=self.collins_timeout_secs, - url_username=self.collins_username, - url_password=self.collins_password, - force_basic_auth=True) - json_response = json.loads(response.read()) - # Adds any assets found to the array of assets. - assets += json_response['data']['Data'] - # If we've retrieved all of our assets, breaks out of the loop. 
- if len(json_response['data']['Data']) == 0: - break - cur_page += 1 - num_retries = 0 - except Exception: - self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc()) - num_retries += 1 - return assets - - def is_cache_valid(self): - """ Determines if the cache files have expired, or if it is still valid """ - - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_inventory): - return True - - return False - - def read_settings(self): - """ Reads the settings from the collins.ini file """ - - config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini') - - config = ConfigParser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/collins.ini') - - self.collins_host = config.get('collins', 'host') - self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username')) - self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password')) - self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type')) - self.collins_timeout_secs = config.getint('collins', 'timeout_secs') - self.collins_max_retries = config.getint('collins', 'max_retries') - - self.results_per_query = config.getint('collins', 'results_per_query') - self.ip_address_index = config.getint('collins', 'ip_address_index') - self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs') - self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames') - - cache_path = config.get('collins', 'cache_path') - self.cache_path_cache = cache_path + \ - '/ansible-collins-%s.cache' % self.collins_asset_type - self.cache_path_inventory = cache_path + \ - '/ansible-collins-%s.index' % self.collins_asset_type - self.cache_max_age = config.getint('collins', 
'cache_max_age') - - log_path = config.get('collins', 'log_path') - self.log_location = log_path + '/ansible-collins.log' - - def parse_cli_args(self): - """ Command line argument processing """ - - parser = argparse.ArgumentParser( - description='Produces an Ansible Inventory file based on Collins') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to Collins ' - '(default: False - use cache files)') - parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output') - self.args = parser.parse_args() - - def update_cache(self): - """ Make calls to Collins and saves the output in a cache """ - - self.cache = dict() - self.inventory = dict() - - # Locates all server assets from Collins. - try: - server_assets = self.find_assets() - except Exception: - self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc()) - return False - - for asset in server_assets: - # Determines the index to retrieve the asset's IP address either by an - # attribute set on the Collins asset or the pre-configured value. - if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'): - ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX') - try: - ip_index = int(ip_index) - except Exception: - self.log.error( - "ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset, - ip_index) - else: - ip_index = self.ip_address_index - - asset['COLLINS'] = {} - - # Attempts to locate the asset's primary identifier (hostname or IP address), - # which will be used to index the asset throughout the Ansible inventory. 
- if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'): - asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME') - elif 'ADDRESSES' not in asset: - self.log.warning("No IP addresses found for asset '%s', skipping", asset) - continue - elif len(asset['ADDRESSES']) < ip_index + 1: - self.log.warning( - "No IP address found at index %s for asset '%s', skipping", - ip_index, asset) - continue - else: - asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS'] - - # Adds an asset index to the Ansible inventory based upon unpacking - # the name of the asset's current STATE from its dictionary. - if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']: - state_inventory_key = self.to_safe( - 'STATE-%s' % asset['ASSET']['STATE']['NAME']) - self.push(self.inventory, state_inventory_key, asset_identifier) - - # Indexes asset by all user-defined Collins attributes. - if 'ATTRIBS' in asset: - for attrib_block in asset['ATTRIBS'].keys(): - for attrib in asset['ATTRIBS'][attrib_block].keys(): - asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib] - attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib])) - self.push(self.inventory, attrib_key, asset_identifier) - - # Indexes asset by all built-in Collins attributes. - for attribute in asset['ASSET'].keys(): - if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES: - attribute_val = asset['ASSET'][attribute] - if attribute_val is not None: - attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val)) - self.push(self.inventory, attrib_key, asset_identifier) - - # Indexes asset by hardware product information. 
- if 'HARDWARE' in asset: - if 'PRODUCT' in asset['HARDWARE']['BASE']: - product = asset['HARDWARE']['BASE']['PRODUCT'] - if product: - product_key = self.to_safe( - 'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT']) - self.push(self.inventory, product_key, asset_identifier) - - # Indexing now complete, adds the host details to the asset cache. - self.cache[asset_identifier] = asset - - try: - self.write_to_cache(self.cache, self.cache_path_cache) - self.write_to_cache(self.inventory, self.cache_path_inventory) - except Exception: - self.log.error("Error while writing to cache:\n%s", traceback.format_exc()) - return False - return True - - def push(self, dictionary, key, value): - """ Adds a value to a list at a dictionary key, creating the list if it doesn't - exist. """ - - if key not in dictionary: - dictionary[key] = [] - dictionary[key].append(value) - - def get_host_info(self): - """ Get variables about a specific host. """ - - if not self.cache or len(self.cache) == 0: - # Need to load index from cache - self.load_cache_from_cache() - - if self.args.host not in self.cache: - # try updating the cache - self.update_cache() - - if self.args.host not in self.cache: - # host might not exist anymore - return self.json_format_dict({}, self.args.pretty) - - return self.json_format_dict(self.cache[self.args.host], self.args.pretty) - - def load_inventory_from_cache(self): - """ Reads the index from the cache file sets self.index """ - - try: - cache = open(self.cache_path_inventory, 'r') - json_inventory = cache.read() - self.inventory = json.loads(json_inventory) - return True - except Exception: - self.log.error("Error while loading inventory:\n%s", - traceback.format_exc()) - self.inventory = {} - return False - - def load_cache_from_cache(self): - """ Reads the cache from the cache file sets self.cache """ - - try: - cache = open(self.cache_path_cache, 'r') - json_cache = cache.read() - self.cache = json.loads(json_cache) - return True - except 
Exception: - self.log.error("Error while loading host cache:\n%s", - traceback.format_exc()) - self.cache = {} - return False - - def write_to_cache(self, data, filename): - """ Writes data in JSON format to a specified file. """ - - json_data = self.json_format_dict(data, self.args.pretty) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def to_safe(self, word): - """ Converts 'bad' characters in a string to underscores so they - can be used as Ansible groups """ - - return re.sub(r"[^A-Za-z0-9\-]", "_", word) - - def json_format_dict(self, data, pretty=False): - """ Converts a dict to a JSON object and dumps it as a formatted string """ - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -if __name__ in '__main__': - inventory = CollinsInventory() - if inventory.run(): - sys.exit(0) - else: - sys.exit(-1) diff --git a/scripts/inventory/consul_io.ini b/scripts/inventory/consul_io.ini deleted file mode 100644 index d18a1494dd..0000000000 --- a/scripts/inventory/consul_io.ini +++ /dev/null @@ -1,54 +0,0 @@ -# Ansible Consul external inventory script settings. - -[consul] - -# -# Bulk load. Load all possible data before building inventory JSON -# If true, script processes in-memory data. JSON generation reduces drastically -# -bulk_load = false - -# restrict included nodes to those from this datacenter -#datacenter = nyc1 - -# url of the consul cluster to query -#url = http://demo.consul.io -url = http://localhost:8500 - -# suffix added to each service to create a group name e.g Service of 'redis' and -# a suffix of '_servers' will add each address to the group name 'redis_servers' -servers_suffix = _servers - -# -# By default, final JSON is built based on all available info in consul. -# Suffixes means that services groups will be added in addition to basic information. 
See servers_suffix for additional info -# There are cases when speed is preferable than having services groups -# False value will reduce script execution time drastically. -# -suffixes = true - -# if specified then the inventory will generate domain names that will resolve -# via Consul's inbuilt DNS. -#domain=consul - -# make groups from service tags. the name of the group is derived from the -# service name and the tag name e.g. a service named nginx with tags ['master', 'v1'] -# will create groups nginx_master and nginx_v1 -tags = true - -# looks up the node name at the given path for a list of groups to which the -# node should be added. -kv_groups=ansible/groups - -# looks up the node name at the given path for a json dictionary of metadata that -# should be attached as metadata for the node -kv_metadata=ansible/metadata - -# looks up the health of each service and adds the node to 'up' and 'down' groups -# based on the service availability -# -# !!!! if availability is true, suffixes also must be true. !!!! -# -availability = true -available_suffix = _up -unavailable_suffix = _down diff --git a/scripts/inventory/consul_io.py b/scripts/inventory/consul_io.py deleted file mode 100755 index 6af0675707..0000000000 --- a/scripts/inventory/consul_io.py +++ /dev/null @@ -1,553 +0,0 @@ -#!/usr/bin/env python - -# -# (c) 2015, Steve Gargan -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -''' -Consul.io inventory script (http://consul.io) -====================================== - -Generates Ansible inventory from nodes in a Consul cluster. 
This script will -group nodes by: - - datacenter, - - registered service - - service tags - - service status - - values from the k/v store - -This script can be run with the switches ---list as expected groups all the nodes in all datacenters ---datacenter, to restrict the nodes to a single datacenter ---host to restrict the inventory to a single named node. (requires datacenter config) - -The configuration for this plugin is read from a consul_io.ini file located in the -same directory as this inventory script or via environment variables. All config options in the config file -are optional except the host and port, which must point to a valid agent or -server running the http api. For more information on enabling the endpoint see. - -http://www.consul.io/docs/agent/options.html - -Other options include: - -'bulk_load' - -boolean flag. Load all possible data before building inventory JSON -If true, script processes in-memory data. JSON generation reduces drastically -This can also be set with the environmental variable CONSUL_BULK_LOAD. - -'datacenter': - -which restricts the included nodes to those from the given datacenter -This can also be set with the environmental variable CONSUL_DATACENTER. - -'url': - -the URL of the Consul cluster. host, port and scheme are derived from the -URL. If not specified, connection configuration defaults to http requests -to localhost on port 8500. -This can also be set with the environmental variable CONSUL_URL. - -'domain': - -if specified then the inventory will generate domain names that will resolve -via Consul's inbuilt DNS. The name is derived from the node name, datacenter -and domain .node... Note that you will need to -have consul hooked into your DNS server for these to resolve. See the consul -DNS docs for more info. - -which restricts the included nodes to those from the given datacenter -This can also be set with the environmental variable CONSUL_DOMAIN. - -'suffixes': - -boolean flag. 
By default, final JSON is built based on all available info in consul. -Suffixes means that services groups will be added in addition to basic information. See servers_suffix for additional info -There are cases when speed is preferable than having services groups -False value will reduce script execution time drastically. -This can also be set with the environmental variable CONSUL_SUFFIXES. - -'servers_suffix': - -defining the a suffix to add to the service name when creating the service -group. e.g Service name of 'redis' and a suffix of '_servers' will add -each nodes address to the group name 'redis_servers'. No suffix is added -if this is not set -This can also be set with the environmental variable CONSUL_SERVERS_SUFFIX. - -'tags': - -boolean flag defining if service tags should be used to create Inventory -groups e.g. an nginx service with the tags ['master', 'v1'] will create -groups nginx_master and nginx_v1 to which the node running the service -will be added. No tag groups are created if this is missing. -This can also be set with the environmental variable CONSUL_TAGS. - -'token': - -ACL token to use to authorize access to the key value store. May be required -to retrieve the kv_groups and kv_metadata based on your consul configuration. -This can also be set with the environmental variable CONSUL_TOKEN. - -'kv_groups': - -This is used to lookup groups for a node in the key value store. It specifies a -path to which each discovered node's name will be added to create a key to query -the key/value store. There it expects to find a comma separated list of group -names to which the node should be added e.g. if the inventory contains node -'nyc-web-1' in datacenter 'nyc-dc1' and kv_groups = 'ansible/groups' then the key -'ansible/groups/nyc-dc1/nyc-web-1' will be queried for a group list. If this query - returned 'test,honeypot' then the node address to both groups. -This can also be set with the environmental variable CONSUL_KV_GROUPS. 
- -'kv_metadata': - -kv_metadata is used to lookup metadata for each discovered node. Like kv_groups -above it is used to build a path to lookup in the kv store where it expects to -find a json dictionary of metadata entries. If found, each key/value pair in the -dictionary is added to the metadata for the node. eg node 'nyc-web-1' in datacenter -'nyc-dc1' and kv_metadata = 'ansible/metadata', then the key -'ansible/metadata/nyc-dc1/nyc-web-1' should contain '{"databse": "postgres"}' -This can also be set with the environmental variable CONSUL_KV_METADATA. - -'availability': - -if true then availability groups will be created for each service. The node will -be added to one of the groups based on the health status of the service. The -group name is derived from the service name and the configurable availability -suffixes -This can also be set with the environmental variable CONSUL_AVAILABILITY. - -'available_suffix': - -suffix that should be appended to the service availability groups for available -services e.g. if the suffix is '_up' and the service is nginx, then nodes with -healthy nginx services will be added to the nginix_up group. Defaults to -'_available' -This can also be set with the environmental variable CONSUL_AVAILABLE_SUFFIX. - -'unavailable_suffix': - -as above but for unhealthy services, defaults to '_unavailable' -This can also be set with the environmental variable CONSUL_UNAVAILABLE_SUFFIX. - -Note that if the inventory discovers an 'ssh' service running on a node it will -register the port as ansible_ssh_port in the node's metadata and this port will -be used to access the machine. 
-``` - -''' - -import os -import re -import argparse -import sys - -from ansible.module_utils.six.moves import configparser - - -def get_log_filename(): - tty_filename = '/dev/tty' - stdout_filename = '/dev/stdout' - - if not os.path.exists(tty_filename): - return stdout_filename - if not os.access(tty_filename, os.W_OK): - return stdout_filename - if os.getenv('TEAMCITY_VERSION'): - return stdout_filename - - return tty_filename - - -def setup_logging(): - filename = get_log_filename() - - import logging.config - logging.config.dictConfig({ - 'version': 1, - 'formatters': { - 'simple': { - 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s', - }, - }, - 'root': { - 'level': os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_LEVEL', 'WARN'), - 'handlers': ['console'], - }, - 'handlers': { - 'console': { - 'class': 'logging.FileHandler', - 'filename': filename, - 'formatter': 'simple', - }, - }, - 'loggers': { - 'iso8601': { - 'qualname': 'iso8601', - 'level': 'INFO', - }, - }, - }) - logger = logging.getLogger('consul_io.py') - logger.debug('Invoked with %r', sys.argv) - - -if os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_ENABLED'): - setup_logging() - - -import json - -try: - import consul -except ImportError as e: - sys.exit("""failed=True msg='python-consul required for this module. 
-See https://python-consul.readthedocs.io/en/latest/#installation'""") - -from ansible.module_utils.six import iteritems - - -class ConsulInventory(object): - - def __init__(self): - ''' Create an inventory based on the catalog of nodes and services - registered in a consul cluster''' - self.node_metadata = {} - self.nodes = {} - self.nodes_by_service = {} - self.nodes_by_tag = {} - self.nodes_by_datacenter = {} - self.nodes_by_kv = {} - self.nodes_by_availability = {} - self.current_dc = None - self.inmemory_kv = [] - self.inmemory_nodes = [] - - config = ConsulConfig() - self.config = config - - self.consul_api = config.get_consul_api() - - if config.has_config('datacenter'): - if config.has_config('host'): - self.load_data_for_node(config.host, config.datacenter) - else: - self.load_data_for_datacenter(config.datacenter) - else: - self.load_all_data_consul() - - self.combine_all_results() - print(json.dumps(self.inventory, sort_keys=True, indent=2)) - - def bulk_load(self, datacenter): - index, groups_list = self.consul_api.kv.get(self.config.kv_groups, recurse=True, dc=datacenter) - index, metadata_list = self.consul_api.kv.get(self.config.kv_metadata, recurse=True, dc=datacenter) - index, nodes = self.consul_api.catalog.nodes(dc=datacenter) - self.inmemory_kv += groups_list - self.inmemory_kv += metadata_list - self.inmemory_nodes += nodes - - def load_all_data_consul(self): - ''' cycle through each of the datacenters in the consul catalog and process - the nodes in each ''' - self.datacenters = self.consul_api.catalog.datacenters() - for datacenter in self.datacenters: - self.current_dc = datacenter - self.bulk_load(datacenter) - self.load_data_for_datacenter(datacenter) - - def load_availability_groups(self, node, datacenter): - '''check the health of each service on a node and add the node to either - an 'available' or 'unavailable' grouping. 
The suffix for each group can be - controlled from the config''' - if self.config.has_config('availability'): - for service_name, service in iteritems(node['Services']): - for node in self.consul_api.health.service(service_name)[1]: - if self.is_service_available(node, service_name): - suffix = self.config.get_availability_suffix( - 'available_suffix', '_available') - else: - suffix = self.config.get_availability_suffix( - 'unavailable_suffix', '_unavailable') - self.add_node_to_map(self.nodes_by_availability, - service_name + suffix, node['Node']) - - def is_service_available(self, node, service_name): - '''check the availability of the service on the node beside ensuring the - availability of the node itself''' - consul_ok = service_ok = False - for check in node['Checks']: - if check['CheckID'] == 'serfHealth': - consul_ok = check['Status'] == 'passing' - elif check['ServiceName'] == service_name: - service_ok = check['Status'] == 'passing' - return consul_ok and service_ok - - def consul_get_kv_inmemory(self, key): - result = filter(lambda x: x['Key'] == key, self.inmemory_kv) - return result.pop() if result else None - - def consul_get_node_inmemory(self, node): - result = filter(lambda x: x['Node'] == node, self.inmemory_nodes) - return {"Node": result.pop(), "Services": {}} if result else None - - def load_data_for_datacenter(self, datacenter): - '''processes all the nodes in a particular datacenter''' - if self.config.bulk_load == 'true': - nodes = self.inmemory_nodes - else: - index, nodes = self.consul_api.catalog.nodes(dc=datacenter) - for node in nodes: - self.add_node_to_map(self.nodes_by_datacenter, datacenter, node) - self.load_data_for_node(node['Node'], datacenter) - - def load_data_for_node(self, node, datacenter): - '''loads the data for a single node adding it to various groups based on - metadata retrieved from the kv store and service availability''' - - if self.config.suffixes == 'true': - index, node_data = self.consul_api.catalog.node(node, 
dc=datacenter) - else: - node_data = self.consul_get_node_inmemory(node) - node = node_data['Node'] - - self.add_node_to_map(self.nodes, 'all', node) - self.add_metadata(node_data, "consul_datacenter", datacenter) - self.add_metadata(node_data, "consul_nodename", node['Node']) - - self.load_groups_from_kv(node_data) - self.load_node_metadata_from_kv(node_data) - if self.config.suffixes == 'true': - self.load_availability_groups(node_data, datacenter) - for name, service in node_data['Services'].items(): - self.load_data_from_service(name, service, node_data) - - def load_node_metadata_from_kv(self, node_data): - ''' load the json dict at the metadata path defined by the kv_metadata value - and the node name add each entry in the dictionary to the node's - metadata ''' - node = node_data['Node'] - if self.config.has_config('kv_metadata'): - key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node']) - if self.config.bulk_load == 'true': - metadata = self.consul_get_kv_inmemory(key) - else: - index, metadata = self.consul_api.kv.get(key) - if metadata and metadata['Value']: - try: - metadata = json.loads(metadata['Value']) - for k, v in metadata.items(): - self.add_metadata(node_data, k, v) - except Exception: - pass - - def load_groups_from_kv(self, node_data): - ''' load the comma separated list of groups at the path defined by the - kv_groups config value and the node name add the node address to each - group found ''' - node = node_data['Node'] - if self.config.has_config('kv_groups'): - key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node']) - if self.config.bulk_load == 'true': - groups = self.consul_get_kv_inmemory(key) - else: - index, groups = self.consul_api.kv.get(key) - if groups and groups['Value']: - for group in groups['Value'].decode().split(','): - self.add_node_to_map(self.nodes_by_kv, group.strip(), node) - - def load_data_from_service(self, service_name, service, node_data): - '''process a service registered on a 
node, adding the node to a group with - the service name. Each service tag is extracted and the node is added to a - tag grouping also''' - self.add_metadata(node_data, "consul_services", service_name, True) - - if self.is_service("ssh", service_name): - self.add_metadata(node_data, "ansible_ssh_port", service['Port']) - - if self.config.has_config('servers_suffix'): - service_name = service_name + self.config.servers_suffix - - self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node']) - self.extract_groups_from_tags(service_name, service, node_data) - - def is_service(self, target, name): - return name and (name.lower() == target.lower()) - - def extract_groups_from_tags(self, service_name, service, node_data): - '''iterates each service tag and adds the node to groups derived from the - service and tag names e.g. nginx_master''' - if self.config.has_config('tags') and service['Tags']: - tags = service['Tags'] - self.add_metadata(node_data, "consul_%s_tags" % service_name, tags) - for tag in service['Tags']: - tagname = service_name + '_' + tag - self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node']) - - def combine_all_results(self): - '''prunes and sorts all groupings for combination into the final map''' - self.inventory = {"_meta": {"hostvars": self.node_metadata}} - groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service, - self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability] - for grouping in groupings: - for name, addresses in grouping.items(): - self.inventory[name] = sorted(list(set(addresses))) - - def add_metadata(self, node_data, key, value, is_list=False): - ''' Pushed an element onto a metadata dict for the node, creating - the dict if it doesn't exist ''' - key = self.to_safe(key) - node = self.get_inventory_name(node_data['Node']) - - if node in self.node_metadata: - metadata = self.node_metadata[node] - else: - metadata = {} - self.node_metadata[node] = metadata - if is_list: - 
self.push(metadata, key, value) - else: - metadata[key] = value - - def get_inventory_name(self, node_data): - '''return the ip or a node name that can be looked up in consul's dns''' - domain = self.config.domain - if domain: - node_name = node_data['Node'] - if self.current_dc: - return '%s.node.%s.%s' % (node_name, self.current_dc, domain) - else: - return '%s.node.%s' % (node_name, domain) - else: - return node_data['Address'] - - def add_node_to_map(self, map, name, node): - self.push(map, name, self.get_inventory_name(node)) - - def push(self, my_dict, key, element): - ''' Pushed an element onto an array that may not have been defined in the - dict ''' - key = self.to_safe(key) - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used - as Ansible groups ''' - return re.sub(r'[^A-Za-z0-9\-\.]', '_', word) - - def sanitize_dict(self, d): - - new_dict = {} - for k, v in d.items(): - if v is not None: - new_dict[self.to_safe(str(k))] = self.to_safe(str(v)) - return new_dict - - def sanitize_list(self, seq): - new_seq = [] - for d in seq: - new_seq.append(self.sanitize_dict(d)) - return new_seq - - -class ConsulConfig(dict): - - def __init__(self): - self.read_settings() - self.read_cli_args() - self.read_env_vars() - - def has_config(self, name): - if hasattr(self, name): - return getattr(self, name) - else: - return False - - def read_settings(self): - ''' Reads the settings from the consul_io.ini file (or consul.ini for backwards compatibility)''' - config = configparser.SafeConfigParser() - if os.path.isfile(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini'): - config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini') - else: - config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini') - - config_options = ['host', 'token', 'datacenter', 'servers_suffix', - 'tags', 'kv_metadata', 
'kv_groups', 'availability', - 'unavailable_suffix', 'available_suffix', 'url', - 'domain', 'suffixes', 'bulk_load'] - for option in config_options: - value = None - if config.has_option('consul', option): - value = config.get('consul', option).lower() - setattr(self, option, value) - - def read_cli_args(self): - ''' Command line argument processing ''' - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based nodes in a Consul cluster') - - parser.add_argument('--list', action='store_true', - help='Get all inventory variables from all nodes in the consul cluster') - parser.add_argument('--host', action='store', - help='Get all inventory variables about a specific consul node,' - 'requires datacenter set in consul.ini.') - parser.add_argument('--datacenter', action='store', - help='Get all inventory about a specific consul datacenter') - - args = parser.parse_args() - arg_names = ['host', 'datacenter'] - - for arg in arg_names: - if getattr(args, arg): - setattr(self, arg, getattr(args, arg)) - - def read_env_vars(self): - env_var_options = ['host', 'token', 'datacenter', 'servers_suffix', - 'tags', 'kv_metadata', 'kv_groups', 'availability', - 'unavailable_suffix', 'available_suffix', 'url', - 'domain', 'suffixes', 'bulk_load'] - for option in env_var_options: - value = None - env_var = 'CONSUL_' + option.upper() - if os.environ.get(env_var): - setattr(self, option, os.environ.get(env_var)) - - def get_availability_suffix(self, suffix, default): - if self.has_config(suffix): - return self.has_config(suffix) - return default - - def get_consul_api(self): - '''get an instance of the api based on the supplied configuration''' - host = 'localhost' - port = 8500 - token = None - scheme = 'http' - - if hasattr(self, 'url'): - from ansible.module_utils.six.moves.urllib.parse import urlparse - o = urlparse(self.url) - if o.hostname: - host = o.hostname - if o.port: - port = o.port - if o.scheme: - scheme = o.scheme - - if hasattr(self, 
'token'): - token = self.token - if not token: - token = 'anonymous' - return consul.Consul(host=host, port=port, token=token, scheme=scheme) - - -ConsulInventory() diff --git a/scripts/inventory/docker.py b/scripts/inventory/docker.py deleted file mode 100755 index b029d1f51b..0000000000 --- a/scripts/inventory/docker.py +++ /dev/null @@ -1,892 +0,0 @@ -#!/usr/bin/env python -# -# (c) 2016 Paul Durivage -# Chris Houseknecht -# James Tanner -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' - -Docker Inventory Script -======================= -The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic -because the inventory is generated at run-time rather than being read from a static file. The script generates the -inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the -script contacts can be defined using environment variables or a configuration file. - -Requirements ------------- - -Using the docker modules requires having docker-py -installed on the host running Ansible. 
To install docker-py: - - pip install docker-py - - -Run for Specific Host ---------------------- -When run for a specific container using the --host option this script returns the following hostvars: - -{ - "ansible_ssh_host": "", - "ansible_ssh_port": 0, - "docker_apparmorprofile": "", - "docker_args": [], - "docker_config": { - "AttachStderr": false, - "AttachStdin": false, - "AttachStdout": false, - "Cmd": [ - "/hello" - ], - "Domainname": "", - "Entrypoint": null, - "Env": null, - "Hostname": "9f2f80b0a702", - "Image": "hello-world", - "Labels": {}, - "OnBuild": null, - "OpenStdin": false, - "StdinOnce": false, - "Tty": false, - "User": "", - "Volumes": null, - "WorkingDir": "" - }, - "docker_created": "2016-04-18T02:05:59.659599249Z", - "docker_driver": "aufs", - "docker_execdriver": "native-0.2", - "docker_execids": null, - "docker_graphdriver": { - "Data": null, - "Name": "aufs" - }, - "docker_hostconfig": { - "Binds": null, - "BlkioWeight": 0, - "CapAdd": null, - "CapDrop": null, - "CgroupParent": "", - "ConsoleSize": [ - 0, - 0 - ], - "ContainerIDFile": "", - "CpuPeriod": 0, - "CpuQuota": 0, - "CpuShares": 0, - "CpusetCpus": "", - "CpusetMems": "", - "Devices": null, - "Dns": null, - "DnsOptions": null, - "DnsSearch": null, - "ExtraHosts": null, - "GroupAdd": null, - "IpcMode": "", - "KernelMemory": 0, - "Links": null, - "LogConfig": { - "Config": {}, - "Type": "json-file" - }, - "LxcConf": null, - "Memory": 0, - "MemoryReservation": 0, - "MemorySwap": 0, - "MemorySwappiness": null, - "NetworkMode": "default", - "OomKillDisable": false, - "PidMode": "host", - "PortBindings": null, - "Privileged": false, - "PublishAllPorts": false, - "ReadonlyRootfs": false, - "RestartPolicy": { - "MaximumRetryCount": 0, - "Name": "" - }, - "SecurityOpt": [ - "label:disable" - ], - "UTSMode": "", - "Ulimits": null, - "VolumeDriver": "", - "VolumesFrom": null - }, - "docker_hostnamepath": 
"/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname", - "docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts", - "docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14", - "docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7", - "docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6a-json.log", - "docker_mountlabel": "", - "docker_mounts": [], - "docker_name": "/hello-world", - "docker_networksettings": { - "Bridge": "", - "EndpointID": "", - "Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "HairpinMode": false, - "IPAddress": "", - "IPPrefixLen": 0, - "IPv6Gateway": "", - "LinkLocalIPv6Address": "", - "LinkLocalIPv6PrefixLen": 0, - "MacAddress": "", - "Networks": { - "bridge": { - "EndpointID": "", - "Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "IPAddress": "", - "IPPrefixLen": 0, - "IPv6Gateway": "", - "MacAddress": "" - } - }, - "Ports": null, - "SandboxID": "", - "SandboxKey": "", - "SecondaryIPAddresses": null, - "SecondaryIPv6Addresses": null - }, - "docker_path": "/hello", - "docker_processlabel": "", - "docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf", - "docker_restartcount": 0, - "docker_short_id": "9f2f80b0a7023", - "docker_state": { - "Dead": false, - "Error": "", - "ExitCode": 0, - "FinishedAt": "2016-04-18T02:06:00.296619369Z", - "OOMKilled": false, - "Paused": false, - "Pid": 0, - "Restarting": false, - "Running": false, - "StartedAt": "2016-04-18T02:06:00.272065041Z", - "Status": "exited" - } -} - -Groups ------- -When run in --list mode (the default), container instances are grouped by: - - - container id - - container name - - container 
short id - - image_name (image_) - - stack_name (stack_) - - service_name (service_) - - docker_host - - running - - stopped - - -Configuration: --------------- -You can control the behavior of the inventory script by passing arguments, defining environment variables, or -creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence -is command line args, then the docker.yml file and finally environment variables. - -Environment variables: -...................... - -To connect to a single Docker API the following variables can be defined in the environment to control the connection -options. These are the same environment variables used by the Docker modules. - - DOCKER_HOST - The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock. - - DOCKER_API_VERSION: - The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported - by docker-py. - - DOCKER_TIMEOUT: - The maximum amount of time in seconds to wait on a response fromm the API. Defaults to 60 seconds. - - DOCKER_TLS: - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. - Defaults to False. - - DOCKER_TLS_VERIFY: - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. - Default is False - - DOCKER_TLS_HOSTNAME: - When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults - to localhost. - - DOCKER_CERT_PATH: - Path to the directory containing the client certificate, client key and CA certificate. - - DOCKER_SSL_VERSION: - Provide a valid SSL version number. 
Default value determined by docker-py, which at the time of this writing - was 1.0 - -In addition to the connection variables there are a couple variables used to control the execution and output of the -script: - - DOCKER_CONFIG_FILE - Path to the configuration file. Defaults to ./docker.yml. - - DOCKER_PRIVATE_SSH_PORT: - The private port (container port) on which SSH is listening for connections. Defaults to 22. - - DOCKER_DEFAULT_IP: - The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'. - - -Configuration File -.................. - -Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory. - -The default name of the file is derived from the name of the inventory script. By default the script will look for -basename of the script (i.e. docker) with an extension of '.yml'. - -You can also override the default name of the script by defining DOCKER_CONFIG_FILE in the environment. - -Here's what you can define in docker_inventory.yml: - - defaults - Defines a default connection. Defaults will be taken from this and applied to any values not provided - for a host defined in the hosts list. - - hosts - If you wish to get inventory from more than one Docker host, define a hosts list. - -For the default host and each host in the hosts list define the following attributes: - - host: - description: The URL or Unix socket path used to connect to the Docker API. - required: yes - - tls: - description: Connect using TLS without verifying the authenticity of the Docker host server. - default: false - required: false - - tls_verify: - description: Connect using TLS without verifying the authenticity of the Docker host server. - default: false - required: false - - cert_path: - description: Path to the client's TLS certificate file. 
- default: null - required: false - - cacert_path: - description: Use a CA certificate when performing server verification by providing the path to a CA certificate file. - default: null - required: false - - key_path: - description: Path to the client's TLS key file. - default: null - required: false - - version: - description: The Docker API version. - required: false - default: will be supplied by the docker-py module. - - timeout: - description: The amount of time in seconds to wait on an API response. - required: false - default: 60 - - default_ip: - description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface - '0.0.0.0'. - required: false - default: 127.0.0.1 - - private_ssh_port: - description: The port containers use for SSH - required: false - default: 22 - -Examples --------- - -# Connect to the Docker API on localhost port 4243 and format the JSON output -DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty - -# Any container's ssh port exposed on 0.0.0.0 will be mapped to -# another IP address (where Ansible will attempt to connect via SSH) -DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty - -# Run as input to a playbook: -ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml - -# Simple playbook to invoke with the above example: - - - name: Test docker_inventory - hosts: all - connection: local - gather_facts: no - tasks: - - debug: msg="Container - {{ inventory_hostname }}" - -''' - -import os -import sys -import json -import argparse -import re -import yaml - -from collections import defaultdict -# Manipulation of the path is needed because the docker-py -# module is imported by the name docker, and because this file -# is also named docker -for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]: - try: - del sys.path[sys.path.index(path)] - except Exception: - pass - -HAS_DOCKER_PY = True -HAS_DOCKER_ERROR = False - -try: - from docker.errors 
import APIError, TLSParameterError - from docker.tls import TLSConfig - from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION -except ImportError as exc: - HAS_DOCKER_ERROR = str(exc) - HAS_DOCKER_PY = False - -# Client has recently been split into DockerClient and APIClient -try: - from docker import Client -except ImportError as dummy: - try: - from docker import APIClient as Client - except ImportError as exc: - HAS_DOCKER_ERROR = str(exc) - HAS_DOCKER_PY = False - - class Client: - pass - -DEFAULT_DOCKER_CONFIG_FILE = os.path.splitext(os.path.basename(__file__))[0] + '.yml' -DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock' -DEFAULT_TLS = False -DEFAULT_TLS_VERIFY = False -DEFAULT_TLS_HOSTNAME = "localhost" -DEFAULT_IP = '127.0.0.1' -DEFAULT_SSH_PORT = '22' - -BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True] -BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False] - - -DOCKER_ENV_ARGS = dict( - config_file='DOCKER_CONFIG_FILE', - docker_host='DOCKER_HOST', - api_version='DOCKER_API_VERSION', - cert_path='DOCKER_CERT_PATH', - ssl_version='DOCKER_SSL_VERSION', - tls='DOCKER_TLS', - tls_verify='DOCKER_TLS_VERIFY', - tls_hostname='DOCKER_TLS_HOSTNAME', - timeout='DOCKER_TIMEOUT', - private_ssh_port='DOCKER_DEFAULT_SSH_PORT', - default_ip='DOCKER_DEFAULT_IP', -) - - -def fail(msg): - sys.stderr.write("%s\n" % msg) - sys.exit(1) - - -def log(msg, pretty_print=False): - if pretty_print: - print(json.dumps(msg, sort_keys=True, indent=2)) - else: - print(msg + u'\n') - - -class AnsibleDockerClient(Client): - def __init__(self, auth_params, debug): - - self.auth_params = auth_params - self.debug = debug - self._connect_params = self._get_connect_params() - - try: - super(AnsibleDockerClient, self).__init__(**self._connect_params) - except APIError as exc: - self.fail("Docker API error: %s" % exc) - except Exception as exc: - self.fail("Error connecting: %s" % exc) - - def fail(self, msg): - fail(msg) - - def log(self, msg, 
pretty_print=False): - if self.debug: - log(msg, pretty_print) - - def _get_tls_config(self, **kwargs): - self.log("get_tls_config:") - for key in kwargs: - self.log(" %s: %s" % (key, kwargs[key])) - try: - tls_config = TLSConfig(**kwargs) - return tls_config - except TLSParameterError as exc: - self.fail("TLS config error: %s" % exc) - - def _get_connect_params(self): - auth = self.auth_params - - self.log("auth params:") - for key in auth: - self.log(" %s: %s" % (key, auth[key])) - - if auth['tls'] or auth['tls_verify']: - auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://') - - if auth['tls'] and auth['cert_path'] and auth['key_path']: - # TLS with certs and no host verification - tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - verify=False, - ssl_version=auth['ssl_version']) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls']: - # TLS with no certs and not host verification - tls_config = self._get_tls_config(verify=False, - ssl_version=auth['ssl_version']) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls_verify'] and auth['cert_path'] and auth['key_path']: - # TLS with certs and host verification - if auth['cacert_path']: - tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - ca_cert=auth['cacert_path'], - verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version']) - else: - tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version']) - - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls_verify'] and auth['cacert_path']: - # TLS with cacert only - tls_config = 
self._get_tls_config(ca_cert=auth['cacert_path'], - assert_hostname=auth['tls_hostname'], - verify=True, - ssl_version=auth['ssl_version']) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls_verify']: - # TLS with verify and no certs - tls_config = self._get_tls_config(verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version']) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - # No TLS - return dict(base_url=auth['docker_host'], - version=auth['api_version'], - timeout=auth['timeout']) - - def _handle_ssl_error(self, error): - match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error)) - if match: - msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \ - "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \ - "You may also use TLS without verification by setting the tls parameter to true." 
\ - % (self.auth_params['tls_hostname'], match.group(1), match.group(1)) - self.fail(msg) - self.fail("SSL Exception: %s" % (error)) - - -class EnvArgs(object): - def __init__(self): - self.config_file = None - self.docker_host = None - self.api_version = None - self.cert_path = None - self.ssl_version = None - self.tls = None - self.tls_verify = None - self.tls_hostname = None - self.timeout = None - self.default_ssh_port = None - self.default_ip = None - - -class DockerInventory(object): - - def __init__(self): - self._args = self._parse_cli_args() - self._env_args = self._parse_env_args() - self.groups = defaultdict(list) - self.hostvars = defaultdict(dict) - - def run(self): - config_from_file = self._parse_config_file() - if not config_from_file: - config_from_file = dict() - docker_hosts = self.get_hosts(config_from_file) - - for host in docker_hosts: - client = AnsibleDockerClient(host, self._args.debug) - self.get_inventory(client, host) - - if not self._args.host: - self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts] - self.groups['_meta'] = dict( - hostvars=self.hostvars - ) - print(self._json_format_dict(self.groups, pretty_print=self._args.pretty)) - else: - print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty)) - - sys.exit(0) - - def get_inventory(self, client, host): - - ssh_port = host.get('default_ssh_port') - default_ip = host.get('default_ip') - hostname = host.get('docker_host') - - try: - containers = client.containers(all=True) - except Exception as exc: - self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc))) - - for container in containers: - id = container.get('Id') - short_id = id[:13] - - try: - name = container.get('Names', list()).pop(0).lstrip('/') - except IndexError: - name = short_id - - if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]): - try: - inspect = client.inspect_container(id) - except 
Exception as exc: - self.fail("Error inspecting container %s - %s" % (name, str(exc))) - - running = inspect.get('State', dict()).get('Running') - - # Add container to groups - image_name = inspect.get('Config', dict()).get('Image') - if image_name: - self.groups["image_%s" % (image_name)].append(name) - - stack_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.stack.namespace') - if stack_name: - self.groups["stack_%s" % stack_name].append(name) - - service_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.swarm.service.name') - if service_name: - self.groups["service_%s" % service_name].append(name) - - self.groups[id].append(name) - self.groups[name].append(name) - if short_id not in self.groups: - self.groups[short_id].append(name) - self.groups[hostname].append(name) - - if running is True: - self.groups['running'].append(name) - else: - self.groups['stopped'].append(name) - - # Figure ous ssh IP and Port - try: - # Lookup the public facing port Nat'ed to ssh port. - port = client.port(container, ssh_port)[0] - except (IndexError, AttributeError, TypeError): - port = dict() - - try: - ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp'] - except KeyError: - ip = '' - - facts = dict( - ansible_ssh_host=ip, - ansible_ssh_port=port.get('HostPort', int()), - docker_name=name, - docker_short_id=short_id - ) - - for key in inspect: - fact_key = self._slugify(key) - facts[fact_key] = inspect.get(key) - - self.hostvars[name].update(facts) - - def _slugify(self, value): - return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) - - def get_hosts(self, config): - ''' - Determine the list of docker hosts we need to talk to. - - :param config: dictionary read from config file. can be empty. 
- :return: list of connection dictionaries - ''' - hosts = list() - - hosts_list = config.get('hosts') - defaults = config.get('defaults', dict()) - self.log('defaults:') - self.log(defaults, pretty_print=True) - def_host = defaults.get('host') - def_tls = defaults.get('tls') - def_tls_verify = defaults.get('tls_verify') - def_tls_hostname = defaults.get('tls_hostname') - def_ssl_version = defaults.get('ssl_version') - def_cert_path = defaults.get('cert_path') - def_cacert_path = defaults.get('cacert_path') - def_key_path = defaults.get('key_path') - def_version = defaults.get('version') - def_timeout = defaults.get('timeout') - def_ip = defaults.get('default_ip') - def_ssh_port = defaults.get('private_ssh_port') - - if hosts_list: - # use hosts from config file - for host in hosts_list: - docker_host = host.get('host') or def_host or self._args.docker_host or \ - self._env_args.docker_host or DEFAULT_DOCKER_HOST - api_version = host.get('version') or def_version or self._args.api_version or \ - self._env_args.api_version or DEFAULT_DOCKER_API_VERSION - tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \ - self._env_args.tls_hostname or DEFAULT_TLS_HOSTNAME - tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \ - self._env_args.tls_verify or DEFAULT_TLS_VERIFY - tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS - ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \ - self._env_args.ssl_version - - cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \ - self._env_args.cert_path - if cert_path and cert_path == self._env_args.cert_path: - cert_path = os.path.join(cert_path, 'cert.pem') - - cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \ - self._env_args.cert_path - if cacert_path and cacert_path == self._env_args.cert_path: - cacert_path = 
os.path.join(cacert_path, 'ca.pem') - - key_path = host.get('key_path') or def_key_path or self._args.key_path or \ - self._env_args.cert_path - if key_path and key_path == self._env_args.cert_path: - key_path = os.path.join(key_path, 'key.pem') - - timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \ - DEFAULT_TIMEOUT_SECONDS - default_ip = host.get('default_ip') or def_ip or self._env_args.default_ip or \ - self._args.default_ip_address or DEFAULT_IP - default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \ - DEFAULT_SSH_PORT - host_dict = dict( - docker_host=docker_host, - api_version=api_version, - tls=tls, - tls_verify=tls_verify, - tls_hostname=tls_hostname, - cert_path=cert_path, - cacert_path=cacert_path, - key_path=key_path, - ssl_version=ssl_version, - timeout=timeout, - default_ip=default_ip, - default_ssh_port=default_ssh_port, - ) - hosts.append(host_dict) - else: - # use default definition - docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST - api_version = def_version or self._args.api_version or self._env_args.api_version or \ - DEFAULT_DOCKER_API_VERSION - tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname or \ - DEFAULT_TLS_HOSTNAME - tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY - tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS - ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version - - cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path - if cert_path and cert_path == self._env_args.cert_path: - cert_path = os.path.join(cert_path, 'cert.pem') - - cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path - if cacert_path and cacert_path == self._env_args.cert_path: - cacert_path = os.path.join(cacert_path, 
'ca.pem') - - key_path = def_key_path or self._args.key_path or self._env_args.cert_path - if key_path and key_path == self._env_args.cert_path: - key_path = os.path.join(key_path, 'key.pem') - - timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS - default_ip = def_ip or self._env_args.default_ip or self._args.default_ip_address or DEFAULT_IP - default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT - host_dict = dict( - docker_host=docker_host, - api_version=api_version, - tls=tls, - tls_verify=tls_verify, - tls_hostname=tls_hostname, - cert_path=cert_path, - cacert_path=cacert_path, - key_path=key_path, - ssl_version=ssl_version, - timeout=timeout, - default_ip=default_ip, - default_ssh_port=default_ssh_port, - ) - hosts.append(host_dict) - self.log("hosts: ") - self.log(hosts, pretty_print=True) - return hosts - - def _parse_config_file(self): - config = dict() - config_file = DEFAULT_DOCKER_CONFIG_FILE - - if self._args.config_file: - config_file = self._args.config_file - elif self._env_args.config_file: - config_file = self._env_args.config_file - - config_file = os.path.abspath(config_file) - - if os.path.isfile(config_file): - with open(config_file) as f: - try: - config = yaml.safe_load(f.read()) - except Exception as exc: - self.fail("Error: parsing %s - %s" % (config_file, str(exc))) - else: - msg = "Error: config file given by {} does not exist - " + config_file - if self._args.config_file: - self.fail(msg.format('command line argument')) - elif self._env_args.config_file: - self.fail(msg.format(DOCKER_ENV_ARGS.get('config_file'))) - else: - self.log(msg.format('DEFAULT_DOCKER_CONFIG_FILE')) - return config - - def log(self, msg, pretty_print=False): - if self._args.debug: - log(msg, pretty_print) - - def fail(self, msg): - fail(msg) - - def _parse_env_args(self): - args = EnvArgs() - for key, value in DOCKER_ENV_ARGS.items(): - if os.environ.get(value): - val = 
os.environ.get(value) - if val in BOOLEANS_TRUE: - val = True - if val in BOOLEANS_FALSE: - val = False - setattr(args, key, val) - return args - - def _parse_cli_args(self): - # Parse command line arguments - - parser = argparse.ArgumentParser( - description='Return Ansible inventory for one or more Docker hosts.') - parser.add_argument('--list', action='store_true', default=True, - help='List all containers (default: True)') - parser.add_argument('--debug', action='store_true', default=False, - help='Send debug messages to STDOUT') - parser.add_argument('--host', action='store', - help='Only get information for a specific container.') - parser.add_argument('--pretty', action='store_true', default=False, - help='Pretty print JSON output(default: False)') - parser.add_argument('--config-file', action='store', default=None, - help="Name of the config file to use. Default is %s" % (DEFAULT_DOCKER_CONFIG_FILE)) - parser.add_argument('--docker-host', action='store', default=None, - help="The base url or Unix sock path to connect to the docker daemon. Defaults to %s" - % (DEFAULT_DOCKER_HOST)) - parser.add_argument('--tls-hostname', action='store', default=None, - help="Host name to expect in TLS certs. Defaults to %s" % DEFAULT_TLS_HOSTNAME) - parser.add_argument('--api-version', action='store', default=None, - help="Docker daemon API version. Defaults to %s" % (DEFAULT_DOCKER_API_VERSION)) - parser.add_argument('--timeout', action='store', default=None, - help="Docker connection timeout in seconds. 
Defaults to %s" - % (DEFAULT_TIMEOUT_SECONDS)) - parser.add_argument('--cacert-path', action='store', default=None, - help="Path to the TLS certificate authority pem file.") - parser.add_argument('--cert-path', action='store', default=None, - help="Path to the TLS certificate pem file.") - parser.add_argument('--key-path', action='store', default=None, - help="Path to the TLS encryption key pem file.") - parser.add_argument('--ssl-version', action='store', default=None, - help="TLS version number") - parser.add_argument('--tls', action='store_true', default=None, - help="Use TLS. Defaults to %s" % (DEFAULT_TLS)) - parser.add_argument('--tls-verify', action='store_true', default=None, - help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY)) - parser.add_argument('--private-ssh-port', action='store', default=None, - help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT)) - parser.add_argument('--default-ip-address', action='store', default=None, - help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP)) - return parser.parse_args() - - def _json_format_dict(self, data, pretty_print=False): - # format inventory data for output - if pretty_print: - return json.dumps(data, sort_keys=True, indent=4) - else: - return json.dumps(data) - - -def main(): - - if not HAS_DOCKER_PY: - fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR)) - - DockerInventory().run() - - -main() diff --git a/scripts/inventory/docker.yml b/scripts/inventory/docker.yml deleted file mode 100644 index 97239392d1..0000000000 --- a/scripts/inventory/docker.yml +++ /dev/null @@ -1,74 +0,0 @@ -# This is the configuration file for the Docker inventory script: docker_inventory.py. -# -# You can define the following in this file: -# -# defaults -# Defines a default connection. Defaults will be taken from this and applied to any values not provided -# for a host defined in the hosts list. 
-# -# hosts -# If you wish to get inventory from more than one Docker host, define a hosts list. -# -# For the default host and each host in the hosts list define the following attributes: -# -# host: -# description: The URL or Unix socket path used to connect to the Docker API. -# required: yes -# -# tls: -# description: Connect using TLS without verifying the authenticity of the Docker host server. -# default: false -# required: false -# -# tls_verify: -# description: Connect using TLS without verifying the authenticity of the Docker host server. -# default: false -# required: false -# -# cert_path: -# description: Path to the client's TLS certificate file. -# default: null -# required: false -# -# cacert_path: -# description: Use a CA certificate when performing server verification by providing the path to a CA certificate file. -# default: null -# required: false -# -# key_path: -# description: Path to the client's TLS key file. -# default: null -# required: false -# -# version: -# description: The Docker API version. -# required: false -# default: will be supplied by the docker-py module. -# -# timeout: -# description: The amount of time in seconds to wait on an API response. -# required: false -# default: 60 -# -# default_ip: -# description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface -# '0.0.0.0'. 
-# required: false -# default: 127.0.0.1 -# -# private_ssh_port: -# description: The port containers use for SSH -# required: false -# default: 22 - -#defaults: -# host: unix:///var/run/docker.sock -# private_ssh_port: 22 -# default_ip: 127.0.0.1 - -#hosts: -# - host: tcp://10.45.5.16:4243 -# private_ssh_port: 2022 -# default_ip: 172.16.3.45 -# - host: tcp://localhost:4243 -# private_ssh_port: 2029 diff --git a/scripts/inventory/fleet.py b/scripts/inventory/fleet.py deleted file mode 100755 index cc9537e115..0000000000 --- a/scripts/inventory/fleet.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python -""" -fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and -returns it under the host group 'coreos' -""" - -# Copyright (C) 2014 Andrew Rothstein -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# -# Thanks to the vagrant.py inventory script for giving me the basic structure -# of this. 
-# - -import sys -import subprocess -import re -import string -from optparse import OptionParser -import json - -# Options -# ------------------------------ - -parser = OptionParser(usage="%prog [options] --list | --host ") -parser.add_option('--list', default=False, dest="list", action="store_true", - help="Produce a JSON consumable grouping of servers in your fleet") -parser.add_option('--host', default=None, dest="host", - help="Generate additional host specific details for given host for Ansible") -(options, args) = parser.parse_args() - -# -# helper functions -# - - -def get_ssh_config(): - configs = [] - for box in list_running_boxes(): - config = get_a_ssh_config(box) - configs.append(config) - return configs - - -# list all the running instances in the fleet -def list_running_boxes(): - boxes = [] - for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n'): - matcher = re.search(r"[^\s]+[\s]+([^\s]+).+", line) - if matcher and matcher.group(1) != "IP": - boxes.append(matcher.group(1)) - - return boxes - - -def get_a_ssh_config(box_name): - config = {} - config['Host'] = box_name - config['ansible_ssh_user'] = 'core' - config['ansible_python_interpreter'] = '/opt/bin/python' - return config - - -# List out servers that vagrant has running -# ------------------------------ -if options.list: - ssh_config = get_ssh_config() - hosts = {'coreos': []} - - for data in ssh_config: - hosts['coreos'].append(data['Host']) - - print(json.dumps(hosts)) - sys.exit(1) - -# Get out the host details -# ------------------------------ -elif options.host: - result = {} - ssh_config = get_ssh_config() - - details = filter(lambda x: (x['Host'] == options.host), ssh_config) - if len(details) > 0: - # pass through the port, in case it's non standard. 
- result = details[0] - - print(json.dumps(result)) - sys.exit(1) - - -# Print out help -# ------------------------------ -else: - parser.print_help() - sys.exit(1) diff --git a/scripts/inventory/foreman.ini b/scripts/inventory/foreman.ini deleted file mode 100644 index d157963848..0000000000 --- a/scripts/inventory/foreman.ini +++ /dev/null @@ -1,200 +0,0 @@ -# Foreman inventory (https://github.com/theforeman/foreman_ansible_inventory) -# -# This script can be used as an Ansible dynamic inventory. -# The connection parameters are set up via *foreman.ini* -# This is how the script founds the configuration file in -# order of discovery. -# -# * `/etc/ansible/foreman.ini` -# * Current directory of your inventory script. -# * `FOREMAN_INI_PATH` environment variable. -# -# ## Variables and Parameters -# -# The data returned from Foreman for each host is stored in a foreman -# hash so they're available as *host_vars* along with the parameters -# of the host and it's hostgroups: -# -# "foo.example.com": { -# "foreman": { -# "architecture_id": 1, -# "architecture_name": "x86_64", -# "build": false, -# "build_status": 0, -# "build_status_label": "Installed", -# "capabilities": [ -# "build", -# "image" -# ], -# "compute_profile_id": 4, -# "hostgroup_name": "webtier/myapp", -# "id": 70, -# "image_name": "debian8.1", -# ... -# "uuid": "50197c10-5ebb-b5cf-b384-a1e203e19e77" -# }, -# "foreman_params": { -# "testparam1": "foobar", -# "testparam2": "small", -# ... -# } -# -# and could therefore be used in Ansible like: -# -# - debug: msg="From Foreman host {{ foreman['uuid'] }}" -# -# Which yields -# -# TASK [test_foreman : debug] **************************************************** -# ok: [foo.example.com] => { -# "msg": "From Foreman host 50190bd1-052a-a34a-3c9c-df37a39550bf" -# } -# -# ## Automatic Ansible groups -# -# The inventory will provide a set of groups, by default prefixed by -# 'foreman_'. 
If you want to customize this prefix, change the -# group_prefix option in /etc/ansible/foreman.ini. The rest of this -# guide will assume the default prefix of 'foreman' -# -# The hostgroup, location, organization, content view, and lifecycle -# environment of each host are created as Ansible groups with a -# foreman_ prefix, all lowercase and problematic parameters -# removed. So e.g. the foreman hostgroup -# -# myapp / webtier / datacenter1 -# -# would turn into the Ansible group: -# -# foreman_hostgroup_myapp_webtier_datacenter1 -# -# If the parameter want_hostcollections is set to true, the -# collections each host is in are created as Ansible groups with a -# foreman_hostcollection prefix, all lowercase and problematic -# parameters removed. So e.g. the Foreman host collection -# -# Patch Window Thursday -# -# would turn into the Ansible group: -# -# foreman_hostcollection_patchwindowthursday -# -# If the parameter host_filters is set, it will be used as the -# "search" parameter for the /api/v2/hosts call. This can be used to -# restrict the list of returned host, as shown below. -# -# Furthermore Ansible groups can be created on the fly using the -# *group_patterns* variable in *foreman.ini* so that you can build up -# hierarchies using parameters on the hostgroup and host variables. 
-# -# Lets assume you have a host that is built using this nested hostgroup: -# -# myapp / webtier / datacenter1 -# -# and each of the hostgroups defines a parameters respectively: -# -# myapp: app_param = myapp -# webtier: tier_param = webtier -# datacenter1: dc_param = datacenter1 -# -# The host is also in a subnet called "mysubnet" and provisioned via an image -# then *group_patterns* like: -# -# [ansible] -# group_patterns = ["{app_param}-{tier_param}-{dc_param}", -# "{app_param}-{tier_param}", -# "{app_param}", -# "{subnet_name}-{provision_method}"] -# -# would put the host into the additional Ansible groups: -# -# - myapp-webtier-datacenter1 -# - myapp-webtier -# - myapp -# - mysubnet-image -# -# by recursively resolving the hostgroups, getting the parameter keys -# and values and doing a Python *string.format()* like replacement on -# it. -# -[foreman] -url = http://localhost:3000/ -user = foreman -password = secret -ssl_verify = True - -# Foreman 1.24 introduces a new reports API to improve performance of the inventory script. -# Note: This requires foreman_ansible plugin installed. -# Set to False if you want to use the old API. Defaults to True. - -use_reports_api = True - -# Retrieve only hosts from the organization "Web Engineering". -# host_filters = organization="Web Engineering" - -# Retrieve only hosts from the organization "Web Engineering" that are -# also in the host collection "Apache Servers". -# host_filters = organization="Web Engineering" and host_collection="Apache Servers" - -# Foreman Inventory report related configuration options. 
-# Configs that default to True : -# want_organization , want_location, want_ipv4, want_host_group, want_subnet, want_smart_proxies, want_facts -# Configs that default to False : -# want_ipv6, want_subnet_v6, want_content_facet_attributes, want_host_params - -[report] -# want_organization = True -# want_location = True -# want_ipv4 = True -# want_ipv6 = False -# want_host_group = True -# want_subnet = True -# want_subnet_v6 = False -# want_smart_proxies = True -# want_content_facet_attributes = False -# want_host_params = False - -# use this config to determine if facts are to be fetched in the report and stored on the hosts. -# want_facts = False - -# Upon receiving a request to return inventory report, Foreman schedules a report generation job. -# The script then polls the report_data endpoint repeatedly to check if the job is complete and retrieves data -# poll_interval allows to define the polling interval between 2 calls to the report_data endpoint while polling. -# Defaults to 10 seconds - -# poll_interval = 10 - -[ansible] -group_patterns = ["{app}-{tier}-{color}", - "{app}-{color}", - "{app}", - "{tier}"] -group_prefix = foreman_ - -# Whether to fetch facts from Foreman and store them on the host -want_facts = True - -# Whether to create Ansible groups for host collections. Only tested -# with Katello (Red Hat Satellite). Disabled by default to not break -# the script for stand-alone Foreman. -want_hostcollections = False - -# Whether to interpret global parameters value as JSON (if possible, else -# take as is). Only tested with Katello (Red Hat Satellite). -# This allows to define lists and dictionaries (and more complicated structures) -# variables by entering them as JSON string in Foreman parameters. -# Disabled by default as the change would else not be backward compatible. -rich_params = False - -# Whether to populate the ansible_ssh_host variable to explicitly specify the -# connection target. Only tested with Katello (Red Hat Satellite). 
-# If the foreman 'ip' fact exists then the ansible_ssh_host varibale is populated -# to permit connections where DNS resolution fails. -want_ansible_ssh_host = False - -[cache] -path = . -max_age = 60 - -# Whether to scan foreman to add recently created hosts in inventory cache -scan_new_hosts = True diff --git a/scripts/inventory/foreman.py b/scripts/inventory/foreman.py deleted file mode 100755 index f2e729b6a7..0000000000 --- a/scripts/inventory/foreman.py +++ /dev/null @@ -1,651 +0,0 @@ -#!/usr/bin/env python -# vim: set fileencoding=utf-8 : -# -# Copyright (C) 2016 Guido Günther , -# Daniel Lobato Garcia -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# This is somewhat based on cobbler inventory - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import argparse -import copy -import os -import re -import sys -from time import time, sleep -from collections import defaultdict -from distutils.version import LooseVersion, StrictVersion - -# 3rd party imports -import requests -if LooseVersion(requests.__version__) < LooseVersion('1.1.0'): - print('This script requires python-requests 1.1 as a minimum version') - sys.exit(1) - -from requests.auth import HTTPBasicAuth - -from ansible.module_utils._text import to_text -from ansible.module_utils.six.moves import configparser as ConfigParser - - -def json_format_dict(data, pretty=False): - """Converts a dict to a JSON object and dumps it as a formatted string""" - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -class ForemanInventory(object): - - def __init__(self): - self.inventory = defaultdict(list) # A list of groups and the hosts in that group - self.cache = dict() # Details about hosts in the inventory - self.params = dict() # Params of each host - self.facts = dict() # Facts of each host - self.hostgroups = dict() # host groups - 
self.hostcollections = dict() # host collections - self.session = None # Requests session - self.config_paths = [ - "/etc/ansible/foreman.ini", - os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini', - ] - env_value = os.environ.get('FOREMAN_INI_PATH') - if env_value is not None: - self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value))) - - def read_settings(self): - """Reads the settings from the foreman.ini file""" - - config = ConfigParser.SafeConfigParser() - config.read(self.config_paths) - - # Foreman API related - try: - self.foreman_url = config.get('foreman', 'url') - self.foreman_user = config.get('foreman', 'user') - self.foreman_pw = config.get('foreman', 'password', raw=True) - self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e: - print("Error parsing configuration: %s" % e, file=sys.stderr) - return False - - # Inventory Report Related - try: - self.foreman_use_reports_api = config.getboolean('foreman', 'use_reports_api') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.foreman_use_reports_api = True - - try: - self.want_organization = config.getboolean('report', 'want_organization') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_organization = True - - try: - self.want_location = config.getboolean('report', 'want_location') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_location = True - - try: - self.want_IPv4 = config.getboolean('report', 'want_ipv4') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_IPv4 = True - - try: - self.want_IPv6 = config.getboolean('report', 'want_ipv6') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_IPv6 = False - - try: - self.want_host_group = config.getboolean('report', 'want_host_group') - except (ConfigParser.NoOptionError, 
ConfigParser.NoSectionError): - self.want_host_group = True - - try: - self.want_host_params = config.getboolean('report', 'want_host_params') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_host_params = False - - try: - self.want_subnet = config.getboolean('report', 'want_subnet') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_subnet = True - - try: - self.want_subnet_v6 = config.getboolean('report', 'want_subnet_v6') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_subnet_v6 = False - - try: - self.want_smart_proxies = config.getboolean('report', 'want_smart_proxies') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_smart_proxies = True - - try: - self.want_content_facet_attributes = config.getboolean('report', 'want_content_facet_attributes') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_content_facet_attributes = False - - try: - self.report_want_facts = config.getboolean('report', 'want_facts') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.report_want_facts = True - - try: - self.poll_interval = config.getint('report', 'poll_interval') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.poll_interval = 10 - - # Ansible related - try: - group_patterns = config.get('ansible', 'group_patterns') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - group_patterns = "[]" - - self.group_patterns = json.loads(group_patterns) - - try: - self.group_prefix = config.get('ansible', 'group_prefix') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.group_prefix = "foreman_" - - try: - self.want_facts = config.getboolean('ansible', 'want_facts') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_facts = True - - self.want_facts = self.want_facts and self.report_want_facts - - try: - 
self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_hostcollections = False - - try: - self.want_ansible_ssh_host = config.getboolean('ansible', 'want_ansible_ssh_host') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_ansible_ssh_host = False - - # Do we want parameters to be interpreted if possible as JSON? (no by default) - try: - self.rich_params = config.getboolean('ansible', 'rich_params') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.rich_params = False - - try: - self.host_filters = config.get('foreman', 'host_filters') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.host_filters = None - - # Cache related - try: - cache_path = os.path.expanduser(config.get('cache', 'path')) - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - cache_path = '.' - (script, ext) = os.path.splitext(os.path.basename(__file__)) - self.cache_path_cache = cache_path + "/%s.cache" % script - self.cache_path_inventory = cache_path + "/%s.index" % script - self.cache_path_params = cache_path + "/%s.params" % script - self.cache_path_facts = cache_path + "/%s.facts" % script - self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script - try: - self.cache_max_age = config.getint('cache', 'max_age') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.cache_max_age = 60 - try: - self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.scan_new_hosts = False - - return True - - def parse_cli_args(self): - """Command line argument processing""" - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - 
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to foreman (default: False - use cache files)') - self.args = parser.parse_args() - - def _get_session(self): - if not self.session: - self.session = requests.session() - self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw) - self.session.verify = self.foreman_ssl_verify - return self.session - - def _get_json(self, url, ignore_errors=None, params=None): - if params is None: - params = {} - params['per_page'] = 250 - - page = 1 - results = [] - s = self._get_session() - while True: - params['page'] = page - ret = s.get(url, params=params) - if ignore_errors and ret.status_code in ignore_errors: - break - ret.raise_for_status() - json = ret.json() - # /hosts/:id has not results key - if 'results' not in json: - return json - # Facts are returned as dict in results not list - if isinstance(json['results'], dict): - return json['results'] - # List of all hosts is returned paginaged - results = results + json['results'] - if len(results) >= json['subtotal']: - break - page += 1 - if len(json['results']) == 0: - print("Did not make any progress during loop. 
" - "expected %d got %d" % (json['total'], len(results)), - file=sys.stderr) - break - return results - - def _use_inventory_report(self): - if not self.foreman_use_reports_api: - return False - status_url = "%s/api/v2/status" % self.foreman_url - result = self._get_json(status_url) - foreman_version = (LooseVersion(result.get('version')) >= LooseVersion('1.24.0')) - return foreman_version - - def _fetch_params(self): - options, params = ("no", "yes"), dict() - params["Organization"] = options[self.want_organization] - params["Location"] = options[self.want_location] - params["IPv4"] = options[self.want_IPv4] - params["IPv6"] = options[self.want_IPv6] - params["Facts"] = options[self.want_facts] - params["Host Group"] = options[self.want_host_group] - params["Host Collections"] = options[self.want_hostcollections] - params["Subnet"] = options[self.want_subnet] - params["Subnet v6"] = options[self.want_subnet_v6] - params["Smart Proxies"] = options[self.want_smart_proxies] - params["Content Attributes"] = options[self.want_content_facet_attributes] - params["Host Parameters"] = options[self.want_host_params] - if self.host_filters: - params["Hosts"] = self.host_filters - return params - - def _post_request(self): - url = "%s/ansible/api/v2/ansible_inventories/schedule" % self.foreman_url - session = self._get_session() - params = {'input_values': self._fetch_params()} - ret = session.post(url, json=params) - if not ret: - raise Exception("Error scheduling inventory report on foreman. Please check foreman logs!") - url = "{0}/{1}".format(self.foreman_url, ret.json().get('data_url')) - response = session.get(url) - while response: - if response.status_code != 204: - break - else: - sleep(self.poll_interval) - response = session.get(url) - if not response: - raise Exception("Error receiving inventory report from foreman. 
Please check foreman logs!") - else: - return response.json() - - def _get_hosts(self): - url = "%s/api/v2/hosts" % self.foreman_url - - params = {} - if self.host_filters: - params['search'] = self.host_filters - - return self._get_json(url, params=params) - - def _get_host_data_by_id(self, hid): - url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid) - return self._get_json(url) - - def _get_facts_by_id(self, hid): - url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid) - return self._get_json(url) - - def _resolve_params(self, host_params): - """Convert host params to dict""" - params = {} - - for param in host_params: - name = param['name'] - if self.rich_params: - try: - params[name] = json.loads(param['value']) - except ValueError: - params[name] = param['value'] - else: - params[name] = param['value'] - - return params - - def _get_facts(self, host): - """Fetch all host facts of the host""" - if not self.want_facts: - return {} - - ret = self._get_facts_by_id(host['id']) - if len(ret.values()) == 0: - facts = {} - elif len(ret.values()) == 1: - facts = list(ret.values())[0] - else: - raise ValueError("More than one set of facts returned for '%s'" % host) - return facts - - def write_to_cache(self, data, filename): - """Write data in JSON format to a file""" - json_data = json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def _write_cache(self): - self.write_to_cache(self.cache, self.cache_path_cache) - self.write_to_cache(self.inventory, self.cache_path_inventory) - self.write_to_cache(self.params, self.cache_path_params) - self.write_to_cache(self.facts, self.cache_path_facts) - self.write_to_cache(self.hostcollections, self.cache_path_hostcollections) - - def to_safe(self, word): - '''Converts 'bad' characters in a string to underscores - so they can be used as Ansible groups - - >>> ForemanInventory.to_safe("foo-bar baz") - 'foo_barbaz' - ''' - regex = r"[^A-Za-z0-9\_]" - return re.sub(regex, "_", 
word.replace(" ", "")) - - def update_cache(self, scan_only_new_hosts=False): - """Make calls to foreman and save the output in a cache""" - use_inventory_report = self._use_inventory_report() - if use_inventory_report: - self._update_cache_inventory(scan_only_new_hosts) - else: - self._update_cache_host_api(scan_only_new_hosts) - - def _update_cache_inventory(self, scan_only_new_hosts): - self.groups = dict() - self.hosts = dict() - try: - inventory_report_response = self._post_request() - except Exception: - self._update_cache_host_api(scan_only_new_hosts) - return - host_data = json.loads(inventory_report_response) - for host in host_data: - if not(host) or (host["name"] in self.cache.keys() and scan_only_new_hosts): - continue - dns_name = host['name'] - - host_params = host.pop('host_parameters', {}) - fact_list = host.pop('facts', {}) - content_facet_attributes = host.get('content_attributes', {}) or {} - - # Create ansible groups for hostgroup - group = 'host_group' - val = host.get(group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - # Create ansible groups for environment, location and organization - for group in ['environment', 'location', 'organization']: - val = host.get('%s' % group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - for group in ['lifecycle_environment', 'content_view']: - val = content_facet_attributes.get('%s_name' % group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - params = host_params - - # Ansible groups by parameters in host groups and Foreman host - # attributes. 
- groupby = dict() - for k, v in params.items(): - groupby[k] = self.to_safe(to_text(v)) - - # The name of the ansible groups is given by group_patterns: - for pattern in self.group_patterns: - try: - key = pattern.format(**groupby) - self.inventory[key].append(dns_name) - except KeyError: - pass # Host not part of this group - - if self.want_hostcollections: - hostcollections = host.get('host_collections') - - if hostcollections: - # Create Ansible groups for host collections - for hostcollection in hostcollections: - safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection.lower())) - self.inventory[safe_key].append(dns_name) - - self.hostcollections[dns_name] = hostcollections - - self.cache[dns_name] = host - self.params[dns_name] = params - self.facts[dns_name] = fact_list - self.inventory['all'].append(dns_name) - self._write_cache() - - def _update_cache_host_api(self, scan_only_new_hosts): - """Make calls to foreman and save the output in a cache""" - - self.groups = dict() - self.hosts = dict() - - for host in self._get_hosts(): - if host['name'] in self.cache.keys() and scan_only_new_hosts: - continue - dns_name = host['name'] - - host_data = self._get_host_data_by_id(host['id']) - host_params = host_data.get('all_parameters', {}) - - # Create ansible groups for hostgroup - group = 'hostgroup' - val = host.get('%s_title' % group) or host.get('%s_name' % group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - # Create ansible groups for environment, location and organization - for group in ['environment', 'location', 'organization']: - val = host.get('%s_name' % group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - for group in ['lifecycle_environment', 'content_view']: - val = 
host.get('content_facet_attributes', {}).get('%s_name' % group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - params = self._resolve_params(host_params) - - # Ansible groups by parameters in host groups and Foreman host - # attributes. - groupby = dict() - for k, v in params.items(): - groupby[k] = self.to_safe(to_text(v)) - - # The name of the ansible groups is given by group_patterns: - for pattern in self.group_patterns: - try: - key = pattern.format(**groupby) - self.inventory[key].append(dns_name) - except KeyError: - pass # Host not part of this group - - if self.want_hostcollections: - hostcollections = host_data.get('host_collections') - - if hostcollections: - # Create Ansible groups for host collections - for hostcollection in hostcollections: - safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower())) - self.inventory[safe_key].append(dns_name) - - self.hostcollections[dns_name] = hostcollections - - self.cache[dns_name] = host - self.params[dns_name] = params - self.facts[dns_name] = self._get_facts(host) - self.inventory['all'].append(dns_name) - self._write_cache() - - def is_cache_valid(self): - """Determines if the cache is still valid""" - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if (os.path.isfile(self.cache_path_inventory) and - os.path.isfile(self.cache_path_params) and - os.path.isfile(self.cache_path_facts)): - return True - return False - - def load_inventory_from_cache(self): - """Read the index from the cache file sets self.index""" - - with open(self.cache_path_inventory, 'r') as fp: - self.inventory = json.load(fp) - - def load_params_from_cache(self): - """Read the index from the cache file sets self.index""" - - with open(self.cache_path_params, 
'r') as fp: - self.params = json.load(fp) - - def load_facts_from_cache(self): - """Read the index from the cache file sets self.facts""" - - if not self.want_facts: - return - with open(self.cache_path_facts, 'r') as fp: - self.facts = json.load(fp) - - def load_hostcollections_from_cache(self): - """Read the index from the cache file sets self.hostcollections""" - - if not self.want_hostcollections: - return - with open(self.cache_path_hostcollections, 'r') as fp: - self.hostcollections = json.load(fp) - - def load_cache_from_cache(self): - """Read the cache from the cache file sets self.cache""" - - with open(self.cache_path_cache, 'r') as fp: - self.cache = json.load(fp) - - def get_inventory(self): - if self.args.refresh_cache or not self.is_cache_valid(): - self.update_cache() - else: - self.load_inventory_from_cache() - self.load_params_from_cache() - self.load_facts_from_cache() - self.load_hostcollections_from_cache() - self.load_cache_from_cache() - if self.scan_new_hosts: - self.update_cache(True) - - def get_host_info(self): - """Get variables about a specific host""" - - if not self.cache or len(self.cache) == 0: - # Need to load index from cache - self.load_cache_from_cache() - - if self.args.host not in self.cache: - # try updating the cache - self.update_cache() - - if self.args.host not in self.cache: - # host might not exist anymore - return json_format_dict({}, True) - - return json_format_dict(self.cache[self.args.host], True) - - def _print_data(self): - data_to_print = "" - if self.args.host: - data_to_print += self.get_host_info() - else: - self.inventory['_meta'] = {'hostvars': {}} - for hostname in self.cache: - self.inventory['_meta']['hostvars'][hostname] = { - 'foreman': self.cache[hostname], - 'foreman_params': self.params[hostname], - } - if self.want_ansible_ssh_host and 'ip' in self.cache[hostname]: - self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.cache[hostname]['ip'] - if self.want_facts: - 
self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname] - - data_to_print += json_format_dict(self.inventory, True) - - print(data_to_print) - - def run(self): - # Read settings and parse CLI arguments - if not self.read_settings(): - return False - self.parse_cli_args() - self.get_inventory() - self._print_data() - return True - - -if __name__ == '__main__': - sys.exit(not ForemanInventory().run()) diff --git a/scripts/inventory/freeipa.py b/scripts/inventory/freeipa.py deleted file mode 100755 index f7ffe1d223..0000000000 --- a/scripts/inventory/freeipa.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import argparse -from distutils.version import LooseVersion -import json -import os -import sys -from ipalib import api, errors, __version__ as IPA_VERSION -from ansible.module_utils.six import u - - -def initialize(): - ''' - This function initializes the FreeIPA/IPA API. This function requires - no arguments. A kerberos key must be present in the users keyring in - order for this to work. IPA default configuration directory is /etc/ipa, - this path could be overridden with IPA_CONFDIR environment variable. - ''' - - api.bootstrap(context='cli') - - if not os.path.isdir(api.env.confdir): - print("WARNING: IPA configuration directory (%s) is missing. " - "Environment variable IPA_CONFDIR could be used to override " - "default path." % api.env.confdir) - - if LooseVersion(IPA_VERSION) >= LooseVersion('4.6.2'): - # With ipalib < 4.6.0 'server' and 'domain' have default values - # ('localhost:8888', 'example.com'), newer versions don't and - # DNS autodiscovery is broken, then one of jsonrpc_uri / xmlrpc_uri is - # required. 
- # ipalib 4.6.0 is unusable (https://pagure.io/freeipa/issue/7132) - # that's why 4.6.2 is explicitely tested. - if 'server' not in api.env or 'domain' not in api.env: - sys.exit("ERROR: ('jsonrpc_uri' or 'xmlrpc_uri') or 'domain' are not " - "defined in '[global]' section of '%s' nor in '%s'." % - (api.env.conf, api.env.conf_default)) - - api.finalize() - try: - api.Backend.rpcclient.connect() - except AttributeError: - # FreeIPA < 4.0 compatibility - api.Backend.xmlclient.connect() - - return api - - -def list_groups(api): - ''' - This function prints a list of all host groups. This function requires - one argument, the FreeIPA/IPA API object. - ''' - - inventory = {} - hostvars = {} - - result = api.Command.hostgroup_find(all=True)['result'] - - for hostgroup in result: - # Get direct and indirect members (nested hostgroups) of hostgroup - members = [] - - if 'member_host' in hostgroup: - members = list(hostgroup['member_host']) - if 'memberindirect_host' in hostgroup: - members += (host for host in hostgroup['memberindirect_host']) - inventory[hostgroup['cn'][0]] = {'hosts': list(members)} - - for member in members: - hostvars[member] = {} - - inventory['_meta'] = {'hostvars': hostvars} - inv_string = json.dumps(inventory, indent=1, sort_keys=True) - print(inv_string) - - return None - - -def parse_args(): - ''' - This function parses the arguments that were passed in via the command line. - This function expects no arguments. - ''' - - parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA ' - 'inventory module') - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('--list', action='store_true', - help='List active servers') - group.add_argument('--host', help='List details about the specified host') - - return parser.parse_args() - - -def get_host_attributes(api, host): - """ - This function expects one string, this hostname to lookup variables for. 
- Args: - api: FreeIPA API Object - host: Name of Hostname - - Returns: Dict of Host vars if found else None - """ - try: - result = api.Command.host_show(u(host))['result'] - if 'usercertificate' in result: - del result['usercertificate'] - return json.dumps(result, indent=1) - except errors.NotFound as e: - return {} - - -if __name__ == '__main__': - args = parse_args() - api = initialize() - - if args.host: - print(get_host_attributes(api, args.host)) - elif args.list: - list_groups(api) diff --git a/scripts/inventory/infoblox.py b/scripts/inventory/infoblox.py deleted file mode 100755 index 209509025e..0000000000 --- a/scripts/inventory/infoblox.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python -# -# (c) 2018, Red Hat, Inc. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -import sys -import json -import argparse - -from ansible.parsing.dataloader import DataLoader -from ansible.module_utils.six import iteritems, raise_from -from ansible.module_utils._text import to_text -try: - from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiInventory - from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs -except ImportError as exc: - try: - # Fallback for Ansible 2.9 - from ansible.module_utils.net_tools.nios.api import WapiInventory - from ansible.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs - except ImportError: - raise_from( - Exception( - 'This inventory plugin only works with Ansible 2.9, 2.10, or 3, or when community.general is installed correctly in PYTHONPATH.' 
- ' Try using the inventory plugin from infoblox.nios_modules instead.'), - exc) - - -CONFIG_FILES = [ - os.environ.get('INFOBLOX_CONFIG_FILE', ''), - '/etc/ansible/infoblox.yaml', - '/etc/ansible/infoblox.yml' -] - - -def parse_args(): - parser = argparse.ArgumentParser() - - parser.add_argument('--list', action='store_true', - help='List host records from NIOS for use in Ansible') - - parser.add_argument('--host', - help='List meta data about single host (not used)') - - return parser.parse_args() - - -def main(): - args = parse_args() - - for config_file in CONFIG_FILES: - if os.path.exists(config_file): - break - else: - sys.stderr.write('unable to locate config file at /etc/ansible/infoblox.yaml\n') - sys.exit(-1) - - try: - loader = DataLoader() - config = loader.load_from_file(config_file) - provider = config.get('provider') or {} - wapi = WapiInventory(provider) - except Exception as exc: - sys.stderr.write(to_text(exc)) - sys.exit(-1) - - if args.host: - host_filter = {'name': args.host} - else: - host_filter = {} - - config_filters = config.get('filters') - - if config_filters.get('view') is not None: - host_filter['view'] = config_filters['view'] - - if config_filters.get('extattrs'): - extattrs = normalize_extattrs(config_filters['extattrs']) - else: - extattrs = {} - - hostvars = {} - inventory = { - '_meta': { - 'hostvars': hostvars - } - } - - return_fields = ['name', 'view', 'extattrs', 'ipv4addrs'] - - hosts = wapi.get_object('record:host', - host_filter, - extattrs=extattrs, - return_fields=return_fields) - - if hosts: - for item in hosts: - view = item['view'] - name = item['name'] - - if view not in inventory: - inventory[view] = {'hosts': []} - - inventory[view]['hosts'].append(name) - - hostvars[name] = { - 'view': view - } - - if item.get('extattrs'): - for key, value in iteritems(flatten_extattrs(item['extattrs'])): - if key.startswith('ansible_'): - hostvars[name][key] = value - else: - if 'extattrs' not in hostvars[name]: - 
hostvars[name]['extattrs'] = {} - hostvars[name]['extattrs'][key] = value - - sys.stdout.write(json.dumps(inventory, indent=4)) - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/infoblox.yaml b/scripts/inventory/infoblox.yaml deleted file mode 100644 index c1be5324ac..0000000000 --- a/scripts/inventory/infoblox.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# This file provides the configuration information for the Infoblox dynamic -# inventory script that is used to dynamically pull host information from NIOS. -# This file should be copied to /etc/ansible/infoblox.yaml in order for the -# dynamic script to find it. - -# Sets the provider arguments for authenticating to the Infoblox server to -# retrieve inventory hosts. Provider arguments can also be set using -# environment variables. Supported environment variables all start with -# INFOBLOX_{{ name }}. For instance, to set the host provider value, the -# environment variable would be INFOBLOX_HOST. -provider: - host: - username: - password: - -# Filters allow the dynamic inventory script to restrict the set of hosts that -# are returned from the Infoblox server. 
-filters: - # restrict returned hosts by extensible attributes - extattrs: {} - - # restrict returned hosts to a specified DNS view - view: null diff --git a/scripts/inventory/jail.py b/scripts/inventory/jail.py deleted file mode 100755 index a28b923b10..0000000000 --- a/scripts/inventory/jail.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -# (c) 2013, Michael Scherer -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from subprocess import Popen, PIPE -import sys -import json - -result = {} -result['all'] = {} - -pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True) -result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()] -result['all']['vars'] = {} -result['all']['vars']['ansible_connection'] = 'jail' - -if len(sys.argv) == 2 and sys.argv[1] == '--list': - print(json.dumps(result)) -elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print(json.dumps({'ansible_connection': 'jail'})) -else: - sys.stderr.write("Need an argument, either --list or --host \n") diff --git a/scripts/inventory/landscape.py b/scripts/inventory/landscape.py deleted file mode 100755 index 8301e00b71..0000000000 --- a/scripts/inventory/landscape.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python - -# (c) 2015, Marc Abramowitz -# -# This file is part of Ansible. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Dynamic inventory script which lets you use nodes discovered by Canonical's -# Landscape (http://www.ubuntu.com/management/landscape-features). 
-# -# Requires the `landscape_api` Python module -# See: -# - https://landscape.canonical.com/static/doc/api/api-client-package.html -# - https://landscape.canonical.com/static/doc/api/python-api.html -# -# Environment variables -# --------------------- -# - `LANDSCAPE_API_URI` -# - `LANDSCAPE_API_KEY` -# - `LANDSCAPE_API_SECRET` -# - `LANDSCAPE_API_SSL_CA_FILE` (optional) - - -import argparse -import collections -import os -import sys - -from landscape_api.base import API, HTTPError - -import json - -_key = 'landscape' - - -class EnvironmentConfig(object): - uri = os.getenv('LANDSCAPE_API_URI') - access_key = os.getenv('LANDSCAPE_API_KEY') - secret_key = os.getenv('LANDSCAPE_API_SECRET') - ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE') - - -def _landscape_client(): - env = EnvironmentConfig() - return API( - uri=env.uri, - access_key=env.access_key, - secret_key=env.secret_key, - ssl_ca_file=env.ssl_ca_file) - - -def get_landscape_members_data(): - return _landscape_client().get_computers() - - -def get_nodes(data): - return [node['hostname'] for node in data] - - -def get_groups(data): - groups = collections.defaultdict(list) - - for node in data: - for value in node['tags']: - groups[value].append(node['hostname']) - - return groups - - -def get_meta(data): - meta = {'hostvars': {}} - for node in data: - meta['hostvars'][node['hostname']] = {'tags': node['tags']} - return meta - - -def print_list(): - data = get_landscape_members_data() - nodes = get_nodes(data) - groups = get_groups(data) - meta = get_meta(data) - inventory_data = {_key: nodes, '_meta': meta} - inventory_data.update(groups) - print(json.dumps(inventory_data)) - - -def print_host(host): - data = get_landscape_members_data() - meta = get_meta(data) - print(json.dumps(meta['hostvars'][host])) - - -def get_args(args_list): - parser = argparse.ArgumentParser( - description='ansible inventory script reading from landscape cluster') - mutex_group = 
parser.add_mutually_exclusive_group(required=True) - help_list = 'list all hosts from landscape cluster' - mutex_group.add_argument('--list', action='store_true', help=help_list) - help_host = 'display variables for a host' - mutex_group.add_argument('--host', help=help_host) - return parser.parse_args(args_list) - - -def main(args_list): - args = get_args(args_list) - if args.list: - print_list() - if args.host: - print_host(args.host) - - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/scripts/inventory/libcloud.ini b/scripts/inventory/libcloud.ini deleted file mode 100644 index 7592c41cd0..0000000000 --- a/scripts/inventory/libcloud.ini +++ /dev/null @@ -1,15 +0,0 @@ -# Ansible Apache Libcloud Generic inventory script - -[driver] -provider = CLOUDSTACK -host = -path = -secure = True -verify_ssl_cert = True - -key = -secret = - -[cache] -cache_path=/path/to/your/cache -cache_max_age=60 diff --git a/scripts/inventory/linode.ini b/scripts/inventory/linode.ini deleted file mode 100644 index c925d970e9..0000000000 --- a/scripts/inventory/linode.ini +++ /dev/null @@ -1,18 +0,0 @@ -# Ansible Linode external inventory script settings -# - -[linode] - -# API calls to Linode are slow. For this reason, we cache the results of an API -# call. Set this to the path you want cache files to be written to. Two files -# will be written to this directory: -# - ansible-Linode.cache -# - ansible-Linode.index -cache_path = /tmp - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. 
-cache_max_age = 300 - -# If set to true use the hosts public ip in the dictionary instead of the label -use_public_ip = false \ No newline at end of file diff --git a/scripts/inventory/linode.py b/scripts/inventory/linode.py deleted file mode 100755 index 2972725d88..0000000000 --- a/scripts/inventory/linode.py +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/env python - -''' -Linode external inventory script -================================= - -Generates inventory that Ansible can understand by making API request to -Linode using the Chube library. - -NOTE: This script assumes Ansible is being executed where Chube is already -installed and has a valid config at ~/.chube. If not, run: - - pip install chube - echo -e "---\napi_key: " > ~/.chube - -For more details, see: https://github.com/exosite/chube - -NOTE: By default, this script also assumes that the Linodes in your account all have -labels that correspond to hostnames that are in your resolver search path. -Your resolver search path resides in /etc/hosts. -Optionally, if you would like to use the hosts public IP instead of it's label use -the following setting in linode.ini: - - use_public_ip = true - -When run against a specific host, this script returns the following variables: - - - api_id - - datacenter_id - - datacenter_city (lowercase city name of data center, e.g. 'tokyo') - - label - - display_group - - create_dt - - total_hd - - total_xfer - - total_ram - - status - - public_ip (The first public IP found) - - private_ip (The first private IP found, or empty string if none) - - alert_cpu_enabled - - alert_cpu_threshold - - alert_diskio_enabled - - alert_diskio_threshold - - alert_bwin_enabled - - alert_bwin_threshold - - alert_bwout_enabled - - alert_bwout_threshold - - alert_bwquota_enabled - - alert_bwquota_threshold - - backup_weekly_daily - - backup_window - - watchdog - -Peter Sankauskas did most of the legwork here with his linode plugin; I -just adapted that for Linode. 
-''' - -# (c) 2013, Dan Slimmon -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -# Standard imports -import os -import re -import sys -import argparse -from time import time - -import json - -try: - from chube import load_chube_config - from chube import api as chube_api - from chube.datacenter import Datacenter - from chube.linode_obj import Linode -except Exception: - try: - # remove local paths and other stuff that may - # cause an import conflict, as chube is sensitive - # to name collisions on importing - old_path = sys.path - sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))] - - from chube import load_chube_config - from chube import api as chube_api - from chube.datacenter import Datacenter - from chube.linode_obj import Linode - - sys.path = old_path - except Exception as e: - raise Exception("could not import chube") - -load_chube_config() - -# Imports for ansible -from ansible.module_utils.six.moves import configparser as ConfigParser - - -class LinodeInventory(object): - def _empty_inventory(self): - return {"_meta": {"hostvars": {}}} - - def __init__(self): - """Main execution path.""" - # Inventory grouped by display group - self.inventory = self._empty_inventory() - # Index of label to Linode ID - self.index = {} - # Local cache of Datacenter objects populated by populate_datacenter_cache() - self._datacenter_cache = None - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - # Cache - if self.args.refresh_cache: - self.do_api_calls_update_cache() - elif not self.is_cache_valid(): - self.do_api_calls_update_cache() - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - elif self.args.list: - # Display list of nodes 
for inventory - if len(self.inventory) == 1: - data_to_print = self.get_inventory_from_cache() - else: - data_to_print = self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def is_cache_valid(self): - """Determines if the cache file has expired, or if it is still valid.""" - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_index): - return True - return False - - def read_settings(self): - """Reads the settings from the .ini file.""" - config = ConfigParser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini') - - # Cache related - cache_path = config.get('linode', 'cache_path') - self.cache_path_cache = cache_path + "/ansible-linode.cache" - self.cache_path_index = cache_path + "/ansible-linode.index" - self.cache_max_age = config.getint('linode', 'cache_max_age') - self.use_public_ip = config.getboolean('linode', 'use_public_ip') - - def parse_cli_args(self): - """Command line argument processing""" - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode') - parser.add_argument('--list', action='store_true', default=True, - help='List nodes (default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific node') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to Linode (default: False - use cache files)') - self.args = parser.parse_args() - - def do_api_calls_update_cache(self): - """Do API calls, and save data in cache files.""" - self.get_nodes() - self.write_to_cache(self.inventory, self.cache_path_cache) - self.write_to_cache(self.index, self.cache_path_index) - - def get_nodes(self): - """Makes an Linode API call to get the list of nodes.""" - try: - for node in 
Linode.search(status=Linode.STATUS_RUNNING): - self.add_node(node) - except chube_api.linode_api.ApiError as e: - sys.exit("Looks like Linode's API is down:\n %s" % e) - - def get_node(self, linode_id): - """Gets details about a specific node.""" - try: - return Linode.find(api_id=linode_id) - except chube_api.linode_api.ApiError as e: - sys.exit("Looks like Linode's API is down:\n%s" % e) - - def populate_datacenter_cache(self): - """Creates self._datacenter_cache, containing all Datacenters indexed by ID.""" - self._datacenter_cache = {} - dcs = Datacenter.search() - for dc in dcs: - self._datacenter_cache[dc.api_id] = dc - - def get_datacenter_city(self, node): - """Returns a the lowercase city name of the node's data center.""" - if self._datacenter_cache is None: - self.populate_datacenter_cache() - location = self._datacenter_cache[node.datacenter_id].location - location = location.lower() - location = location.split(",")[0] - return location - - def add_node(self, node): - """Adds an node to the inventory and index.""" - if self.use_public_ip: - dest = self.get_node_public_ip(node) - else: - dest = node.label - - # Add to index - self.index[dest] = node.api_id - - # Inventory: Group by node ID (always a group of 1) - self.inventory[node.api_id] = [dest] - - # Inventory: Group by datacenter city - self.push(self.inventory, self.get_datacenter_city(node), dest) - - # Inventory: Group by display group - self.push(self.inventory, node.display_group, dest) - - # Inventory: Add a "linode" global tag group - self.push(self.inventory, "linode", dest) - - # Add host info to hostvars - self.inventory["_meta"]["hostvars"][dest] = self._get_host_info(node) - - def get_node_public_ip(self, node): - """Returns a the public IP address of the node""" - return [addr.address for addr in node.ipaddresses if addr.is_public][0] - - def get_host_info(self): - """Get variables about a specific host.""" - - if len(self.index) == 0: - # Need to load index from cache - 
self.load_index_from_cache() - - if self.args.host not in self.index: - # try updating the cache - self.do_api_calls_update_cache() - if self.args.host not in self.index: - # host might not exist anymore - return self.json_format_dict({}, True) - - node_id = self.index[self.args.host] - node = self.get_node(node_id) - - return self.json_format_dict(self._get_host_info(node), True) - - def _get_host_info(self, node): - node_vars = {} - for direct_attr in [ - "api_id", - "datacenter_id", - "label", - "display_group", - "create_dt", - "total_hd", - "total_xfer", - "total_ram", - "status", - "alert_cpu_enabled", - "alert_cpu_threshold", - "alert_diskio_enabled", - "alert_diskio_threshold", - "alert_bwin_enabled", - "alert_bwin_threshold", - "alert_bwout_enabled", - "alert_bwout_threshold", - "alert_bwquota_enabled", - "alert_bwquota_threshold", - "backup_weekly_daily", - "backup_window", - "watchdog" - ]: - node_vars[direct_attr] = getattr(node, direct_attr) - - node_vars["datacenter_city"] = self.get_datacenter_city(node) - node_vars["public_ip"] = self.get_node_public_ip(node) - - # Set the SSH host information, so these inventory items can be used if - # their labels aren't FQDNs - node_vars['ansible_ssh_host'] = node_vars["public_ip"] - node_vars['ansible_host'] = node_vars["public_ip"] - - private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public] - - if private_ips: - node_vars["private_ip"] = private_ips[0] - - return node_vars - - def push(self, my_dict, key, element): - """Pushed an element onto an array that may not have been defined in the dict.""" - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def get_inventory_from_cache(self): - """Reads the inventory from the cache file and returns it as a JSON object.""" - cache = open(self.cache_path_cache, 'r') - json_inventory = cache.read() - return json_inventory - - def load_index_from_cache(self): - """Reads the index from the cache file and sets 
self.index.""" - cache = open(self.cache_path_index, 'r') - json_index = cache.read() - self.index = json.loads(json_index) - - def write_to_cache(self, data, filename): - """Writes data in JSON format to a file.""" - json_data = self.json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def to_safe(self, word): - """Escapes any characters that would be invalid in an ansible group name.""" - return re.sub(r"[^A-Za-z0-9\-]", "_", word) - - def json_format_dict(self, data, pretty=False): - """Converts a dict to a JSON object and dumps it as a formatted string.""" - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -LinodeInventory() diff --git a/scripts/inventory/lxc_inventory.py b/scripts/inventory/lxc_inventory.py deleted file mode 100755 index 5a40b40837..0000000000 --- a/scripts/inventory/lxc_inventory.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python -# -# (c) 2015-16 Florian Haas, hastexo Professional Services GmbH -# -# Based in part on: -# libvirt_lxc.py, (c) 2013, Michael Scherer -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -""" -Ansible inventory script for LXC containers. Requires Python -bindings for LXC API. - -In LXC, containers can be grouped by setting the lxc.group option, -which may be found more than once in a container's -configuration. So, we enumerate all containers, fetch their list -of groups, and then build the dictionary in the way Ansible expects -it. -""" - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import lxc -import json - - -def build_dict(): - """Returns a dictionary keyed to the defined LXC groups. All - containers, including the ones not in any group, are included in the - "all" group.""" - # Enumerate all containers, and list the groups they are in. Also, - # implicitly add every container to the 'all' group. 
- containers = dict([(c, - ['all'] + - (lxc.Container(c).get_config_item('lxc.group') or [])) - for c in lxc.list_containers()]) - - # Extract the groups, flatten the list, and remove duplicates - groups = set(sum(containers.values(), [])) - - # Create a dictionary for each group (including the 'all' group - return dict([(g, {'hosts': [k for k, v in containers.items() if g in v], - 'vars': {'ansible_connection': 'lxc'}}) for g in groups]) - - -def main(argv): - """Returns a JSON dictionary as expected by Ansible""" - result = build_dict() - if len(argv) == 2 and argv[1] == '--list': - json.dump(result, sys.stdout) - elif len(argv) == 3 and argv[1] == '--host': - json.dump({'ansible_connection': 'lxc'}, sys.stdout) - else: - print("Need an argument, either --list or --host ", file=sys.stderr) - - -if __name__ == '__main__': - main(sys.argv) diff --git a/scripts/inventory/lxd.ini b/scripts/inventory/lxd.ini deleted file mode 100644 index 5398e7d021..0000000000 --- a/scripts/inventory/lxd.ini +++ /dev/null @@ -1,13 +0,0 @@ -# LXD external inventory script settings - -[lxd] - -# The default resource -#resource = local: - -# The group name to add the hosts to -#group = lxd - -# The connection type to return for these hosts - lxd hasn't been tested yet -#connection = lxd -connection = smart diff --git a/scripts/inventory/lxd.py b/scripts/inventory/lxd.py deleted file mode 100755 index 8e8794eb81..0000000000 --- a/scripts/inventory/lxd.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python - -# (c) 2013, Michael Scherer -# (c) 2014, Hiroaki Nakamura -# (c) 2016, Andew Clarke -# -# This file is based on https://github.com/ansible/ansible/blob/devel/plugins/inventory/libvirt_lxc.py which is part of Ansible, -# and https://github.com/hnakamur/lxc-ansible-playbooks/blob/master/provisioning/inventory-lxc.py -# -# NOTE, this file has some obvious limitations, improvements welcome -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 
- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -from subprocess import Popen, PIPE -import distutils.spawn -import sys -import json - -from ansible.module_utils.six.moves import configparser - -# Set up defaults -resource = 'local:' -group = 'lxd' -connection = 'lxd' -hosts = {} -result = {} - -# Read the settings from the lxd.ini file -config = configparser.SafeConfigParser() -config.read(os.path.dirname(os.path.realpath(__file__)) + '/lxd.ini') -if config.has_option('lxd', 'resource'): - resource = config.get('lxd', 'resource') -if config.has_option('lxd', 'group'): - group = config.get('lxd', 'group') -if config.has_option('lxd', 'connection'): - connection = config.get('lxd', 'connection') - -# Ensure executable exists -if distutils.spawn.find_executable('lxc'): - - # Set up containers result and hosts array - result[group] = {} - result[group]['hosts'] = [] - - # Run the command and load json result - pipe = Popen(['lxc', 'list', resource, '--format', 'json'], stdout=PIPE, universal_newlines=True) - lxdjson = json.load(pipe.stdout) - - # Iterate the json lxd output - for item in lxdjson: - - # Check state and network - if 'state' in item and item['state'] is not None and 'network' in item['state']: - network = item['state']['network'] - - # Check for eth0 and addresses - if 'eth0' in network and 'addresses' in network['eth0']: - addresses = network['eth0']['addresses'] - - # Iterate addresses - for address in addresses: - - # Only return inet family addresses - if 'family' in address and address['family'] == 'inet': - if 'address' in address: - ip = address['address'] - name = item['name'] - - # Add the host to the results and the host array - result[group]['hosts'].append(name) - hosts[name] = ip - - # Set the other containers result values - result[group]['vars'] = {} - result[group]['vars']['ansible_connection'] = connection - -# Process arguments -if len(sys.argv) == 2 and sys.argv[1] == '--list': - 
print(json.dumps(result)) -elif len(sys.argv) == 3 and sys.argv[1] == '--host': - if sys.argv[2] == 'localhost': - print(json.dumps({'ansible_connection': 'local'})) - else: - if connection == 'lxd': - print(json.dumps({'ansible_connection': connection})) - else: - print(json.dumps({'ansible_connection': connection, 'ansible_host': hosts[sys.argv[2]]})) -else: - print("Need an argument, either --list or --host ") diff --git a/scripts/inventory/mdt.ini b/scripts/inventory/mdt.ini deleted file mode 100644 index c401c0ce17..0000000000 --- a/scripts/inventory/mdt.ini +++ /dev/null @@ -1,17 +0,0 @@ -[mdt] - -# Set the MDT server to connect to -server = localhost.example.com - -# Set the MDT Instance -instance = EXAMPLEINSTANCE - -# Set the MDT database -database = MDTDB - -# Configure login credentials -user = local.domain\admin -password = adminpassword - -[tower] -groupname = mdt diff --git a/scripts/inventory/mdt_dynamic_inventory.py b/scripts/inventory/mdt_dynamic_inventory.py deleted file mode 100755 index 6409e37fe1..0000000000 --- a/scripts/inventory/mdt_dynamic_inventory.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python - -# (c) 2016, Julian Barnett -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -MDT external inventory script -================================= -author: J Barnett 06/23/2016 01:15 -maintainer: J Barnett (github @jbarnett1981) -''' - -import argparse -import json -import pymssql -from ansible.module_utils.six.moves import configparser - - -class MDTInventory(object): - - def __init__(self): - ''' Main execution path ''' - self.conn = None - - # Initialize empty inventory - self.inventory = self._empty_inventory() - - # Read CLI arguments - self.read_settings() - self.parse_cli_args() - - # Get Hosts - if self.args.list: - self.get_hosts() - - # Get specific host vars - if self.args.host: - 
self.get_hosts(self.args.host) - - def _connect(self, query): - ''' - Connect to MDT and dump contents of dbo.ComputerIdentity database - ''' - if not self.conn: - self.conn = pymssql.connect(server=self.mdt_server + "\\" + self.mdt_instance, user=self.mdt_user, password=self.mdt_password, - database=self.mdt_database) - cursor = self.conn.cursor() - cursor.execute(query) - self.mdt_dump = cursor.fetchall() - self.conn.close() - - def get_hosts(self, hostname=False): - ''' - Gets host from MDT Database - ''' - if hostname: - query = ("SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role " - "FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID where t1.Description = '%s'" % hostname) - else: - query = 'SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID' - self._connect(query) - - # Configure to group name configured in Ansible Tower for this inventory - groupname = self.mdt_groupname - - # Initialize empty host list - hostlist = [] - - # Parse through db dump and populate inventory - for hosts in self.mdt_dump: - self.inventory['_meta']['hostvars'][hosts[1]] = {'id': hosts[0], 'name': hosts[1], 'mac': hosts[2], 'role': hosts[3]} - hostlist.append(hosts[1]) - self.inventory[groupname] = hostlist - - # Print it all out - print(json.dumps(self.inventory, indent=2)) - - def _empty_inventory(self): - ''' - Create empty inventory dictionary - ''' - return {"_meta": {"hostvars": {}}} - - def read_settings(self): - ''' - Reads the settings from the mdt.ini file - ''' - config = configparser.SafeConfigParser() - config.read('mdt.ini') - - # MDT Server and instance and database - self.mdt_server = config.get('mdt', 'server') - self.mdt_instance = config.get('mdt', 'instance') - self.mdt_database = config.get('mdt', 'database') - - # MDT Login credentials - if config.has_option('mdt', 'user'): - self.mdt_user = config.get('mdt', 'user') - if config.has_option('mdt', 'password'): - 
self.mdt_password = config.get('mdt', 'password') - - # Group name in Tower - if config.has_option('tower', 'groupname'): - self.mdt_groupname = config.get('tower', 'groupname') - - def parse_cli_args(self): - ''' - Command line argument processing - ''' - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on MDT') - parser.add_argument('--list', action='store_true', default=False, help='List instances') - parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - self.args = parser.parse_args() - - -if __name__ == "__main__": - # Run the script - MDTInventory() diff --git a/scripts/inventory/nagios_livestatus.ini b/scripts/inventory/nagios_livestatus.ini deleted file mode 100644 index 320f11f35c..0000000000 --- a/scripts/inventory/nagios_livestatus.ini +++ /dev/null @@ -1,41 +0,0 @@ -# Ansible Nagios external inventory script settings -# -# To get all available possibilities, check following URL: -# http://www.naemon.org/documentation/usersguide/livestatus.html -# https://mathias-kettner.de/checkmk_livestatus.html -# - -[local] -# Livestatus URI -# Example for default naemon livestatus unix socket : -# livestatus_uri=unix:/var/cache/naemon/live - -[remote] - -# default field name for host: name -# Uncomment to override: -# host_field=address -# -# default field group for host: groups -# Uncomment to override: -# group_field=state -# default fields retrieved: address, alias, display_name, children, parents -# To override, uncomment the following line -# fields_to_retrieve=address,alias,display_name -# -# default variable prefix: livestatus_ -# To override, uncomment the following line -# var_prefix=naemon_ -# -# default filter: None -# -# Uncomment to override -# -# All host with state = OK -# host_filter=state = 0 -# Warning: for the moment, you can use only one filter at a time. You cannot combine various conditions. 
-# -# All host in groups Linux -# host_filter=groups >= Linux -# -livestatus_uri=tcp:192.168.66.137:6557 diff --git a/scripts/inventory/nagios_livestatus.py b/scripts/inventory/nagios_livestatus.py deleted file mode 100755 index bdf9f673de..0000000000 --- a/scripts/inventory/nagios_livestatus.py +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env python - -# (c) 2015, Yannig Perre -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Nagios livestatus inventory script. Before using this script, please -update nagios_livestatus.ini file. - -Livestatus is a nagios/naemon/shinken module which let you retrieve -informations stored in the monitoring core. - -This plugin inventory need livestatus API for python. Please install it -before using this script (apt/pip/yum/...). - -Checkmk livestatus: https://mathias-kettner.de/checkmk_livestatus.html -Livestatus API: http://www.naemon.org/documentation/usersguide/livestatus.html -''' - -import os -import re -import argparse -import sys - -from ansible.module_utils.six.moves import configparser -import json - -try: - from mk_livestatus import Socket -except ImportError: - sys.exit("Error: mk_livestatus is needed. 
Try something like: pip install python-mk-livestatus") - - -class NagiosLivestatusInventory(object): - - def parse_ini_file(self): - config = configparser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_livestatus.ini') - for section in config.sections(): - if not config.has_option(section, 'livestatus_uri'): - continue - - # If fields_to_retrieve is not set, using default fields - fields_to_retrieve = self.default_fields_to_retrieve - if config.has_option(section, 'fields_to_retrieve'): - fields_to_retrieve = [field.strip() for field in config.get(section, 'fields_to_retrieve').split(',')] - fields_to_retrieve = tuple(fields_to_retrieve) - - # default section values - section_values = { - 'var_prefix': 'livestatus_', - 'host_filter': None, - 'host_field': 'name', - 'group_field': 'groups' - } - for key, value in section_values.items(): - if config.has_option(section, key): - section_values[key] = config.get(section, key).strip() - - # Retrieving livestatus string connection - livestatus_uri = config.get(section, 'livestatus_uri') - backend_definition = None - - # Local unix socket - unix_match = re.match('unix:(.*)', livestatus_uri) - if unix_match is not None: - backend_definition = {'connection': unix_match.group(1)} - - # Remote tcp connection - tcp_match = re.match('tcp:(.*):([^:]*)', livestatus_uri) - if tcp_match is not None: - backend_definition = {'connection': (tcp_match.group(1), int(tcp_match.group(2)))} - - # No valid livestatus_uri => exiting - if backend_definition is None: - raise Exception('livestatus_uri field is invalid (%s). 
Expected: unix:/path/to/live or tcp:host:port' % livestatus_uri) - - # Updating backend_definition with current value - backend_definition['name'] = section - backend_definition['fields'] = fields_to_retrieve - for key, value in section_values.items(): - backend_definition[key] = value - - self.backends.append(backend_definition) - - def parse_options(self): - parser = argparse.ArgumentParser() - parser.add_argument('--host', nargs=1) - parser.add_argument('--list', action='store_true') - parser.add_argument('--pretty', action='store_true') - self.options = parser.parse_args() - - def add_host(self, hostname, group): - if group not in self.result: - self.result[group] = {} - self.result[group]['hosts'] = [] - if hostname not in self.result[group]['hosts']: - self.result[group]['hosts'].append(hostname) - - def query_backend(self, backend, host=None): - '''Query a livestatus backend''' - hosts_request = Socket(backend['connection']).hosts.columns(backend['host_field'], backend['group_field']) - - if backend['host_filter'] is not None: - hosts_request = hosts_request.filter(backend['host_filter']) - - if host is not None: - hosts_request = hosts_request.filter('name = ' + host[0]) - - hosts_request._columns += backend['fields'] - - hosts = hosts_request.call() - for host in hosts: - hostname = host[backend['host_field']] - hostgroups = host[backend['group_field']] - if not isinstance(hostgroups, list): - hostgroups = [hostgroups] - self.add_host(hostname, 'all') - self.add_host(hostname, backend['name']) - for group in hostgroups: - self.add_host(hostname, group) - for field in backend['fields']: - var_name = backend['var_prefix'] + field - if hostname not in self.result['_meta']['hostvars']: - self.result['_meta']['hostvars'][hostname] = {} - self.result['_meta']['hostvars'][hostname][var_name] = host[field] - - def __init__(self): - - self.defaultgroup = 'group_all' - self.default_fields_to_retrieve = ('address', 'alias', 'display_name', 'childs', 'parents') - 
self.backends = [] - self.options = None - - self.parse_ini_file() - self.parse_options() - - self.result = {} - self.result['_meta'] = {} - self.result['_meta']['hostvars'] = {} - self.json_indent = None - if self.options.pretty: - self.json_indent = 2 - - if len(self.backends) == 0: - sys.exit("Error: Livestatus configuration is missing. See nagios_livestatus.ini.") - - for backend in self.backends: - self.query_backend(backend, self.options.host) - - if self.options.host: - print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent=self.json_indent)) - elif self.options.list: - print(json.dumps(self.result, indent=self.json_indent)) - else: - sys.exit("usage: --list or --host HOSTNAME [--pretty]") - - -NagiosLivestatusInventory() diff --git a/scripts/inventory/nagios_ndo.ini b/scripts/inventory/nagios_ndo.ini deleted file mode 100644 index 1e133a29f3..0000000000 --- a/scripts/inventory/nagios_ndo.ini +++ /dev/null @@ -1,10 +0,0 @@ -# Ansible Nagios external inventory script settings -# - -[ndo] -# NDO database URI -# Make sure that data is returned as strings and not bytes if using python 3. -# See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html -# for supported databases and URI format. -# Example for mysqlclient module : -database_uri=mysql+mysqldb://user:passwd@hostname/ndo?charset=utf8&use_unicode=1 diff --git a/scripts/inventory/nagios_ndo.py b/scripts/inventory/nagios_ndo.py deleted file mode 100755 index ffd5500f8e..0000000000 --- a/scripts/inventory/nagios_ndo.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python - -# (c) 2014, Jonathan Lestrelin -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -Nagios NDO external inventory script. -======================================== - -Returns hosts and hostgroups from Nagios NDO. - -Configuration is read from `nagios_ndo.ini`. 
-""" - -import os -import argparse -import sys -from ansible.module_utils.six.moves import configparser -import json - -try: - from sqlalchemy import text - from sqlalchemy.engine import create_engine -except ImportError: - sys.exit("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy") - - -class NagiosNDOInventory(object): - - def read_settings(self): - config = configparser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini') - if config.has_option('ndo', 'database_uri'): - self.ndo_database_uri = config.get('ndo', 'database_uri') - - def read_cli(self): - parser = argparse.ArgumentParser() - parser.add_argument('--host', nargs=1) - parser.add_argument('--list', action='store_true') - self.options = parser.parse_args() - - def get_hosts(self): - engine = create_engine(self.ndo_database_uri) - connection = engine.connect() - select_hosts = text("SELECT display_name \ - FROM nagios_hosts") - select_hostgroups = text("SELECT alias \ - FROM nagios_hostgroups") - select_hostgroup_hosts = text("SELECT h.display_name \ - FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \ - WHERE hgm.hostgroup_id = hg.hostgroup_id \ - AND hgm.host_object_id = h.host_object_id \ - AND hg.alias =:hostgroup_alias") - - hosts = connection.execute(select_hosts) - self.result['all']['hosts'] = [host['display_name'] for host in hosts] - - for hostgroup in connection.execute(select_hostgroups): - hostgroup_alias = hostgroup['alias'] - self.result[hostgroup_alias] = {} - hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias) - self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts] - - def __init__(self): - - self.defaultgroup = 'group_all' - self.ndo_database_uri = None - self.options = None - - self.read_settings() - self.read_cli() - - self.result = {} - self.result['all'] = {} - self.result['all']['hosts'] = [] - self.result['_meta'] = {} - 
self.result['_meta']['hostvars'] = {} - - if self.ndo_database_uri: - self.get_hosts() - if self.options.host: - print(json.dumps({})) - elif self.options.list: - print(json.dumps(self.result)) - else: - sys.exit("usage: --list or --host HOSTNAME") - else: - sys.exit("Error: Database configuration is missing. See nagios_ndo.ini.") - - -NagiosNDOInventory() diff --git a/scripts/inventory/nsot.py b/scripts/inventory/nsot.py deleted file mode 100755 index 10f24ad48b..0000000000 --- a/scripts/inventory/nsot.py +++ /dev/null @@ -1,346 +0,0 @@ -#!/usr/bin/env python - -''' -nsot -==== - -Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox - -Features --------- - -* Define host groups in form of NSoT device attribute criteria - -* All parameters defined by the spec as of 2015-09-05 are supported. - - + ``--list``: Returns JSON hash of host groups -> hosts and top-level - ``_meta`` -> ``hostvars`` which correspond to all device attributes. - - Group vars can be specified in the YAML configuration, noted below. - - + ``--host ``: Returns JSON hash where every item is a device - attribute. - -* In addition to all attributes assigned to resource being returned, script - will also append ``site_id`` and ``id`` as facts to utilize. - - -Configuration ------------- - -Since it'd be annoying and failure prone to guess where you're configuration -file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it. - -This file should adhere to the YAML spec. All top-level variable must be -desired Ansible group-name hashed with single 'query' item to define the NSoT -attribute query. - -Queries follow the normal NSoT query syntax, `shown here`_ - -.. _shown here: https://github.com/dropbox/pynsot#set-queries - -.. 
code:: yaml - - routers: - query: 'deviceType=ROUTER' - vars: - a: b - c: d - - juniper_fw: - query: 'deviceType=FIREWALL manufacturer=JUNIPER' - - not_f10: - query: '-manufacturer=FORCE10' - -The inventory will automatically use your ``.pynsotrc`` like normal pynsot from -cli would, so make sure that's configured appropriately. - -.. note:: - - Attributes I'm showing above are influenced from ones that the Trigger - project likes. As is the spirit of NSoT, use whichever attributes work best - for your workflow. - -If config file is blank or absent, the following default groups will be -created: - -* ``routers``: deviceType=ROUTER -* ``switches``: deviceType=SWITCH -* ``firewalls``: deviceType=FIREWALL - -These are likely not useful for everyone so please use the configuration. :) - -.. note:: - - By default, resources will only be returned for what your default - site is set for in your ``~/.pynsotrc``. - - If you want to specify, add an extra key under the group for ``site: n``. - -Output Examples ---------------- - -Here are some examples shown from just calling the command directly:: - - $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.' - { - "routers": { - "hosts": [ - "test1.example.com" - ], - "vars": { - "cool_level": "very", - "group": "routers" - } - }, - "firewalls": { - "hosts": [ - "test2.example.com" - ], - "vars": { - "cool_level": "enough", - "group": "firewalls" - } - }, - "_meta": { - "hostvars": { - "test2.example.com": { - "make": "SRX", - "site_id": 1, - "id": 108 - }, - "test1.example.com": { - "make": "MX80", - "site_id": 1, - "id": 107 - } - } - }, - "rtr_and_fw": { - "hosts": [ - "test1.example.com", - "test2.example.com" - ], - "vars": {} - } - } - - - $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.' 
- { - "make": "MX80", - "site_id": 1, - "id": 107 - } - -''' - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import os -import pkg_resources -import argparse -import json -import yaml -from textwrap import dedent -from pynsot.client import get_api_client -from pynsot.app import HttpServerError -from click.exceptions import UsageError - -from ansible.module_utils.six import string_types - - -def warning(*objs): - print("WARNING: ", *objs, file=sys.stderr) - - -class NSoTInventory(object): - '''NSoT Client object for gather inventory''' - - def __init__(self): - self.config = dict() - config_env = os.environ.get('NSOT_INVENTORY_CONFIG') - if config_env: - try: - config_file = os.path.abspath(config_env) - except IOError: # If file non-existent, use default config - self._config_default() - except Exception as e: - sys.exit('%s\n' % e) - - with open(config_file) as f: - try: - self.config.update(yaml.safe_load(f)) - except TypeError: # If empty file, use default config - warning('Empty config file') - self._config_default() - except Exception as e: - sys.exit('%s\n' % e) - else: # Use defaults if env var missing - self._config_default() - self.groups = list(self.config.keys()) - self.client = get_api_client() - self._meta = {'hostvars': dict()} - - def _config_default(self): - default_yaml = ''' - --- - routers: - query: deviceType=ROUTER - switches: - query: deviceType=SWITCH - firewalls: - query: deviceType=FIREWALL - ''' - self.config = yaml.safe_load(dedent(default_yaml)) - - def do_list(self): - '''Direct callback for when ``--list`` is provided - - Relies on the configuration generated from init to run - _inventory_group() - ''' - inventory = dict() - for group, contents in self.config.items(): - group_response = self._inventory_group(group, contents) - inventory.update(group_response) - inventory.update({'_meta': self._meta}) - return json.dumps(inventory) - - def do_host(self, host): - return 
json.dumps(self._hostvars(host)) - - def _hostvars(self, host): - '''Return dictionary of all device attributes - - Depending on number of devices in NSoT, could be rather slow since this - has to request every device resource to filter through - ''' - device = [i for i in self.client.devices.get() - if host in i['hostname']][0] - attributes = device['attributes'] - attributes.update({'site_id': device['site_id'], 'id': device['id']}) - return attributes - - def _inventory_group(self, group, contents): - '''Takes a group and returns inventory for it as dict - - :param group: Group name - :type group: str - :param contents: The contents of the group's YAML config - :type contents: dict - - contents param should look like:: - - { - 'query': 'xx', - 'vars': - 'a': 'b' - } - - Will return something like:: - - { group: { - hosts: [], - vars: {}, - } - ''' - query = contents.get('query') - hostvars = contents.get('vars', dict()) - site = contents.get('site', dict()) - obj = {group: dict()} - obj[group]['hosts'] = [] - obj[group]['vars'] = hostvars - try: - assert isinstance(query, string_types) - except Exception: - sys.exit('ERR: Group queries must be a single string\n' - ' Group: %s\n' - ' Query: %s\n' % (group, query) - ) - try: - if site: - site = self.client.sites(site) - devices = site.devices.query.get(query=query) - else: - devices = self.client.devices.query.get(query=query) - except HttpServerError as e: - if '500' in str(e.response): - _site = 'Correct site id?' - _attr = 'Queried attributes actually exist?' - questions = _site + '\n' + _attr - sys.exit('ERR: 500 from server.\n%s' % questions) - else: - raise - except UsageError: - sys.exit('ERR: Could not connect to server. 
Running?') - - # Would do a list comprehension here, but would like to save code/time - # and also acquire attributes in this step - for host in devices: - # Iterate through each device that matches query, assign hostname - # to the group's hosts array and then use this single iteration as - # a chance to update self._meta which will be used in the final - # return - hostname = host['hostname'] - obj[group]['hosts'].append(hostname) - attributes = host['attributes'] - attributes.update({'site_id': host['site_id'], 'id': host['id']}) - self._meta['hostvars'].update({hostname: attributes}) - - return obj - - -def parse_args(): - desc = __doc__.splitlines()[4] # Just to avoid being redundant - - # Establish parser with options and error out if no action provided - parser = argparse.ArgumentParser( - description=desc, - conflict_handler='resolve', - ) - - # Arguments - # - # Currently accepting (--list | -l) and (--host | -h) - # These must not be allowed together - parser.add_argument( - '--list', '-l', - help='Print JSON object containing hosts to STDOUT', - action='store_true', - dest='list_', # Avoiding syntax highlighting for list - ) - - parser.add_argument( - '--host', '-h', - help='Print JSON object containing hostvars for ', - action='store', - ) - args = parser.parse_args() - - if not args.list_ and not args.host: # Require at least one option - parser.exit(status=1, message='No action requested') - - if args.list_ and args.host: # Do not allow multiple options - parser.exit(status=1, message='Too many actions requested') - - return args - - -def main(): - '''Set up argument handling and callback routing''' - args = parse_args() - client = NSoTInventory() - - # Callback condition - if args.list_: - print(client.do_list()) - elif args.host: - print(client.do_host(args.host)) - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/nsot.yaml b/scripts/inventory/nsot.yaml deleted file mode 100644 index ebddbc8234..0000000000 --- 
a/scripts/inventory/nsot.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -juniper_routers: - query: 'deviceType=ROUTER manufacturer=JUNIPER' - vars: - group: juniper_routers - netconf: true - os: junos - -cisco_asa: - query: 'manufacturer=CISCO deviceType=FIREWALL' - vars: - group: cisco_asa - routed_vpn: false - stateful: true - -old_cisco_asa: - query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+' - vars: - old_nat: true - -not_f10: - query: '-manufacturer=FORCE10' diff --git a/scripts/inventory/openshift.py b/scripts/inventory/openshift.py deleted file mode 100755 index 85ea00cb1d..0000000000 --- a/scripts/inventory/openshift.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python - -# (c) 2013, Michael Scherer -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -inventory: openshift -short_description: Openshift gears external inventory script -description: - - Generates inventory of Openshift gears using the REST interface - - this permit to reuse playbook to setup an Openshift gear -author: Michael Scherer -''' - -import json -import os -import os.path -import sys -import StringIO - -from ansible.module_utils.urls import open_url -from ansible.module_utils.six.moves import configparser as ConfigParser - -configparser = None - - -def get_from_rhc_config(variable): - global configparser - CONF_FILE = os.path.expanduser('~/.openshift/express.conf') - if os.path.exists(CONF_FILE): - if not configparser: - ini_str = '[root]\n' + open(CONF_FILE, 'r').read() - configparser = ConfigParser.SafeConfigParser() - configparser.readfp(StringIO.StringIO(ini_str)) - try: - return configparser.get('root', variable) - except ConfigParser.NoOptionError: - return None - - -def get_config(env_var, config_var): - result = os.getenv(env_var) - if not result: - result = get_from_rhc_config(config_var) - if not 
result: - sys.exit("failed=True msg='missing %s'" % env_var) - return result - - -def get_json_from_api(url, username, password): - headers = {'Accept': 'application/json; version=1.5'} - response = open_url(url, headers=headers, url_username=username, url_password=password) - return json.loads(response.read())['data'] - - -username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin') -password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password') -broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server') - - -response = get_json_from_api(broker_url + '/domains', username, password) - -response = get_json_from_api("%s/domains/%s/applications" % - (broker_url, response[0]['id']), username, password) - -result = {} -for app in response: - - # ssh://520311404832ce3e570000ff@blog-johndoe.example.org - (user, host) = app['ssh_url'][6:].split('@') - app_name = host.split('-')[0] - - result[app_name] = {} - result[app_name]['hosts'] = [] - result[app_name]['hosts'].append(host) - result[app_name]['vars'] = {} - result[app_name]['vars']['ansible_ssh_user'] = user - -if len(sys.argv) == 2 and sys.argv[1] == '--list': - print(json.dumps(result)) -elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print(json.dumps({})) -else: - print("Need an argument, either --list or --host ") diff --git a/scripts/inventory/openvz.py b/scripts/inventory/openvz.py deleted file mode 100755 index 95eec83912..0000000000 --- a/scripts/inventory/openvz.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# openvz.py -# -# Copyright 2014 jordonr -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# Inspired by libvirt_lxc.py inventory script -# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py -# -# Groups are determined by the description field of openvz guests -# multiple groups can be separated by 
commas: webserver,dbserver - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from subprocess import Popen, PIPE -import sys -import json - - -# List openvz hosts -vzhosts = ['vzhost1', 'vzhost2', 'vzhost3'] -# Add openvz hosts to the inventory and Add "_meta" trick -inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}} -# default group, when description not defined -default_group = ['vzguest'] - - -def get_guests(): - # Loop through vzhosts - for h in vzhosts: - # SSH to vzhost and get the list of guests in json - pipe = Popen(['ssh', h, 'vzlist', '-j'], stdout=PIPE, universal_newlines=True) - - # Load Json info of guests - json_data = json.loads(pipe.stdout.read()) - - # loop through guests - for j in json_data: - # Add information to host vars - inventory['_meta']['hostvars'][j['hostname']] = { - 'ctid': j['ctid'], - 'veid': j['veid'], - 'vpsid': j['vpsid'], - 'private_path': j['private'], - 'root_path': j['root'], - 'ip': j['ip'] - } - - # determine group from guest description - if j['description'] is not None: - groups = j['description'].split(",") - else: - groups = default_group - - # add guest to inventory - for g in groups: - if g not in inventory: - inventory[g] = {'hosts': []} - - inventory[g]['hosts'].append(j['hostname']) - - return inventory - - -if len(sys.argv) == 2 and sys.argv[1] == '--list': - inv_json = get_guests() - print(json.dumps(inv_json, sort_keys=True)) -elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print(json.dumps({})) -else: - print("Need an argument, either --list or --host ") diff --git a/scripts/inventory/ovirt.ini b/scripts/inventory/ovirt.ini deleted file mode 100644 index d9aaf8a73e..0000000000 --- a/scripts/inventory/ovirt.ini +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2013 Google Inc. 
-# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -# Author: Josha Inglis based on the gce.ini by Eric Johnson - -[ovirt] -# For ovirt.py script, which can be used with Python SDK version 3 -# Service Account configuration information can be stored in the -# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already -# exist in your PYTHONPATH and be picked up automatically with an import -# statement in the inventory script. However, you can specify an absolute -# path to the secrets.py file with 'libcloud_secrets' parameter. -ovirt_api_secrets = - -# If you are not going to use a 'secrets.py' file, you can set the necessary -# authorization parameters here. -ovirt_url = -ovirt_username = -ovirt_password = -ovirt_ca_file = diff --git a/scripts/inventory/ovirt.py b/scripts/inventory/ovirt.py deleted file mode 100755 index 04f7fc58ae..0000000000 --- a/scripts/inventory/ovirt.py +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/env python -# Copyright 2015 IIX Inc. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -ovirt external inventory script -================================= - -Generates inventory that Ansible can understand by making API requests to -oVirt via the ovirt-engine-sdk-python library. 
- -When run against a specific host, this script returns the following variables -based on the data obtained from the ovirt_sdk Node object: - - ovirt_uuid - - ovirt_id - - ovirt_image - - ovirt_machine_type - - ovirt_ips - - ovirt_name - - ovirt_description - - ovirt_status - - ovirt_zone - - ovirt_tags - - ovirt_stats - -When run in --list mode, instances are grouped by the following categories: - - - zone: - zone group name. - - instance tags: - An entry is created for each tag. For example, if you have two instances - with a common tag called 'foo', they will both be grouped together under - the 'tag_foo' name. - - network name: - the name of the network is appended to 'network_' (e.g. the 'default' - network will result in a group named 'network_default') - - running status: - group name prefixed with 'status_' (e.g. status_up, status_down,..) - -Examples: - Execute uname on all instances in the us-central1-a zone - $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a" - - Use the ovirt inventory script to print out instance specific information - $ contrib/inventory/ovirt.py --host my_instance - -Author: Josha Inglis based on the gce.py by Eric Johnson -Version: 0.0.1 -""" - -USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin" -USER_AGENT_VERSION = "v1" - -import sys -import os -import argparse -from collections import defaultdict -from ansible.module_utils.six.moves import configparser as ConfigParser - -import json - -try: - # noinspection PyUnresolvedReferences - from ovirtsdk.api import API - # noinspection PyUnresolvedReferences - from ovirtsdk.xml import params -except ImportError: - print("ovirt inventory script requires ovirt-engine-sdk-python") - sys.exit(1) - - -class OVirtInventory(object): - def __init__(self): - # Read settings and parse CLI arguments - self.args = self.parse_cli_args() - self.driver = self.get_ovirt_driver() - - # Just display data for specific host - if self.args.host: - print(self.json_format_dict( - 
self.node_to_dict(self.get_instance(self.args.host)), - pretty=self.args.pretty - )) - sys.exit(0) - - # Otherwise, assume user wants all instances grouped - print( - self.json_format_dict( - data=self.group_instances(), - pretty=self.args.pretty - ) - ) - sys.exit(0) - - @staticmethod - def get_ovirt_driver(): - """ - Determine the ovirt authorization settings and return a ovirt_sdk driver. - - :rtype : ovirtsdk.api.API - """ - kwargs = {} - - ovirt_ini_default_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "ovirt.ini") - ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path) - - # Create a ConfigParser. - # This provides empty defaults to each key, so that environment - # variable configuration (as opposed to INI configuration) is able - # to work. - config = ConfigParser.SafeConfigParser(defaults={ - 'ovirt_url': '', - 'ovirt_username': '', - 'ovirt_password': '', - 'ovirt_api_secrets': '', - }) - if 'ovirt' not in config.sections(): - config.add_section('ovirt') - config.read(ovirt_ini_path) - - # Attempt to get ovirt params from a configuration file, if one - # exists. 
- secrets_path = config.get('ovirt', 'ovirt_api_secrets') - secrets_found = False - try: - # noinspection PyUnresolvedReferences,PyPackageRequirements - import secrets - - kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) - secrets_found = True - except ImportError: - pass - - if not secrets_found and secrets_path: - if not secrets_path.endswith('secrets.py'): - err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py" - print(err) - sys.exit(1) - sys.path.append(os.path.dirname(secrets_path)) - try: - # noinspection PyUnresolvedReferences,PyPackageRequirements - import secrets - - kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) - except ImportError: - pass - if not secrets_found: - kwargs = { - 'url': config.get('ovirt', 'ovirt_url'), - 'username': config.get('ovirt', 'ovirt_username'), - 'password': config.get('ovirt', 'ovirt_password'), - } - - # If the appropriate environment variables are set, they override - # other configuration; process those into our args and kwargs. - kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url']) - kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None) - kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None) - - # Retrieve and return the ovirt driver. 
- return API(insecure=True, **kwargs) - - @staticmethod - def parse_cli_args(): - """ - Command line argument processing - - :rtype : argparse.Namespace - """ - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - parser.add_argument('--host', action='store', help='Get all information about an instance') - parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') - return parser.parse_args() - - def node_to_dict(self, inst): - """ - :type inst: params.VM - """ - if inst is None: - return {} - - inst.get_custom_properties() - ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \ - if inst.get_guest_info() is not None else [] - stats = {} - for stat in inst.get_statistics().list(): - stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum() - - return { - 'ovirt_uuid': inst.get_id(), - 'ovirt_id': inst.get_id(), - 'ovirt_image': inst.get_os().get_type(), - 'ovirt_machine_type': self.get_machine_type(inst), - 'ovirt_ips': ips, - 'ovirt_name': inst.get_name(), - 'ovirt_description': inst.get_description(), - 'ovirt_status': inst.get_status().get_state(), - 'ovirt_zone': inst.get_cluster().get_id(), - 'ovirt_tags': self.get_tags(inst), - 'ovirt_stats': stats, - # Hosts don't have a public name, so we add an IP - 'ansible_ssh_host': ips[0] if len(ips) > 0 else None - } - - @staticmethod - def get_tags(inst): - """ - :type inst: params.VM - """ - return [x.get_name() for x in inst.get_tags().list()] - - def get_machine_type(self, inst): - inst_type = inst.get_instance_type() - if inst_type: - return self.driver.instancetypes.get(id=inst_type.id).name - - # noinspection PyBroadException,PyUnusedLocal - def get_instance(self, instance_name): - """Gets details about a specific instance """ - try: - return 
self.driver.vms.get(name=instance_name) - except Exception as e: - return None - - def group_instances(self): - """Group all instances""" - groups = defaultdict(list) - meta = {"hostvars": {}} - - for node in self.driver.vms.list(): - assert isinstance(node, params.VM) - name = node.get_name() - - meta["hostvars"][name] = self.node_to_dict(node) - - zone = node.get_cluster().get_name() - groups[zone].append(name) - - tags = self.get_tags(node) - for t in tags: - tag = 'tag_%s' % t - groups[tag].append(name) - - nets = [x.get_name() for x in node.get_nics().list()] - for net in nets: - net = 'network_%s' % net - groups[net].append(name) - - status = node.get_status().get_state() - stat = 'status_%s' % status.lower() - if stat in groups: - groups[stat].append(name) - else: - groups[stat] = [name] - - groups["_meta"] = meta - - return groups - - @staticmethod - def json_format_dict(data, pretty=False): - """ Converts a dict to a JSON object and dumps it as a formatted - string """ - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -# Run the script -OVirtInventory() diff --git a/scripts/inventory/ovirt4.py b/scripts/inventory/ovirt4.py deleted file mode 100755 index 84b68a1258..0000000000 --- a/scripts/inventory/ovirt4.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -""" -oVirt dynamic inventory script -================================= - -Generates dynamic inventory file for oVirt. 
- -Script will return following attributes for each virtual machine: - - id - - name - - host - - cluster - - status - - description - - fqdn - - os_type - - template - - tags - - statistics - - devices - -When run in --list mode, virtual machines are grouped by the following categories: - - cluster - - tag - - status - - Note: If there is some virtual machine which has has more tags it will be in both tag - records. - -Examples: - # Execute update of system on webserver virtual machine: - - $ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest" - - # Get webserver virtual machine information: - - $ contrib/inventory/ovirt4.py --host webserver - -Author: Ondra Machacek (@machacekondra) -""" - -import argparse -import os -import sys - -from collections import defaultdict - -from ansible.module_utils.six.moves import configparser -from ansible.module_utils.six import PY2 - -import json - -try: - import ovirtsdk4 as sdk - import ovirtsdk4.types as otypes -except ImportError: - print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0') - sys.exit(1) - - -def parse_args(): - """ - Create command line parser for oVirt dynamic inventory script. - """ - parser = argparse.ArgumentParser( - description='Ansible dynamic inventory script for oVirt.', - ) - parser.add_argument( - '--list', - action='store_true', - default=True, - help='Get data of all virtual machines (default: True).', - ) - parser.add_argument( - '--host', - help='Get data of virtual machines running on specified host.', - ) - parser.add_argument( - '--pretty', - action='store_true', - default=False, - help='Pretty format (default: False).', - ) - return parser.parse_args() - - -def create_connection(): - """ - Create a connection to oVirt engine API. 
- """ - # Get the path of the configuration file, by default use - # 'ovirt.ini' file in script directory: - default_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'ovirt.ini', - ) - config_path = os.environ.get('OVIRT_INI_PATH', default_path) - - # Create parser and add ovirt section if it doesn't exist: - if PY2: - config = configparser.SafeConfigParser( - defaults={ - 'ovirt_url': os.environ.get('OVIRT_URL'), - 'ovirt_username': os.environ.get('OVIRT_USERNAME'), - 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), - 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''), - }, allow_no_value=True - ) - else: - config = configparser.ConfigParser( - defaults={ - 'ovirt_url': os.environ.get('OVIRT_URL'), - 'ovirt_username': os.environ.get('OVIRT_USERNAME'), - 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), - 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''), - }, allow_no_value=True - ) - if not config.has_section('ovirt'): - config.add_section('ovirt') - config.read(config_path) - - # Create a connection with options defined in ini file: - return sdk.Connection( - url=config.get('ovirt', 'ovirt_url'), - username=config.get('ovirt', 'ovirt_username'), - password=config.get('ovirt', 'ovirt_password', raw=True), - ca_file=config.get('ovirt', 'ovirt_ca_file') or None, - insecure=not config.get('ovirt', 'ovirt_ca_file'), - ) - - -def get_dict_of_struct(connection, vm): - """ - Transform SDK Vm Struct type to Python dictionary. 
- """ - if vm is None: - return dict() - - vms_service = connection.system_service().vms_service() - clusters_service = connection.system_service().clusters_service() - vm_service = vms_service.vm_service(vm.id) - devices = vm_service.reported_devices_service().list() - tags = vm_service.tags_service().list() - stats = vm_service.statistics_service().list() - labels = vm_service.affinity_labels_service().list() - groups = clusters_service.cluster_service( - vm.cluster.id - ).affinity_groups_service().list() - - return { - 'id': vm.id, - 'name': vm.name, - 'host': connection.follow_link(vm.host).name if vm.host else None, - 'cluster': connection.follow_link(vm.cluster).name, - 'status': str(vm.status), - 'description': vm.description, - 'fqdn': vm.fqdn, - 'os_type': vm.os.type, - 'template': connection.follow_link(vm.template).name, - 'tags': [tag.name for tag in tags], - 'affinity_labels': [label.name for label in labels], - 'affinity_groups': [ - group.name for group in groups - if vm.name in [vm.name for vm in connection.follow_link(group.vms)] - ], - 'statistics': dict( - (stat.name, stat.values[0].datum) for stat in stats if stat.values - ), - 'devices': dict( - (device.name, [ip.address for ip in device.ips]) for device in devices if device.ips - ), - 'ansible_host': next((device.ips[0].address for device in devices if device.ips), None) - } - - -def get_data(connection, vm_name=None): - """ - Obtain data of `vm_name` if specified, otherwise obtain data of all vms. 
- """ - vms_service = connection.system_service().vms_service() - clusters_service = connection.system_service().clusters_service() - - if vm_name: - vm = vms_service.list(search='name=%s' % vm_name) or [None] - data = get_dict_of_struct( - connection=connection, - vm=vm[0], - ) - else: - vms = dict() - data = defaultdict(list) - for vm in vms_service.list(): - name = vm.name - vm_service = vms_service.vm_service(vm.id) - cluster_service = clusters_service.cluster_service(vm.cluster.id) - - # Add vm to vms dict: - vms[name] = get_dict_of_struct(connection, vm) - - # Add vm to cluster group: - cluster_name = connection.follow_link(vm.cluster).name - data['cluster_%s' % cluster_name].append(name) - - # Add vm to tag group: - tags_service = vm_service.tags_service() - for tag in tags_service.list(): - data['tag_%s' % tag.name].append(name) - - # Add vm to status group: - data['status_%s' % vm.status].append(name) - - # Add vm to affinity group: - for group in cluster_service.affinity_groups_service().list(): - if vm.name in [ - v.name for v in connection.follow_link(group.vms) - ]: - data['affinity_group_%s' % group.name].append(vm.name) - - # Add vm to affinity label group: - affinity_labels_service = vm_service.affinity_labels_service() - for label in affinity_labels_service.list(): - data['affinity_label_%s' % label.name].append(name) - - data["_meta"] = { - 'hostvars': vms, - } - - return data - - -def main(): - args = parse_args() - connection = create_connection() - - print( - json.dumps( - obj=get_data( - connection=connection, - vm_name=args.host, - ), - sort_keys=args.pretty, - indent=args.pretty * 2, - ) - ) - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/packet_net.ini b/scripts/inventory/packet_net.ini deleted file mode 100644 index 6dcc027b15..0000000000 --- a/scripts/inventory/packet_net.ini +++ /dev/null @@ -1,53 +0,0 @@ -# Ansible Packet.net external inventory script settings -# - -[packet] - -# Packet projects to get info for. 
Set this to 'all' to get info for all -# projects in Packet and merge the results together. Alternatively, set -# this to a comma separated list of projects. E.g. 'project-1,project-3,project-4' -projects = all -projects_exclude = - -# By default, packet devices in all state are returned. Specify -# packet device states to return as a comma-separated list. -# device_states = active, inactive, queued, provisioning - -# items per page to retrieve from packet api at a time -items_per_page = 999 - -# API calls to Packet are costly. For this reason, we cache the results of an API -# call. Set this to the path you want cache files to be written to. Two files -# will be written to this directory: -# - ansible-packet.cache -# - ansible-packet.index -cache_path = ~/.ansible/tmp - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. -# To disable the cache, set this value to 0 -cache_max_age = 300 - -# Organize groups into a nested/hierarchy instead of a flat namespace. -nested_groups = False - -# Replace - tags when creating groups to avoid issues with ansible -replace_dash_in_groups = True - -# The packet inventory output can become very large. To manage its size, -# configure which groups should be created. 
-group_by_device_id = True -group_by_hostname = True -group_by_facility = True -group_by_project = True -group_by_operating_system = True -group_by_plan_type = True -group_by_tags = True -group_by_tag_none = True - -# If you only want to include hosts that match a certain regular expression -# pattern_include = staging-* - -# If you want to exclude any hosts that match a certain regular expression -# pattern_exclude = staging-* - diff --git a/scripts/inventory/packet_net.py b/scripts/inventory/packet_net.py deleted file mode 100755 index 196e26869d..0000000000 --- a/scripts/inventory/packet_net.py +++ /dev/null @@ -1,496 +0,0 @@ -#!/usr/bin/env python - -''' -Packet.net external inventory script -================================= - -Generates inventory that Ansible can understand by making API request to -Packet.net using the Packet library. - -NOTE: This script assumes Ansible is being executed where the environment -variable needed for Packet API Token already been set: - export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs - -This script also assumes there is a packet_net.ini file alongside it. 
To specify a -different path to packet_net.ini, define the PACKET_NET_INI_PATH environment variable: - - export PACKET_NET_INI_PATH=/path/to/my_packet_net.ini - -''' - -# (c) 2016, Peter Sankauskas -# (c) 2017, Tomas Karasek -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -import sys -import os -import argparse -import re -from time import time - -from ansible.module_utils import six -from ansible.module_utils.six.moves import configparser - -try: - import packet -except ImportError as e: - sys.exit("failed=True msg='`packet-python` library required for this script'") - -import traceback - - -import json - - -ini_section = 'packet' - - -class PacketInventory(object): - - def _empty_inventory(self): - return {"_meta": {"hostvars": {}}} - - def __init__(self): - ''' Main execution path ''' - - # Inventory grouped by device IDs, tags, security groups, regions, - # and availability zones - self.inventory = self._empty_inventory() - - # Index of hostname (address) to device ID - self.index = {} - - # Read settings and parse CLI arguments - self.parse_cli_args() - self.read_settings() - - # Cache - if self.args.refresh_cache: - self.do_api_calls_update_cache() - elif not self.is_cache_valid(): - self.do_api_calls_update_cache() - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of devices for inventory - if self.inventory == self._empty_inventory(): - data_to_print = self.get_inventory_from_cache() - else: - data_to_print = self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - - if os.path.isfile(self.cache_path_cache): - mod_time = 
os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_index): - return True - - return False - - def read_settings(self): - ''' Reads the settings from the packet_net.ini file ''' - if six.PY3: - config = configparser.ConfigParser() - else: - config = configparser.SafeConfigParser() - - _ini_path_raw = os.environ.get('PACKET_NET_INI_PATH') - - if _ini_path_raw: - packet_ini_path = os.path.expanduser(os.path.expandvars(_ini_path_raw)) - else: - packet_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini') - config.read(packet_ini_path) - - # items per page - self.items_per_page = 999 - if config.has_option(ini_section, 'items_per_page'): - config.get(ini_section, 'items_per_page') - - # Instance states to be gathered in inventory. Default is all of them. - packet_valid_device_states = [ - 'active', - 'inactive', - 'queued', - 'provisioning' - ] - self.packet_device_states = [] - if config.has_option(ini_section, 'device_states'): - for device_state in config.get(ini_section, 'device_states').split(','): - device_state = device_state.strip() - if device_state not in packet_valid_device_states: - continue - self.packet_device_states.append(device_state) - else: - self.packet_device_states = packet_valid_device_states - - # Cache related - cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path')) - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - - self.cache_path_cache = cache_dir + "/ansible-packet.cache" - self.cache_path_index = cache_dir + "/ansible-packet.index" - self.cache_max_age = config.getint(ini_section, 'cache_max_age') - - # Configure nested groups instead of flat namespace. 
- if config.has_option(ini_section, 'nested_groups'): - self.nested_groups = config.getboolean(ini_section, 'nested_groups') - else: - self.nested_groups = False - - # Replace dash or not in group names - if config.has_option(ini_section, 'replace_dash_in_groups'): - self.replace_dash_in_groups = config.getboolean(ini_section, 'replace_dash_in_groups') - else: - self.replace_dash_in_groups = True - - # Configure which groups should be created. - group_by_options = [ - 'group_by_device_id', - 'group_by_hostname', - 'group_by_facility', - 'group_by_project', - 'group_by_operating_system', - 'group_by_plan_type', - 'group_by_tags', - 'group_by_tag_none', - ] - for option in group_by_options: - if config.has_option(ini_section, option): - setattr(self, option, config.getboolean(ini_section, option)) - else: - setattr(self, option, True) - - # Do we need to just include hosts that match a pattern? - try: - pattern_include = config.get(ini_section, 'pattern_include') - if pattern_include and len(pattern_include) > 0: - self.pattern_include = re.compile(pattern_include) - else: - self.pattern_include = None - except configparser.NoOptionError: - self.pattern_include = None - - # Do we need to exclude hosts that match a pattern? 
- try: - pattern_exclude = config.get(ini_section, 'pattern_exclude') - if pattern_exclude and len(pattern_exclude) > 0: - self.pattern_exclude = re.compile(pattern_exclude) - else: - self.pattern_exclude = None - except configparser.NoOptionError: - self.pattern_exclude = None - - # Projects - self.projects = [] - configProjects = config.get(ini_section, 'projects') - configProjects_exclude = config.get(ini_section, 'projects_exclude') - if (configProjects == 'all'): - for projectInfo in self.get_projects(): - if projectInfo.name not in configProjects_exclude: - self.projects.append(projectInfo.name) - else: - self.projects = configProjects.split(",") - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Packet') - parser.add_argument('--list', action='store_true', default=True, - help='List Devices (default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific device') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to Packet (default: False - use cache files)') - self.args = parser.parse_args() - - def do_api_calls_update_cache(self): - ''' Do API calls to each region, and save data in cache files ''' - - for projectInfo in self.get_projects(): - if projectInfo.name in self.projects: - self.get_devices_by_project(projectInfo) - - self.write_to_cache(self.inventory, self.cache_path_cache) - self.write_to_cache(self.index, self.cache_path_index) - - def connect(self): - ''' create connection to api server''' - token = os.environ.get('PACKET_API_TOKEN') - if token is None: - raise Exception("Error reading token from environment (PACKET_API_TOKEN)!") - manager = packet.Manager(auth_token=token) - return manager - - def get_projects(self): - '''Makes a Packet API call to get the list of projects''' - - params = { - 'per_page': 
self.items_per_page - } - - try: - manager = self.connect() - projects = manager.list_projects(params=params) - return projects - except Exception as e: - traceback.print_exc() - self.fail_with_error(e, 'getting Packet projects') - - def get_devices_by_project(self, project): - ''' Makes an Packet API call to the list of devices in a particular - project ''' - - params = { - 'per_page': self.items_per_page - } - - try: - manager = self.connect() - devices = manager.list_devices(project_id=project.id, params=params) - - for device in devices: - self.add_device(device, project) - - except Exception as e: - traceback.print_exc() - self.fail_with_error(e, 'getting Packet devices') - - def fail_with_error(self, err_msg, err_operation=None): - '''log an error to std err for ansible-playbook to consume and exit''' - if err_operation: - err_msg = 'ERROR: "{err_msg}", while: {err_operation}\n'.format( - err_msg=err_msg, err_operation=err_operation) - sys.stderr.write(err_msg) - sys.exit(1) - - def get_device(self, device_id): - manager = self.connect() - - device = manager.get_device(device_id) - return device - - def add_device(self, device, project): - ''' Adds a device to the inventory and index, as long as it is - addressable ''' - - # Only return devices with desired device states - if device.state not in self.packet_device_states: - return - - # Select the best destination address. Only include management - # addresses as non-management (elastic) addresses need manual - # host configuration to be routable. - # See https://help.packet.net/article/54-elastic-ips. - dest = None - for ip_address in device.ip_addresses: - if ip_address['public'] is True and \ - ip_address['address_family'] == 4 and \ - ip_address['management'] is True: - dest = ip_address['address'] - - if not dest: - # Skip devices we cannot address (e.g. 
private VPC subnet) - return - - # if we only want to include hosts that match a pattern, skip those that don't - if self.pattern_include and not self.pattern_include.match(device.hostname): - return - - # if we need to exclude hosts that match a pattern, skip those - if self.pattern_exclude and self.pattern_exclude.match(device.hostname): - return - - # Add to index - self.index[dest] = [project.id, device.id] - - # Inventory: Group by device ID (always a group of 1) - if self.group_by_device_id: - self.inventory[device.id] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'devices', device.id) - - # Inventory: Group by device name (hopefully a group of 1) - if self.group_by_hostname: - self.push(self.inventory, device.hostname, dest) - if self.nested_groups: - self.push_group(self.inventory, 'hostnames', project.name) - - # Inventory: Group by project - if self.group_by_project: - self.push(self.inventory, project.name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'projects', project.name) - - # Inventory: Group by facility - if self.group_by_facility: - self.push(self.inventory, device.facility['code'], dest) - if self.nested_groups: - if self.group_by_facility: - self.push_group(self.inventory, project.name, device.facility['code']) - - # Inventory: Group by OS - if self.group_by_operating_system: - self.push(self.inventory, device.operating_system.slug, dest) - if self.nested_groups: - self.push_group(self.inventory, 'operating_systems', device.operating_system.slug) - - # Inventory: Group by plan type - if self.group_by_plan_type: - self.push(self.inventory, device.plan['slug'], dest) - if self.nested_groups: - self.push_group(self.inventory, 'plans', device.plan['slug']) - - # Inventory: Group by tag keys - if self.group_by_tags: - for k in device.tags: - key = self.to_safe("tag_" + k) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) - - # 
Global Tag: devices without tags - if self.group_by_tag_none and len(device.tags) == 0: - self.push(self.inventory, 'tag_none', dest) - if self.nested_groups: - self.push_group(self.inventory, 'tags', 'tag_none') - - # Global Tag: tag all Packet devices - self.push(self.inventory, 'packet', dest) - - self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device) - - def get_host_info_dict_from_device(self, device): - device_vars = {} - for key in vars(device): - value = getattr(device, key) - key = self.to_safe('packet_' + key) - - # Handle complex types - if key == 'packet_state': - device_vars[key] = device.state or '' - elif key == 'packet_hostname': - device_vars[key] = value - elif isinstance(value, (int, bool)): - device_vars[key] = value - elif isinstance(value, six.string_types): - device_vars[key] = value.strip() - elif value is None: - device_vars[key] = '' - elif key == 'packet_facility': - device_vars[key] = value['code'] - elif key == 'packet_operating_system': - device_vars[key] = value.slug - elif key == 'packet_plan': - device_vars[key] = value['slug'] - elif key == 'packet_tags': - for k in value: - key = self.to_safe('packet_tag_' + k) - device_vars[key] = k - else: - pass - # print key - # print type(value) - # print value - - return device_vars - - def get_host_info(self): - ''' Get variables about a specific host ''' - - if len(self.index) == 0: - # Need to load index from cache - self.load_index_from_cache() - - if self.args.host not in self.index: - # try updating the cache - self.do_api_calls_update_cache() - if self.args.host not in self.index: - # host might not exist anymore - return self.json_format_dict({}, True) - - (project_id, device_id) = self.index[self.args.host] - - device = self.get_device(device_id) - return self.json_format_dict(self.get_host_info_dict_from_device(device), True) - - def push(self, my_dict, key, element): - ''' Push an element onto an array that may not have been defined in - the dict 
''' - group_info = my_dict.setdefault(key, []) - if isinstance(group_info, dict): - host_list = group_info.setdefault('hosts', []) - host_list.append(element) - else: - group_info.append(element) - - def push_group(self, my_dict, key, element): - ''' Push a group as a child of another group. ''' - parent_group = my_dict.setdefault(key, {}) - if not isinstance(parent_group, dict): - parent_group = my_dict[key] = {'hosts': parent_group} - child_groups = parent_group.setdefault('children', []) - if element not in child_groups: - child_groups.append(element) - - def get_inventory_from_cache(self): - ''' Reads the inventory from the cache file and returns it as a JSON - object ''' - - cache = open(self.cache_path_cache, 'r') - json_inventory = cache.read() - return json_inventory - - def load_index_from_cache(self): - ''' Reads the index from the cache file sets self.index ''' - - cache = open(self.cache_path_index, 'r') - json_index = cache.read() - self.index = json.loads(json_index) - - def write_to_cache(self, data, filename): - ''' Writes data in JSON format to a file ''' - - json_data = self.json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def uncammelize(self, key): - temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - regex = r"[^A-Za-z0-9\_" - if not self.replace_dash_in_groups: - regex += r"\-" - return re.sub(regex + "]", "_", word) - - def json_format_dict(self, data, pretty=False): - ''' Converts a dict to a JSON object and dumps it as a formatted - string ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -# Run the script -PacketInventory() diff --git a/scripts/inventory/proxmox.py b/scripts/inventory/proxmox.py deleted file mode 100755 index 
2196934115..0000000000 --- a/scripts/inventory/proxmox.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/env python - -# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Updated 2016 by Matt Harris -# -# Added support for Proxmox VE 4.x -# Added support for using the Notes field of a VM to define groups and variables: -# A well-formatted JSON object in the Notes field will be added to the _meta -# section for that VM. In addition, the "groups" key of this JSON object may be -# used to specify group membership: -# -# { "groups": ["utility", "databases"], "a": false, "b": true } - -import json -import os -import sys -from optparse import OptionParser - -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves.urllib.parse import urlencode - -from ansible.module_utils.urls import open_url - - -class ProxmoxNodeList(list): - def get_names(self): - return [node['node'] for node in self] - - -class ProxmoxVM(dict): - def get_variables(self): - variables = {} - for key, value in iteritems(self): - variables['proxmox_' + key] = value - return variables - - -class ProxmoxVMList(list): - def __init__(self, data=None): - data = [] if data is None else data - - for item in data: - self.append(ProxmoxVM(item)) - - def get_names(self): - return [vm['name'] for vm in self if vm['template'] != 1] - - def get_by_name(self, name): - results = [vm for vm in self if vm['name'] == name] - return results[0] if len(results) > 0 else None - - def get_variables(self): - variables = {} - for vm in self: - variables[vm['name']] = vm.get_variables() - - return variables - - -class ProxmoxPoolList(list): - def get_names(self): - return [pool['poolid'] for pool in self] - - -class ProxmoxPool(dict): - def get_members_name(self): - return [member['name'] for member in self['members'] if 
member['template'] != 1] - - -class ProxmoxAPI(object): - def __init__(self, options): - self.options = options - self.credentials = None - - if not options.url: - raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).') - elif not options.username: - raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).') - elif not options.password: - raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).') - - def auth(self): - request_path = '{0}api2/json/access/ticket'.format(self.options.url) - - request_params = urlencode({ - 'username': self.options.username, - 'password': self.options.password, - }) - - data = json.load(open_url(request_path, data=request_params)) - - self.credentials = { - 'ticket': data['data']['ticket'], - 'CSRFPreventionToken': data['data']['CSRFPreventionToken'], - } - - def get(self, url, data=None): - request_path = '{0}{1}'.format(self.options.url, url) - - headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])} - request = open_url(request_path, data=data, headers=headers) - - response = json.load(request) - return response['data'] - - def nodes(self): - return ProxmoxNodeList(self.get('api2/json/nodes')) - - def vms_by_type(self, node, type): - return ProxmoxVMList(self.get('api2/json/nodes/{0}/{1}'.format(node, type))) - - def vm_description_by_type(self, node, vm, type): - return self.get('api2/json/nodes/{0}/{1}/{2}/config'.format(node, type, vm)) - - def node_qemu(self, node): - return self.vms_by_type(node, 'qemu') - - def node_qemu_description(self, node, vm): - return self.vm_description_by_type(node, vm, 'qemu') - - def node_lxc(self, node): - return self.vms_by_type(node, 'lxc') - - def node_lxc_description(self, node, vm): - return self.vm_description_by_type(node, vm, 'lxc') - - def pools(self): - return ProxmoxPoolList(self.get('api2/json/pools')) - - def pool(self, poolid): - return ProxmoxPool(self.get('api2/json/pools/{0}'.format(poolid))) - - -def 
main_list(options): - results = { - 'all': { - 'hosts': [], - }, - '_meta': { - 'hostvars': {}, - } - } - - proxmox_api = ProxmoxAPI(options) - proxmox_api.auth() - - for node in proxmox_api.nodes().get_names(): - qemu_list = proxmox_api.node_qemu(node) - results['all']['hosts'] += qemu_list.get_names() - results['_meta']['hostvars'].update(qemu_list.get_variables()) - lxc_list = proxmox_api.node_lxc(node) - results['all']['hosts'] += lxc_list.get_names() - results['_meta']['hostvars'].update(lxc_list.get_variables()) - - for vm in results['_meta']['hostvars']: - vmid = results['_meta']['hostvars'][vm]['proxmox_vmid'] - try: - type = results['_meta']['hostvars'][vm]['proxmox_type'] - except KeyError: - type = 'qemu' - try: - description = proxmox_api.vm_description_by_type(node, vmid, type)['description'] - except KeyError: - description = None - - try: - metadata = json.loads(description) - except TypeError: - metadata = {} - except ValueError: - metadata = { - 'notes': description - } - - if 'groups' in metadata: - # print metadata - for group in metadata['groups']: - if group not in results: - results[group] = { - 'hosts': [] - } - results[group]['hosts'] += [vm] - - results['_meta']['hostvars'][vm].update(metadata) - - # pools - for pool in proxmox_api.pools().get_names(): - results[pool] = { - 'hosts': proxmox_api.pool(pool).get_members_name(), - } - - return results - - -def main_host(options): - proxmox_api = ProxmoxAPI(options) - proxmox_api.auth() - - for node in proxmox_api.nodes().get_names(): - qemu_list = proxmox_api.node_qemu(node) - qemu = qemu_list.get_by_name(options.host) - if qemu: - return qemu.get_variables() - - return {} - - -def main(): - parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME') - parser.add_option('--list', action="store_true", default=False, dest="list") - parser.add_option('--host', dest="host") - parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url') - parser.add_option('--username', 
default=os.environ.get('PROXMOX_USERNAME'), dest='username') - parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password') - parser.add_option('--pretty', action="store_true", default=False, dest='pretty') - (options, args) = parser.parse_args() - - if options.list: - data = main_list(options) - elif options.host: - data = main_host(options) - else: - parser.print_help() - sys.exit(1) - - indent = None - if options.pretty: - indent = 2 - - print(json.dumps(data, indent=indent)) - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/rackhd.py b/scripts/inventory/rackhd.py deleted file mode 100755 index 9b4372f679..0000000000 --- a/scripts/inventory/rackhd.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import os -import requests -import argparse - -RACKHD_URL = 'http://localhost:8080' - - -class RackhdInventory(object): - def __init__(self, nodeids): - self._inventory = {} - for nodeid in nodeids: - self._load_inventory_data(nodeid) - inventory = {} - for (nodeid, info) in self._inventory.items(): - inventory[nodeid] = (self._format_output(nodeid, info)) - print(json.dumps(inventory)) - - def _load_inventory_data(self, nodeid): - info = {} - info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid) - info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid) - - results = {} - for (key, url) in info.items(): - r = requests.get(url, verify=False) - results[key] = r.text - self._inventory[nodeid] = results - - def _format_output(self, nodeid, info): - try: - node_info = json.loads(info['lookup']) - ipaddress = '' - if len(node_info) > 0: - ipaddress = node_info[0]['ipAddress'] - output = {'hosts': [ipaddress], 'vars': {}} - for (key, result) in info.items(): - 
output['vars'][key] = json.loads(result) - output['vars']['ansible_ssh_user'] = 'monorail' - except KeyError: - pass - return output - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument('--host') - parser.add_argument('--list', action='store_true') - return parser.parse_args() - - -try: - # check if rackhd url(ie:10.1.1.45:8080) is specified in the environment - RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL']) -except Exception: - # use default values - pass - -# Use the nodeid specified in the environment to limit the data returned -# or return data for all available nodes -nodeids = [] - -if (parse_args().host): - try: - nodeids += parse_args().host.split(',') - RackhdInventory(nodeids) - except Exception: - pass -if (parse_args().list): - try: - url = RACKHD_URL + '/api/common/nodes' - r = requests.get(url, verify=False) - data = json.loads(r.text) - for entry in data: - if entry['type'] == 'compute': - nodeids.append(entry['id']) - RackhdInventory(nodeids) - except Exception: - pass diff --git a/scripts/inventory/rax.ini b/scripts/inventory/rax.ini deleted file mode 100644 index 15948e7b2e..0000000000 --- a/scripts/inventory/rax.ini +++ /dev/null @@ -1,66 +0,0 @@ -# Ansible Rackspace external inventory script settings -# - -[rax] - -# Environment Variable: RAX_CREDS_FILE -# -# An optional configuration that points to a pyrax-compatible credentials -# file. -# -# If not supplied, rax.py will look for a credentials file -# at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, -# and therefore requires a file formatted per the SDK's specifications. -# -# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md -# creds_file = ~/.rackspace_cloud_credentials - -# Environment Variable: RAX_REGION -# -# An optional environment variable to narrow inventory search -# scope. If used, needs a value like ORD, DFW, SYD (a Rackspace -# datacenter) and optionally accepts a comma-separated list. 
-# regions = IAD,ORD,DFW - -# Environment Variable: RAX_ENV -# -# A configuration that will use an environment as configured in -# ~/.pyrax.cfg, see -# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md -# env = prod - -# Environment Variable: RAX_META_PREFIX -# Default: meta -# -# A configuration that changes the prefix used for meta key/value groups. -# For compatibility with ec2.py set to "tag" -# meta_prefix = meta - -# Environment Variable: RAX_ACCESS_NETWORK -# Default: public -# -# A configuration that will tell the inventory script to use a specific -# server network to determine the ansible_ssh_host value. If no address -# is found, ansible_ssh_host will not be set. Accepts a comma-separated -# list of network names, the first found wins. -# access_network = public - -# Environment Variable: RAX_ACCESS_IP_VERSION -# Default: 4 -# -# A configuration related to "access_network" that will attempt to -# determine the ansible_ssh_host value for either IPv4 or IPv6. If no -# address is found, ansible_ssh_host will not be set. -# Acceptable values are: 4 or 6. Values other than 4 or 6 -# will be ignored, and 4 will be used. Accepts a comma separated list, -# the first found wins. -# access_ip_version = 4 - -# Environment Variable: RAX_CACHE_MAX_AGE -# Default: 600 -# -# A configuration the changes the behavior or the inventory cache. -# Inventory listing performed before this value will be returned from -# the cache instead of making a full request for all inventory. Setting -# this value to 0 will force a full request. 
-# cache_max_age = 600 \ No newline at end of file diff --git a/scripts/inventory/rax.py b/scripts/inventory/rax.py deleted file mode 100755 index 0cac0f002c..0000000000 --- a/scripts/inventory/rax.py +++ /dev/null @@ -1,460 +0,0 @@ -#!/usr/bin/env python - -# (c) 2013, Jesse Keating , -# Matt Martz -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -Rackspace Cloud Inventory - -Authors: - Jesse Keating , - Matt Martz - - -Description: - Generates inventory that Ansible can understand by making API request to - Rackspace Public Cloud API - - When run against a specific host, this script returns variables similar to: - rax_os-ext-sts_task_state - rax_addresses - rax_links - rax_image - rax_os-ext-sts_vm_state - rax_flavor - rax_id - rax_rax-bandwidth_bandwidth - rax_user_id - rax_os-dcf_diskconfig - rax_accessipv4 - rax_accessipv6 - rax_progress - rax_os-ext-sts_power_state - rax_metadata - rax_status - rax_updated - rax_hostid - rax_name - rax_created - rax_tenant_id - rax_loaded - -Configuration: - rax.py can be configured using a rax.ini file or via environment - variables. The rax.ini file should live in the same directory along side - this script. - - The section header for configuration values related to this - inventory plugin is [rax] - - [rax] - creds_file = ~/.rackspace_cloud_credentials - regions = IAD,ORD,DFW - env = prod - meta_prefix = meta - access_network = public - access_ip_version = 4 - - Each of these configurations also has a corresponding environment variable. - An environment variable will override a configuration file value. - - creds_file: - Environment Variable: RAX_CREDS_FILE - - An optional configuration that points to a pyrax-compatible credentials - file. - - If not supplied, rax.py will look for a credentials file - at ~/.rackspace_cloud_credentials. 
It uses the Rackspace Python SDK, - and therefore requires a file formatted per the SDK's specifications. - - https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md - - regions: - Environment Variable: RAX_REGION - - An optional environment variable to narrow inventory search - scope. If used, needs a value like ORD, DFW, SYD (a Rackspace - datacenter) and optionally accepts a comma-separated list. - - environment: - Environment Variable: RAX_ENV - - A configuration that will use an environment as configured in - ~/.pyrax.cfg, see - https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md - - meta_prefix: - Environment Variable: RAX_META_PREFIX - Default: meta - - A configuration that changes the prefix used for meta key/value groups. - For compatibility with ec2.py set to "tag" - - access_network: - Environment Variable: RAX_ACCESS_NETWORK - Default: public - - A configuration that will tell the inventory script to use a specific - server network to determine the ansible_ssh_host value. If no address - is found, ansible_ssh_host will not be set. Accepts a comma-separated - list of network names, the first found wins. - - access_ip_version: - Environment Variable: RAX_ACCESS_IP_VERSION - Default: 4 - - A configuration related to "access_network" that will attempt to - determine the ansible_ssh_host value for either IPv4 or IPv6. If no - address is found, ansible_ssh_host will not be set. - Acceptable values are: 4 or 6. Values other than 4 or 6 - will be ignored, and 4 will be used. Accepts a comma-separated list, - the first found wins. 
- -Examples: - List server instances - $ RAX_CREDS_FILE=~/.raxpub rax.py --list - - List servers in ORD datacenter only - $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list - - List servers in ORD and DFW datacenters - $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list - - Get server details for server named "server.example.com" - $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com - - Use the instance private IP to connect (instead of public IP) - $ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list -""" - -import os -import re -import sys -import argparse -import warnings -import collections - -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import configparser as ConfigParser - -import json - -try: - import pyrax - from pyrax.utils import slugify -except ImportError: - sys.exit('pyrax is required for this module') - -from time import time - -from ansible.constants import get_config -from ansible.module_utils.parsing.convert_bool import boolean -from ansible.module_utils.six import text_type - -NON_CALLABLES = (text_type, str, bool, dict, int, list, type(None)) - - -def load_config_file(): - p = ConfigParser.ConfigParser() - config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'rax.ini') - try: - p.read(config_file) - except ConfigParser.Error: - return None - else: - return p - - -def rax_slugify(value): - return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) - - -def to_dict(obj): - instance = {} - for key in dir(obj): - value = getattr(obj, key) - if isinstance(value, NON_CALLABLES) and not key.startswith('_'): - key = rax_slugify(key) - instance[key] = value - - return instance - - -def host(regions, hostname): - hostvars = {} - - for region in regions: - # Connect to the region - cs = pyrax.connect_to_cloudservers(region=region) - for server in cs.servers.list(): - if server.name == hostname: - for key, value in to_dict(server).items(): - 
hostvars[key] = value - - # And finally, add an IP address - hostvars['ansible_ssh_host'] = server.accessIPv4 - print(json.dumps(hostvars, sort_keys=True, indent=4)) - - -def _list_into_cache(regions): - groups = collections.defaultdict(list) - hostvars = collections.defaultdict(dict) - images = {} - cbs_attachments = collections.defaultdict(dict) - - prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') - - try: - # Ansible 2.3+ - networks = get_config(p, 'rax', 'access_network', - 'RAX_ACCESS_NETWORK', 'public', value_type='list') - except TypeError: - # Ansible 2.2.x and below - # pylint: disable=unexpected-keyword-arg - networks = get_config(p, 'rax', 'access_network', - 'RAX_ACCESS_NETWORK', 'public', islist=True) - try: - try: - # Ansible 2.3+ - ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', - 'RAX_ACCESS_IP_VERSION', 4, value_type='list')) - except TypeError: - # Ansible 2.2.x and below - # pylint: disable=unexpected-keyword-arg - ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', - 'RAX_ACCESS_IP_VERSION', 4, islist=True)) - except Exception: - ip_versions = [4] - else: - ip_versions = [v for v in ip_versions if v in [4, 6]] - if not ip_versions: - ip_versions = [4] - - # Go through all the regions looking for servers - for region in regions: - # Connect to the region - cs = pyrax.connect_to_cloudservers(region=region) - if cs is None: - warnings.warn( - 'Connecting to Rackspace region "%s" has caused Pyrax to ' - 'return None. Is this a valid region?' 
% region, - RuntimeWarning) - continue - for server in cs.servers.list(): - # Create a group on region - groups[region].append(server.name) - - # Check if group metadata key in servers' metadata - group = server.metadata.get('group') - if group: - groups[group].append(server.name) - - for extra_group in server.metadata.get('groups', '').split(','): - if extra_group: - groups[extra_group].append(server.name) - - # Add host metadata - for key, value in to_dict(server).items(): - hostvars[server.name][key] = value - - hostvars[server.name]['rax_region'] = region - - for key, value in iteritems(server.metadata): - groups['%s_%s_%s' % (prefix, key, value)].append(server.name) - - groups['instance-%s' % server.id].append(server.name) - groups['flavor-%s' % server.flavor['id']].append(server.name) - - # Handle boot from volume - if not server.image: - if not cbs_attachments[region]: - cbs = pyrax.connect_to_cloud_blockstorage(region) - for vol in cbs.list(): - if boolean(vol.bootable, strict=False): - for attachment in vol.attachments: - metadata = vol.volume_image_metadata - server_id = attachment['server_id'] - cbs_attachments[region][server_id] = { - 'id': metadata['image_id'], - 'name': slugify(metadata['image_name']) - } - image = cbs_attachments[region].get(server.id) - if image: - server.image = {'id': image['id']} - hostvars[server.name]['rax_image'] = server.image - hostvars[server.name]['rax_boot_source'] = 'volume' - images[image['id']] = image['name'] - else: - hostvars[server.name]['rax_boot_source'] = 'local' - - try: - imagegroup = 'image-%s' % images[server.image['id']] - groups[imagegroup].append(server.name) - groups['image-%s' % server.image['id']].append(server.name) - except KeyError: - try: - image = cs.images.get(server.image['id']) - except cs.exceptions.NotFound: - groups['image-%s' % server.image['id']].append(server.name) - else: - images[image.id] = image.human_id - groups['image-%s' % image.human_id].append(server.name) - groups['image-%s' % 
server.image['id']].append(server.name) - - # And finally, add an IP address - ansible_ssh_host = None - # use accessIPv[46] instead of looping address for 'public' - for network_name in networks: - if ansible_ssh_host: - break - if network_name == 'public': - for version_name in ip_versions: - if ansible_ssh_host: - break - if version_name == 6 and server.accessIPv6: - ansible_ssh_host = server.accessIPv6 - elif server.accessIPv4: - ansible_ssh_host = server.accessIPv4 - if not ansible_ssh_host: - addresses = server.addresses.get(network_name, []) - for address in addresses: - for version_name in ip_versions: - if ansible_ssh_host: - break - if address.get('version') == version_name: - ansible_ssh_host = address.get('addr') - break - if ansible_ssh_host: - hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host - - if hostvars: - groups['_meta'] = {'hostvars': hostvars} - - with open(get_cache_file_path(regions), 'w') as cache_file: - json.dump(groups, cache_file) - - -def get_cache_file_path(regions): - regions_str = '.'.join([reg.strip().lower() for reg in regions]) - ansible_tmp_path = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp') - if not os.path.exists(ansible_tmp_path): - os.makedirs(ansible_tmp_path) - return os.path.join(ansible_tmp_path, - 'ansible-rax-%s-%s.cache' % ( - pyrax.identity.username, regions_str)) - - -def _list(regions, refresh_cache=True): - cache_max_age = int(get_config(p, 'rax', 'cache_max_age', - 'RAX_CACHE_MAX_AGE', 600)) - - if (not os.path.exists(get_cache_file_path(regions)) or - refresh_cache or - (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age): - # Cache file doesn't exist or older than 10m or refresh cache requested - _list_into_cache(regions) - - with open(get_cache_file_path(regions), 'r') as cache_file: - groups = json.load(cache_file) - print(json.dumps(groups, sort_keys=True, indent=4)) - - -def parse_args(): - parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud ' - 
'inventory module') - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('--list', action='store_true', - help='List active servers') - group.add_argument('--host', help='List details about the specific host') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help=('Force refresh of cache, making API requests to' - 'RackSpace (default: False - use cache files)')) - return parser.parse_args() - - -def setup(): - default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials') - - env = get_config(p, 'rax', 'environment', 'RAX_ENV', None) - if env: - pyrax.set_environment(env) - - keyring_username = pyrax.get_setting('keyring_username') - - # Attempt to grab credentials from environment first - creds_file = get_config(p, 'rax', 'creds_file', - 'RAX_CREDS_FILE', None) - if creds_file is not None: - creds_file = os.path.expanduser(creds_file) - else: - # But if that fails, use the default location of - # ~/.rackspace_cloud_credentials - if os.path.isfile(default_creds_file): - creds_file = default_creds_file - elif not keyring_username: - sys.exit('No value in environment variable %s and/or no ' - 'credentials file at %s' - % ('RAX_CREDS_FILE', default_creds_file)) - - identity_type = pyrax.get_setting('identity_type') - pyrax.set_setting('identity_type', identity_type or 'rackspace') - - region = pyrax.get_setting('region') - - try: - if keyring_username: - pyrax.keyring_auth(keyring_username, region=region) - else: - pyrax.set_credential_file(creds_file, region=region) - except Exception as e: - sys.exit("%s: %s" % (e, e.message)) - - regions = [] - if region: - regions.append(region) - else: - try: - # Ansible 2.3+ - region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', - value_type='list') - except TypeError: - # Ansible 2.2.x and below - # pylint: disable=unexpected-keyword-arg - region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', - islist=True) - - for region in 
region_list: - region = region.strip().upper() - if region == 'ALL': - regions = pyrax.regions - break - elif region not in pyrax.regions: - sys.exit('Unsupported region %s' % region) - elif region not in regions: - regions.append(region) - - return regions - - -def main(): - args = parse_args() - regions = setup() - if args.list: - _list(regions, refresh_cache=args.refresh_cache) - elif args.host: - host(regions, args.host) - sys.exit(0) - - -p = load_config_file() -if __name__ == '__main__': - main() diff --git a/scripts/inventory/rhv.py b/scripts/inventory/rhv.py deleted file mode 120000 index e66635dd42..0000000000 --- a/scripts/inventory/rhv.py +++ /dev/null @@ -1 +0,0 @@ -ovirt4.py \ No newline at end of file diff --git a/scripts/inventory/rudder.ini b/scripts/inventory/rudder.ini deleted file mode 100644 index 748b3d2121..0000000000 --- a/scripts/inventory/rudder.ini +++ /dev/null @@ -1,35 +0,0 @@ -# Rudder external inventory script settings -# - -[rudder] - -# Your Rudder server API URL, typically: -# https://rudder.local/rudder/api -uri = https://rudder.local/rudder/api - -# By default, Rudder uses a self-signed certificate. Set this to True -# to disable certificate validation. -disable_ssl_certificate_validation = True - -# Your Rudder API token, created in the Web interface. -token = aaabbbccc - -# Rudder API version to use, use "latest" for latest available -# version. -version = latest - -# Property to use as group name in the output. -# Can generally be "id" or "displayName". -group_name = displayName - -# Fail if there are two groups with the same name or two hosts with the -# same hostname in the output. -fail_if_name_collision = True - -# We cache the results of Rudder API in a local file -cache_path = /tmp/ansible-rudder.cache - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. -# Set to 0 to disable cache. 
-cache_max_age = 500 diff --git a/scripts/inventory/rudder.py b/scripts/inventory/rudder.py deleted file mode 100755 index 9a65aca99a..0000000000 --- a/scripts/inventory/rudder.py +++ /dev/null @@ -1,286 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2015, Normation SAS -# -# Inspired by the EC2 inventory plugin: -# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -''' -Rudder external inventory script -================================= - -Generates inventory that Ansible can understand by making API request to -a Rudder server. This script is compatible with Rudder 2.10 or later. - -The output JSON includes all your Rudder groups, containing the hostnames of -their nodes. Groups and nodes have a variable called rudder_group_id and -rudder_node_id, which is the Rudder internal id of the item, allowing to identify -them uniquely. Hosts variables also include your node properties, which are -key => value properties set by the API and specific to each node. - -This script assumes there is an rudder.ini file alongside it. 
To specify a -different path to rudder.ini, define the RUDDER_INI_PATH environment variable: - - export RUDDER_INI_PATH=/path/to/my_rudder.ini - -You have to configure your Rudder server information, either in rudder.ini or -by overriding it with environment variables: - - export RUDDER_API_VERSION='latest' - export RUDDER_API_TOKEN='my_token' - export RUDDER_API_URI='https://rudder.local/rudder/api' -''' - - -import sys -import os -import re -import argparse -import httplib2 as http -from time import time -from ansible.module_utils import six -from ansible.module_utils.six.moves import configparser -from ansible.module_utils.six.moves.urllib.parse import urlparse - -import json - - -class RudderInventory(object): - def __init__(self): - ''' Main execution path ''' - - # Empty inventory by default - self.inventory = {} - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - # Create connection - self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation) - - # Cache - if self.args.refresh_cache: - self.update_cache() - elif not self.is_cache_valid(): - self.update_cache() - else: - self.load_cache() - - data_to_print = {} - - if self.args.host: - data_to_print = self.get_host_info(self.args.host) - elif self.args.list: - data_to_print = self.get_list_info() - - print(self.json_format_dict(data_to_print, True)) - - def read_settings(self): - ''' Reads the settings from the rudder.ini file ''' - if six.PY2: - config = configparser.SafeConfigParser() - else: - config = configparser.ConfigParser() - rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini') - rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path))) - config.read(rudder_ini_path) - - self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token')) - self.version = os.environ.get('RUDDER_API_VERSION', config.get('rudder', 
'version')) - self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri')) - - self.disable_ssl_validation = config.getboolean('rudder', 'disable_ssl_certificate_validation') - self.group_name = config.get('rudder', 'group_name') - self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision') - - self.cache_path = config.get('rudder', 'cache_path') - self.cache_max_age = config.getint('rudder', 'cache_max_age') - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)') - self.args = parser.parse_args() - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - - if os.path.isfile(self.cache_path): - mod_time = os.path.getmtime(self.cache_path) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - return True - - return False - - def load_cache(self): - ''' Reads the cache from the cache file sets self.cache ''' - - cache = open(self.cache_path, 'r') - json_cache = cache.read() - - try: - self.inventory = json.loads(json_cache) - except ValueError as e: - self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache') - - def write_cache(self): - ''' Writes data in JSON format to a file ''' - - json_data = self.json_format_dict(self.inventory, True) - cache = open(self.cache_path, 'w') - cache.write(json_data) - cache.close() - - def get_nodes(self): - ''' Gets the nodes list from Rudder ''' - - path = 
'/nodes?select=nodeAndPolicyServer' - result = self.api_call(path) - - nodes = {} - - for node in result['data']['nodes']: - nodes[node['id']] = {} - nodes[node['id']]['hostname'] = node['hostname'] - if 'properties' in node: - nodes[node['id']]['properties'] = node['properties'] - else: - nodes[node['id']]['properties'] = [] - - return nodes - - def get_groups(self): - ''' Gets the groups list from Rudder ''' - - path = '/groups' - result = self.api_call(path) - - groups = {} - - for group in result['data']['groups']: - groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])} - - return groups - - def update_cache(self): - ''' Fetches the inventory information from Rudder and creates the inventory ''' - - nodes = self.get_nodes() - groups = self.get_groups() - - inventory = {} - - for group in groups: - # Check for name collision - if self.fail_if_name_collision: - if groups[group]['name'] in inventory: - self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups') - # Add group to inventory - inventory[groups[group]['name']] = {} - inventory[groups[group]['name']]['hosts'] = [] - inventory[groups[group]['name']]['vars'] = {} - inventory[groups[group]['name']]['vars']['rudder_group_id'] = group - for node in groups[group]['hosts']: - # Add node to group - inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname']) - - properties = {} - - for node in nodes: - # Check for name collision - if self.fail_if_name_collision: - if nodes[node]['hostname'] in properties: - self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts') - # Add node properties to inventory - properties[nodes[node]['hostname']] = {} - properties[nodes[node]['hostname']]['rudder_node_id'] = node - for node_property in nodes[node]['properties']: - properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value'] - 
- inventory['_meta'] = {} - inventory['_meta']['hostvars'] = properties - - self.inventory = inventory - - if self.cache_max_age > 0: - self.write_cache() - - def get_list_info(self): - ''' Gets inventory information from local cache ''' - - return self.inventory - - def get_host_info(self, hostname): - ''' Gets information about a specific host from local cache ''' - - if hostname in self.inventory['_meta']['hostvars']: - return self.inventory['_meta']['hostvars'][hostname] - else: - return {} - - def api_call(self, path): - ''' Performs an API request ''' - - headers = { - 'X-API-Token': self.token, - 'X-API-Version': self.version, - 'Content-Type': 'application/json;charset=utf-8' - } - - target = urlparse(self.uri + path) - method = 'GET' - body = '' - - try: - response, content = self.conn.request(target.geturl(), method, body, headers) - except Exception: - self.fail_with_error('Error connecting to Rudder server') - - try: - data = json.loads(content) - except ValueError as e: - self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response') - - return data - - def fail_with_error(self, err_msg, err_operation=None): - ''' Logs an error to std err for ansible-playbook to consume and exit ''' - if err_operation: - err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( - err_msg=err_msg, err_operation=err_operation) - sys.stderr.write(err_msg) - sys.exit(1) - - def json_format_dict(self, data, pretty=False): - ''' Converts a dict to a JSON object and dumps it as a formatted - string ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be - used as Ansible variable names ''' - - return re.sub(r'[^A-Za-z0-9\_]', '_', word) - - -# Run the script -RudderInventory() diff --git a/scripts/inventory/scaleway.ini b/scripts/inventory/scaleway.ini deleted file mode 100644 index 
99615a124c..0000000000 --- a/scripts/inventory/scaleway.ini +++ /dev/null @@ -1,37 +0,0 @@ -# Ansible dynamic inventory script for Scaleway cloud provider -# - -[compute] -# Fetch inventory for regions. If not defined will read the SCALEWAY_REGION environment variable -# -# regions = all -# regions = ams1 -# regions = par1, ams1 -regions = par1 - - -# Define a Scaleway token to perform required queries on the API -# in order to generate inventory output. -# -[auth] -# Token to authenticate with Scaleway's API. -# If not defined will read the SCALEWAY_TOKEN environment variable -# -api_token = mysecrettoken - - -# To avoid performing excessive calls to Scaleway API you can define a -# cache for the plugin output. Within the time defined in seconds, latest -# output will be reused. After that time, the cache will be refreshed. -# -[cache] -cache_max_age = 60 -cache_dir = '~/.ansible/tmp' - - -[defaults] -# You may want to use only public IP addresses or private IP addresses. -# You can set public_ip_only configuration to get public IPs only. -# If not defined defaults to retrieving private IP addresses. -# -public_ip_only = false diff --git a/scripts/inventory/scaleway.py b/scripts/inventory/scaleway.py deleted file mode 100755 index f68eb128a5..0000000000 --- a/scripts/inventory/scaleway.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -''' -External inventory script for Scaleway -==================================== - -Shamelessly copied from an existing inventory script. - -This script generates an inventory that Ansible can understand by making API requests to Scaleway API - -Requires some python libraries, ensure to have them installed when using this script. (pip install requests https://pypi.org/project/requests/) - -Before using this script you may want to modify scaleway.ini config file. - -This script generates an Ansible hosts file with these host groups: - -: Defines host itself with Scaleway's hostname as group name. 
-: Contains all hosts which has "" as tag. -: Contains all hosts which are in the "" region. -all: Contains all hosts defined in Scaleway. -''' - -# (c) 2017, Paul B. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import copy -import os -import requests -from ansible.module_utils import six -from ansible.module_utils.six.moves import configparser -import sys -import time -import traceback - -import json - -EMPTY_GROUP = { - 'children': [], - 'hosts': [] -} - - -class ScalewayAPI: - REGIONS = ['par1', 'ams1'] - - def __init__(self, auth_token, region): - self.session = requests.session() - self.session.headers.update({ - 'User-Agent': 'Ansible Python/%s' % (sys.version.split(' ')[0]) - }) - self.session.headers.update({ - 'X-Auth-Token': auth_token.encode('latin1') - }) - self.base_url = 'https://cp-%s.scaleway.com' % (region) - - def servers(self): - raw = self.session.get('/'.join([self.base_url, 'servers'])) - - try: - response = raw.json() - return self.get_resource('servers', response, raw) - except ValueError: - return [] - - def get_resource(self, resource, response, raw): - raw.raise_for_status() - - if resource in response: - return response[resource] - else: - raise ValueError( - "Resource %s not found in Scaleway API response" % (resource)) - - -def env_or_param(env_key, param=None, fallback=None): - env_value = os.environ.get(env_key) - - if (param, env_value) == (None, None): - return fallback - elif env_value is not None: - return env_value - else: - return param - - -def save_cache(data, config): - ''' saves item to cache ''' - dpath = config.get('cache', 'cache_dir') - try: - cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'w') - cache.write(json.dumps(data)) - cache.close() - except IOError as e: - pass # not really sure what to do here - - -def get_cache(cache_item, config): - ''' 
returns cached item ''' - dpath = config.get('cache', 'cache_dir') - inv = {} - try: - cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'r') - inv = cache.read() - cache.close() - except IOError as e: - pass # not really sure what to do here - - return inv - - -def cache_available(config): - ''' checks if we have a 'fresh' cache available for item requested ''' - - if config.has_option('cache', 'cache_dir'): - dpath = config.get('cache', 'cache_dir') - - try: - existing = os.stat( - '/'.join([dpath, 'scaleway_ansible_inventory.json'])) - except OSError: - return False - - if config.has_option('cache', 'cache_max_age'): - maxage = config.get('cache', 'cache_max_age') - else: - maxage = 60 - if (int(time.time()) - int(existing.st_mtime)) <= int(maxage): - return True - - return False - - -def generate_inv_from_api(config): - try: - inventory['scaleway'] = copy.deepcopy(EMPTY_GROUP) - - auth_token = None - if config.has_option('auth', 'api_token'): - auth_token = config.get('auth', 'api_token') - auth_token = env_or_param('SCALEWAY_TOKEN', param=auth_token) - if auth_token is None: - sys.stderr.write('ERROR: missing authentication token for Scaleway API') - sys.exit(1) - - if config.has_option('compute', 'regions'): - regions = config.get('compute', 'regions') - if regions == 'all': - regions = ScalewayAPI.REGIONS - else: - regions = map(str.strip, regions.split(',')) - else: - regions = [ - env_or_param('SCALEWAY_REGION', fallback='par1') - ] - - for region in regions: - api = ScalewayAPI(auth_token, region) - - for server in api.servers(): - hostname = server['hostname'] - if config.has_option('defaults', 'public_ip_only') and config.getboolean('defaults', 'public_ip_only'): - ip = server['public_ip']['address'] - else: - ip = server['private_ip'] - for server_tag in server['tags']: - if server_tag not in inventory: - inventory[server_tag] = copy.deepcopy(EMPTY_GROUP) - inventory[server_tag]['children'].append(hostname) - if region not in 
inventory: - inventory[region] = copy.deepcopy(EMPTY_GROUP) - inventory[region]['children'].append(hostname) - inventory['scaleway']['children'].append(hostname) - inventory[hostname] = [] - inventory[hostname].append(ip) - - return inventory - except Exception: - # Return empty hosts output - traceback.print_exc() - return {'scaleway': {'hosts': []}, '_meta': {'hostvars': {}}} - - -def get_inventory(config): - ''' Reads the inventory from cache or Scaleway api ''' - - if cache_available(config): - inv = get_cache('scaleway_ansible_inventory.json', config) - else: - inv = generate_inv_from_api(config) - - save_cache(inv, config) - return json.dumps(inv) - - -if __name__ == '__main__': - inventory = {} - - # Read config - if six.PY3: - config = configparser.ConfigParser() - else: - config = configparser.SafeConfigParser() - for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']: - if os.path.exists(configfilename): - config.read(configfilename) - break - - if cache_available(config): - inventory = get_cache('scaleway_ansible_inventory.json', config) - else: - inventory = get_inventory(config) - - # return to ansible - sys.stdout.write(str(inventory)) - sys.stdout.flush() diff --git a/scripts/inventory/serf.py b/scripts/inventory/serf.py deleted file mode 100755 index df917ef554..0000000000 --- a/scripts/inventory/serf.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python - -# (c) 2015, Marc Abramowitz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Dynamic inventory script which lets you use nodes discovered by Serf -# (https://serfdom.io/). 
-# -# Requires the `serfclient` Python module from -# https://pypi.org/project/serfclient/ -# -# Environment variables -# --------------------- -# - `SERF_RPC_ADDR` -# - `SERF_RPC_AUTH` -# -# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr - -import argparse -import collections -import os -import sys - -# https://pypi.org/project/serfclient/ -from serfclient import SerfClient, EnvironmentConfig - -import json - -_key = 'serf' - - -def _serf_client(): - env = EnvironmentConfig() - return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key) - - -def get_serf_members_data(): - return _serf_client().members().body['Members'] - - -def get_nodes(data): - return [node['Name'] for node in data] - - -def get_groups(data): - groups = collections.defaultdict(list) - - for node in data: - for key, value in node['Tags'].items(): - groups[value].append(node['Name']) - - return groups - - -def get_meta(data): - meta = {'hostvars': {}} - for node in data: - meta['hostvars'][node['Name']] = node['Tags'] - return meta - - -def print_list(): - data = get_serf_members_data() - nodes = get_nodes(data) - groups = get_groups(data) - meta = get_meta(data) - inventory_data = {_key: nodes, '_meta': meta} - inventory_data.update(groups) - print(json.dumps(inventory_data)) - - -def print_host(host): - data = get_serf_members_data() - meta = get_meta(data) - print(json.dumps(meta['hostvars'][host])) - - -def get_args(args_list): - parser = argparse.ArgumentParser( - description='ansible inventory script reading from serf cluster') - mutex_group = parser.add_mutually_exclusive_group(required=True) - help_list = 'list all hosts from serf cluster' - mutex_group.add_argument('--list', action='store_true', help=help_list) - help_host = 'display variables for a host' - mutex_group.add_argument('--host', help=help_host) - return parser.parse_args(args_list) - - -def main(args_list): - args = get_args(args_list) - if args.list: - print_list() - 
if args.host: - print_host(args.host) - - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/scripts/inventory/softlayer.py b/scripts/inventory/softlayer.py deleted file mode 100755 index 03f9820ad2..0000000000 --- a/scripts/inventory/softlayer.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env python -""" -SoftLayer external inventory script. - -The SoftLayer Python API client is required. Use `pip install softlayer` to install it. -You have a few different options for configuring your username and api_key. You can pass -environment variables (SL_USERNAME and SL_API_KEY). You can also write INI file to -~/.softlayer or /etc/softlayer.conf. For more information see the SL API at: -- https://softlayer-python.readthedocs.io/en/latest/config_file.html - -The SoftLayer Python client has a built in command for saving this configuration file -via the command `sl config setup`. -""" - -# Copyright (C) 2014 AJ Bourg -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# -# I found the structure of the ec2.py script very helpful as an example -# as I put this together. Thanks to whoever wrote that script! 
-# - -import SoftLayer -import re -import argparse -import itertools -import json - - -class SoftLayerInventory(object): - common_items = [ - 'id', - 'globalIdentifier', - 'hostname', - 'domain', - 'fullyQualifiedDomainName', - 'primaryBackendIpAddress', - 'primaryIpAddress', - 'datacenter', - 'tagReferences', - 'userData.value', - ] - - vs_items = [ - 'lastKnownPowerState.name', - 'powerState', - 'maxCpu', - 'maxMemory', - 'activeTransaction.transactionStatus[friendlyName,name]', - 'status', - ] - - hw_items = [ - 'hardwareStatusId', - 'processorPhysicalCoreAmount', - 'memoryCapacity', - ] - - def _empty_inventory(self): - return {"_meta": {"hostvars": {}}} - - def __init__(self): - '''Main path''' - - self.inventory = self._empty_inventory() - - self.parse_options() - - if self.args.list: - self.get_all_servers() - print(self.json_format_dict(self.inventory, True)) - elif self.args.host: - self.get_all_servers() - print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True)) - - def to_safe(self, word): - '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups''' - - return re.sub(r"[^A-Za-z0-9\-\.]", "_", word) - - def push(self, my_dict, key, element): - '''Push an element onto an array that may not have been defined in the dict''' - - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def parse_options(self): - '''Parse all the arguments from the CLI''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer') - parser.add_argument('--list', action='store_true', default=False, - help='List instances (default: False)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - self.args = parser.parse_args() - - def json_format_dict(self, data, pretty=False): - '''Converts a dict to a JSON object and dumps it as a formatted string''' - - if pretty: - return 
json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - def process_instance(self, instance, instance_type="virtual"): - '''Populate the inventory dictionary with any instance information''' - - # only want active instances - if 'status' in instance and instance['status']['name'] != 'Active': - return - - # and powered on instances - if 'powerState' in instance and instance['powerState']['name'] != 'Running': - return - - # 5 is active for hardware... see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid - if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5: - return - - # if there's no IP address, we can't reach it - if 'primaryIpAddress' not in instance: - return - - instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else '' - - dest = instance['primaryIpAddress'] - - instance['tags'] = list() - for tag in instance['tagReferences']: - instance['tags'].append(tag['tag']['name']) - - del instance['tagReferences'] - - self.inventory["_meta"]["hostvars"][dest] = instance - - # Inventory: group by memory - if 'maxMemory' in instance: - self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest) - elif 'memoryCapacity' in instance: - self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest) - - # Inventory: group by cpu count - if 'maxCpu' in instance: - self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest) - elif 'processorPhysicalCoreAmount' in instance: - self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest) - - # Inventory: group by datacenter - self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest) - - # Inventory: group by hostname - self.push(self.inventory, self.to_safe(instance['hostname']), dest) - - # Inventory: group by FQDN - self.push(self.inventory, 
self.to_safe(instance['fullyQualifiedDomainName']), dest) - - # Inventory: group by domain - self.push(self.inventory, self.to_safe(instance['domain']), dest) - - # Inventory: group by type (hardware/virtual) - self.push(self.inventory, instance_type, dest) - - for tag in instance['tags']: - self.push(self.inventory, tag, dest) - - def get_virtual_servers(self): - '''Get all the CCI instances''' - vs = SoftLayer.VSManager(self.client) - mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items)) - instances = vs.list_instances(mask=mask) - - for instance in instances: - self.process_instance(instance) - - def get_physical_servers(self): - '''Get all the hardware instances''' - hw = SoftLayer.HardwareManager(self.client) - mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items)) - instances = hw.list_hardware(mask=mask) - - for instance in instances: - self.process_instance(instance, 'hardware') - - def get_all_servers(self): - self.client = SoftLayer.Client() - self.get_virtual_servers() - self.get_physical_servers() - - -SoftLayerInventory() diff --git a/scripts/inventory/spacewalk.ini b/scripts/inventory/spacewalk.ini deleted file mode 100644 index 5433c4221b..0000000000 --- a/scripts/inventory/spacewalk.ini +++ /dev/null @@ -1,16 +0,0 @@ -# Put this ini-file in the same directory as spacewalk.py -# Command line options have precedence over options defined in here. - -[spacewalk] -# To limit the script on one organization in spacewalk, uncomment org_number -# and fill in the organization ID: -# org_number=2 - -# To prefix the group names with the organization ID set prefix_org_name=true. -# This is convenient when org_number is not set and you have the same group names -# in multiple organizations within spacewalk -# The prefix is "org_number-" -prefix_org_name=false - -# Default cache_age for files created with spacewalk-report is 300sec. 
-cache_age=300 diff --git a/scripts/inventory/spacewalk.py b/scripts/inventory/spacewalk.py deleted file mode 100755 index b3b8cf8e7f..0000000000 --- a/scripts/inventory/spacewalk.py +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/env python - -""" -Spacewalk external inventory script -================================= - -Ansible has a feature where instead of reading from /etc/ansible/hosts -as a text file, it can query external programs to obtain the list -of hosts, groups the hosts are in, and even variables to assign to each host. - -To use this, copy this file over /etc/ansible/hosts and chmod +x the file. -This, more or less, allows you to keep one central database containing -info about all of your managed instances. - -This script is dependent upon the spacealk-reports package being installed -on the same machine. It is basically a CSV-to-JSON converter from the -output of "spacewalk-report system-groups-systems|inventory". - -Tested with Ansible 1.9.2 and spacewalk 2.3 -""" -# -# Author:: Jon Miller -# Copyright:: Copyright (c) 2013, Jon Miller -# -# Extended for support of multiple organizations and -# adding the "_meta" dictionary to --list output by -# Bernhard Lichtinger 2015 -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import os -import time -from optparse import OptionParser -import subprocess -import json - -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import configparser as ConfigParser - - -base_dir = os.path.dirname(os.path.realpath(__file__)) -default_ini_file = os.path.join(base_dir, "spacewalk.ini") - -SW_REPORT = '/usr/bin/spacewalk-report' -CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports") -CACHE_AGE = 300 # 5min -INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file))) - - -# Sanity check -if 
not os.path.exists(SW_REPORT): - print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr) - sys.exit(1) - -# Pre-startup work -if not os.path.exists(CACHE_DIR): - os.mkdir(CACHE_DIR) - os.chmod(CACHE_DIR, 0o2775) - -# Helper functions -# ------------------------------ - - -def spacewalk_report(name): - """Yield a dictionary form of each CSV output produced by the specified - spacewalk-report - """ - cache_filename = os.path.join(CACHE_DIR, name) - if not os.path.exists(cache_filename) or \ - (time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE: - # Update the cache - fh = open(cache_filename, 'w') - p = subprocess.Popen([SW_REPORT, name], stdout=fh) - p.wait() - fh.close() - - with open(cache_filename, 'r') as f: - lines = f.readlines() - keys = lines[0].strip().split(',') - # add 'spacewalk_' prefix to the keys - keys = ['spacewalk_' + key for key in keys] - for line in lines[1:]: - values = line.strip().split(',') - if len(keys) == len(values): - yield dict(zip(keys, values)) - - -# Options -# ------------------------------ - -parser = OptionParser(usage="%prog [options] --list | --host ") -parser.add_option('--list', default=False, dest="list", action="store_true", - help="Produce a JSON consumable grouping of servers for Ansible") -parser.add_option('--host', default=None, dest="host", - help="Generate additional host specific details for given host for Ansible") -parser.add_option('-H', '--human', dest="human", - default=False, action="store_true", - help="Produce a friendlier version of either server list or host detail") -parser.add_option('-o', '--org', default=None, dest="org_number", - help="Limit to spacewalk organization number") -parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true", - help="Prefix the group name with the organization number") -(options, args) = parser.parse_args() - - -# read spacewalk.ini if present -# ------------------------------ -if os.path.exists(INI_FILE): - config = 
ConfigParser.SafeConfigParser() - config.read(INI_FILE) - if config.has_option('spacewalk', 'cache_age'): - CACHE_AGE = config.get('spacewalk', 'cache_age') - if not options.org_number and config.has_option('spacewalk', 'org_number'): - options.org_number = config.get('spacewalk', 'org_number') - if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'): - options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name') - - -# Generate dictionary for mapping group_id to org_id -# ------------------------------ -org_groups = {} -try: - for group in spacewalk_report('system-groups'): - org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id'] - -except (OSError) as e: - print('Problem executing the command "%s system-groups": %s' % - (SW_REPORT, str(e)), file=sys.stderr) - sys.exit(2) - - -# List out the known server from Spacewalk -# ------------------------------ -if options.list: - - # to build the "_meta"-Group with hostvars first create dictionary for later use - host_vars = {} - try: - for item in spacewalk_report('inventory'): - host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items()) - - except (OSError) as e: - print('Problem executing the command "%s inventory": %s' % - (SW_REPORT, str(e)), file=sys.stderr) - sys.exit(2) - - groups = {} - meta = {"hostvars": {}} - try: - for system in spacewalk_report('system-groups-systems'): - # first get org_id of system - org_id = org_groups[system['spacewalk_group_id']] - - # shall we add the org_id as prefix to the group name: - if options.prefix_org_name: - prefix = org_id + "-" - group_name = prefix + system['spacewalk_group_name'] - else: - group_name = system['spacewalk_group_name'] - - # if we are limited to one organization: - if options.org_number: - if org_id == options.org_number: - if group_name not in groups: - groups[group_name] = set() - - 
groups[group_name].add(system['spacewalk_server_name']) - if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]: - meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']] - # or we list all groups and systems: - else: - if group_name not in groups: - groups[group_name] = set() - - groups[group_name].add(system['spacewalk_server_name']) - if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]: - meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']] - - except (OSError) as e: - print('Problem executing the command "%s system-groups-systems": %s' % - (SW_REPORT, str(e)), file=sys.stderr) - sys.exit(2) - - if options.human: - for group, systems in iteritems(groups): - print('[%s]\n%s\n' % (group, '\n'.join(systems))) - else: - final = dict([(k, list(s)) for k, s in iteritems(groups)]) - final["_meta"] = meta - print(json.dumps(final)) - # print(json.dumps(groups)) - sys.exit(0) - - -# Return a details information concerning the spacewalk server -# ------------------------------ -elif options.host: - - host_details = {} - try: - for system in spacewalk_report('inventory'): - if system['spacewalk_hostname'] == options.host: - host_details = system - break - - except (OSError) as e: - print('Problem executing the command "%s inventory": %s' % - (SW_REPORT, str(e)), file=sys.stderr) - sys.exit(2) - - if options.human: - print('Host: %s' % options.host) - for k, v in iteritems(host_details): - print(' %s: %s' % (k, '\n '.join(v.split(';')))) - else: - print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items()))) - sys.exit(0) - -else: - - parser.print_help() - sys.exit(1) diff --git a/scripts/inventory/ssh_config.py b/scripts/inventory/ssh_config.py deleted file mode 100755 index ad56a53ebb..0000000000 --- 
a/scripts/inventory/ssh_config.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env python - -# (c) 2014, Tomas Karasek -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Dynamic inventory script which lets you use aliases from ~/.ssh/config. -# -# There were some issues with various Paramiko versions. I took a deeper look -# and tested heavily. Now, ansible parses this alright with Paramiko versions -# 1.7.2 to 1.15.2. -# -# It prints inventory based on parsed ~/.ssh/config. You can refer to hosts -# with their alias, rather than with the IP or hostname. It takes advantage -# of the ansible_ssh_{host,port,user,private_key_file}. -# -# If you have in your .ssh/config: -# Host git -# HostName git.domain.org -# User tkarasek -# IdentityFile /home/tomk/keys/thekey -# -# You can do -# $ ansible git -m ping -# -# Example invocation: -# ssh_config.py --list -# ssh_config.py --host - -import argparse -import os.path -import sys - -import json - -import paramiko - -from ansible.module_utils.common._collections_compat import MutableSequence - -SSH_CONF = '~/.ssh/config' - -_key = 'ssh_config' - -_ssh_to_ansible = [('user', 'ansible_ssh_user'), - ('hostname', 'ansible_ssh_host'), - ('identityfile', 'ansible_ssh_private_key_file'), - ('port', 'ansible_ssh_port')] - - -def get_config(): - if not os.path.isfile(os.path.expanduser(SSH_CONF)): - return {} - with open(os.path.expanduser(SSH_CONF)) as f: - cfg = paramiko.SSHConfig() - cfg.parse(f) - ret_dict = {} - for d in cfg._config: - if isinstance(d['host'], MutableSequence): - alias = d['host'][0] - else: - alias = d['host'] - if ('?' 
in alias) or ('*' in alias): - continue - _copy = dict(d) - del _copy['host'] - if 'config' in _copy: - ret_dict[alias] = _copy['config'] - else: - ret_dict[alias] = _copy - return ret_dict - - -def print_list(): - cfg = get_config() - meta = {'hostvars': {}} - for alias, attributes in cfg.items(): - tmp_dict = {} - for ssh_opt, ans_opt in _ssh_to_ansible: - if ssh_opt in attributes: - # If the attribute is a list, just take the first element. - # Private key is returned in a list for some reason. - attr = attributes[ssh_opt] - if isinstance(attr, MutableSequence): - attr = attr[0] - tmp_dict[ans_opt] = attr - if tmp_dict: - meta['hostvars'][alias] = tmp_dict - - print(json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta})) - - -def print_host(host): - cfg = get_config() - print(json.dumps(cfg[host])) - - -def get_args(args_list): - parser = argparse.ArgumentParser( - description='ansible inventory script parsing .ssh/config') - mutex_group = parser.add_mutually_exclusive_group(required=True) - help_list = 'list all hosts from .ssh/config inventory' - mutex_group.add_argument('--list', action='store_true', help=help_list) - help_host = 'display variables for a host' - mutex_group.add_argument('--host', help=help_host) - return parser.parse_args(args_list) - - -def main(args_list): - - args = get_args(args_list) - if args.list: - print_list() - if args.host: - print_host(args.host) - - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/scripts/inventory/stacki.py b/scripts/inventory/stacki.py deleted file mode 100755 index 2c6bb37c9a..0000000000 --- a/scripts/inventory/stacki.py +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2016, Hugh Ma -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Stacki inventory script -# Configure stacki.yml with proper auth information and place 
in the following: -# - ../inventory/stacki.yml -# - /etc/stacki/stacki.yml -# - /etc/ansible/stacki.yml -# The stacki.yml file can contain entries for authentication information -# regarding the Stacki front-end node. -# -# use_hostnames uses hostname rather than interface ip as connection -# -# - -""" -Example Usage: - List Stacki Nodes - $ ./stack.py --list - - -Example Configuration: ---- -stacki: - auth: - stacki_user: admin - stacki_password: abc12345678910 - stacki_endpoint: http://192.168.200.50/stack -use_hostnames: false -""" - -import argparse -import os -import sys -import yaml -from distutils.version import StrictVersion - -import json - -try: - import requests -except Exception: - sys.exit('requests package is required for this inventory script') - - -CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml'] - - -def stack_auth(params): - endpoint = params['stacki_endpoint'] - auth_creds = {'USERNAME': params['stacki_user'], - 'PASSWORD': params['stacki_password']} - - client = requests.session() - client.get(endpoint) - - init_csrf = client.cookies['csrftoken'] - - header = {'csrftoken': init_csrf, 'X-CSRFToken': init_csrf, - 'Content-type': 'application/x-www-form-urlencoded'} - - login_endpoint = endpoint + "/login" - - login_req = client.post(login_endpoint, data=auth_creds, headers=header) - - csrftoken = login_req.cookies['csrftoken'] - sessionid = login_req.cookies['sessionid'] - - auth_creds.update(CSRFTOKEN=csrftoken, SESSIONID=sessionid) - - return client, auth_creds - - -def stack_build_header(auth_creds): - header = {'csrftoken': auth_creds['CSRFTOKEN'], - 'X-CSRFToken': auth_creds['CSRFTOKEN'], - 'sessionid': auth_creds['SESSIONID'], - 'Content-type': 'application/json'} - - return header - - -def stack_host_list(endpoint, header, client): - - stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}), - headers=header) - return json.loads(stack_r.json()) - - -def stack_net_list(endpoint, header, client): - - stack_r 
= client.post(endpoint, data=json.dumps({"cmd": "list host interface"}), - headers=header) - return json.loads(stack_r.json()) - - -def format_meta(hostdata, intfdata, config): - use_hostnames = config['use_hostnames'] - meta = dict(all=dict(hosts=list()), - frontends=dict(hosts=list()), - backends=dict(hosts=list()), - _meta=dict(hostvars=dict())) - - # Iterate through list of dicts of hosts and remove - # environment key as it causes conflicts - for host in hostdata: - del host['environment'] - meta['_meta']['hostvars'][host['host']] = host - meta['_meta']['hostvars'][host['host']]['interfaces'] = list() - - # @bbyhuy to improve readability in next iteration - - for intf in intfdata: - if intf['host'] in meta['_meta']['hostvars']: - meta['_meta']['hostvars'][intf['host']]['interfaces'].append(intf) - if intf['default'] is True: - meta['_meta']['hostvars'][intf['host']]['ansible_host'] = intf['ip'] - if not use_hostnames: - meta['all']['hosts'].append(intf['ip']) - if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend': - meta['backends']['hosts'].append(intf['ip']) - else: - meta['frontends']['hosts'].append(intf['ip']) - else: - meta['all']['hosts'].append(intf['host']) - if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend': - meta['backends']['hosts'].append(intf['host']) - else: - meta['frontends']['hosts'].append(intf['host']) - return meta - - -def parse_args(): - parser = argparse.ArgumentParser(description='Stacki Inventory Module') - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('--list', action='store_true', - help='List active hosts') - group.add_argument('--host', help='List details about the specific host') - - return parser.parse_args() - - -def main(): - args = parse_args() - - if StrictVersion(requests.__version__) < StrictVersion("2.4.3"): - sys.exit('requests>=2.4.3 is required for this inventory script') - - try: - config_files = CONFIG_FILES - 
config_files.append(os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml') - config = None - for cfg_file in config_files: - if os.path.isfile(cfg_file): - stream = open(cfg_file, 'r') - config = yaml.safe_load(stream) - break - if not config: - sys.stderr.write("No config file found at {0}\n".format(config_files)) - sys.exit(1) - client, auth_creds = stack_auth(config['stacki']['auth']) - header = stack_build_header(auth_creds) - host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client) - intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client) - final_meta = format_meta(host_list, intf_list, config) - print(json.dumps(final_meta, indent=4)) - except Exception as e: - sys.stderr.write('%s\n' % e.message) - sys.exit(1) - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/stacki.yml b/scripts/inventory/stacki.yml deleted file mode 100644 index 2e31c72cbc..0000000000 --- a/scripts/inventory/stacki.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -stacki: - auth: - stacki_user: admin - stacki_password: GhYgWut1hfGbbnstmbW3m-bJbeME-3EvC20rF1LHrDM - stacki_endpoint: http://192.168.200.50/stack -use_hostnames: false \ No newline at end of file diff --git a/scripts/inventory/vagrant.py b/scripts/inventory/vagrant.py deleted file mode 100755 index 74db0212c5..0000000000 --- a/scripts/inventory/vagrant.py +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env python -""" -Vagrant external inventory script. 
Automatically finds the IP of the booted vagrant vm(s), and -returns it under the host group 'vagrant' - -Example Vagrant configuration using this script: - - config.vm.provision :ansible do |ansible| - ansible.playbook = "./provision/your_playbook.yml" - ansible.inventory_path = "./provision/inventory/vagrant.py" - ansible.verbose = true - end -""" - -# Copyright (C) 2013 Mark Mandel -# 2015 Igor Khomyakov -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# -# Thanks to the spacewalk.py inventory script for giving me the basic structure -# of this. -# - -import sys -import os.path -import subprocess -import re -from paramiko import SSHConfig -from optparse import OptionParser -from collections import defaultdict -import json - -from ansible.module_utils._text import to_text -from ansible.module_utils.six.moves import StringIO - - -_group = 'vagrant' # a default group -_ssh_to_ansible = [('user', 'ansible_user'), - ('hostname', 'ansible_host'), - ('identityfile', 'ansible_ssh_private_key_file'), - ('port', 'ansible_port')] - -# Options -# ------------------------------ - -parser = OptionParser(usage="%prog [options] --list | --host ") -parser.add_option('--list', default=False, dest="list", action="store_true", - help="Produce a JSON consumable grouping of Vagrant servers for Ansible") -parser.add_option('--host', default=None, dest="host", - help="Generate additional host specific details for given host for Ansible") -(options, args) = parser.parse_args() - -# -# helper functions -# - - -# get all the ssh configs for all boxes in an array of dictionaries. 
-def get_ssh_config(): - return dict((k, get_a_ssh_config(k)) for k in list_running_boxes()) - - -# list all the running boxes -def list_running_boxes(): - - output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n') - - boxes = [] - - for line in output: - matcher = re.search(r"([^\s]+)[\s]+running \(.+", line) - if matcher: - boxes.append(matcher.group(1)) - - return boxes - - -# get the ssh config for a single box -def get_a_ssh_config(box_name): - """Gives back a map of all the machine's ssh configurations""" - - output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict') - config = SSHConfig() - config.parse(StringIO(output)) - host_config = config.lookup(box_name) - - # man 5 ssh_config: - # > It is possible to have multiple identity files ... - # > all these identities will be tried in sequence. - for id in host_config['identityfile']: - if os.path.isfile(id): - host_config['identityfile'] = id - - return dict((v, host_config[k]) for k, v in _ssh_to_ansible) - - -# List out servers that vagrant has running -# ------------------------------ -if options.list: - ssh_config = get_ssh_config() - meta = defaultdict(dict) - - for host in ssh_config: - meta['hostvars'][host] = ssh_config[host] - - print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta})) - sys.exit(0) - -# Get out the host details -# ------------------------------ -elif options.host: - print(json.dumps(get_a_ssh_config(options.host))) - sys.exit(0) - -# Print out help -# ------------------------------ -else: - parser.print_help() - sys.exit(0) diff --git a/scripts/inventory/vbox.py b/scripts/inventory/vbox.py deleted file mode 100755 index 110ead1471..0000000000 --- a/scripts/inventory/vbox.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, 
print_function) -__metaclass__ = type - -import sys -from subprocess import Popen, PIPE - -import json - - -class SetEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, set): - return list(obj) - return json.JSONEncoder.default(self, obj) - - -VBOX = "VBoxManage" - - -def get_hosts(host=None): - - returned = {} - try: - if host: - p = Popen([VBOX, 'showvminfo', host], stdout=PIPE) - else: - returned = {'all': set(), '_metadata': {}} - p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE) - except Exception: - sys.exit(1) - - hostvars = {} - prevkey = pref_k = '' - - for line in p.stdout.readlines(): - - try: - k, v = line.split(':', 1) - except Exception: - continue - - if k == '': - continue - - v = v.strip() - if k.startswith('Name'): - if v not in hostvars: - curname = v - hostvars[curname] = {} - try: # try to get network info - x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=PIPE) - ipinfo = x.stdout.read() - if 'Value' in ipinfo: - a, ip = ipinfo.split(':', 1) - hostvars[curname]['ansible_ssh_host'] = ip.strip() - except Exception: - pass - - continue - - if not host: - if k == 'Groups': - for group in v.split('/'): - if group: - if group not in returned: - returned[group] = set() - returned[group].add(curname) - returned['all'].add(curname) - continue - - pref_k = 'vbox_' + k.strip().replace(' ', '_') - if k.startswith(' '): - if prevkey not in hostvars[curname]: - hostvars[curname][prevkey] = {} - hostvars[curname][prevkey][pref_k] = v - else: - if v != '': - hostvars[curname][pref_k] = v - - prevkey = pref_k - - if not host: - returned['_metadata']['hostvars'] = hostvars - else: - returned = hostvars[host] - return returned - - -if __name__ == '__main__': - - inventory = {} - hostname = None - - if len(sys.argv) > 1: - if sys.argv[1] == "--host": - hostname = sys.argv[2] - - if hostname: - inventory = get_hosts(hostname) - else: - inventory = get_hosts() - - 
sys.stdout.write(json.dumps(inventory, indent=2, cls=SetEncoder)) diff --git a/scripts/inventory/zone.py b/scripts/inventory/zone.py deleted file mode 100755 index 9020f9ea79..0000000000 --- a/scripts/inventory/zone.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python - -# (c) 2015, Dagobert Michelsen -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from subprocess import Popen, PIPE -import sys -import json - -result = {} -result['all'] = {} - -pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True) -result['all']['hosts'] = [] -for l in pipe.stdout.readlines(): - # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared - s = l.split(':') - if s[1] != 'global': - result['all']['hosts'].append(s[1]) - -result['all']['vars'] = {} -result['all']['vars']['ansible_connection'] = 'zone' - -if len(sys.argv) == 2 and sys.argv[1] == '--list': - print(json.dumps(result)) -elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print(json.dumps({'ansible_connection': 'zone'})) -else: - sys.stderr.write("Need an argument, either --list or --host \n") diff --git a/scripts/vault/__init__.py b/scripts/vault/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/scripts/vault/azure_vault.ini b/scripts/vault/azure_vault.ini deleted file mode 100644 index d47f976201..0000000000 --- a/scripts/vault/azure_vault.ini +++ /dev/null @@ -1,10 +0,0 @@ -[azure_keyvault] # Used with Azure KeyVault -vault_name=django-keyvault -secret_name=vaultpw -secret_version=9k1e6c7367b33eac8ee241b3698009f3 - -[azure] # Used by Dynamic Inventory -group_by_resource_group=yes -group_by_location=yes -group_by_security_group=yes -group_by_tag=yes \ No newline at end of file diff --git a/scripts/vault/azure_vault.py b/scripts/vault/azure_vault.py deleted file mode 100755 index 
c27418f34f..0000000000 --- a/scripts/vault/azure_vault.py +++ /dev/null @@ -1,595 +0,0 @@ -#!/usr/bin/env python -# -# This script borrows a great deal of code from the azure_rm.py dynamic inventory script -# that is packaged with Ansible. This can be found in the Ansible GitHub project at: -# https://github.com/ansible/ansible/blob/devel/contrib/inventory/azure_rm.py -# -# The Azure Dynamic Inventory script was written by: -# Copyright (c) 2016 Matt Davis, -# Chris Houseknecht, -# Altered/Added for Vault functionality: -# Austin Hobbs, GitHub: @OxHobbs - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Ansible Vault Password with Azure Key Vault Secret Script -========================================================= -This script is designed to be used with Ansible Vault. It provides the -capability to provide this script as the password file to the ansible-vault -command. This script uses the Azure Python SDK. For instruction on installing -the Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/ - -Authentication --------------- -The order of precedence is command line arguments, environment variables, -and finally the [default] profile found in ~/.azure/credentials for all -authentication parameters. - -If using a credentials file, it should be an ini formatted file with one or -more sections, which we refer to as profiles. The script looks for a -[default] section, if a profile is not specified either on the command line -or with an environment variable. The keys in a profile will match the -list of command line arguments below. - -For command line arguments and environment variables specify a profile found -in your ~/.azure/credentials file, or a service principal or Active Directory -user. 
- -Command line arguments: - - profile - - client_id - - secret - - subscription_id - - tenant - - ad_user - - password - - cloud_environment - - adfs_authority_url - - vault-name - - secret-name - - secret-version - -Environment variables: - - AZURE_PROFILE - - AZURE_CLIENT_ID - - AZURE_SECRET - - AZURE_SUBSCRIPTION_ID - - AZURE_TENANT - - AZURE_AD_USER - - AZURE_PASSWORD - - AZURE_CLOUD_ENVIRONMENT - - AZURE_ADFS_AUTHORITY_URL - - AZURE_VAULT_NAME - - AZURE_VAULT_SECRET_NAME - - AZURE_VAULT_SECRET_VERSION - - -Vault ------ - -The order of precedence of Azure Key Vault Secret information is the same. -Command line arguments, environment variables, and finally the azure_vault.ini -file with the [azure_keyvault] section. - -azure_vault.ini (or azure_rm.ini if merged with Azure Dynamic Inventory Script) ------------------------------------------------------------------------------- -As mentioned above, you can control execution using environment variables or a .ini file. A sample -azure_vault.ini is included. The name of the .ini file is the basename of the inventory script (in this case -'azure_vault') with a .ini extension. It also assumes the .ini file is alongside the script. To specify -a different path for the .ini file, define the AZURE_VAULT_INI_PATH environment variable: - - export AZURE_VAULT_INI_PATH=/path/to/custom.ini - or - export AZURE_VAULT_INI_PATH=[same path as azure_rm.ini if merged] - - __NOTE__: If using the azure_rm.py dynamic inventory script, it is possible to use the same .ini - file for both the azure_rm dynamic inventory and the azure_vault password file. Simply add a section - named [azure_keyvault] to the ini file with the following properties: vault_name, secret_name and - secret_version. 
- -Examples: ---------- - Validate the vault_pw script with Python - $ python azure_vault.py -n mydjangovault -s vaultpw -v 6b6w7f7252b44eac8ee726b3698009f3 - $ python azure_vault.py --vault-name 'mydjangovault' --secret-name 'vaultpw' \ - --secret-version 6b6w7f7252b44eac8ee726b3698009f3 - - Use with a playbook - $ ansible-playbook -i ./azure_rm.py my_playbook.yml --limit galaxy-qa --vault-password-file ./azure_vault.py - - -Insecure Platform Warning -------------------------- -If you receive InsecurePlatformWarning from urllib3, install the -requests security packages: - - pip install requests[security] - - -author: - - Chris Houseknecht (@chouseknecht) - - Matt Davis (@nitzmahone) - - Austin Hobbs (@OxHobbs) - -Company: Ansible by Red Hat, Microsoft - -Version: 0.1.0 -''' - -import argparse -import os -import re -import sys -import inspect -from azure.keyvault import KeyVaultClient - -from ansible.module_utils.six.moves import configparser as cp - -from os.path import expanduser -import ansible.module_utils.six.moves.urllib.parse as urlparse - -HAS_AZURE = True -HAS_AZURE_EXC = None -HAS_AZURE_CLI_CORE = True -CLIError = None - -try: - from msrestazure.azure_active_directory import AADTokenCredentials - from msrestazure.azure_exceptions import CloudError - from msrestazure.azure_active_directory import MSIAuthentication - from msrestazure import azure_cloud - from azure.mgmt.compute import __version__ as azure_compute_version - from azure.common import AzureMissingResourceHttpError, AzureHttpError - from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials - from azure.mgmt.network import NetworkManagementClient - from azure.mgmt.resource.resources import ResourceManagementClient - from azure.mgmt.resource.subscriptions import SubscriptionClient - from azure.mgmt.compute import ComputeManagementClient - from adal.authentication_context import AuthenticationContext -except ImportError as exc: - HAS_AZURE_EXC = exc - HAS_AZURE = False - 
-try: - from azure.cli.core.util import CLIError - from azure.common.credentials import get_azure_cli_credentials, get_cli_profile - from azure.common.cloud import get_cli_active_cloud -except ImportError: - HAS_AZURE_CLI_CORE = False - CLIError = Exception - -try: - from ansible.release import __version__ as ansible_version -except ImportError: - ansible_version = 'unknown' - - -AZURE_CREDENTIAL_ENV_MAPPING = dict( - profile='AZURE_PROFILE', - subscription_id='AZURE_SUBSCRIPTION_ID', - client_id='AZURE_CLIENT_ID', - secret='AZURE_SECRET', - tenant='AZURE_TENANT', - ad_user='AZURE_AD_USER', - password='AZURE_PASSWORD', - cloud_environment='AZURE_CLOUD_ENVIRONMENT', - adfs_authority_url='AZURE_ADFS_AUTHORITY_URL' -) - -AZURE_VAULT_SETTINGS = dict( - vault_name='AZURE_VAULT_NAME', - secret_name='AZURE_VAULT_SECRET_NAME', - secret_version='AZURE_VAULT_SECRET_VERSION', -) - -AZURE_MIN_VERSION = "2.0.0" -ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version) - - -class AzureRM(object): - - def __init__(self, args): - self._args = args - self._cloud_environment = None - self._compute_client = None - self._resource_client = None - self._network_client = None - self._adfs_authority_url = None - self._vault_client = None - self._resource = None - - self.debug = False - if args.debug: - self.debug = True - - self.credentials = self._get_credentials(args) - if not self.credentials: - self.fail("Failed to get credentials. 
Either pass as parameters, set environment variables, " - "or define a profile in ~/.azure/credentials.") - - # if cloud_environment specified, look up/build Cloud object - raw_cloud_env = self.credentials.get('cloud_environment') - if not raw_cloud_env: - self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default - else: - # try to look up "well-known" values via the name attribute on azure_cloud members - all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)] - matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env] - if len(matched_clouds) == 1: - self._cloud_environment = matched_clouds[0] - elif len(matched_clouds) > 1: - self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format( - raw_cloud_env)) - else: - if not urlparse.urlparse(raw_cloud_env).scheme: - self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format( - [x.name for x in all_clouds])) - try: - self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env) - except Exception as e: - self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message)) - - if self.credentials.get('subscription_id', None) is None: - self.fail("Credentials did not include a subscription_id value.") - self.log("setting subscription_id") - self.subscription_id = self.credentials['subscription_id'] - - # get authentication authority - # for adfs, user could pass in authority or not. 
- # for others, use default authority from cloud environment - if self.credentials.get('adfs_authority_url'): - self._adfs_authority_url = self.credentials.get('adfs_authority_url') - else: - self._adfs_authority_url = self._cloud_environment.endpoints.active_directory - - # get resource from cloud environment - self._resource = self._cloud_environment.endpoints.active_directory_resource_id - - if self.credentials.get('credentials'): - self.azure_credentials = self.credentials.get('credentials') - elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'): - self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'], - secret=self.credentials['secret'], - tenant=self.credentials['tenant'], - cloud_environment=self._cloud_environment) - - elif self.credentials.get('ad_user') is not None and \ - self.credentials.get('password') is not None and \ - self.credentials.get('client_id') is not None and \ - self.credentials.get('tenant') is not None: - - self.azure_credentials = self.acquire_token_with_username_password( - self._adfs_authority_url, - self._resource, - self.credentials['ad_user'], - self.credentials['password'], - self.credentials['client_id'], - self.credentials['tenant']) - - elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None: - tenant = self.credentials.get('tenant') - if not tenant: - tenant = 'common' - self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], - self.credentials['password'], - tenant=tenant, - cloud_environment=self._cloud_environment) - - else: - self.fail("Failed to authenticate with provided credentials. Some attributes were missing. 
" - "Credentials must include client_id, secret and tenant or ad_user and password, or " - "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, " - "or be logged in using AzureCLI.") - - def log(self, msg): - if self.debug: - print(msg + u'\n') - - def fail(self, msg): - raise Exception(msg) - - def _get_profile(self, profile="default"): - path = expanduser("~") - path += "/.azure/credentials" - try: - config = cp.ConfigParser() - config.read(path) - except Exception as exc: - self.fail("Failed to access {0}. Check that the file exists and you have read " - "access. {1}".format(path, str(exc))) - credentials = dict() - for key in AZURE_CREDENTIAL_ENV_MAPPING: - try: - credentials[key] = config.get(profile, key, raw=True) - except Exception: - pass - - if credentials.get('client_id') is not None or credentials.get('ad_user') is not None: - return credentials - - return None - - def _get_env_credentials(self): - env_credentials = dict() - for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): - env_credentials[attribute] = os.environ.get(env_variable, None) - - if env_credentials['profile'] is not None: - credentials = self._get_profile(env_credentials['profile']) - return credentials - - if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None: - return env_credentials - - return None - - def _get_azure_cli_credentials(self): - credentials, subscription_id = get_azure_cli_credentials() - cloud_environment = get_cli_active_cloud() - - cli_credentials = { - 'credentials': credentials, - 'subscription_id': subscription_id, - 'cloud_environment': cloud_environment - } - return cli_credentials - - def _get_msi_credentials(self, subscription_id_param=None): - credentials = MSIAuthentication() - try: - # try to get the subscription in MSI to test whether MSI is enabled - subscription_client = SubscriptionClient(credentials) - subscription = next(subscription_client.subscriptions.list()) - 
subscription_id = str(subscription.subscription_id) - return { - 'credentials': credentials, - 'subscription_id': subscription_id_param or subscription_id - } - except Exception as exc: - return None - - def _get_credentials(self, params): - # Get authentication credentials. - # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials. - - self.log('Getting credentials') - - arg_credentials = dict() - for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): - arg_credentials[attribute] = getattr(params, attribute) - - # try module params - if arg_credentials['profile'] is not None: - self.log('Retrieving credentials with profile parameter.') - credentials = self._get_profile(arg_credentials['profile']) - return credentials - - if arg_credentials['client_id'] is not None: - self.log('Received credentials from parameters.') - return arg_credentials - - if arg_credentials['ad_user'] is not None: - self.log('Received credentials from parameters.') - return arg_credentials - - # try environment - env_credentials = self._get_env_credentials() - if env_credentials: - self.log('Received credentials from env.') - return env_credentials - - # try default profile from ~./azure/credentials - default_credentials = self._get_profile() - if default_credentials: - self.log('Retrieved default profile credentials from ~/.azure/credentials.') - return default_credentials - - msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id')) - if msi_credentials: - self.log('Retrieved credentials from MSI.') - return msi_credentials - - try: - if HAS_AZURE_CLI_CORE: - self.log('Retrieving credentials from AzureCLI profile') - cli_credentials = self._get_azure_cli_credentials() - return cli_credentials - except CLIError as ce: - self.log('Error getting AzureCLI profile credentials - {0}'.format(ce)) - - return None - - def acquire_token_with_username_password(self, authority, resource, username, password, client_id, 
tenant): - authority_uri = authority - - if tenant is not None: - authority_uri = authority + '/' + tenant - - context = AuthenticationContext(authority_uri) - token_response = context.acquire_token_with_username_password(resource, username, password, client_id) - return AADTokenCredentials(token_response) - - def _register(self, key): - try: - # We have to perform the one-time registration here. Otherwise, we receive an error the first - # time we attempt to use the requested client. - resource_client = self.rm_client - resource_client.providers.register(key) - except Exception as exc: - self.log("One-time registration of {0} failed - {1}".format(key, str(exc))) - self.log("You might need to register {0} using an admin account".format(key)) - self.log(("To register a provider using the Python CLI: " - "https://docs.microsoft.com/azure/azure-resource-manager/" - "resource-manager-common-deployment-errors#noregisteredproviderfound")) - - def get_mgmt_svc_client(self, client_type, base_url, api_version): - client = client_type(self.azure_credentials, - self.subscription_id, - base_url=base_url, - api_version=api_version) - client.config.add_user_agent(ANSIBLE_USER_AGENT) - return client - - def get_vault_client(self): - return KeyVaultClient(self.azure_credentials) - - def get_vault_suffix(self): - return self._cloud_environment.suffixes.keyvault_dns - - @property - def network_client(self): - self.log('Getting network client') - if not self._network_client: - self._network_client = self.get_mgmt_svc_client(NetworkManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-06-01') - self._register('Microsoft.Network') - return self._network_client - - @property - def rm_client(self): - self.log('Getting resource manager client') - if not self._resource_client: - self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-05-10') - return self._resource_client - - 
@property - def compute_client(self): - self.log('Getting compute client') - if not self._compute_client: - self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-03-30') - self._register('Microsoft.Compute') - return self._compute_client - - @property - def vault_client(self): - self.log('Getting the Key Vault client') - if not self._vault_client: - self._vault_client = self.get_vault_client() - - return self._vault_client - - -class AzureKeyVaultSecret: - - def __init__(self): - - self._args = self._parse_cli_args() - - try: - rm = AzureRM(self._args) - except Exception as e: - sys.exit("{0}".format(str(e))) - - self._get_vault_settings() - - if self._args.vault_name: - self.vault_name = self._args.vault_name - - if self._args.secret_name: - self.secret_name = self._args.secret_name - - if self._args.secret_version: - self.secret_version = self._args.secret_version - - self._vault_suffix = rm.get_vault_suffix() - self._vault_client = rm.vault_client - - print(self.get_password_from_vault()) - - def _parse_cli_args(self): - parser = argparse.ArgumentParser( - description='Obtain the vault password used to secure your Ansilbe secrets' - ) - parser.add_argument('-n', '--vault-name', action='store', help='Name of Azure Key Vault') - parser.add_argument('-s', '--secret-name', action='store', - help='Name of the secret stored in Azure Key Vault') - parser.add_argument('-v', '--secret-version', action='store', - help='Version of the secret to be retrieved') - parser.add_argument('--debug', action='store_true', default=False, - help='Send the debug messages to STDOUT') - parser.add_argument('--profile', action='store', - help='Azure profile contained in ~/.azure/credentials') - parser.add_argument('--subscription_id', action='store', - help='Azure Subscription Id') - parser.add_argument('--client_id', action='store', - help='Azure Client Id ') - parser.add_argument('--secret', action='store', 
- help='Azure Client Secret') - parser.add_argument('--tenant', action='store', - help='Azure Tenant Id') - parser.add_argument('--ad_user', action='store', - help='Active Directory User') - parser.add_argument('--password', action='store', - help='password') - parser.add_argument('--adfs_authority_url', action='store', - help='Azure ADFS authority url') - parser.add_argument('--cloud_environment', action='store', - help='Azure Cloud Environment name or metadata discovery URL') - - return parser.parse_args() - - def get_password_from_vault(self): - vault_url = 'https://{0}{1}'.format(self.vault_name, self._vault_suffix) - secret = self._vault_client.get_secret(vault_url, self.secret_name, self.secret_version) - return secret.value - - def _get_vault_settings(self): - env_settings = self._get_vault_env_settings() - if None not in set(env_settings.values()): - for key in AZURE_VAULT_SETTINGS: - setattr(self, key, env_settings.get(key, None)) - else: - file_settings = self._load_vault_settings() - if not file_settings: - return - - for key in AZURE_VAULT_SETTINGS: - if file_settings.get(key): - setattr(self, key, file_settings.get(key)) - - def _get_vault_env_settings(self): - env_settings = dict() - for attribute, env_variable in AZURE_VAULT_SETTINGS.items(): - env_settings[attribute] = os.environ.get(env_variable, None) - return env_settings - - def _load_vault_settings(self): - basename = os.path.splitext(os.path.basename(__file__))[0] - default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini')) - path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_VAULT_INI_PATH', default_path))) - config = None - settings = None - try: - config = cp.ConfigParser() - config.read(path) - except Exception: - pass - - if config is not None: - settings = dict() - for key in AZURE_VAULT_SETTINGS: - try: - settings[key] = config.get('azure_keyvault', key, raw=True) - except Exception: - pass - - return settings - - -def main(): - if not HAS_AZURE: - 
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format( - AZURE_MIN_VERSION, HAS_AZURE_EXC)) - - AzureKeyVaultSecret() - - -if __name__ == '__main__': - main() diff --git a/scripts/vault/vault-keyring-client.py b/scripts/vault/vault-keyring-client.py deleted file mode 100755 index 8332b228f9..0000000000 --- a/scripts/vault/vault-keyring-client.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# (c) 2014, Matt Martz -# (c) 2016, Justin Mayer -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# ============================================================================= -# -# This script is to be used with ansible-vault's --vault-id arg -# to retrieve the vault password via your OS's native keyring application. -# -# This file *MUST* be saved with executable permissions. Otherwise, Ansible -# will try to parse as a password file and display: "ERROR! Decryption failed" -# -# The `keyring` Python module is required: https://pypi.org/project/keyring/ -# -# By default, this script will store the specified password in the keyring of -# the user that invokes the script. To specify a user keyring, add a [vault] -# section to your ansible.cfg file with a 'username' option. Example: -# -# [vault] -# username = 'ansible-vault' -# -# In useage like: -# -# ansible-vault --vault-id keyring_id@contrib/vault/vault-keyring-client.py view some_encrypted_file -# -# --vault-id will call this script like: -# -# contrib/vault/vault-keyring-client.py --vault-id keyring_id -# -# That will retrieve the password from users keyring for the -# keyring service 'keyring_id'. 
The equivalent of: -# -# keyring get keyring_id $USER -# -# If no vault-id name is specified to ansible command line, the vault-keyring-client.py -# script will be called without a '--vault-id' and will default to the keyring service 'ansible' -# This is equivalent to: -# -# keyring get ansible $USER -# -# You can configure the `vault_password_file` option in ansible.cfg: -# -# [defaults] -# ... -# vault_password_file = /path/to/vault-keyring-client.py -# ... -# -# To set your password, `cd` to your project directory and run: -# -# # will use default keyring service / vault-id of 'ansible' -# /path/to/vault-keyring-client.py --set -# -# or to specify the keyring service / vault-id of 'my_ansible_secret': -# -# /path/to/vault-keyring-client.py --vault-id my_ansible_secret --set -# -# If you choose not to configure the path to `vault_password_file` in -# ansible.cfg, your `ansible-playbook` command might look like: -# -# ansible-playbook --vault-id=keyring_id@/path/to/vault-keyring-client.py site.yml - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import argparse -import sys -import getpass -import keyring - -from ansible.config.manager import ConfigManager - -KEYNAME_UNKNOWN_RC = 2 - - -def build_arg_parser(): - parser = argparse.ArgumentParser(description='Get a vault password from user keyring') - - parser.add_argument('--vault-id', action='store', default=None, - dest='vault_id', - help='name of the vault secret to get from keyring') - parser.add_argument('--username', action='store', default=None, - help='the username whose keyring is queried') - parser.add_argument('--set', action='store_true', default=False, - dest='set_password', - help='set the password instead of getting it') - return parser - - -def main(): - config_manager = ConfigManager() - username = config_manager.data.get_setting('vault.username') - if not username: - username = getpass.getuser() - - keyname = 
config_manager.data.get_setting('vault.keyname') - if not keyname: - keyname = 'ansible' - - arg_parser = build_arg_parser() - args = arg_parser.parse_args() - - username = args.username or username - keyname = args.vault_id or keyname - - # print('username: %s keyname: %s' % (username, keyname)) - - if args.set_password: - intro = 'Storing password in "{}" user keyring using key name: {}\n' - sys.stdout.write(intro.format(username, keyname)) - password = getpass.getpass() - confirm = getpass.getpass('Confirm password: ') - if password == confirm: - keyring.set_password(keyname, username, password) - else: - sys.stderr.write('Passwords do not match\n') - sys.exit(1) - else: - secret = keyring.get_password(keyname, username) - if secret is None: - sys.stderr.write('vault-keyring-client could not find key="%s" for user="%s" via backend="%s"\n' % - (keyname, username, keyring.get_keyring().name)) - sys.exit(KEYNAME_UNKNOWN_RC) - - # print('secret: %s' % secret) - sys.stdout.write('%s\n' % secret) - - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/scripts/vault/vault-keyring.py b/scripts/vault/vault-keyring.py deleted file mode 100755 index 45188b122d..0000000000 --- a/scripts/vault/vault-keyring.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# (c) 2014, Matt Martz -# (c) 2016, Justin Mayer -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# ============================================================================= -# -# This script is to be used with vault_password_file or --vault-password-file -# to retrieve the vault password via your OS's native keyring application. -# -# This file *MUST* be saved with executable permissions. Otherwise, Ansible -# will try to parse as a password file and display: "ERROR! 
Decryption failed" -# -# The `keyring` Python module is required: https://pypi.org/project/keyring/ -# -# By default, this script will store the specified password in the keyring of -# the user that invokes the script. To specify a user keyring, add a [vault] -# section to your ansible.cfg file with a 'username' option. Example: -# -# [vault] -# username = 'ansible-vault' -# -# Another optional setting is for the key name, which allows you to use this -# script to handle multiple project vaults with different passwords: -# -# [vault] -# keyname = 'ansible-vault-yourproject' -# -# You can configure the `vault_password_file` option in ansible.cfg: -# -# [defaults] -# ... -# vault_password_file = /path/to/vault-keyring.py -# ... -# -# To set your password, `cd` to your project directory and run: -# -# python /path/to/vault-keyring.py set -# -# If you choose not to configure the path to `vault_password_file` in -# ansible.cfg, your `ansible-playbook` command might look like: -# -# ansible-playbook --vault-password-file=/path/to/vault-keyring.py site.yml - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import getpass -import keyring - -from ansible.config.manager import ConfigManager, get_ini_config_value - - -def main(): - config = ConfigManager() - username = get_ini_config_value( - config._parsers[config._config_file], - dict(section='vault', key='username') - ) or getpass.getuser() - - keyname = get_ini_config_value( - config._parsers[config._config_file], - dict(section='vault', key='keyname') - ) or 'ansible' - - if len(sys.argv) == 2 and sys.argv[1] == 'set': - intro = 'Storing password in "{}" user keyring using key name: {}\n' - sys.stdout.write(intro.format(username, keyname)) - password = getpass.getpass() - confirm = getpass.getpass('Confirm password: ') - if password == confirm: - keyring.set_password(keyname, username, password) - else: - sys.stderr.write('Passwords do not match\n') - sys.exit(1) - 
else: - sys.stdout.write('{0}\n'.format(keyring.get_password(keyname, - username))) - - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/tests/integration/targets/script_inventory_foreman/aliases b/tests/integration/targets/script_inventory_foreman/aliases deleted file mode 100644 index a965d6e836..0000000000 --- a/tests/integration/targets/script_inventory_foreman/aliases +++ /dev/null @@ -1,3 +0,0 @@ -shippable/cloud/group1 -cloud/foreman -needs/file/scripts/inventory/foreman.py diff --git a/tests/integration/targets/script_inventory_foreman/foreman.sh b/tests/integration/targets/script_inventory_foreman/foreman.sh deleted file mode 100755 index 1b3e70fb1a..0000000000 --- a/tests/integration/targets/script_inventory_foreman/foreman.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash -# Wrapper to use the correct Python interpreter and support code coverage. - -REL_SCRIPT="../../../../scripts/inventory/foreman.py" -ABS_SCRIPT="$("${ANSIBLE_TEST_PYTHON_INTERPRETER}" -c "import os; print(os.path.abspath('${REL_SCRIPT}'))")" - -# Make sure output written to current directory ends up in the temp dir. 
-cd "${OUTPUT_DIR}" - -python.py "${ABS_SCRIPT}" "$@" diff --git a/tests/integration/targets/script_inventory_foreman/runme.sh b/tests/integration/targets/script_inventory_foreman/runme.sh deleted file mode 100755 index a9c94fbe7d..0000000000 --- a/tests/integration/targets/script_inventory_foreman/runme.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -export FOREMAN_HOST="${FOREMAN_HOST:-localhost}" -export FOREMAN_PORT="${FOREMAN_PORT:-8080}" -export FOREMAN_INI_PATH="${OUTPUT_DIR}/foreman.ini" - - -############################################ -# SMOKETEST WITH SIMPLE INI -############################################ - -cat > "$FOREMAN_INI_PATH" < "$FOREMAN_INI_PATH" < 900 }}" From db713bd0f5c88b2ffbc27d5c4eb123b35972af8f Mon Sep 17 00:00:00 2001 From: Anup Chenthamarakshan Date: Sun, 20 Jun 2021 03:42:19 -0700 Subject: [PATCH 0149/2828] proxmox_kvm: Fix ZFS device string parsing (#2841) ZFS-backed block devices may contain just the bare device name and not have extra options like `,size=foo`, `,format=qcow2` etc. This breaks an assumption in existing regex (which expects a comma). Support such device strings and add a couple of testcases to validate. 
--- .../fragments/2841-proxmox_kvm_zfs_devstr.yml | 4 ++++ plugins/modules/cloud/misc/proxmox_kvm.py | 20 ++++++++++--------- .../modules/cloud/misc/test_proxmox_kvm.py | 17 ++++++++++++++++ 3 files changed, 32 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/2841-proxmox_kvm_zfs_devstr.yml create mode 100644 tests/unit/plugins/modules/cloud/misc/test_proxmox_kvm.py diff --git a/changelogs/fragments/2841-proxmox_kvm_zfs_devstr.yml b/changelogs/fragments/2841-proxmox_kvm_zfs_devstr.yml new file mode 100644 index 0000000000..7b61f175c6 --- /dev/null +++ b/changelogs/fragments/2841-proxmox_kvm_zfs_devstr.yml @@ -0,0 +1,4 @@ +bugfixes: + - "proxmox_kvm - fix parsing of Proxmox VM information with device info not containing + a comma, like disks backed by ZFS zvols + (https://github.com/ansible-collections/community.general/issues/2840)." diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index a664279e57..0fb486600c 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -818,23 +818,25 @@ def get_vminfo(module, proxmox, node, vmid, **kwargs): # Split information by type re_net = re.compile(r'net[0-9]') re_dev = re.compile(r'(virtio|ide|scsi|sata)[0-9]') - for k, v in kwargs.items(): + for k in kwargs.keys(): if re_net.match(k): - interface = k - k = vm[k] - k = re.search('=(.*?),', k).group(1) - mac[interface] = k + mac[k] = parse_mac(vm[k]) elif re_dev.match(k): - device = k - k = vm[k] - k = re.search('(.*?),', k).group(1) - devices[device] = k + devices[k] = parse_dev(vm[k]) results['mac'] = mac results['devices'] = devices results['vmid'] = int(vmid) +def parse_mac(netstr): + return re.search('=(.*?),', netstr).group(1) + + +def parse_dev(devstr): + return re.search('(.*?)(,|$)', devstr).group(1) + + def settings(proxmox, vmid, node, **kwargs): proxmox_node = proxmox.nodes(node) diff --git 
a/tests/unit/plugins/modules/cloud/misc/test_proxmox_kvm.py b/tests/unit/plugins/modules/cloud/misc/test_proxmox_kvm.py new file mode 100644 index 0000000000..d486000ed1 --- /dev/null +++ b/tests/unit/plugins/modules/cloud/misc/test_proxmox_kvm.py @@ -0,0 +1,17 @@ +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.community.general.plugins.modules.cloud.misc.proxmox_kvm import parse_dev, parse_mac + + +def test_parse_mac(): + assert parse_mac('virtio=00:11:22:AA:BB:CC,bridge=vmbr0,firewall=1') == '00:11:22:AA:BB:CC' + + +def test_parse_dev(): + assert parse_dev('local-lvm:vm-1000-disk-0,format=qcow2') == 'local-lvm:vm-1000-disk-0' + assert parse_dev('local-lvm:vm-101-disk-1,size=8G') == 'local-lvm:vm-101-disk-1' + assert parse_dev('local-zfs:vm-1001-disk-0') == 'local-zfs:vm-1001-disk-0' From 2768eda89573e8ec82ddc046f9d754bc131dfa3e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 20 Jun 2021 23:07:45 +1200 Subject: [PATCH 0150/2828] serverless - deprecating unused param (#2845) * deprecating unused param * added changelog fragment * deprecate param in arg_spec * Update plugins/modules/cloud/misc/serverless.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2845-serverless-deprecate-functions-param.yml | 2 ++ plugins/modules/cloud/misc/serverless.py | 11 +++-------- 2 files changed, 5 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2845-serverless-deprecate-functions-param.yml diff --git a/changelogs/fragments/2845-serverless-deprecate-functions-param.yml b/changelogs/fragments/2845-serverless-deprecate-functions-param.yml new file mode 100644 index 0000000000..6565b18974 --- /dev/null +++ b/changelogs/fragments/2845-serverless-deprecate-functions-param.yml @@ -0,0 +1,2 @@ 
+deprecated_features: + - serverless - deprecating parameter ``functions`` because it was not used in the code (https://github.com/ansible-collections/community.general/pull/2845). diff --git a/plugins/modules/cloud/misc/serverless.py b/plugins/modules/cloud/misc/serverless.py index 1b2f8b62a6..878621c38c 100644 --- a/plugins/modules/cloud/misc/serverless.py +++ b/plugins/modules/cloud/misc/serverless.py @@ -38,6 +38,7 @@ options: description: - A list of specific functions to deploy. - If this is not provided, all functions in the service will be deployed. + - Deprecated parameter, it will be removed in community.general 5.0.0. type: list elements: str default: [] @@ -79,13 +80,6 @@ EXAMPLES = r''' service_path: '{{ project_dir }}' state: present -- name: Deploy specific functions - community.general.serverless: - service_path: '{{ project_dir }}' - functions: - - my_func_one - - my_func_two - - name: Deploy a project, then pull its resource list back into Ansible community.general.serverless: stage: dev @@ -165,7 +159,8 @@ def main(): argument_spec=dict( service_path=dict(type='path', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), - functions=dict(type='list', elements='str'), + functions=dict(type='list', elements='str', + removed_in_version="5.0.0", removed_from_collection="community.general"), region=dict(type='str', default=''), stage=dict(type='str', default=''), deploy=dict(type='bool', default=True), From 519411e026760477113de5332ba6f514e9599108 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 20 Jun 2021 23:17:58 +1200 Subject: [PATCH 0151/2828] ali_instance_info - marked parameters for deprecation in c.g. 5.0.0 (#2844) * marked parameters for deprecation in c.g. 
5.0.0 * added changelog fragment * deprecate params in arg_spec * doc adjusment per PR --- .../2844-ali_instance_info-deprecate-params.yml | 2 ++ plugins/modules/cloud/alicloud/ali_instance_info.py | 12 ++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2844-ali_instance_info-deprecate-params.yml diff --git a/changelogs/fragments/2844-ali_instance_info-deprecate-params.yml b/changelogs/fragments/2844-ali_instance_info-deprecate-params.yml new file mode 100644 index 0000000000..a37555edcd --- /dev/null +++ b/changelogs/fragments/2844-ali_instance_info-deprecate-params.yml @@ -0,0 +1,2 @@ +deprecated_features: + - ali_instance_info - marked removal version of deprecated parameters ``availability_zone`` and ``instance_names`` (https://github.com/ansible-collections/community.general/issues/2429). diff --git a/plugins/modules/cloud/alicloud/ali_instance_info.py b/plugins/modules/cloud/alicloud/ali_instance_info.py index 8a3b8aeed0..23665bbcad 100644 --- a/plugins/modules/cloud/alicloud/ali_instance_info.py +++ b/plugins/modules/cloud/alicloud/ali_instance_info.py @@ -35,12 +35,14 @@ description: options: availability_zone: description: - - (Deprecated) Aliyun availability zone ID in which to launch the instance. Please use filter item 'zone_id' instead. + - Aliyun availability zone ID in which to launch the instance. + - Deprecated parameter, it will be removed in community.general 5.0.0. Please use filter item I(zone_id) instead. aliases: ['alicloud_zone'] type: str instance_names: description: - - (Deprecated) A list of ECS instance names. Please use filter item 'instance_name' instead. + - A list of ECS instance names. + - Deprecated parameter, it will be removed in community.general 5.0.0. Please use filter item I(instance_name) instead. 
aliases: ["names"] type: list elements: str @@ -374,8 +376,10 @@ except ImportError: def main(): argument_spec = ecs_argument_spec() argument_spec.update(dict( - availability_zone=dict(aliases=['alicloud_zone']), - instance_ids=dict(type='list', elements='str', aliases=['ids']), + availability_zone=dict(aliases=['alicloud_zone'], + removed_in_version="5.0.0", removed_from_collection="community.general"), + instance_ids=dict(type='list', elements='str', aliases=['ids'], + removed_in_version="5.0.0", removed_from_collection="community.general"), instance_names=dict(type='list', elements='str', aliases=['names']), name_prefix=dict(type='str'), tags=dict(type='dict', aliases=['instance_tags']), From ce35d8809474d2a1dee1f2cefd5416c491b97594 Mon Sep 17 00:00:00 2001 From: Stanislav German-Evtushenko Date: Mon, 21 Jun 2021 16:53:03 +0900 Subject: [PATCH 0152/2828] gem_module: Add bindir option (#2837) * gem_module: Add bindir option This option allows to specify directory to install executables, e.g. `/home/user/bin` or `/home/user/.local/bin`. This comes especially handy when used with user_install option as the default path of executables is not in PATH. * Update changelogs/fragments/gem_module_add_bindir_option.yml Co-authored-by: Ajpantuso * gem_module: Integration tests for bindir option * gem_module: Update Integration tests for bindir option * gem_module: Update Integration tests for bindir option Make sure gist is not installed system-wide prior the tests * Revert "gem_module: Update Integration tests for bindir option" This reverts commit 04eec6db27aa90d2b23ead7941aeb5889a7c6437. * Do not check "install_gem_result is changed" for ansible develop on openSUSE * Revert "Do not check "install_gem_result is changed" for ansible develop on openSUSE" This reverts commit 48ecb27889a6d86b91eb70a5b1432a5649846b99. * gem_module: Use --norc to avoid surprises Run install and uninstall actions with `--norc`. This way ansible has more control over the way gems are installed. 
* Revert "gem_module: Use --norc to avoid surprises" This reverts commit 66f40bcfe684ba306759a0fdc028a21ba73ba1dd. * gem_module: bindir - Ignore openSUSE Leap * Update plugins/modules/packaging/language/gem.py Co-authored-by: Felix Fontein * gem_module: Use --norc to avoid surprises Run install and uninstall actions with `--norc` when supported (rubygems >= 2.5.2). This way ansible has more control over the way gems are installed. * Try distutils.version instead of packaging * ver is an list, not string * ver is not list either but tuple * Update changelogs/fragments/gem_module_add_bindir_option.yml Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * ver can be None (when can this happen?) * gem: Add norc option * Apply suggestions from code review Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/gem.py Co-authored-by: Felix Fontein * Use tuples to compare versions * Apply suggestions from code review Co-authored-by: Amin Vakil * Update plugins/modules/packaging/language/gem.py Co-authored-by: Amin Vakil * lost norc option check is back * Move handling norc option to separate function * cosmetic * fix for the previos commit * Apply suggestions from code review Co-authored-by: Felix Fontein * Cache result of get_rubygems_version Co-authored-by: Ajpantuso Co-authored-by: Felix Fontein Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Amin Vakil --- .../gem_module_add_bindir_option.yml | 3 ++ plugins/modules/packaging/language/gem.py | 47 ++++++++++++++++--- tests/integration/targets/gem/tasks/main.yml | 41 ++++++++++++++++ 3 files changed, 84 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/gem_module_add_bindir_option.yml diff --git a/changelogs/fragments/gem_module_add_bindir_option.yml b/changelogs/fragments/gem_module_add_bindir_option.yml new file mode 100644 index 0000000000..f47b6deb27 --- /dev/null +++ 
b/changelogs/fragments/gem_module_add_bindir_option.yml @@ -0,0 +1,3 @@ +minor_changes: + - gem - add ``bindir`` option to specify an installation path for executables such as ``/home/user/bin`` or ``/home/user/.local/bin`` (https://github.com/ansible-collections/community.general/pull/2837). + - gem - add ``norc`` option to avoid loading any ``.gemrc`` file (https://github.com/ansible-collections/community.general/pull/2837). diff --git a/plugins/modules/packaging/language/gem.py b/plugins/modules/packaging/language/gem.py index 516c9b0a41..c7ccdec498 100644 --- a/plugins/modules/packaging/language/gem.py +++ b/plugins/modules/packaging/language/gem.py @@ -62,6 +62,19 @@ options: These gems will be independent from the global installed ones. Specifying this requires user_install to be false. required: false + bindir: + type: path + description: + - Install executables into a specific directory. + version_added: 3.3.0 + norc: + type: bool + default: false + description: + - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2. + - "The current default value will be deprecated in community.general 4.0.0: if the value is not explicitly specified, a deprecation message will be shown." + - From community.general 5.0.0 on, the default will be changed to C(true). + version_added: 3.3.0 env_shebang: description: - Rewrite the shebang line on installed scripts to use /usr/bin/env. 
@@ -134,6 +147,9 @@ def get_rubygems_path(module): def get_rubygems_version(module): + if hasattr(get_rubygems_version, "ver"): + return get_rubygems_version.ver + cmd = get_rubygems_path(module) + ['--version'] (rc, out, err) = module.run_command(cmd, check_rc=True) @@ -141,7 +157,10 @@ def get_rubygems_version(module): if not match: return None - return tuple(int(x) for x in match.groups()) + ver = tuple(int(x) for x in match.groups()) + get_rubygems_version.ver = ver + + return ver def get_rubygems_environ(module): @@ -154,6 +173,7 @@ def get_installed_versions(module, remote=False): cmd = get_rubygems_path(module) cmd.append('query') + cmd.extend(common_opts(module)) if remote: cmd.append('--remote') if module.params['repository']: @@ -188,6 +208,14 @@ def exists(module): return False +def common_opts(module): + opts = [] + ver = get_rubygems_version(module) + if module.params['norc'] and ver and ver >= (2, 5, 2): + opts.append('--norc') + return opts + + def uninstall(module): if module.check_mode: @@ -195,9 +223,13 @@ def uninstall(module): cmd = get_rubygems_path(module) environ = get_rubygems_environ(module) cmd.append('uninstall') + cmd.extend(common_opts(module)) if module.params['install_dir']: cmd.extend(['--install-dir', module.params['install_dir']]) + if module.params['bindir']: + cmd.extend(['--bindir', module.params['bindir']]) + if module.params['version']: cmd.extend(['--version', module.params['version']]) else: @@ -213,13 +245,10 @@ def install(module): return ver = get_rubygems_version(module) - if ver: - major = ver[0] - else: - major = None cmd = get_rubygems_path(module) cmd.append('install') + cmd.extend(common_opts(module)) if module.params['version']: cmd.extend(['--version', module.params['version']]) if module.params['repository']: @@ -227,7 +256,7 @@ def install(module): if not module.params['include_dependencies']: cmd.append('--ignore-dependencies') else: - if major and major < 2: + if ver and ver < (2, 0, 0): 
cmd.append('--include-dependencies') if module.params['user_install']: cmd.append('--user-install') @@ -235,10 +264,12 @@ def install(module): cmd.append('--no-user-install') if module.params['install_dir']: cmd.extend(['--install-dir', module.params['install_dir']]) + if module.params['bindir']: + cmd.extend(['--bindir', module.params['bindir']]) if module.params['pre_release']: cmd.append('--pre') if not module.params['include_doc']: - if major and major < 2: + if ver and ver < (2, 0, 0): cmd.append('--no-rdoc') cmd.append('--no-ri') else: @@ -265,6 +296,8 @@ def main(): state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'), user_install=dict(required=False, default=True, type='bool'), install_dir=dict(required=False, type='path'), + bindir=dict(type='path'), + norc=dict(default=False, type='bool'), pre_release=dict(required=False, default=False, type='bool'), include_doc=dict(required=False, default=False, type='bool'), env_shebang=dict(required=False, default=False, type='bool'), diff --git a/tests/integration/targets/gem/tasks/main.yml b/tests/integration/targets/gem/tasks/main.yml index ce64364d78..499057775c 100644 --- a/tests/integration/targets/gem/tasks/main.yml +++ b/tests/integration/targets/gem/tasks/main.yml @@ -178,3 +178,44 @@ that: - install_gem_result is changed - gem_search.files | length == 0 + +# Custom directory for executables (--bindir) +- name: Install gem with custom bindir + gem: + name: gist + state: present + bindir: "{{ output_dir }}/custom_bindir" + norc: yes + user_install: no # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL + register: install_gem_result + +- name: Get stats of gem executable + stat: + path: "{{ output_dir }}/custom_bindir/gist" + register: gem_bindir_stat + +- name: Ensure gem executable was installed in custom directory + assert: + that: + - install_gem_result is changed + - gem_bindir_stat.stat.exists and 
gem_bindir_stat.stat.isreg + +- name: Remove gem with custom bindir + gem: + name: gist + state: absent + bindir: "{{ output_dir }}/custom_bindir" + norc: yes + user_install: no # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL + register: install_gem_result + +- name: Get stats of gem executable + stat: + path: "{{ output_dir }}/custom_bindir/gist" + register: gem_bindir_stat + +- name: Ensure gem executable was removed from custom directory + assert: + that: + - install_gem_result is changed + - not gem_bindir_stat.stat.exists From d6d0b6f0c1e760bfe4343457d64d0f40ab8a0b15 Mon Sep 17 00:00:00 2001 From: Lennert Mertens Date: Mon, 21 Jun 2021 21:32:07 +0200 Subject: [PATCH 0153/2828] gitlab_user: add support for identity provider (#2691) * Add identity functionality * Add functionality for user without provider or extern_uid * Fix missing key error and documentation * Fix failing tests * Update docs * Add changelog fragment * Update plugins/modules/source_control/gitlab/gitlab_user.py Add version Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_user.py Update boolean default Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_user.py Fix syntax Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_user.py Remove no_log Co-authored-by: Felix Fontein * Update changelogs/fragments/2691-gitlab_user-support-identity-provider.yml Update syntax Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_user.py Update syntax Co-authored-by: Felix Fontein * Update docs * Add functionality to add multiple identities at once * Fix identity example * Add suboptions * Add elements * Update plugins/modules/source_control/gitlab/gitlab_user.py Co-authored-by: Felix Fontein * Apply comma's at the end of dictionaries Co-authored-by: Felix Fontein * Add check mode * Change checkmode for user add and identity 
delete * Update plugins/modules/source_control/gitlab/gitlab_user.py * Update changelogs/fragments/2691-gitlab_user-support-identity-provider.yml Add more features to changelog as suggested here https://github.com/ansible-collections/community.general/pull/2691#discussion_r653250717 Co-authored-by: Felix Fontein * Add better description for identities list and overwrite_identities boolean Co-authored-by: Felix Fontein Co-authored-by: lennert.mertens Co-authored-by: Felix Fontein Co-authored-by: stef.graces Co-authored-by: Stef Graces Co-authored-by: Stef Graces Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- ...-gitlab_user-support-identity-provider.yml | 5 + .../source_control/gitlab/gitlab_user.py | 122 +++++++++++++++++- 2 files changed, 123 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2691-gitlab_user-support-identity-provider.yml diff --git a/changelogs/fragments/2691-gitlab_user-support-identity-provider.yml b/changelogs/fragments/2691-gitlab_user-support-identity-provider.yml new file mode 100644 index 0000000000..065b524c86 --- /dev/null +++ b/changelogs/fragments/2691-gitlab_user-support-identity-provider.yml @@ -0,0 +1,5 @@ +--- +minor_changes: + - "gitlab_user - specifying a password is no longer necessary (https://github.com/ansible-collections/community.general/pull/2691)." + - "gitlab_user - allow to reset an existing password with the new ``reset_password`` option (https://github.com/ansible-collections/community.general/pull/2691)." + - "gitlab_user - add functionality for adding external identity providers to a GitLab user (https://github.com/ansible-collections/community.general/pull/2691)." 
diff --git a/plugins/modules/source_control/gitlab/gitlab_user.py b/plugins/modules/source_control/gitlab/gitlab_user.py index 4d300ea842..8770a041b4 100644 --- a/plugins/modules/source_control/gitlab/gitlab_user.py +++ b/plugins/modules/source_control/gitlab/gitlab_user.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright: (c) 2021, Lennert Mertens (lennert@nubera.be) # Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -22,6 +23,8 @@ notes: author: - Werner Dijkerman (@dj-wasabi) - Guillaume Martinez (@Lunik) + - Lennert Mertens (@LennertMertens) + - Stef Graces (@stgrace) requirements: - python >= 2.7 - python-gitlab python module @@ -50,6 +53,12 @@ options: - GitLab server enforces minimum password length to 8, set this value with 8 or more characters. - Required only if C(state) is set to C(present). type: str + reset_password: + description: + - Whether the user can change its password or not. + default: false + type: bool + version_added: 3.3.0 email: description: - The email that belongs to the user. @@ -107,6 +116,30 @@ options: - Define external parameter for this user. type: bool default: no + identities: + description: + - List of identities to be added/updated for this user. + - To remove all other identities from this user, set I(overwrite_identities=true). + type: list + elements: dict + suboptions: + provider: + description: + - The name of the external identity provider + type: str + extern_uid: + description: + - User ID for external identity. + type: str + version_added: 3.3.0 + overwrite_identities: + description: + - Overwrite identities with identities added in this module. + - This means that all identities that the user has and that are not listed in I(identities) are removed from the user. 
+ - This is only done if a list is provided for I(identities). To remove all identities, provide an empty list. + type: bool + default: false + version_added: 3.3.0 ''' EXAMPLES = ''' @@ -134,6 +167,22 @@ EXAMPLES = ''' group: super_group/mon_group access_level: owner +- name: "Create GitLab User using external identity provider" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + validate_certs: True + api_token: "{{ access_token }}" + name: My Name + username: myusername + password: mysecretpassword + email: me@example.com + identities: + - provider: Keycloak + extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc + state: present + group: super_group/mon_group + access_level: owner + - name: "Block GitLab User" community.general.gitlab_user: api_url: https://gitlab.example.com/ @@ -219,10 +268,13 @@ class GitLabUser(object): 'name': options['name'], 'username': username, 'password': options['password'], + 'reset_password': options['reset_password'], 'email': options['email'], 'skip_confirmation': not options['confirm'], 'admin': options['isadmin'], - 'external': options['external']}) + 'external': options['external'], + 'identities': options['identities'], + }) changed = True else: changed, user = self.updateUser( @@ -240,6 +292,7 @@ class GitLabUser(object): 'value': options['isadmin'], 'setter': 'admin' }, 'external': {'value': options['external']}, + 'identities': {'value': options['identities']}, }, { # put "uncheckable" params here, this means params @@ -247,6 +300,8 @@ class GitLabUser(object): # not return any information about it 'skip_reconfirmation': {'value': not options['confirm']}, 'password': {'value': options['password']}, + 'reset_password': {'value': options['reset_password']}, + 'overwrite_identities': {'value': options['overwrite_identities']}, } ) @@ -393,7 +448,10 @@ class GitLabUser(object): av = arg_value['value'] if av is not None: - if getattr(user, arg_key) != av: + if arg_key == "identities": + changed = 
self.addIdentities(user, av, uncheckable_args['overwrite_identities']['value']) + + elif getattr(user, arg_key) != av: setattr(user, arg_value.get('setter', arg_key), av) changed = True @@ -412,13 +470,53 @@ class GitLabUser(object): if self._module.check_mode: return True + identities = None + if 'identities' in arguments: + identities = arguments['identities'] + del arguments['identities'] + try: user = self._gitlab.users.create(arguments) + if identities: + self.addIdentities(user, identities) + except (gitlab.exceptions.GitlabCreateError) as e: self._module.fail_json(msg="Failed to create user: %s " % to_native(e)) return user + ''' + @param user User object + @param identites List of identities to be added/updated + @param overwrite_identities Overwrite user identities with identities passed to this module + ''' + def addIdentities(self, user, identities, overwrite_identities=False): + changed = False + if overwrite_identities: + changed = self.deleteIdentities(user, identities) + + for identity in identities: + if identity not in user.identities: + setattr(user, 'provider', identity['provider']) + setattr(user, 'extern_uid', identity['extern_uid']) + if not self._module.check_mode: + user.save() + changed = True + return changed + + ''' + @param user User object + @param identites List of identities to be added/updated + ''' + def deleteIdentities(self, user, identities): + changed = False + for identity in user.identities: + if identity not in identities: + if not self._module.check_mode: + user.identityproviders.delete(identity['provider']) + changed = True + return changed + ''' @param username Username of the user ''' @@ -471,6 +569,13 @@ class GitLabUser(object): return user.unblock() +def sanitize_arguments(arguments): + for key, value in list(arguments.items()): + if value is None: + del arguments[key] + return arguments + + def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(dict( @@ -479,6 +584,7 @@ def main(): 
state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]), username=dict(type='str', required=True), password=dict(type='str', no_log=True), + reset_password=dict(type='bool', default=False, no_log=False), email=dict(type='str'), sshkey_name=dict(type='str'), sshkey_file=dict(type='str', no_log=False), @@ -488,6 +594,8 @@ def main(): confirm=dict(type='bool', default=True), isadmin=dict(type='bool', default=False), external=dict(type='bool', default=False), + identities=dict(type='list', elements='dict'), + overwrite_identities=dict(type='bool', default=False), )) module = AnsibleModule( @@ -504,7 +612,7 @@ def main(): ], supports_check_mode=True, required_if=( - ('state', 'present', ['name', 'email', 'password']), + ('state', 'present', ['name', 'email']), ) ) @@ -512,6 +620,7 @@ def main(): state = module.params['state'] user_username = module.params['username'].lower() user_password = module.params['password'] + user_reset_password = module.params['reset_password'] user_email = module.params['email'] user_sshkey_name = module.params['sshkey_name'] user_sshkey_file = module.params['sshkey_file'] @@ -521,6 +630,8 @@ def main(): confirm = module.params['confirm'] user_isadmin = module.params['isadmin'] user_external = module.params['external'] + user_identities = module.params['identities'] + overwrite_identities = module.params['overwrite_identities'] if not HAS_GITLAB_PACKAGE: module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) @@ -559,6 +670,7 @@ def main(): if gitlab_user.createOrUpdateUser(user_username, { "name": user_name, "password": user_password, + "reset_password": user_reset_password, "email": user_email, "sshkey_name": user_sshkey_name, "sshkey_file": user_sshkey_file, @@ -567,7 +679,9 @@ def main(): "access_level": access_level, "confirm": confirm, "isadmin": user_isadmin, - "external": user_external}): + "external": user_external, + "identities": user_identities, + 
"overwrite_identities": overwrite_identities}): module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.userObject._attrs) else: module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.userObject._attrs) From 07085785a38ed456742ec893481fe7f7746d0e30 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 22 Jun 2021 08:16:26 +0200 Subject: [PATCH 0154/2828] Dynamically add meta/runtime.yml redirects before integration tests. (#2633) ci_coverage --- tests/utils/shippable/shippable.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/utils/shippable/shippable.sh b/tests/utils/shippable/shippable.sh index 472bfca1ca..3a00812f12 100755 --- a/tests/utils/shippable/shippable.sh +++ b/tests/utils/shippable/shippable.sh @@ -97,6 +97,13 @@ fi # END: HACK +if [ "${script}" != "sanity" ] && [ "${script}" != "units" ]; then + # Adds meta/runtime.yml redirects for all modules before running integration tests. + # This ensures that ansible-base and ansible-core will use the "real" modules instead of the + # symbolic links, which results in coverage to be reported correctly. + "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/internal_test_tools/tools/meta_runtime.py" redirect --target both --flatmap +fi + export PYTHONIOENCODING='utf-8' if [ "${JOB_TRIGGERED_BY_NAME:-}" == "nightly-trigger" ]; then From 860b2b89a308549d4225e0b3a8576e0c8f35f3d2 Mon Sep 17 00:00:00 2001 From: Tong He <68936428+unnecessary-username@users.noreply.github.com> Date: Wed, 23 Jun 2021 17:29:50 -0400 Subject: [PATCH 0155/2828] jenkins_build: Support stop a running Jenkins build (#2850) * Support stop a running Jenkins build. Meanwhile enrich document content and test cases. * Fix the inconsistencies regarding the function name. * Submit the changelog and fix a PEP8 issue. * Remedy whitespace related PEP8 issues. * Implement the idempotent test case for the stop build function. 
* Make sure it returns proper changed status when we stop a build repeatedly. * Fix incorrect usages on comparison with True or False and incorrect usages on validating the changed status. * In this mocking situation, adjust the mock return value and test case to perform unit testing. * Implement JenkinsMockIdempotent() to mock return value in idempotent test cases. * Fix issues reported by CI. * Refactor the code to avoid CI exception and remove get_build_status() from mock function as they should not be there. * Update plugins/modules/web_infrastructure/jenkins_build.py Co-authored-by: Felix Fontein --- ...nkins_build-support-stop-jenkins-build.yml | 4 + .../web_infrastructure/jenkins_build.py | 59 +++++++++++--- .../web_infrastructure/test_jenkins_build.py | 76 ++++++++++++++++++- 3 files changed, 126 insertions(+), 13 deletions(-) create mode 100644 changelogs/fragments/2850-jenkins_build-support-stop-jenkins-build.yml diff --git a/changelogs/fragments/2850-jenkins_build-support-stop-jenkins-build.yml b/changelogs/fragments/2850-jenkins_build-support-stop-jenkins-build.yml new file mode 100644 index 0000000000..ad64e58eec --- /dev/null +++ b/changelogs/fragments/2850-jenkins_build-support-stop-jenkins-build.yml @@ -0,0 +1,4 @@ +minor_changes: + - jenkins_build - support stopping a running jenkins build (https://github.com/ansible-collections/community.general/pull/2850). +bugfixes: + - jenkins_build - examine presence of ``build_number`` before deleting a jenkins build (https://github.com/ansible-collections/community.general/pull/2850). \ No newline at end of file diff --git a/plugins/modules/web_infrastructure/jenkins_build.py b/plugins/modules/web_infrastructure/jenkins_build.py index 7f1d32b602..68f64f7a7b 100644 --- a/plugins/modules/web_infrastructure/jenkins_build.py +++ b/plugins/modules/web_infrastructure/jenkins_build.py @@ -15,7 +15,9 @@ description: - Manage Jenkins builds with Jenkins REST API. 
requirements: - "python-jenkins >= 0.4.12" -author: Brett Milford (@brettmilford) +author: + - Brett Milford (@brettmilford) + - Tong He (@unnecessary-username) options: args: description: @@ -36,9 +38,10 @@ options: type: str state: description: - - Attribute that specifies if the build is to be created or deleted. + - Attribute that specifies if the build is to be created, deleted or stopped. + - The C(stopped) state has been added in community.general 3.3.0. default: present - choices: ['present', 'absent'] + choices: ['present', 'absent', 'stopped'] type: str token: description: @@ -62,9 +65,26 @@ EXAMPLES = ''' args: cloud: "test" availability_zone: "test_az" + state: present user: admin password: asdfg url: http://localhost:8080 + +- name: Stop a running jenkins build anonymously + community.general.jenkins_build: + name: "stop-check" + build_number: 3 + state: stopped + url: http://localhost:8080 + +- name: Delete a jenkins build using token authentication + community.general.jenkins_build: + name: "delete-experiment" + build_number: 30 + state: absent + user: Jenkins + token: abcdefghijklmnopqrstuvwxyz123456 + url: http://localhost:8080 ''' RETURN = ''' @@ -152,7 +172,8 @@ class JenkinsBuild: try: build_number = self.server.get_job_info(self.name)['nextBuildNumber'] except Exception as e: - self.module.fail_json(msg='Unable to get job info from Jenkins server, %s' % to_native(e), exception=traceback.format_exc()) + self.module.fail_json(msg='Unable to get job info from Jenkins server, %s' % to_native(e), + exception=traceback.format_exc()) return build_number @@ -162,7 +183,8 @@ class JenkinsBuild: return response except Exception as e: - self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e), exception=traceback.format_exc()) + self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e), + exception=traceback.format_exc()) def present_build(self): self.build_number = self.get_next_build() @@ -176,6 +198,19 @@ 
class JenkinsBuild: self.module.fail_json(msg='Unable to create build for %s: %s' % (self.jenkins_url, to_native(e)), exception=traceback.format_exc()) + def stopped_build(self): + build_info = None + try: + build_info = self.server.get_build_info(self.name, self.build_number) + if build_info['building'] is True: + self.server.stop_build(self.name, self.build_number) + except Exception as e: + self.module.fail_json(msg='Unable to stop build for %s: %s' % (self.jenkins_url, to_native(e)), + exception=traceback.format_exc()) + else: + if build_info['building'] is False: + self.module.exit_json(**self.result) + def absent_build(self): try: self.server.delete_build(self.name, self.build_number) @@ -191,7 +226,10 @@ class JenkinsBuild: sleep(10) self.get_result() else: - if build_status['result'] == "SUCCESS": + if self.state == "stopped" and build_status['result'] == "ABORTED": + result['changed'] = True + result['build_info'] = build_status + elif build_status['result'] == "SUCCESS": result['changed'] = True result['build_info'] = build_status else: @@ -216,14 +254,13 @@ def main(): build_number=dict(type='int'), name=dict(required=True), password=dict(no_log=True), - state=dict(choices=['present', 'absent'], default="present"), + state=dict(choices=['present', 'absent', 'stopped'], default="present"), token=dict(no_log=True), url=dict(default="http://localhost:8080"), user=dict(), ), - mutually_exclusive=[ - ['password', 'token'], - ], + mutually_exclusive=[['password', 'token']], + required_if=[['state', 'absent', ['build_number'], True], ['state', 'stopped', ['build_number'], True]], ) test_dependencies(module) @@ -231,6 +268,8 @@ def main(): if module.params.get('state') == "present": jenkins_build.present_build() + elif module.params.get('state') == "stopped": + jenkins_build.stopped_build() else: jenkins_build.absent_build() diff --git a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py 
b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py index d0bbafcc91..3774871329 100644 --- a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py +++ b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py @@ -50,18 +50,42 @@ class JenkinsMock(): def get_build_info(self, name, build_number): return { + "building": True, "result": "SUCCESS" } - def get_build_status(self): - pass - def build_job(self, *args): return None def delete_build(self, name, build_number): return None + def stop_build(self, name, build_number): + return None + + +class JenkinsMockIdempotent(): + + def get_job_info(self, name): + return { + "nextBuildNumber": 1235 + } + + def get_build_info(self, name, build_number): + return { + "building": False, + "result": "ABORTED" + } + + def build_job(self, *args): + return None + + def delete_build(self, name, build_number): + return None + + def stop_build(self, name, build_number): + return None + class TestJenkinsBuild(unittest.TestCase): @@ -79,6 +103,16 @@ class TestJenkinsBuild(unittest.TestCase): set_module_args({}) jenkins_build.main() + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') + def test_module_fail_when_missing_build_number(self, test_deps): + test_deps.return_value = None + with self.assertRaises(AnsibleFailJson): + set_module_args({ + "name": "required-if", + "state": "stopped" + }) + jenkins_build.main() + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.JenkinsBuild.get_jenkins_connection') def test_module_create_build(self, jenkins_connection, test_deps): @@ -93,6 +127,42 @@ class TestJenkinsBuild(unittest.TestCase): }) jenkins_build.main() + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') + 
@patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.JenkinsBuild.get_jenkins_connection') + def test_module_stop_build(self, jenkins_connection, test_deps): + test_deps.return_value = None + jenkins_connection.return_value = JenkinsMock() + + with self.assertRaises(AnsibleExitJson) as return_json: + set_module_args({ + "name": "host-check", + "build_number": "1234", + "state": "stopped", + "user": "abc", + "token": "xyz" + }) + jenkins_build.main() + + self.assertTrue(return_json.exception.args[0]['changed']) + + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.JenkinsBuild.get_jenkins_connection') + def test_module_stop_build_again(self, jenkins_connection, test_deps): + test_deps.return_value = None + jenkins_connection.return_value = JenkinsMockIdempotent() + + with self.assertRaises(AnsibleExitJson) as return_json: + set_module_args({ + "name": "host-check", + "build_number": "1234", + "state": "stopped", + "user": "abc", + "password": "xyz" + }) + jenkins_build.main() + + self.assertFalse(return_json.exception.args[0]['changed']) + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.JenkinsBuild.get_jenkins_connection') def test_module_delete_build(self, jenkins_connection, test_deps): From 24dabda95b4bf6340436a445c13cf9689029b51f Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Thu, 24 Jun 2021 07:33:10 -0400 Subject: [PATCH 0156/2828] archive - refactor and bugfix (#2816) * Initial Commit * Further refinement * Fixing archive name distortion for single file zips * Applying initial review suggestions * Updating path value for single target * Adding test case for single target zip archiving * Fixing 
integration for RHEL/FreeBSD on ansible 2.x * Fixing integration second attempt * Adding changelog fragment * Updating changelog fragment --- .../fragments/2816-archive-refactor.yml | 5 + plugins/modules/files/archive.py | 719 +++++++++--------- .../targets/archive/files/sub/subfile.txt | 0 .../targets/archive/tasks/main.yml | 96 ++- .../targets/archive/tasks/remove.yml | 31 + 5 files changed, 475 insertions(+), 376 deletions(-) create mode 100644 changelogs/fragments/2816-archive-refactor.yml create mode 100644 tests/integration/targets/archive/files/sub/subfile.txt diff --git a/changelogs/fragments/2816-archive-refactor.yml b/changelogs/fragments/2816-archive-refactor.yml new file mode 100644 index 0000000000..75c30bcdfc --- /dev/null +++ b/changelogs/fragments/2816-archive-refactor.yml @@ -0,0 +1,5 @@ +--- +bugfixes: + - archive - fixed incorrect ``state`` result value documentation (https://github.com/ansible-collections/community.general/pull/2816). + - archive - fixed ``exclude_path`` values causing incorrect archive root (https://github.com/ansible-collections/community.general/pull/2816). + - archive - fixed improper file names for single file zip archives (https://github.com/ansible-collections/community.general/issues/2818). diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 8d4afa58a5..5cdd6630d1 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -44,6 +44,7 @@ options: - Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list. type: list elements: path + default: [] exclusion_patterns: description: - Glob style patterns to exclude files or directories from the resulting archive. @@ -133,11 +134,7 @@ EXAMPLES = r''' RETURN = r''' state: description: - The current state of the archived file. - If 'absent', then no source files were found and the archive does not exist. 
- If 'compress', then the file source file is in the compressed state. - If 'archive', then the source file or paths are currently archived. - If 'incomplete', then an archive was created, but not all source paths were found. + The state of the input C(path). type: str returned: always missing: @@ -162,6 +159,7 @@ expanded_exclude_paths: returned: always ''' +import abc import bz2 import glob import gzip @@ -176,12 +174,12 @@ from sys import version_info from traceback import format_exc from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_bytes, to_native -from ansible.module_utils.six import PY3 +from ansible.module_utils.common.text.converters import to_bytes, to_native +from ansible.module_utils import six LZMA_IMP_ERR = None -if PY3: +if six.PY3: try: import lzma HAS_LZMA = True @@ -196,18 +194,24 @@ else: LZMA_IMP_ERR = format_exc() HAS_LZMA = False +PATH_SEP = to_bytes(os.sep) PY27 = version_info[0:2] >= (2, 7) +STATE_ABSENT = 'absent' +STATE_ARCHIVED = 'archive' +STATE_COMPRESSED = 'compress' +STATE_INCOMPLETE = 'incomplete' -def to_b(s): + +def _to_bytes(s): return to_bytes(s, errors='surrogate_or_strict') -def to_n(s): +def _to_native(s): return to_native(s, errors='surrogate_or_strict') -def to_na(s): +def _to_native_ascii(s): return to_native(s, errors='surrogate_or_strict', encoding='ascii') @@ -215,68 +219,330 @@ def expand_paths(paths): expanded_path = [] is_globby = False for path in paths: - b_path = to_b(path) + b_path = _to_bytes(path) if b'*' in b_path or b'?' 
in b_path: e_paths = glob.glob(b_path) is_globby = True - else: e_paths = [b_path] expanded_path.extend(e_paths) return expanded_path, is_globby +def is_archive(path): + return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE) + + +def legacy_filter(path, exclusion_patterns): + return matches_exclusion_patterns(path, exclusion_patterns) + + def matches_exclusion_patterns(path, exclusion_patterns): return any(fnmatch(path, p) for p in exclusion_patterns) -def get_filter(exclusion_patterns, format): - def zip_filter(path): - return matches_exclusion_patterns(path, exclusion_patterns) +@six.add_metaclass(abc.ABCMeta) +class Archive(object): + def __init__(self, module): + self.module = module - def tar_filter(tarinfo): - return None if matches_exclusion_patterns(tarinfo.name, exclusion_patterns) else tarinfo + self.destination = _to_bytes(module.params['dest']) if module.params['dest'] else None + self.exclusion_patterns = module.params['exclusion_patterns'] or [] + self.format = module.params['format'] + self.must_archive = module.params['force_archive'] + self.remove = module.params['remove'] - return zip_filter if format == 'zip' or not PY27 else tar_filter + self.changed = False + self.destination_state = STATE_ABSENT + self.errors = [] + self.file = None + self.root = b'' + self.successes = [] + self.targets = [] + self.not_found = [] + paths = module.params['path'] + self.expanded_paths, has_globs = expand_paths(paths) + self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0] -def get_archive_contains(format): - def archive_contains(archive, name): + self.paths = list(set(self.expanded_paths) - set(self.expanded_exclude_paths)) + + if not self.paths: + module.fail_json( + path=', '.join(paths), + expanded_paths=_to_native(b', '.join(self.expanded_paths)), + expanded_exclude_paths=_to_native(b', '.join(self.expanded_exclude_paths)), + msg='Error, no source paths were found' + ) + + if not 
self.must_archive: + self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1]) + + if not self.destination and not self.must_archive: + self.destination = b'%s.%s' % (self.paths[0], _to_bytes(self.format)) + + if self.must_archive and not self.destination: + module.fail_json( + dest=_to_native(self.destination), + path=', '.join(paths), + msg='Error, must specify "dest" when archiving multiple files or trees' + ) + + def add(self, path, archive_name): try: - if format == 'zip': - archive.getinfo(name) + self._add(_to_native_ascii(path), _to_native(archive_name)) + if self.contains(_to_native(archive_name)): + self.successes.append(path) + except Exception as e: + self.errors.append('%s: %s' % (_to_native_ascii(path), _to_native(e))) + + def add_single_target(self, path): + if self.format in ('zip', 'tar'): + archive_name = re.sub(br'^%s' % re.escape(self.root), b'', path) + self.open() + self.add(path, archive_name) + self.close() + self.destination_state = STATE_ARCHIVED + else: + try: + f_out = self._open_compressed_file(_to_native_ascii(self.destination)) + with open(path, 'rb') as f_in: + shutil.copyfileobj(f_in, f_out) + f_out.close() + self.successes.append(path) + self.destination_state = STATE_COMPRESSED + except (IOError, OSError) as e: + self.module.fail_json( + path=_to_native(path), + dest=_to_native(self.destination), + msg='Unable to write to compressed file: %s' % _to_native(e), exception=format_exc() + ) + + def add_targets(self): + self.open() + try: + match_root = re.compile(br'^%s' % re.escape(self.root)) + for target in self.targets: + if os.path.isdir(target): + for directory_path, directory_names, file_names in os.walk(target, topdown=True): + if not directory_path.endswith(PATH_SEP): + directory_path += PATH_SEP + + for directory_name in directory_names: + full_path = directory_path + directory_name + archive_name = match_root.sub(b'', full_path) + self.add(full_path, archive_name) + + for file_name in 
file_names: + full_path = directory_path + file_name + archive_name = match_root.sub(b'', full_path) + self.add(full_path, archive_name) + else: + archive_name = match_root.sub(b'', target) + self.add(target, archive_name) + except Exception as e: + if self.format in ('zip', 'tar'): + archive_format = self.format else: - archive.getmember(name) + archive_format = 'tar.' + self.format + self.module.fail_json( + msg='Error when writing %s archive at %s: %s' % ( + archive_format, _to_native(self.destination), _to_native(e) + ), + exception=format_exc() + ) + self.close() + + if self.errors: + self.module.fail_json( + msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors)) + ) + + def destination_exists(self): + return self.destination and os.path.exists(self.destination) + + def destination_size(self): + return os.path.getsize(self.destination) if self.destination_exists() else 0 + + def find_targets(self): + for path in self.paths: + # Use the longest common directory name among all the files as the archive root path + if self.root == b'': + self.root = os.path.dirname(path) + PATH_SEP + else: + for i in range(len(self.root)): + if path[i] != self.root[i]: + break + + if i < len(self.root): + self.root = os.path.dirname(self.root[0:i + 1]) + + self.root += PATH_SEP + # Don't allow archives to be created anywhere within paths to be removed + if self.remove and os.path.isdir(path): + prefix = path if path.endswith(PATH_SEP) else path + PATH_SEP + if self.destination.startswith(prefix): + self.module.fail_json( + path=', '.join(self.paths), + msg='Error, created archive can not be contained in source paths when remove=true' + ) + if not os.path.lexists(path): + self.not_found.append(path) + else: + self.targets.append(path) + + def has_targets(self): + return bool(self.targets) + + def has_unfound_targets(self): + return bool(self.not_found) + + def remove_targets(self): + for path in self.successes: + try: + if 
os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + except OSError: + self.errors.append(_to_native(path)) + for path in self.paths: + try: + if os.path.isdir(path): + shutil.rmtree(path) + except OSError: + self.errors.append(_to_native(path)) + + if self.errors: + self.module.fail_json( + dest=_to_native(self.destination), msg='Error deleting some source files: ', files=self.errors + ) + + def update_permissions(self): + try: + file_args = self.module.load_file_common_arguments(self.module.params, path=self.destination) + except TypeError: + # The path argument is only supported in Ansible-base 2.10+. Fall back to + # pre-2.10 behavior for older Ansible versions. + self.module.params['path'] = self.destination + file_args = self.module.load_file_common_arguments(self.module.params) + + self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed) + + @property + def result(self): + return { + 'archived': [_to_native(p) for p in self.successes], + 'dest': _to_native(self.destination), + 'changed': self.changed, + 'arcroot': _to_native(self.root), + 'missing': [_to_native(p) for p in self.not_found], + 'expanded_paths': [_to_native(p) for p in self.expanded_paths], + 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths], + } + + def _open_compressed_file(self, path): + f = None + if self.format == 'gz': + f = gzip.open(path, 'wb') + elif self.format == 'bz2': + f = bz2.BZ2File(path, 'wb') + elif self.format == 'xz': + f = lzma.LZMAFile(path, 'wb') + else: + self.module.fail_json(msg="%s is not a valid format" % self.format) + + return f + + @abc.abstractmethod + def close(self): + pass + + @abc.abstractmethod + def contains(self, name): + pass + + @abc.abstractmethod + def open(self): + pass + + @abc.abstractmethod + def _add(self, path, archive_name): + pass + + +class ZipArchive(Archive): + def __init__(self, module): + super(ZipArchive, self).__init__(module) + + def close(self): + 
self.file.close() + + def contains(self, name): + try: + self.file.getinfo(name) except KeyError: return False - return True - return archive_contains + def open(self): + self.file = zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, True) + + def _add(self, path, archive_name): + if not legacy_filter(path, self.exclusion_patterns): + self.file.write(path, archive_name) -def get_add_to_archive(format, filter): - def add_to_zip_archive(archive_file, path, archive_name): +class TarArchive(Archive): + def __init__(self, module): + super(TarArchive, self).__init__(module) + self.fileIO = None + + def close(self): + self.file.close() + if self.format == 'xz': + with lzma.open(_to_native(self.destination), 'wb') as f: + f.write(self.fileIO.getvalue()) + self.fileIO.close() + + def contains(self, name): try: - if not filter(path): - archive_file.write(path, archive_name) - except Exception as e: - return e + self.file.getmember(name) + except KeyError: + return False + return True - return None + def open(self): + if self.format in ('gz', 'bz2'): + self.file = tarfile.open(_to_native_ascii(self.destination), 'w|' + self.format) + # python3 tarfile module allows xz format but for python2 we have to create the tarfile + # in memory and then compress it with lzma. 
+ elif self.format == 'xz': + self.fileIO = io.BytesIO() + self.file = tarfile.open(fileobj=self.fileIO, mode='w') + elif self.format == 'tar': + self.file = tarfile.open(_to_native_ascii(self.destination), 'w') + else: + self.module.fail_json(msg="%s is not a valid archive format" % self.format) - def add_to_tar_archive(archive_file, path, archive_name): - try: - if PY27: - archive_file.add(path, archive_name, recursive=False, filter=filter) - else: - archive_file.add(path, archive_name, recursive=False, exclude=filter) - except Exception as e: - return e + def _add(self, path, archive_name): + def py27_filter(tarinfo): + return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo - return None + def py26_filter(path): + return matches_exclusion_patterns(path, self.exclusion_patterns) - return add_to_zip_archive if format == 'zip' else add_to_tar_archive + if PY27: + self.file.add(path, archive_name, recursive=False, filter=py27_filter) + else: + self.file.add(path, archive_name, recursive=False, exclude=py26_filter) + + +def get_archive(module): + if module.params['format'] == 'zip': + return ZipArchive(module) + else: + return TarArchive(module) def main(): @@ -285,7 +551,7 @@ def main(): path=dict(type='list', elements='path', required=True), format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), dest=dict(type='path'), - exclude_path=dict(type='list', elements='path'), + exclude_path=dict(type='list', elements='path', default=[]), exclusion_patterns=dict(type='list', elements='path'), force_archive=dict(type='bool', default=False), remove=dict(type='bool', default=False), @@ -294,349 +560,52 @@ def main(): supports_check_mode=True, ) - params = module.params - check_mode = module.check_mode - paths = params['path'] - dest = params['dest'] - b_dest = None if not dest else to_b(dest) - exclude_paths = params['exclude_path'] - remove = params['remove'] - - fmt = params['format'] - b_fmt = to_b(fmt) - 
force_archive = params['force_archive'] - changed = False - state = 'absent' - - exclusion_patterns = params['exclusion_patterns'] or [] - - # Simple or archive file compression (inapplicable with 'zip' since it's always an archive) - b_successes = [] - - # Fail early - if not HAS_LZMA and fmt == 'xz': - module.fail_json(msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), - exception=LZMA_IMP_ERR) - module.fail_json(msg="lzma or backports.lzma is required when using xz format.") - - b_expanded_paths, globby = expand_paths(paths) - if not b_expanded_paths: - return module.fail_json( - path=', '.join(paths), - expanded_paths=to_native(b', '.join(b_expanded_paths), errors='surrogate_or_strict'), - msg='Error, no source paths were found' + if not HAS_LZMA and module.params['format'] == 'xz': + module.fail_json( + msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), exception=LZMA_IMP_ERR ) - # Only attempt to expand the exclude paths if it exists - b_expanded_exclude_paths = expand_paths(exclude_paths)[0] if exclude_paths else [] + check_mode = module.check_mode - filter = get_filter(exclusion_patterns, fmt) - archive_contains = get_archive_contains(fmt) - add_to_archive = get_add_to_archive(fmt, filter) + archive = get_archive(module) + size = archive.destination_size() + archive.find_targets() - # Only try to determine if we are working with an archive or not if we haven't set archive to true - if not force_archive: - # If we actually matched multiple files or TRIED to, then - # treat this as a multi-file archive - archive = globby or os.path.isdir(b_expanded_paths[0]) or len(b_expanded_paths) > 1 + if not archive.has_targets(): + if archive.destination_exists(): + archive.destination_state = STATE_ARCHIVED if is_archive(archive.destination) else STATE_COMPRESSED + elif archive.has_targets() and archive.must_archive: + if check_mode: + archive.changed = True + else: + archive.add_targets() + 
archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED + if archive.remove: + archive.remove_targets() + if archive.destination_size() != size: + archive.changed = True else: - archive = True - - # Default created file name (for single-file archives) to - # . - if not b_dest and not archive: - b_dest = b'%s.%s' % (b_expanded_paths[0], b_fmt) - - # Force archives to specify 'dest' - if archive and not b_dest: - module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees') - - b_sep = to_b(os.sep) - - b_archive_paths = [] - b_missing = [] - b_arcroot = b'' - - for b_path in b_expanded_paths: - # Use the longest common directory name among all the files - # as the archive root path - if b_arcroot == b'': - b_arcroot = os.path.dirname(b_path) + b_sep + if check_mode: + if not archive.destination_exists(): + archive.changed = True else: - for i in range(len(b_arcroot)): - if b_path[i] != b_arcroot[i]: - break - - if i < len(b_arcroot): - b_arcroot = os.path.dirname(b_arcroot[0:i + 1]) - - b_arcroot += b_sep - - # Don't allow archives to be created anywhere within paths to be removed - if remove and os.path.isdir(b_path): - b_path_dir = b_path - if not b_path.endswith(b'/'): - b_path_dir += b'/' - - if b_dest.startswith(b_path_dir): - module.fail_json( - path=', '.join(paths), - msg='Error, created archive can not be contained in source paths when remove=True' - ) - - if os.path.lexists(b_path) and b_path not in b_expanded_exclude_paths: - b_archive_paths.append(b_path) - else: - b_missing.append(b_path) - - # No source files were found but the named archive exists: are we 'compress' or 'archive' now? 
- if len(b_missing) == len(b_expanded_paths) and b_dest and os.path.exists(b_dest): - # Just check the filename to know if it's an archive or simple compressed file - if re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(b_dest), re.IGNORECASE): - state = 'archive' - else: - state = 'compress' - - # Multiple files, or globbiness - elif archive: - if not b_archive_paths: - # No source files were found, but the archive is there. - if os.path.lexists(b_dest): - state = 'archive' - elif b_missing: - # SOME source files were found, but not all of them - state = 'incomplete' - - archive = None - size = 0 - errors = [] - - if os.path.lexists(b_dest): - size = os.path.getsize(b_dest) - - if state != 'archive': - if check_mode: - changed = True - - else: + path = archive.paths[0] + archive.add_single_target(path) + if archive.destination_size() != size: + archive.changed = True + if archive.remove: try: - # Slightly more difficult (and less efficient!) compression using zipfile module - if fmt == 'zip': - arcfile = zipfile.ZipFile( - to_na(b_dest), - 'w', - zipfile.ZIP_DEFLATED, - True - ) - - # Easier compression using tarfile module - elif fmt == 'gz' or fmt == 'bz2': - arcfile = tarfile.open(to_na(b_dest), 'w|' + fmt) - - # python3 tarfile module allows xz format but for python2 we have to create the tarfile - # in memory and then compress it with lzma. 
- elif fmt == 'xz': - arcfileIO = io.BytesIO() - arcfile = tarfile.open(fileobj=arcfileIO, mode='w') - - # Or plain tar archiving - elif fmt == 'tar': - arcfile = tarfile.open(to_na(b_dest), 'w') - - b_match_root = re.compile(br'^%s' % re.escape(b_arcroot)) - for b_path in b_archive_paths: - if os.path.isdir(b_path): - # Recurse into directories - for b_dirpath, b_dirnames, b_filenames in os.walk(b_path, topdown=True): - if not b_dirpath.endswith(b_sep): - b_dirpath += b_sep - - for b_dirname in b_dirnames: - b_fullpath = b_dirpath + b_dirname - n_fullpath = to_na(b_fullpath) - n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict') - - err = add_to_archive(arcfile, n_fullpath, n_arcname) - if err: - errors.append('%s: %s' % (n_fullpath, to_native(err))) - - for b_filename in b_filenames: - b_fullpath = b_dirpath + b_filename - n_fullpath = to_na(b_fullpath) - n_arcname = to_n(b_match_root.sub(b'', b_fullpath)) - - err = add_to_archive(arcfile, n_fullpath, n_arcname) - if err: - errors.append('Adding %s: %s' % (to_native(b_path), to_native(err))) - - if archive_contains(arcfile, n_arcname): - b_successes.append(b_fullpath) - else: - path = to_na(b_path) - arcname = to_n(b_match_root.sub(b'', b_path)) - - err = add_to_archive(arcfile, path, arcname) - if err: - errors.append('Adding %s: %s' % (to_native(b_path), to_native(err))) - - if archive_contains(arcfile, arcname): - b_successes.append(b_path) - - except Exception as e: - expanded_fmt = 'zip' if fmt == 'zip' else ('tar.' 
+ fmt) - module.fail_json( - msg='Error when writing %s archive at %s: %s' % (expanded_fmt, dest, to_native(e)), - exception=format_exc() - ) - - if arcfile: - arcfile.close() - state = 'archive' - - if fmt == 'xz': - with lzma.open(b_dest, 'wb') as f: - f.write(arcfileIO.getvalue()) - arcfileIO.close() - - if errors: - module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors))) - - if state in ['archive', 'incomplete'] and remove: - for b_path in b_successes: - try: - if os.path.isdir(b_path): - shutil.rmtree(b_path) - elif not check_mode: - os.remove(b_path) - except OSError: - errors.append(to_native(b_path)) - - for b_path in b_expanded_paths: - try: - if os.path.isdir(b_path): - shutil.rmtree(b_path) - except OSError: - errors.append(to_native(b_path)) - - if errors: - module.fail_json(dest=dest, msg='Error deleting some source files: ', files=errors) - - # Rudimentary check: If size changed then file changed. Not perfect, but easy. - if not check_mode and os.path.getsize(b_dest) != size: - changed = True - - if b_successes and state != 'incomplete': - state = 'archive' - - # Simple, single-file compression - else: - b_path = b_expanded_paths[0] - - # No source or compressed file - if not (os.path.exists(b_path) or os.path.lexists(b_dest)): - state = 'absent' - - # if it already exists and the source file isn't there, consider this done - elif not os.path.lexists(b_path) and os.path.lexists(b_dest): - state = 'compress' - - else: - if module.check_mode: - if not os.path.exists(b_dest): - changed = True - else: - size = 0 - f_in = f_out = arcfile = None - - if os.path.lexists(b_dest): - size = os.path.getsize(b_dest) - - try: - if fmt == 'zip': - arcfile = zipfile.ZipFile( - to_na(b_dest), - 'w', - zipfile.ZIP_DEFLATED, - True - ) - arcfile.write( - to_na(b_path), - to_n(b_path[len(b_arcroot):]) - ) - arcfile.close() - state = 'archive' # because all zip files are archives - elif fmt == 'tar': - arcfile = 
tarfile.open(to_na(b_dest), 'w') - arcfile.add(to_na(b_path)) - arcfile.close() - else: - f_in = open(b_path, 'rb') - - n_dest = to_na(b_dest) - if fmt == 'gz': - f_out = gzip.open(n_dest, 'wb') - elif fmt == 'bz2': - f_out = bz2.BZ2File(n_dest, 'wb') - elif fmt == 'xz': - f_out = lzma.LZMAFile(n_dest, 'wb') - else: - raise OSError("Invalid format") - - shutil.copyfileobj(f_in, f_out) - - b_successes.append(b_path) - + os.remove(path) except OSError as e: module.fail_json( - path=to_native(b_path), - dest=dest, - msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc() + path=_to_native(path), + msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc() ) - if arcfile: - arcfile.close() - if f_in: - f_in.close() - if f_out: - f_out.close() + if archive.destination_exists(): + archive.update_permissions() - # Rudimentary check: If size changed then file changed. Not perfect, but easy. - if os.path.getsize(b_dest) != size: - changed = True - - state = 'compress' - - if remove and not check_mode: - try: - os.remove(b_path) - - except OSError as e: - module.fail_json( - path=to_native(b_path), - msg='Unable to remove source file: %s' % to_native(e), exception=format_exc() - ) - - try: - file_args = module.load_file_common_arguments(params, path=b_dest) - except TypeError: - # The path argument is only supported in Ansible-base 2.10+. Fall back to - # pre-2.10 behavior for older Ansible versions. 
- params['path'] = b_dest - file_args = module.load_file_common_arguments(params) - - if not check_mode: - changed = module.set_fs_attributes_if_different(file_args, changed) - - module.exit_json( - archived=[to_n(p) for p in b_successes], - dest=dest, - changed=changed, - state=state, - arcroot=to_n(b_arcroot), - missing=[to_n(p) for p in b_missing], - expanded_paths=[to_n(p) for p in b_expanded_paths], - expanded_exclude_paths=[to_n(p) for p in b_expanded_exclude_paths], - ) + module.exit_json(**archive.result) if __name__ == '__main__': diff --git a/tests/integration/targets/archive/files/sub/subfile.txt b/tests/integration/targets/archive/files/sub/subfile.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml index 761f9eb7b8..35a8f1edf3 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -79,6 +79,8 @@ - foo.txt - bar.txt - empty.txt + - sub + - sub/subfile.txt - name: archive using gz archive: @@ -366,7 +368,7 @@ - name: Test exclusion_patterns option archive: path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/test-archive-exclustion-patterns.tgz" + dest: "{{ output_dir }}/test-archive-exclusion-patterns.tgz" exclusion_patterns: b?r.* register: exclusion_patterns_result @@ -376,6 +378,98 @@ - exclusion_patterns_result is changed - "'bar.txt' not in exclusion_patterns_result.archived" +- name: Test that excluded paths do not influence archive root + archive: + path: + - "{{ output_dir }}/sub/subfile.txt" + - "{{ output_dir }}" + exclude_path: + - "{{ output_dir }}" + dest: "{{ output_dir }}/test-archive-root.tgz" + register: archive_root_result + +- name: Assert that excluded paths do not influence archive root + assert: + that: + - archive_root_result.arcroot != output_dir + +- name: Remove archive root test + file: + path: "{{ output_dir }}/test-archive-root.tgz" + state: 
absent + +- name: Test Single Target with format={{ item }} + archive: + path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/test-single-target.{{ item }}" + format: "{{ item }}" + register: "single_target_test" + loop: + - zip + - tar + - gz + - bz2 + - xz + +# Dummy tests until ``dest_state`` result value can be implemented +- name: Assert that single target tests are effective + assert: + that: + - single_target_test.results[0] is changed + - single_target_test.results[1] is changed + - single_target_test.results[2] is changed + - single_target_test.results[3] is changed + - single_target_test.results[4] is changed + +- name: Retrieve contents of single target archives + ansible.builtin.unarchive: + src: "{{ output_dir }}/test-single-target.zip" + dest: . + list_files: true + check_mode: true + ignore_errors: true + register: single_target_test_contents + +- name: Assert that file names in single-file zip archives are preserved + assert: + that: + - "'oo.txt' not in single_target_test_contents.files" + - "'foo.txt' in single_target_test_contents.files" + # ``unarchive`` fails for RHEL and FreeBSD on ansible 2.x + when: single_target_test_contents is success and single_target_test_contents is not skipped + +- name: Remove single target test with format={{ item }} + file: + path: "{{ output_dir }}/test-single-target.{{ item }}" + state: absent + loop: + - zip + - tar + - gz + - bz2 + - xz + +- name: Test that missing files result in incomplete state + archive: + path: + - "{{ output_dir }}/*.txt" + - "{{ output_dir }}/dne.txt" + exclude_path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/test-incomplete-archive.tgz" + register: incomplete_archive_result + +- name: Assert that incomplete archive has incomplete state + assert: + that: + - incomplete_archive_result is changed + - "'{{ output_dir }}/dne.txt' in incomplete_archive_result.missing" + - "'{{ output_dir }}/foo.txt' not in incomplete_archive_result.missing" + +- name: Remove incomplete archive 
+ file: + path: "{{ output_dir }}/test-incomplete-archive.tgz" + state: absent + - name: Remove backports.lzma if previously installed (pip) pip: name=backports.lzma state=absent when: backports_lzma_pip is changed diff --git a/tests/integration/targets/archive/tasks/remove.yml b/tests/integration/targets/archive/tasks/remove.yml index 44d2024068..9600eb9f6d 100644 --- a/tests/integration/targets/archive/tasks/remove.yml +++ b/tests/integration/targets/archive/tasks/remove.yml @@ -117,6 +117,37 @@ - name: verify that excluded file is still present file: path={{ output_dir }}/tmpdir/empty.txt state=file +- name: prep our files in tmpdir again + copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} + with_items: + - foo.txt + - bar.txt + - empty.txt + - sub + - sub/subfile.txt + +- name: archive using gz and remove src directory + archive: + path: + - "{{ output_dir }}/tmpdir/*.txt" + - "{{ output_dir }}/tmpdir/sub/*" + dest: "{{ output_dir }}/archive_remove_04.gz" + format: gz + remove: yes + exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + register: archive_remove_result_04 + +- debug: msg="{{ archive_remove_result_04 }}" + +- name: verify that the files archived + file: path={{ output_dir }}/archive_remove_04.gz state=file + +- name: remove our gz + file: path="{{ output_dir }}/archive_remove_04.gz" state=absent + +- name: verify that excluded sub file is still present + file: path={{ output_dir }}/tmpdir/sub/subfile.txt state=file + - name: remove temporary directory file: path: "{{ output_dir }}/tmpdir" From 24c5d4320f64ec3ec6a155e6be0a1b5f8be08a5a Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Thu, 24 Jun 2021 13:35:00 +0200 Subject: [PATCH 0157/2828] Keycloak: add authentication management (#2456) * Allow keycloak_group.py to take token as parameter for the authentification Refactor get_token to pass module.params + Documentation Fix unit test and add new one for token as param Fix identation 
Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_clienttemplate.py Co-authored-by: Felix Fontein Allow keycloak_group.py to take token as parameter for the authentification Refactor get_token to pass module.params + Documentation * Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Check if base_url is None before to check format Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Amin Vakil Update plugins/modules/identity/keycloak/keycloak_clienttemplate.py Co-authored-by: Amin Vakil Switch to modern syntax for the documentation (e.g. community.general.keycloak_client) Update keycloak_client.py Update keycloak_clienttemplate.py Add keycloak_authentication module to manage authentication Minor fixex Fix indent * Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Removing variable ANSIBLE_METADATA from beginning of file Minor fix Refactoring create_or_update_executions :add change_execution_priority function Refactoring create_or_update_executions :add create_execution function Refactoring create_or_update_executions: add create_subflow Refactoring create_or_update_executions: add update_authentication_executions function Minor fix * Using FQCN for the examples Minor fix Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update 
plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Refactoring: rename isDictEquals into is_dict_equals Refactoring: rename variable as authentication_flow Refactoring: rename variable as new_name Refactoring: rename variable as flow_list Refactoring: rename variable as new_flow Refactoring: changing construction of dict newAuthenticationRepresentation and renaming as new_auth_repr Minor fix * Refactoring: rename variables with correct Python syntax (auth_repr, exec_repr) Move create_or_update_executions function from keycloak.py to keycloak_authentication.py Minor fix Remove mock_create_or_update_executions not needed anymore Fix unit test Update plugins/module_utils/identity/keycloak/keycloak.py is_dict_equals function return True if value1 empty Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Rename is_dict_equal as is_struct_included and rename params as struct1 and struct2 Rename variables according to Python naming conventions Refactoring: add find_exec_in_executions function in keycloak_authentication to remove code duplication typo Add blank line Add required parameter, either creds or token Typo try/except only surround for loop containing struct2[key] Add sub-options to meta_args assigment of result['changed'] after if-elif-else block Fix CI error: parameter-type-not-in-doc Fix unit test: none value 
excluded from comparison Minor fix Simplify is_struct_included function Replace 'type(..) is' by isinstance(..) Remove redundant required=True and redundant parenthesis Add check_mode, check if value is None (None value added by argument spec checker) Apply suggestions from code review Update plugins/modules/identity/keycloak/keycloak_authentication.py * Update plugins/modules/identity/keycloak/keycloak_authentication.py * Add index paramter to configure the priority order of the execution * Minor fix: authenticationConfig dict instead of str Co-authored-by: Felix Fontein --- .../identity/keycloak/keycloak.py | 319 ++++++++- .../keycloak/keycloak_authentication.py | 383 +++++++++++ plugins/modules/keycloak_authentication.py | 1 + .../keycloak/test_keycloak_authentication.py | 622 ++++++++++++++++++ 4 files changed, 1323 insertions(+), 2 deletions(-) create mode 100644 plugins/modules/identity/keycloak/keycloak_authentication.py create mode 120000 plugins/modules/keycloak_authentication.py create mode 100644 tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index c0a1c2a158..ae002a7c94 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -33,9 +33,9 @@ import json import traceback from ansible.module_utils.urls import open_url -from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.six.moves.urllib.parse import urlencode, quote from ansible.module_utils.six.moves.urllib.error import HTTPError -from ansible.module_utils._text import to_native +from ansible.module_utils._text import to_native, to_text URL_REALMS = "{url}/admin/realms" URL_REALM = "{url}/admin/realms/{realm}" @@ -51,6 +51,17 @@ URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates" URL_GROUPS = "{url}/admin/realms/{realm}/groups" URL_GROUP = 
"{url}/admin/realms/{realm}/groups/{groupid}" +URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows" +URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}" +URL_AUTHENTICATION_FLOW_COPY = "{url}/admin/realms/{realm}/authentication/flows/{copyfrom}/copy" +URL_AUTHENTICATION_FLOW_EXECUTIONS = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions" +URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/execution" +URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/flow" +URL_AUTHENTICATION_EXECUTION_CONFIG = "{url}/admin/realms/{realm}/authentication/executions/{id}/config" +URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/raise-priority" +URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority" +URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}" + def keycloak_argument_spec(): """ @@ -132,6 +143,59 @@ def get_token(module_params): } +def is_struct_included(struct1, struct2, exclude=None): + """ + This function compare if the first parameter structure is included in the second. + The function use every elements of struct1 and validates they are present in the struct2 structure. + The two structure does not need to be equals for that function to return true. + Each elements are compared recursively. + :param struct1: + type: + dict for the initial call, can be dict, list, bool, int or str for recursive calls + description: + reference structure + :param struct2: + type: + dict for the initial call, can be dict, list, bool, int or str for recursive calls + description: + structure to compare with first parameter. + :param exclude: + type: + list + description: + Key to exclude from the comparison. 
+ default: None + :return: + type: + bool + description: + Return True if all element of dict 1 are present in dict 2, return false otherwise. + """ + if isinstance(struct1, list) and isinstance(struct2, list): + for item1 in struct1: + if isinstance(item1, (list, dict)): + for item2 in struct2: + if not is_struct_included(item1, item2, exclude): + return False + else: + if item1 not in struct2: + return False + return True + elif isinstance(struct1, dict) and isinstance(struct2, dict): + try: + for key in struct1: + if not (exclude and key in exclude): + if not is_struct_included(struct1[key], struct2[key], exclude): + return False + return True + except KeyError: + return False + elif isinstance(struct1, bool) and isinstance(struct2, bool): + return struct1 == struct2 + else: + return to_text(struct1, 'utf-8') == to_text(struct2, 'utf-8') + + class KeycloakAPI(object): """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which is obtained through OpenID connect @@ -571,3 +635,254 @@ class KeycloakAPI(object): except Exception as e: self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e))) + + def get_authentication_flow_by_alias(self, alias, realm='master'): + """ + Get an authentication flow by it's alias + :param alias: Alias of the authentication flow to get. + :param realm: Realm. + :return: Authentication flow representation. 
+ """ + try: + authentication_flow = {} + # Check if the authentication flow exists on the Keycloak serveraders + authentications = json.load(open_url(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET', headers=self.restheaders)) + for authentication in authentications: + if authentication["alias"] == alias: + authentication_flow = authentication + break + return authentication_flow + except Exception as e: + self.module.fail_json(msg="Unable get authentication flow %s: %s" % (alias, str(e))) + + def delete_authentication_flow_by_id(self, id, realm='master'): + """ + Delete an authentication flow from Keycloak + :param id: id of authentication flow to be deleted + :param realm: realm of client to be deleted + :return: HTTPResponse object on success + """ + flow_url = URL_AUTHENTICATION_FLOW.format(url=self.baseurl, realm=realm, id=id) + + try: + return open_url(flow_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not delete authentication flow %s in realm %s: %s' + % (id, realm, str(e))) + + def copy_auth_flow(self, config, realm='master'): + """ + Create a new authentication flow from a copy of another. + :param config: Representation of the authentication flow to create. + :param realm: Realm. + :return: Representation of the new authentication flow. 
+ """ + try: + new_name = dict( + newName=config["alias"] + ) + open_url( + URL_AUTHENTICATION_FLOW_COPY.format( + url=self.baseurl, + realm=realm, + copyfrom=quote(config["copyFrom"])), + method='POST', + headers=self.restheaders, + data=json.dumps(new_name)) + flow_list = json.load( + open_url( + URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, + realm=realm), + method='GET', + headers=self.restheaders)) + for flow in flow_list: + if flow["alias"] == config["alias"]: + return flow + return None + except Exception as e: + self.module.fail_json(msg='Could not copy authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) + + def create_empty_auth_flow(self, config, realm='master'): + """ + Create a new empty authentication flow. + :param config: Representation of the authentication flow to create. + :param realm: Realm. + :return: Representation of the new authentication flow. + """ + try: + new_flow = dict( + alias=config["alias"], + providerId=config["providerId"], + description=config["description"], + topLevel=True + ) + open_url( + URL_AUTHENTICATION_FLOWS.format( + url=self.baseurl, + realm=realm), + method='POST', + headers=self.restheaders, + data=json.dumps(new_flow)) + flow_list = json.load( + open_url( + URL_AUTHENTICATION_FLOWS.format( + url=self.baseurl, + realm=realm), + method='GET', + headers=self.restheaders)) + for flow in flow_list: + if flow["alias"] == config["alias"]: + return flow + return None + except Exception as e: + self.module.fail_json(msg='Could not create empty authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) + + def update_authentication_executions(self, flowAlias, updatedExec, realm='master'): + """ Update authentication executions + + :param flowAlias: name of the parent flow + :param updatedExec: JSON containing updated execution + :return: HTTPResponse object on success + """ + try: + open_url( + URL_AUTHENTICATION_FLOW_EXECUTIONS.format( + url=self.baseurl, + realm=realm, + 
flowalias=quote(flowAlias)), + method='PUT', + headers=self.restheaders, + data=json.dumps(updatedExec)) + except Exception as e: + self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e))) + + def add_authenticationConfig_to_execution(self, executionId, authenticationConfig, realm='master'): + """ Add autenticatorConfig to the execution + + :param executionId: id of execution + :param authenticationConfig: config to add to the execution + :return: HTTPResponse object on success + """ + try: + open_url( + URL_AUTHENTICATION_EXECUTION_CONFIG.format( + url=self.baseurl, + realm=realm, + id=executionId), + method='POST', + headers=self.restheaders, + data=json.dumps(authenticationConfig)) + except Exception as e: + self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e))) + + def create_subflow(self, subflowName, flowAlias, realm='master'): + """ Create new sublow on the flow + + :param subflowName: name of the subflow to create + :param flowAlias: name of the parent flow + :return: HTTPResponse object on success + """ + try: + newSubFlow = {} + newSubFlow["alias"] = subflowName + newSubFlow["provider"] = "registration-page-form" + newSubFlow["type"] = "basic-flow" + open_url( + URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format( + url=self.baseurl, + realm=realm, + flowalias=quote(flowAlias)), + method='POST', + headers=self.restheaders, + data=json.dumps(newSubFlow)) + except Exception as e: + self.module.fail_json(msg="Unable to create new subflow %s: %s" % (subflowName, str(e))) + + def create_execution(self, execution, flowAlias, realm='master'): + """ Create new execution on the flow + + :param execution: name of execution to create + :param flowAlias: name of the parent flow + :return: HTTPResponse object on success + """ + try: + newExec = {} + newExec["provider"] = execution["providerId"] + newExec["requirement"] = execution["requirement"] + open_url( + 
URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format( + url=self.baseurl, + realm=realm, + flowalias=quote(flowAlias)), + method='POST', + headers=self.restheaders, + data=json.dumps(newExec)) + except Exception as e: + self.module.fail_json(msg="Unable to create new execution %s: %s" % (execution["provider"], str(e))) + + def change_execution_priority(self, executionId, diff, realm='master'): + """ Raise or lower execution priority of diff time + + :param executionId: id of execution to lower priority + :param realm: realm the client is in + :param diff: Integer number, raise of diff time if positive lower of diff time if negative + :return: HTTPResponse object on success + """ + try: + if diff > 0: + for i in range(diff): + open_url( + URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY.format( + url=self.baseurl, + realm=realm, + id=executionId), + method='POST', + headers=self.restheaders) + elif diff < 0: + for i in range(-diff): + open_url( + URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY.format( + url=self.baseurl, + realm=realm, + id=executionId), + method='POST', + headers=self.restheaders) + except Exception as e: + self.module.fail_json(msg="Unable to change execution priority %s: %s" % (executionId, str(e))) + + def get_executions_representation(self, config, realm='master'): + """ + Get a representation of the executions for an authentication flow. 
+ :param config: Representation of the authentication flow + :param realm: Realm + :return: Representation of the executions + """ + try: + # Get executions created + executions = json.load( + open_url( + URL_AUTHENTICATION_FLOW_EXECUTIONS.format( + url=self.baseurl, + realm=realm, + flowalias=quote(config["alias"])), + method='GET', + headers=self.restheaders)) + for execution in executions: + if "authenticationConfig" in execution: + execConfigId = execution["authenticationConfig"] + execConfig = json.load( + open_url( + URL_AUTHENTICATION_CONFIG.format( + url=self.baseurl, + realm=realm, + id=execConfigId), + method='GET', + headers=self.restheaders)) + execution["authenticationConfig"] = execConfig + return executions + except Exception as e: + self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) diff --git a/plugins/modules/identity/keycloak/keycloak_authentication.py b/plugins/modules/identity/keycloak/keycloak_authentication.py new file mode 100644 index 0000000000..98b6378dac --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_authentication.py @@ -0,0 +1,383 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, INSPQ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_authentication +short_description: Configure authentication in Keycloak +description: + - This module actually can only make a copy of an existing authentication flow, add an execution to it and configure it. + - It can also delete the flow. +version_added: "3.3.0" +options: + realm: + description: + - The name of the realm in which is the authentication. + required: true + type: str + alias: + description: + - Alias for the authentication flow. 
+ required: true + type: str + description: + description: + - Description of the flow. + type: str + providerId: + description: + - C(providerId) for the new flow when not copied from an existing flow. + type: str + copyFrom: + description: + - C(flowAlias) of the authentication flow to use for the copy. + type: str + authenticationExecutions: + description: + - Configuration structure for the executions. + type: list + elements: dict + suboptions: + providerId: + description: + - C(providerID) for the new flow when not copied from an existing flow. + type: str + displayName: + description: + - Name of the execution or subflow to create or update. + type: str + requirement: + description: + - Control status of the subflow or execution. + choices: [ "REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL" ] + type: str + flowAlias: + description: + - Alias of parent flow. + type: str + authenticationConfig: + description: + - Describe the config of the authentication. + type: dict + index: + description: + - Priority order of the execution. + type: int + state: + description: + - Control if the authentication flow must exists or not. + choices: [ "present", "absent" ] + default: present + type: str + force: + type: bool + default: false + description: + - If C(true), allows to remove the authentication flow and recreate it. +extends_documentation_fragment: +- community.general.keycloak + +author: + - Philippe Gauthier (@elfelip) + - Gaëtan Daubresse (@Gaetan2907) +''' + +EXAMPLES = ''' + - name: Create an authentication flow from first broker login and add an execution to it. 
+ community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + copyFrom: "first broker login" + authenticationExecutions: + - providerId: "test-execution1" + requirement: "REQUIRED" + authenticationConfig: + alias: "test.execution1.property" + config: + test1.property: "value" + - providerId: "test-execution2" + requirement: "REQUIRED" + authenticationConfig: + alias: "test.execution2.property" + config: + test2.property: "value" + state: present + + - name: Re-create the authentication flow + community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + copyFrom: "first broker login" + authenticationExecutions: + - providerId: "test-provisioning" + requirement: "REQUIRED" + authenticationConfig: + alias: "test.provisioning.property" + config: + test.provisioning.property: "value" + state: present + force: true + + - name: Create an authentication flow with subflow containing an execution. + community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + copyFrom: "first broker login" + authenticationExecutions: + - providerId: "test-execution1" + requirement: "REQUIRED" + - displayName: "New Subflow" + requirement: "REQUIRED" + - providerId: "auth-cookie" + requirement: "REQUIRED" + flowAlias: "New Sublow" + state: present + + - name: Remove authentication. 
+ community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + state: absent +''' + +RETURN = ''' +flow: + description: JSON representation for the authentication. + returned: on success + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak \ + import KeycloakAPI, camel, keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible.module_utils.basic import AnsibleModule + + +def find_exec_in_executions(searched_exec, executions): + """ + Search if exec is contained in the executions. + :param searched_exec: Execution to search for. + :param executions: List of executions. + :return: Index of the execution, -1 if not found.. + """ + for i, existing_exec in enumerate(executions, start=0): + if ("providerId" in existing_exec and "providerId" in searched_exec and + existing_exec["providerId"] == searched_exec["providerId"] or + "displayName" in existing_exec and "displayName" in searched_exec and + existing_exec["displayName"] == searched_exec["displayName"]): + return i + return -1 + + +def create_or_update_executions(kc, config, realm='master'): + """ + Create or update executions for an authentication flow. + :param kc: Keycloak API access. + :param config: Representation of the authentication flow including it's executions. + :param realm: Realm + :return: True if executions have been modified. False otherwise. 
+ """ + try: + changed = False + if "authenticationExecutions" in config: + for new_exec_index, new_exec in enumerate(config["authenticationExecutions"], start=0): + if new_exec["index"] is not None: + new_exec_index = new_exec["index"] + # Get existing executions on the Keycloak server for this alias + existing_executions = kc.get_executions_representation(config, realm=realm) + exec_found = False + # Get flowalias parent if given + if new_exec["flowAlias"] is not None: + flow_alias_parent = new_exec["flowAlias"] + else: + flow_alias_parent = config["alias"] + # Check if same providerId or displayName name between existing and new execution + exec_index = find_exec_in_executions(new_exec, existing_executions) + if exec_index != -1: + # Remove key that doesn't need to be compared with existing_exec + exclude_key = ["flowAlias"] + for index_key, key in enumerate(new_exec, start=0): + if new_exec[key] is None: + exclude_key.append(key) + # Compare the executions to see if it need changes + if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index: + changed = True + elif new_exec["providerId"] is not None: + kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm) + changed = True + elif new_exec["displayName"] is not None: + kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm) + changed = True + if changed: + # Get existing executions on the Keycloak server for this alias + existing_executions = kc.get_executions_representation(config, realm=realm) + exec_index = find_exec_in_executions(new_exec, existing_executions) + if exec_index != -1: + # Update the existing execution + updated_exec = { + "id": existing_executions[exec_index]["id"] + } + # add the execution configuration + if new_exec["authenticationConfig"] is not None: + kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm) + for key in new_exec: + # remove 
unwanted key for the next API call + if key != "flowAlias" and key != "authenticationConfig": + updated_exec[key] = new_exec[key] + if new_exec["requirement"] is not None: + kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm) + diff = exec_index - new_exec_index + kc.change_execution_priority(updated_exec["id"], diff, realm=realm) + return changed + except Exception as e: + kc.module.fail_json(msg='Could not create or update executions for authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) + + +def main(): + """ + Module execution + :return: + """ + argument_spec = keycloak_argument_spec() + meta_args = dict( + realm=dict(type='str', required=True), + alias=dict(type='str', required=True), + providerId=dict(type='str'), + description=dict(type='str'), + copyFrom=dict(type='str'), + authenticationExecutions=dict(type='list', elements='dict', + options=dict( + providerId=dict(type='str'), + displayName=dict(type='str'), + requirement=dict(choices=["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"], type='str'), + flowAlias=dict(type='str'), + authenticationConfig=dict(type='dict'), + index=dict(type='int'), + )), + state=dict(choices=["absent", "present"], default='present'), + force=dict(type='bool', default=False), + ) + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']]) + ) + + result = dict(changed=False, msg='', flow={}) + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + realm = module.params.get('realm') + state = module.params.get('state') + force = module.params.get('force') + + new_auth_repr = { + "alias": module.params.get("alias"), 
+ "copyFrom": module.params.get("copyFrom"), + "providerId": module.params.get("providerId"), + "authenticationExecutions": module.params.get("authenticationExecutions"), + "description": module.params.get("description"), + "builtIn": module.params.get("builtIn"), + "subflow": module.params.get("subflow"), + } + + auth_repr = kc.get_authentication_flow_by_alias(alias=new_auth_repr["alias"], realm=realm) + if auth_repr == {}: # Authentication flow does not exist + if state == 'present': # If desired state is present + result['changed'] = True + if module._diff: + result['diff'] = dict(before='', after=new_auth_repr) + if module.check_mode: + module.exit_json(**result) + # If copyFrom is defined, create authentication flow from a copy + if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: + auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) + else: # Create an empty authentication flow + auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) + # If the authentication still not exist on the server, raise an exception. + if auth_repr is None: + result['msg'] = "Authentication just created not found: " + str(new_auth_repr) + module.fail_json(**result) + # Configure the executions for the flow + create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) + # Get executions created + exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) + if exec_repr is not None: + auth_repr["authenticationExecutions"] = exec_repr + result['flow'] = auth_repr + elif state == 'absent': # If desired state is absent. 
+ if module._diff: + result['diff'] = dict(before='', after='') + result['msg'] = new_auth_repr["alias"] + ' absent' + else: # The authentication flow already exist + if state == 'present': # if desired state is present + if force: # If force option is true + # Delete the actual authentication flow + result['changed'] = True + if module._diff: + result['diff'] = dict(before=auth_repr, after=new_auth_repr) + if module.check_mode: + module.exit_json(**result) + kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) + # If copyFrom is defined, create authentication flow from a copy + if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: + auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) + else: # Create an empty authentication flow + auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) + # If the authentication still not exist on the server, raise an exception. + if auth_repr is None: + result['msg'] = "Authentication just created not found: " + str(new_auth_repr) + module.fail_json(**result) + # Configure the executions for the flow + if module.check_mode: + module.exit_json(**result) + if create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm): + result['changed'] = True + # Get executions created + exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) + if exec_repr is not None: + auth_repr["authenticationExecutions"] = exec_repr + result['flow'] = auth_repr + elif state == 'absent': # If desired state is absent + result['changed'] = True + # Delete the authentication flow alias. 
+ if module._diff: + result['diff'] = dict(before=auth_repr, after='') + if module.check_mode: + module.exit_json(**result) + kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) + result['msg'] = 'Authentication flow: {alias} id: {id} is deleted'.format(alias=new_auth_repr['alias'], + id=auth_repr["id"]) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py new file mode 120000 index 0000000000..e27a180a01 --- /dev/null +++ b/plugins/modules/keycloak_authentication.py @@ -0,0 +1 @@ +./identity/keycloak/keycloak_authentication.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py new file mode 100644 index 0000000000..91e34eea7b --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py @@ -0,0 +1,622 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_authentication + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_authentication_flow_by_alias=None, copy_auth_flow=None, create_empty_auth_flow=None, + 
get_executions_representation=None, delete_authentication_flow_by_id=None): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + obj = keycloak_authentication.KeycloakAPI + with patch.object(obj, 'get_authentication_flow_by_alias', side_effect=get_authentication_flow_by_alias) \ + as mock_get_authentication_flow_by_alias: + with patch.object(obj, 'copy_auth_flow', side_effect=copy_auth_flow) \ + as mock_copy_auth_flow: + with patch.object(obj, 'create_empty_auth_flow', side_effect=create_empty_auth_flow) \ + as mock_create_empty_auth_flow: + with patch.object(obj, 'get_executions_representation', return_value=get_executions_representation) \ + as mock_get_executions_representation: + with patch.object(obj, 'delete_authentication_flow_by_id', side_effect=delete_authentication_flow_by_id) \ + as mock_delete_authentication_flow_by_id: + yield mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, \ + mock_get_executions_representation, mock_delete_authentication_flow_by_id + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def 
_mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakAuthentication(ModuleTestCase): + def setUp(self): + super(TestKeycloakAuthentication, self).setUp() + self.module = keycloak_authentication + + def test_create_auth_flow_from_copy(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'copyFrom': 'first broker login', + 'authenticationExecutions': [ + { + 'providerId': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + }, + ], + 'state': 'present', + } + return_value_auth_flow_before = [{}] + return_value_copied = [{ + 'id': '2ac059fc-c548-414f-9c9e-84d42bd4944e', + 'alias': 'first broker login', + 'description': 'browser based authentication', + 'providerId': 'basic-flow', + 'topLevel': True, + 'builtIn': False, + 'authenticationExecutions': [ + { + 'authenticator': 'auth-cookie', + 'requirement': 'ALTERNATIVE', + 'priority': 10, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + ], + }] 
+ return_value_executions_after = [ + { + 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Identity Provider Redirector', + 'requirementChoices': ['REQUIRED', 'DISABLED'], + 'configurable': True, + 'providerId': 'identity-provider-redirector', + 'level': 0, + 'index': 0 + }, + { + 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Cookie', + 'requirementChoices': [ + 'REQUIRED', + 'ALTERNATIVE', + 'DISABLED' + ], + 'configurable': False, + 'providerId': 'auth-cookie', + 'level': 0, + 'index': 1 + }, + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, copy_auth_flow=return_value_copied, + get_executions_representation=return_value_executions_after) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 1) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_auth_flow_from_copy_idempotency(self): + """Add an already existing authentication flow from copy of an other flow to test idempotency""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 
'realm-name', + 'alias': 'Test create authentication flow copy', + 'copyFrom': 'first broker login', + 'authenticationExecutions': [ + { + 'providerId': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + }, + ], + 'state': 'present', + } + return_value_auth_flow_before = [{ + 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', + 'alias': 'Test create authentication flow copy', + 'description': '', + 'providerId': 'basic-flow', + 'topLevel': True, + 'builtIn': False, + 'authenticationExecutions': [ + { + 'authenticator': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + 'priority': 0, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + { + 'authenticator': 'auth-cookie', + 'requirement': 'ALTERNATIVE', + 'priority': 0, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + ], + }] + return_value_executions_after = [ + { + 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Identity Provider Redirector', + 'requirementChoices': ['REQUIRED', 'DISABLED'], + 'configurable': True, + 'providerId': 'identity-provider-redirector', + 'level': 0, + 'index': 0 + }, + { + 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Cookie', + 'requirementChoices': [ + 'REQUIRED', + 'ALTERNATIVE', + 'DISABLED' + ], + 'configurable': False, + 'providerId': 'auth-cookie', + 'level': 0, + 'index': 1 + }, + ] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + 
self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_auth_flow_without_copy(self): + """Add authentication without copy""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'authenticationExecutions': [ + { + 'providerId': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + 'authenticationConfig': { + 'alias': 'name', + 'config': { + 'defaultProvider': 'value' + }, + }, + }, + ], + 'state': 'present', + } + return_value_auth_flow_before = [{}] + return_value_created_empty_flow = [ + { + "alias": "Test of the keycloak_auth module", + "authenticationExecutions": [], + "builtIn": False, + "description": "", + "id": "513f5baa-cc42-47bf-b4b6-1d23ccc0a67f", + "providerId": "basic-flow", + "topLevel": True + }, + ] + return_value_executions_after = [ + { + 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Identity Provider Redirector', + 'requirementChoices': ['REQUIRED', 'DISABLED'], + 'configurable': True, + 'providerId': 'identity-provider-redirector', + 'level': 0, + 'index': 0 + }, + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after, 
create_empty_auth_flow=return_value_created_empty_flow) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_update_auth_flow_adding_exec(self): + """Update authentication flow by adding execution""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'authenticationExecutions': [ + { + 'providerId': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + 'authenticationConfig': { + 'alias': 'name', + 'config': { + 'defaultProvider': 'value' + }, + }, + }, + ], + 'state': 'present', + } + return_value_auth_flow_before = [{ + 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', + 'alias': 'Test create authentication flow copy', + 'description': '', + 'providerId': 'basic-flow', + 'topLevel': True, + 'builtIn': False, + 'authenticationExecutions': [ + { + 'authenticator': 'auth-cookie', + 'requirement': 'ALTERNATIVE', + 'priority': 0, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + ], + }] + return_value_executions_after = [ + { + 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', + 'requirement': 'DISABLED', + 'displayName': 'Identity Provider 
Redirector', + 'requirementChoices': ['REQUIRED', 'DISABLED'], + 'configurable': True, + 'providerId': 'identity-provider-redirector', + 'level': 0, + 'index': 0 + }, + { + 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Cookie', + 'requirementChoices': [ + 'REQUIRED', + 'ALTERNATIVE', + 'DISABLED' + ], + 'configurable': False, + 'providerId': 'auth-cookie', + 'level': 0, + 'index': 1 + }, + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_auth_flow(self): + """Delete authentication flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'state': 'absent', + } + return_value_auth_flow_before = [{ + 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', + 'alias': 'Test create authentication flow copy', + 'description': '', + 'providerId': 'basic-flow', + 
'topLevel': True, + 'builtIn': False, + 'authenticationExecutions': [ + { + 'authenticator': 'auth-cookie', + 'requirement': 'ALTERNATIVE', + 'priority': 0, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + ], + }] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 0) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_auth_flow_idempotency(self): + """Delete second time authentication flow to test idempotency""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'state': 'absent', + } + return_value_auth_flow_before = [{}] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with 
self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 0) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_force_update_auth_flow(self): + """Delete authentication flow and create new one""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'authenticationExecutions': [ + { + 'providerId': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + 'authenticationConfig': { + 'alias': 'name', + 'config': { + 'defaultProvider': 'value' + }, + }, + }, + ], + 'state': 'present', + 'force': 'yes', + } + return_value_auth_flow_before = [{ + 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', + 'alias': 'Test create authentication flow copy', + 'description': '', + 'providerId': 'basic-flow', + 'topLevel': True, + 'builtIn': False, + 'authenticationExecutions': [ + { + 'authenticator': 'auth-cookie', + 'requirement': 'ALTERNATIVE', + 'priority': 0, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + ], + }] + return_value_created_empty_flow = [ + { + "alias": "Test of the keycloak_auth module", + "authenticationExecutions": [], + "builtIn": False, + "description": "", + "id": "513f5baa-cc42-47bf-b4b6-1d23ccc0a67f", + "providerId": "basic-flow", + "topLevel": True + }, + ] + return_value_executions_after = [ + { + 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', + 'requirement': 
'DISABLED', + 'displayName': 'Identity Provider Redirector', + 'requirementChoices': ['REQUIRED', 'DISABLED'], + 'configurable': True, + 'providerId': 'identity-provider-redirector', + 'level': 0, + 'index': 0 + }, + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after, create_empty_auth_flow=return_value_created_empty_flow) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 2d1f5408d3f1bc370502aebc39a3f18c0fc6715d Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 24 Jun 2021 22:33:29 +0200 Subject: [PATCH 0158/2828] Redis: slave -> replica (#2867) * Redis: slave -> replica * Fallback for old Redis versions in CI. 
--- .../fragments/2867-redis-terminology.yml | 2 + plugins/modules/database/misc/redis.py | 66 ++++++++++--------- .../targets/redis_info/defaults/main.yml | 2 +- .../targets/redis_info/tasks/main.yml | 6 +- .../setup_redis_replication/defaults/main.yml | 17 +++-- .../setup_redis_replication/handlers/main.yml | 10 +-- .../tasks/setup_redis_cluster.yml | 22 +++---- 7 files changed, 70 insertions(+), 55 deletions(-) create mode 100644 changelogs/fragments/2867-redis-terminology.yml diff --git a/changelogs/fragments/2867-redis-terminology.yml b/changelogs/fragments/2867-redis-terminology.yml new file mode 100644 index 0000000000..add76c0f91 --- /dev/null +++ b/changelogs/fragments/2867-redis-terminology.yml @@ -0,0 +1,2 @@ +minor_changes: +- "redis - allow to use the term ``replica`` instead of ``slave``, which has been the official Redis terminology since 2018 (https://github.com/ansible-collections/community.general/pull/2867)." diff --git a/plugins/modules/database/misc/redis.py b/plugins/modules/database/misc/redis.py index 5ffbd7db57..602aaf6c74 100644 --- a/plugins/modules/database/misc/redis.py +++ b/plugins/modules/database/misc/redis.py @@ -10,17 +10,17 @@ __metaclass__ = type DOCUMENTATION = ''' --- module: redis -short_description: Various redis commands, slave and flush +short_description: Various redis commands, replica and flush description: - Unified utility to interact with redis instances. options: command: description: - The selected redis command - - C(config) (new in 1.6), ensures a configuration setting on an instance. + - C(config) ensures a configuration setting on an instance. - C(flush) flushes all the instance or a specified db. - - C(slave) sets a redis instance in slave or master mode. - choices: [ config, flush, slave ] + - C(replica) sets a redis instance in replica or master mode. (C(slave) is an alias for C(replica).) 
+ choices: [ config, flush, replica, slave ] type: str login_password: description: @@ -38,18 +38,21 @@ options: type: int master_host: description: - - The host of the master instance [slave command] + - The host of the master instance [replica command] type: str master_port: description: - - The port of the master instance [slave command] + - The port of the master instance [replica command] type: int - slave_mode: + replica_mode: description: - - the mode of the redis instance [slave command] - default: slave - choices: [ master, slave ] + - The mode of the redis instance [replica command] + - C(slave) is an alias for C(replica). + default: replica + choices: [ master, replica, slave ] type: str + aliases: + - slave_mode db: description: - The database to flush (used in db mode) [flush command] @@ -76,7 +79,7 @@ notes: - Requires the redis-py Python package on the remote host. You can install it with pip (pip install redis) or with a package manager. https://github.com/andymccurdy/redis-py - - If the redis master instance we are making slave of is password protected + - If the redis master instance we are making replica of is password protected this needs to be in the redis.conf in the masterauth variable seealso: @@ -86,16 +89,16 @@ author: "Xabier Larrakoetxea (@slok)" ''' EXAMPLES = ''' -- name: Set local redis instance to be slave of melee.island on port 6377 +- name: Set local redis instance to be a replica of melee.island on port 6377 community.general.redis: - command: slave + command: replica master_host: melee.island master_port: 6377 -- name: Deactivate slave mode +- name: Deactivate replica mode community.general.redis: - command: slave - slave_mode: master + command: replica + replica_mode: master - name: Flush all the redis db community.general.redis: @@ -145,7 +148,7 @@ import re # Redis module specific support methods. 
-def set_slave_mode(client, master_host, master_port): +def set_replica_mode(client, master_host, master_port): try: return client.slaveof(master_host, master_port) except Exception: @@ -174,13 +177,13 @@ def flush(client, db=None): def main(): module = AnsibleModule( argument_spec=dict( - command=dict(type='str', choices=['config', 'flush', 'slave']), + command=dict(type='str', choices=['config', 'flush', 'replica', 'slave']), login_password=dict(type='str', no_log=True), login_host=dict(type='str', default='localhost'), login_port=dict(type='int', default=6379), master_host=dict(type='str'), master_port=dict(type='int'), - slave_mode=dict(type='str', default='slave', choices=['master', 'slave']), + replica_mode=dict(type='str', default='replica', choices=['master', 'replica', 'slave'], aliases=["slave_mode"]), db=dict(type='int'), flush_mode=dict(type='str', default='all', choices=['all', 'db']), name=dict(type='str'), @@ -196,20 +199,24 @@ def main(): login_host = module.params['login_host'] login_port = module.params['login_port'] command = module.params['command'] - - # Slave Command section ----------- if command == "slave": + command = "replica" + + # Replica Command section ----------- + if command == "replica": master_host = module.params['master_host'] master_port = module.params['master_port'] - mode = module.params['slave_mode'] + mode = module.params['replica_mode'] + if mode == "slave": + mode = "replica" # Check if we have all the data - if mode == "slave": # Only need data if we want to be slave + if mode == "replica": # Only need data if we want to be replica if not master_host: - module.fail_json(msg='In slave mode master host must be provided') + module.fail_json(msg='In replica mode master host must be provided') if not master_port: - module.fail_json(msg='In slave mode master port must be provided') + module.fail_json(msg='In replica mode master port must be provided') # Connect and check r = redis.StrictRedis(host=login_host, port=login_port, 
password=login_password) @@ -223,7 +230,7 @@ def main(): if mode == "master" and info["role"] == "master": module.exit_json(changed=False, mode=mode) - elif mode == "slave" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port: + elif mode == "replica" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port: status = dict( status=mode, master_host=master_host, @@ -234,9 +241,8 @@ def main(): # Do the stuff # (Check Check_mode before commands so the commands aren't evaluated # if not necessary) - if mode == "slave": - if module.check_mode or\ - set_slave_mode(r, master_host, master_port): + if mode == "replica": + if module.check_mode or set_replica_mode(r, master_host, master_port): info = r.info() status = { 'status': mode, @@ -245,7 +251,7 @@ def main(): } module.exit_json(changed=True, mode=status) else: - module.fail_json(msg='Unable to set slave mode') + module.fail_json(msg='Unable to set replica mode') else: if module.check_mode or set_master_mode(r): diff --git a/tests/integration/targets/redis_info/defaults/main.yml b/tests/integration/targets/redis_info/defaults/main.yml index 1352c55cc3..e1f03ee7ed 100644 --- a/tests/integration/targets/redis_info/defaults/main.yml +++ b/tests/integration/targets/redis_info/defaults/main.yml @@ -1,4 +1,4 @@ --- redis_password: PASS master_port: 6379 -slave_port: 6380 +replica_port: 6380 diff --git a/tests/integration/targets/redis_info/tasks/main.yml b/tests/integration/targets/redis_info/tasks/main.yml index d02775200c..dc76101157 100644 --- a/tests/integration/targets/redis_info/tasks/main.yml +++ b/tests/integration/targets/redis_info/tasks/main.yml @@ -33,9 +33,9 @@ - result.info.tcp_port == master_port - result.info.role == 'master' -- name: redis_info - connect to slave +- name: redis_info - connect to replica community.general.redis_info: - login_port: "{{ slave_port }}" + login_port: "{{ replica_port }}" 
login_password: "{{ redis_password }}" register: result @@ -43,5 +43,5 @@ that: - result is not changed - result.info is defined - - result.info.tcp_port == slave_port + - result.info.tcp_port == replica_port - result.info.role == 'slave' diff --git a/tests/integration/targets/setup_redis_replication/defaults/main.yml b/tests/integration/targets/setup_redis_replication/defaults/main.yml index bdbbbb2cac..5855519fc9 100644 --- a/tests/integration/targets/setup_redis_replication/defaults/main.yml +++ b/tests/integration/targets/setup_redis_replication/defaults/main.yml @@ -22,14 +22,21 @@ redis_module: "{{ (ansible_python_version is version('2.7', '>=')) | ternary('re redis_password: PASS +old_redis: >- + {{ + (ansible_distribution == 'CentOS' and ansible_distribution_major_version|int <= 7) or + (ansible_distribution == 'Ubuntu' and ansible_distribution_major_version|int <= 18) or + (ansible_os_family == 'FreeBSD' and ansible_distribution_major_version|int <= 12) + }} + # Master master_port: 6379 master_conf: /etc/redis-master.conf master_datadir: /var/lib/redis-master master_logdir: /var/log/redis-master -# Slave -slave_port: 6380 -slave_conf: /etc/redis-slave.conf -slave_datadir: /var/lib/redis-slave -slave_logdir: /var/log/redis-slave +# Replica +replica_port: 6380 +replica_conf: /etc/redis-replica.conf +replica_datadir: /var/lib/redis-replica +replica_logdir: /var/log/redis-replica diff --git a/tests/integration/targets/setup_redis_replication/handlers/main.yml b/tests/integration/targets/setup_redis_replication/handlers/main.yml index d4d535cdf7..1b3cd57912 100644 --- a/tests/integration/targets/setup_redis_replication/handlers/main.yml +++ b/tests/integration/targets/setup_redis_replication/handlers/main.yml @@ -1,7 +1,7 @@ - name: stop redis services shell: | kill -TERM $(cat /var/run/redis_{{ master_port }}.pid) - kill -TERM $(cat /var/run/redis_{{ slave_port }}.pid) + kill -TERM $(cat /var/run/redis_{{ replica_port }}.pid) listen: cleanup redis - name: 
remove redis packages @@ -27,8 +27,8 @@ - "{{ master_datadir }}" - "{{ master_logdir }}" - /var/run/redis_{{ master_port }}.pid - - "{{ slave_conf }}" - - "{{ slave_datadir }}" - - "{{ slave_logdir }}" - - /var/run/redis_{{ slave_port }}.pid + - "{{ replica_conf }}" + - "{{ replica_datadir }}" + - "{{ replica_logdir }}" + - /var/run/redis_{{ replica_port }}.pid listen: cleanup redis diff --git a/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml b/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml index 2445ba242a..03cb9da6ab 100644 --- a/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml +++ b/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml @@ -1,5 +1,5 @@ # We run two servers listening different ports -# to be able to check replication (one server for master, another for slave). +# to be able to check replication (one server for master, another for replica). - name: Install redis server apt dependencies apt: @@ -56,8 +56,8 @@ loop: - "{{ master_datadir }}" - "{{ master_logdir }}" - - "{{ slave_datadir }}" - - "{{ slave_logdir }}" + - "{{ replica_datadir }}" + - "{{ replica_logdir }}" - name: Create redis configs copy: @@ -75,16 +75,16 @@ port: "{{ master_port }}" logdir: "{{ master_logdir }}" datadir: "{{ master_datadir }}" - - file: "{{ slave_conf }}" - port: "{{ slave_port }}" - logdir: "{{ slave_logdir }}" - datadir: "{{ slave_datadir }}" + - file: "{{ replica_conf }}" + port: "{{ replica_port }}" + logdir: "{{ replica_logdir }}" + datadir: "{{ replica_datadir }}" - name: Start redis master shell: "{{ redis_bin[ansible_distribution] }} {{ master_conf }}" -- name: Start redis slave - shell: "{{ redis_bin[ansible_distribution] }} {{ slave_conf }} --slaveof 127.0.0.1 {{ master_port }}" +- name: Start redis replica + shell: "{{ redis_bin[ansible_distribution] }} {{ replica_conf }} --{% if old_redis %}slaveof{% else %}replicaof{% endif %} 127.0.0.1 
{{ master_port }}" - name: Wait for redis master to be started ansible.builtin.wait_for: @@ -95,10 +95,10 @@ connect_timeout: 5 timeout: 30 -- name: Wait for redis slave to be started +- name: Wait for redis replica to be started ansible.builtin.wait_for: host: 127.0.0.1 - port: "{{ slave_port }}" + port: "{{ replica_port }}" state: started delay: 1 connect_timeout: 5 From d2a984ded1b9f59ab5ac1c8588d82ee53ed97af2 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 24 Jun 2021 22:57:40 +0200 Subject: [PATCH 0159/2828] Adjust example to remove unnecessary offensive language. (#2869) --- plugins/modules/cloud/softlayer/sl_vm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/modules/cloud/softlayer/sl_vm.py b/plugins/modules/cloud/softlayer/sl_vm.py index c8db13d815..825d82e173 100644 --- a/plugins/modules/cloud/softlayer/sl_vm.py +++ b/plugins/modules/cloud/softlayer/sl_vm.py @@ -217,7 +217,7 @@ EXAMPLES = ''' datacenter: dal09 tags: - ansible-module-test - - ansible-module-test-slaves + - ansible-module-test-replicas hourly: yes private: no dedicated: no @@ -235,7 +235,7 @@ EXAMPLES = ''' datacenter: dal09 tags: - ansible-module-test - - ansible-module-test-slaves + - ansible-module-test-replicas hourly: yes private: no dedicated: no From d180390dbc99e9cfd0cffaee3edb6e9d8eee406c Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sat, 26 Jun 2021 07:27:41 -0400 Subject: [PATCH 0160/2828] modprobe - fix task status when module cannot be loaded (#2843) * Initial Commit * Adding changelog fragment * Ensured params are present during verbose output and enhanced check_mode * Making specific to builtins * Removing unneccessary external call * Acutal bugfix --- .../2843-modprobe-failure-conditions.yml | 3 + plugins/modules/system/modprobe.py | 139 ++++++++------ .../plugins/modules/system/test_modprobe.py | 174 ++++++++++++++++++ 3 files changed, 263 insertions(+), 53 deletions(-) create mode 100644 
changelogs/fragments/2843-modprobe-failure-conditions.yml create mode 100644 tests/unit/plugins/modules/system/test_modprobe.py diff --git a/changelogs/fragments/2843-modprobe-failure-conditions.yml b/changelogs/fragments/2843-modprobe-failure-conditions.yml new file mode 100644 index 0000000000..78ee5ce1e9 --- /dev/null +++ b/changelogs/fragments/2843-modprobe-failure-conditions.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - modprobe - added additional checks to ensure module load/unload is effective (https://github.com/ansible-collections/community.general/issues/1608). diff --git a/plugins/modules/system/modprobe.py b/plugins/modules/system/modprobe.py index 0ab7523537..07f7cd8cc3 100644 --- a/plugins/modules/system/modprobe.py +++ b/plugins/modules/system/modprobe.py @@ -50,11 +50,90 @@ EXAMPLES = ''' ''' import os.path +import platform import shlex import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native + +RELEASE_VER = platform.release() + + +class Modprobe(object): + def __init__(self, module): + self.module = module + self.modprobe_bin = module.get_bin_path('modprobe', True) + + self.check_mode = module.check_mode + self.desired_state = module.params['state'] + self.name = module.params['name'] + self.params = module.params['params'] + + self.changed = False + + def load_module(self): + command = [self.modprobe_bin] + if self.check_mode: + command.append('-n') + command.extend([self.name] + shlex.split(self.params)) + + rc, out, err = self.module.run_command(command) + + if rc != 0: + return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result) + + if self.check_mode or self.module_loaded(): + self.changed = True + else: + rc, stdout, stderr = self.module.run_command( + [self.modprobe_bin, '-n', '--first-time', self.name] + shlex.split(self.params) + ) + if rc != 0: + self.module.warn(stderr) + + def 
module_loaded(self): + is_loaded = False + try: + with open('/proc/modules') as modules: + module_name = self.name.replace('-', '_') + ' ' + for line in modules: + if line.startswith(module_name): + is_loaded = True + break + + if not is_loaded: + module_file = '/' + self.name + '.ko' + builtin_path = os.path.join('/lib/modules/', RELEASE_VER, 'modules.builtin') + with open(builtin_path) as builtins: + for line in builtins: + if line.rstrip().endswith(module_file): + is_loaded = True + break + except (IOError, OSError) as e: + self.module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **self.result) + + return is_loaded + + def unload_module(self): + command = [self.modprobe_bin, '-r', self.name] + if self.check_mode: + command.append('-n') + + rc, out, err = self.module.run_command(command) + if rc != 0: + return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result) + + self.changed = True + + @property + def result(self): + return { + 'changed': self.changed, + 'name': self.name, + 'params': self.params, + 'state': self.desired_state, + } def main(): @@ -67,60 +146,14 @@ def main(): supports_check_mode=True, ) - name = module.params['name'] - params = module.params['params'] - state = module.params['state'] + modprobe = Modprobe(module) - # FIXME: Adding all parameters as result values is useless - result = dict( - changed=False, - name=name, - params=params, - state=state, - ) + if modprobe.desired_state == 'present' and not modprobe.module_loaded(): + modprobe.load_module() + elif modprobe.desired_state == 'absent' and modprobe.module_loaded(): + modprobe.unload_module() - # Check if module is present - try: - present = False - with open('/proc/modules') as modules: - module_name = name.replace('-', '_') + ' ' - for line in modules: - if line.startswith(module_name): - present = True - break - if not present: - command = [module.get_bin_path('uname', True), '-r'] - rc, uname_kernel_release, err = module.run_command(command) 
- module_file = '/' + name + '.ko' - builtin_path = os.path.join('/lib/modules/', uname_kernel_release.strip(), - 'modules.builtin') - with open(builtin_path) as builtins: - for line in builtins: - if line.endswith(module_file): - present = True - break - except IOError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **result) - - # Add/remove module as needed - if state == 'present': - if not present: - if not module.check_mode: - command = [module.get_bin_path('modprobe', True), name] - command.extend(shlex.split(params)) - rc, out, err = module.run_command(command) - if rc != 0: - module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result) - result['changed'] = True - elif state == 'absent': - if present: - if not module.check_mode: - rc, out, err = module.run_command([module.get_bin_path('modprobe', True), '-r', name]) - if rc != 0: - module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result) - result['changed'] = True - - module.exit_json(**result) + module.exit_json(**modprobe.result) if __name__ == '__main__': diff --git a/tests/unit/plugins/modules/system/test_modprobe.py b/tests/unit/plugins/modules/system/test_modprobe.py new file mode 100644 index 0000000000..6f2c6b3d19 --- /dev/null +++ b/tests/unit/plugins/modules/system/test_modprobe.py @@ -0,0 +1,174 @@ +# -*- coding: utf-8 -*- +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.compat.mock import Mock +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.modules.system.modprobe import Modprobe + + +class 
TestLoadModule(ModuleTestCase): + def setUp(self): + super(TestLoadModule, self).setUp() + + self.mock_module_loaded = patch( + 'ansible_collections.community.general.plugins.modules.system.modprobe.Modprobe.module_loaded' + ) + self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') + self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + + self.module_loaded = self.mock_module_loaded.start() + self.run_command = self.mock_run_command.start() + self.get_bin_path = self.mock_get_bin_path.start() + + def tearDown(self): + """Teardown.""" + super(TestLoadModule, self).tearDown() + self.mock_module_loaded.stop() + self.mock_run_command.stop() + self.mock_get_bin_path.stop() + + def test_load_module_success(self): + set_module_args(dict( + name='test', + state='present', + )) + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + ), + supports_check_mode=True, + ) + + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [True] + self.run_command.side_effect = [(0, '', '')] + + modprobe = Modprobe(module) + modprobe.load_module() + + assert modprobe.result == { + 'changed': True, + 'name': 'test', + 'params': '', + 'state': 'present', + } + + def test_load_module_unchanged(self): + set_module_args(dict( + name='test', + state='present', + )) + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + ), + supports_check_mode=True, + ) + + module.warn = Mock() + + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [False] + self.run_command.side_effect = [(0, '', ''), (1, '', '')] + + modprobe = Modprobe(module) + modprobe.load_module() + + 
module.warn.assert_called_once_with('') + + +class TestUnloadModule(ModuleTestCase): + def setUp(self): + super(TestUnloadModule, self).setUp() + + self.mock_module_loaded = patch( + 'ansible_collections.community.general.plugins.modules.system.modprobe.Modprobe.module_loaded' + ) + self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') + self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + + self.module_loaded = self.mock_module_loaded.start() + self.run_command = self.mock_run_command.start() + self.get_bin_path = self.mock_get_bin_path.start() + + def tearDown(self): + """Teardown.""" + super(TestUnloadModule, self).tearDown() + self.mock_module_loaded.stop() + self.mock_run_command.stop() + self.mock_get_bin_path.stop() + + def test_unload_module_success(self): + set_module_args(dict( + name='test', + state='absent', + )) + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + ), + supports_check_mode=True, + ) + + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [False] + self.run_command.side_effect = [(0, '', '')] + + modprobe = Modprobe(module) + modprobe.unload_module() + + assert modprobe.result == { + 'changed': True, + 'name': 'test', + 'params': '', + 'state': 'absent', + } + + def test_unload_module_failure(self): + set_module_args(dict( + name='test', + state='absent', + )) + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + ), + supports_check_mode=True, + ) + + module.fail_json = Mock() + + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [True] + self.run_command.side_effect = [(1, '', '')] + + modprobe = 
Modprobe(module) + modprobe.unload_module() + + dummy_result = { + 'changed': False, + 'name': 'test', + 'state': 'absent', + 'params': '', + } + + module.fail_json.assert_called_once_with( + msg='', rc=1, stdout='', stderr='', **dummy_result + ) From fafabed9e6acc6bd49ce6e9bf266ee27f686aebe Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 26 Jun 2021 23:59:11 +0200 Subject: [PATCH 0161/2828] Replace ansible.module_utils._text by ansible.module_utils.common.text.converters (#2877) * Replace ansible.module_utils._text by ansible.module_utils.common.text.converters. * Also adjust tests. --- changelogs/fragments/ansible-core-_text.yml | 2 ++ plugins/action/system/shutdown.py | 2 +- plugins/become/doas.py | 2 +- plugins/become/ksu.py | 2 +- plugins/cache/redis.py | 2 +- plugins/callback/diy.py | 2 +- plugins/callback/log_plays.py | 2 +- plugins/callback/logentries.py | 2 +- plugins/callback/mail.py | 2 +- plugins/callback/selective.py | 2 +- plugins/callback/slack.py | 2 +- plugins/callback/unixy.py | 2 +- plugins/callback/yaml.py | 2 +- plugins/connection/chroot.py | 2 +- plugins/connection/iocage.py | 2 +- plugins/connection/jail.py | 2 +- plugins/connection/lxc.py | 2 +- plugins/connection/lxd.py | 2 +- plugins/connection/qubes.py | 2 +- plugins/connection/zone.py | 2 +- plugins/filter/from_csv.py | 2 +- plugins/inventory/cobbler.py | 2 +- plugins/inventory/gitlab_runners.py | 2 +- plugins/inventory/lxd.py | 2 +- plugins/inventory/nmap.py | 2 +- plugins/inventory/online.py | 2 +- plugins/inventory/scaleway.py | 2 +- plugins/inventory/virtualbox.py | 2 +- plugins/lookup/consul_kv.py | 2 +- plugins/lookup/cyberarkpassword.py | 2 +- plugins/lookup/dig.py | 2 +- plugins/lookup/dnstxt.py | 2 +- plugins/lookup/etcd3.py | 2 +- plugins/lookup/filetree.py | 2 +- plugins/lookup/hiera.py | 2 +- plugins/lookup/lastpass.py | 2 +- plugins/lookup/lmdb_kv.py | 2 +- plugins/lookup/nios_next_ip.py | 2 +- plugins/lookup/nios_next_network.py | 2 +- plugins/lookup/onepassword.py 
| 2 +- plugins/lookup/passwordstore.py | 2 +- plugins/lookup/random_string.py | 2 +- plugins/lookup/redis.py | 2 +- plugins/lookup/shelvefile.py | 2 +- plugins/module_utils/_netapp.py | 2 +- plugins/module_utils/csv.py | 2 +- plugins/module_utils/gandi_livedns_api.py | 2 +- plugins/module_utils/gitlab.py | 2 +- plugins/module_utils/hwc_utils.py | 2 +- plugins/module_utils/ibm_sa_utils.py | 2 +- plugins/module_utils/identity/keycloak/keycloak.py | 2 +- plugins/module_utils/ipa.py | 2 +- plugins/module_utils/ldap.py | 2 +- plugins/module_utils/lxd.py | 2 +- plugins/module_utils/net_tools/nios/api.py | 4 ++-- plugins/module_utils/oneview.py | 2 +- plugins/module_utils/oracle/oci_utils.py | 2 +- plugins/module_utils/redfish_utils.py | 4 ++-- plugins/module_utils/source_control/bitbucket.py | 2 +- plugins/module_utils/utm_utils.py | 2 +- plugins/module_utils/vexata.py | 2 +- plugins/modules/cloud/atomic/atomic_container.py | 2 +- plugins/modules/cloud/atomic/atomic_host.py | 2 +- plugins/modules/cloud/atomic/atomic_image.py | 2 +- plugins/modules/cloud/dimensiondata/dimensiondata_network.py | 2 +- plugins/modules/cloud/lxc/lxc_container.py | 2 +- plugins/modules/cloud/misc/cloud_init_data_facts.py | 2 +- plugins/modules/cloud/misc/proxmox.py | 2 +- plugins/modules/cloud/misc/proxmox_kvm.py | 2 +- plugins/modules/cloud/misc/proxmox_snap.py | 2 +- plugins/modules/cloud/packet/packet_device.py | 2 +- plugins/modules/cloud/packet/packet_ip_subnet.py | 2 +- plugins/modules/cloud/packet/packet_project.py | 2 +- plugins/modules/cloud/packet/packet_volume.py | 2 +- plugins/modules/cloud/packet/packet_volume_attachment.py | 2 +- plugins/modules/cloud/profitbricks/profitbricks.py | 2 +- plugins/modules/cloud/profitbricks/profitbricks_volume.py | 2 +- plugins/modules/cloud/pubnub/pubnub_blocks.py | 2 +- plugins/modules/cloud/rackspace/rax_cdb_user.py | 2 +- .../modules/cloud/scaleway/scaleway_security_group_rule.py | 2 +- plugins/modules/cloud/smartos/vmadm.py | 2 +- 
plugins/modules/clustering/consul/consul_kv.py | 2 +- plugins/modules/clustering/etcd3.py | 2 +- plugins/modules/clustering/nomad/nomad_job.py | 2 +- plugins/modules/clustering/nomad/nomad_job_info.py | 2 +- plugins/modules/clustering/znode.py | 2 +- plugins/modules/database/influxdb/influxdb_query.py | 2 +- .../modules/database/influxdb/influxdb_retention_policy.py | 2 +- plugins/modules/database/influxdb/influxdb_user.py | 2 +- plugins/modules/database/influxdb/influxdb_write.py | 2 +- plugins/modules/database/misc/odbc.py | 2 +- plugins/modules/database/misc/redis.py | 2 +- plugins/modules/database/misc/redis_info.py | 2 +- plugins/modules/database/saphana/hana_query.py | 2 +- plugins/modules/database/vertica/vertica_configuration.py | 2 +- plugins/modules/database/vertica/vertica_info.py | 2 +- plugins/modules/database/vertica/vertica_role.py | 2 +- plugins/modules/database/vertica/vertica_schema.py | 2 +- plugins/modules/database/vertica/vertica_user.py | 2 +- plugins/modules/files/filesize.py | 2 +- plugins/modules/files/iso_create.py | 2 +- plugins/modules/files/read_csv.py | 2 +- plugins/modules/files/sapcar_extract.py | 2 +- plugins/modules/files/xattr.py | 2 +- plugins/modules/files/xml.py | 2 +- plugins/modules/identity/ipa/ipa_config.py | 2 +- plugins/modules/identity/ipa/ipa_dnsrecord.py | 2 +- plugins/modules/identity/ipa/ipa_dnszone.py | 2 +- plugins/modules/identity/ipa/ipa_group.py | 2 +- plugins/modules/identity/ipa/ipa_hbacrule.py | 2 +- plugins/modules/identity/ipa/ipa_host.py | 2 +- plugins/modules/identity/ipa/ipa_hostgroup.py | 2 +- plugins/modules/identity/ipa/ipa_otpconfig.py | 2 +- plugins/modules/identity/ipa/ipa_otptoken.py | 2 +- plugins/modules/identity/ipa/ipa_pwpolicy.py | 2 +- plugins/modules/identity/ipa/ipa_role.py | 2 +- plugins/modules/identity/ipa/ipa_service.py | 2 +- plugins/modules/identity/ipa/ipa_subca.py | 2 +- plugins/modules/identity/ipa/ipa_sudocmd.py | 2 +- plugins/modules/identity/ipa/ipa_sudocmdgroup.py | 2 +- 
plugins/modules/identity/ipa/ipa_sudorule.py | 2 +- plugins/modules/identity/ipa/ipa_user.py | 2 +- plugins/modules/identity/ipa/ipa_vault.py | 2 +- plugins/modules/identity/onepassword_info.py | 2 +- plugins/modules/monitoring/bigpanda.py | 2 +- plugins/modules/monitoring/circonus_annotation.py | 2 +- plugins/modules/monitoring/datadog/datadog_event.py | 2 +- plugins/modules/monitoring/datadog/datadog_monitor.py | 2 +- plugins/modules/monitoring/honeybadger_deployment.py | 2 +- plugins/modules/monitoring/rollbar_deployment.py | 2 +- plugins/modules/monitoring/sensu/sensu_check.py | 2 +- plugins/modules/monitoring/sensu/sensu_silence.py | 2 +- plugins/modules/monitoring/sensu/sensu_subscription.py | 2 +- plugins/modules/monitoring/spectrum_model_attrs.py | 2 +- plugins/modules/monitoring/stackdriver.py | 2 +- plugins/modules/monitoring/statusio_maintenance.py | 2 +- plugins/modules/monitoring/uptimerobot.py | 2 +- plugins/modules/net_tools/cloudflare_dns.py | 2 +- plugins/modules/net_tools/haproxy.py | 2 +- plugins/modules/net_tools/ip_netns.py | 2 +- plugins/modules/net_tools/ipify_facts.py | 2 +- plugins/modules/net_tools/ldap/ldap_attrs.py | 2 +- plugins/modules/net_tools/ldap/ldap_entry.py | 2 +- plugins/modules/net_tools/ldap/ldap_search.py | 2 +- plugins/modules/net_tools/nmcli.py | 2 +- plugins/modules/net_tools/nsupdate.py | 2 +- plugins/modules/net_tools/omapi_host.py | 2 +- plugins/modules/net_tools/pritunl/pritunl_org.py | 2 +- plugins/modules/net_tools/pritunl/pritunl_org_info.py | 2 +- plugins/modules/net_tools/pritunl/pritunl_user.py | 2 +- plugins/modules/net_tools/pritunl/pritunl_user_info.py | 2 +- plugins/modules/net_tools/snmp_facts.py | 2 +- plugins/modules/notification/hipchat.py | 2 +- plugins/modules/notification/irc.py | 2 +- plugins/modules/notification/jabber.py | 2 +- plugins/modules/notification/mail.py | 2 +- plugins/modules/notification/mqtt.py | 2 +- plugins/modules/notification/sendgrid.py | 2 +- 
plugins/modules/notification/syslogger.py | 2 +- plugins/modules/packaging/language/maven_artifact.py | 2 +- plugins/modules/packaging/language/npm.py | 2 +- plugins/modules/packaging/language/pear.py | 2 +- plugins/modules/packaging/language/pip_package_info.py | 2 +- plugins/modules/packaging/os/flatpak_remote.py | 2 +- plugins/modules/packaging/os/homebrew_cask.py | 2 +- plugins/modules/packaging/os/mas.py | 2 +- plugins/modules/packaging/os/pacman_key.py | 2 +- plugins/modules/packaging/os/portage.py | 2 +- plugins/modules/packaging/os/redhat_subscription.py | 2 +- plugins/modules/packaging/os/rhn_channel.py | 2 +- plugins/modules/packaging/os/yum_versionlock.py | 2 +- plugins/modules/packaging/os/zypper.py | 2 +- plugins/modules/remote_management/cobbler/cobbler_sync.py | 2 +- plugins/modules/remote_management/cobbler/cobbler_system.py | 2 +- plugins/modules/remote_management/hpilo/hpilo_info.py | 2 +- .../remote_management/lenovoxcc/xcc_redfish_command.py | 2 +- .../remote_management/redfish/idrac_redfish_command.py | 2 +- .../modules/remote_management/redfish/idrac_redfish_config.py | 2 +- .../modules/remote_management/redfish/idrac_redfish_info.py | 2 +- plugins/modules/remote_management/redfish/redfish_command.py | 2 +- plugins/modules/remote_management/redfish/redfish_config.py | 2 +- plugins/modules/remote_management/wakeonlan.py | 2 +- plugins/modules/source_control/github/github_release.py | 2 +- plugins/modules/source_control/github/github_webhook.py | 2 +- plugins/modules/source_control/github/github_webhook_info.py | 2 +- plugins/modules/source_control/gitlab/gitlab_deploy_key.py | 2 +- plugins/modules/source_control/gitlab/gitlab_group.py | 2 +- plugins/modules/source_control/gitlab/gitlab_hook.py | 2 +- plugins/modules/source_control/gitlab/gitlab_project.py | 2 +- .../modules/source_control/gitlab/gitlab_project_variable.py | 2 +- plugins/modules/source_control/gitlab/gitlab_runner.py | 2 +- plugins/modules/source_control/gitlab/gitlab_user.py | 
2 +- plugins/modules/source_control/hg.py | 2 +- plugins/modules/storage/emc/emc_vnx_sg_member.py | 2 +- plugins/modules/system/crypttab.py | 2 +- plugins/modules/system/dpkg_divert.py | 2 +- plugins/modules/system/filesystem.py | 2 +- plugins/modules/system/interfaces_file.py | 2 +- plugins/modules/system/iptables_state.py | 2 +- plugins/modules/system/launchd.py | 2 +- plugins/modules/system/listen_ports_facts.py | 2 +- plugins/modules/system/locale_gen.py | 2 +- plugins/modules/system/nosh.py | 2 +- plugins/modules/system/openwrt_init.py | 2 +- plugins/modules/system/pam_limits.py | 2 +- plugins/modules/system/runit.py | 2 +- plugins/modules/system/sefcontext.py | 2 +- plugins/modules/system/selinux_permissive.py | 2 +- plugins/modules/system/selogin.py | 2 +- plugins/modules/system/seport.py | 2 +- plugins/modules/system/ssh_config.py | 2 +- plugins/modules/system/svc.py | 2 +- plugins/modules/web_infrastructure/deploy_helper.py | 2 +- plugins/modules/web_infrastructure/htpasswd.py | 2 +- plugins/modules/web_infrastructure/jenkins_build.py | 2 +- plugins/modules/web_infrastructure/jenkins_job.py | 2 +- plugins/modules/web_infrastructure/jenkins_job_info.py | 2 +- plugins/modules/web_infrastructure/jenkins_plugin.py | 2 +- plugins/modules/web_infrastructure/jenkins_script.py | 2 +- plugins/modules/web_infrastructure/jira.py | 2 +- plugins/modules/web_infrastructure/nginx_status_info.py | 2 +- plugins/modules/web_infrastructure/rundeck_acl_policy.py | 2 +- plugins/modules/web_infrastructure/rundeck_project.py | 2 +- .../modules/web_infrastructure/sophos_utm/utm_aaa_group.py | 2 +- .../web_infrastructure/sophos_utm/utm_aaa_group_info.py | 2 +- .../web_infrastructure/sophos_utm/utm_ca_host_key_cert.py | 2 +- .../sophos_utm/utm_ca_host_key_cert_info.py | 2 +- plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py | 2 +- .../sophos_utm/utm_network_interface_address.py | 2 +- .../sophos_utm/utm_network_interface_address_info.py | 2 +- 
.../web_infrastructure/sophos_utm/utm_proxy_auth_profile.py | 2 +- .../web_infrastructure/sophos_utm/utm_proxy_exception.py | 2 +- .../web_infrastructure/sophos_utm/utm_proxy_frontend.py | 2 +- .../web_infrastructure/sophos_utm/utm_proxy_frontend_info.py | 2 +- .../web_infrastructure/sophos_utm/utm_proxy_location.py | 2 +- .../web_infrastructure/sophos_utm/utm_proxy_location_info.py | 2 +- plugins/modules/web_infrastructure/taiga_issue.py | 2 +- tests/unit/mock/loader.py | 2 +- tests/unit/mock/procenv.py | 2 +- tests/unit/mock/vault_helper.py | 2 +- tests/unit/plugins/module_utils/conftest.py | 2 +- tests/unit/plugins/modules/conftest.py | 2 +- .../plugins/modules/monitoring/test_circonus_annotation.py | 2 +- tests/unit/plugins/modules/net_tools/test_nmcli.py | 2 +- tests/unit/plugins/modules/packaging/os/test_rhn_register.py | 2 +- .../remote_management/lenovoxcc/test_xcc_redfish_command.py | 2 +- tests/unit/plugins/modules/system/test_ufw.py | 2 +- tests/unit/plugins/modules/utils.py | 2 +- .../plugins/modules/web_infrastructure/test_jenkins_build.py | 2 +- 249 files changed, 252 insertions(+), 250 deletions(-) create mode 100644 changelogs/fragments/ansible-core-_text.yml diff --git a/changelogs/fragments/ansible-core-_text.yml b/changelogs/fragments/ansible-core-_text.yml new file mode 100644 index 0000000000..fae6391582 --- /dev/null +++ b/changelogs/fragments/ansible-core-_text.yml @@ -0,0 +1,2 @@ +minor_changes: +- "Avoid internal ansible-core module_utils in favor of equivalent public API available since at least Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/2877)." 
diff --git a/plugins/action/system/shutdown.py b/plugins/action/system/shutdown.py index e36397ffe7..953b73778b 100644 --- a/plugins/action/system/shutdown.py +++ b/plugins/action/system/shutdown.py @@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.errors import AnsibleError, AnsibleConnectionFailure -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.common.collections import is_string from ansible.plugins.action import ActionBase from ansible.utils.display import Display diff --git a/plugins/become/doas.py b/plugins/become/doas.py index ec660bb763..431e33cd6d 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -81,7 +81,7 @@ DOCUMENTATION = ''' import re -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.become import BecomeBase diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py index dad2663639..f5600c1d70 100644 --- a/plugins/become/ksu.py +++ b/plugins/become/ksu.py @@ -82,7 +82,7 @@ DOCUMENTATION = ''' import re -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.become import BecomeBase diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 6af7c731e4..20616096ae 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -67,7 +67,7 @@ import json from ansible import constants as C from ansible.errors import AnsibleError -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder from ansible.plugins.cache import BaseCacheModule from ansible.release import __version__ as ansible_base_version diff --git a/plugins/callback/diy.py 
b/plugins/callback/diy.py index dfed68b791..b288ee4b97 100644 --- a/plugins/callback/diy.py +++ b/plugins/callback/diy.py @@ -792,7 +792,7 @@ from ansible.utils.color import colorize, hostcolor from ansible.template import Templar from ansible.vars.manager import VariableManager from ansible.plugins.callback.default import CallbackModule as Default -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class DummyStdout(object): diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index 7383313482..df3482f483 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -31,7 +31,7 @@ import time import json from ansible.utils.path import makedirs_safe -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.common._collections_compat import MutableMapping from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index e4a8b51e79..d78bff331c 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -111,7 +111,7 @@ try: except ImportError: HAS_FLATDICT = False -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.plugins.callback import CallbackBase # Todo: diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py index 6964528da6..e48e2de98e 100644 --- a/plugins/callback/mail.py +++ b/plugins/callback/mail.py @@ -62,7 +62,7 @@ import re import smtplib from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase diff --git 
a/plugins/callback/selective.py b/plugins/callback/selective.py index 23813b0e3c..8d882d89bd 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -40,7 +40,7 @@ import difflib from ansible import constants as C from ansible.plugins.callback import CallbackBase -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text try: codeCodes = C.COLOR_CODES diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index 5974c41a71..74d338dbcc 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -58,7 +58,7 @@ import os import uuid from ansible import context -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.urls import open_url from ansible.plugins.callback import CallbackBase diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index 783729916f..aaca1bd8cc 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' from os.path import basename from ansible import constants as C from ansible import context -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.utils.color import colorize, hostcolor from ansible.plugins.callback.default import CallbackModule as CallbackModule_default diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py index 9aa8488807..da931d6b73 100644 --- a/plugins/callback/yaml.py +++ b/plugins/callback/yaml.py @@ -25,7 +25,7 @@ import re import string import sys -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.module_utils.six import string_types from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy diff --git 
a/plugins/connection/chroot.py b/plugins/connection/chroot.py index a18506cb80..c4c427aa0a 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -54,7 +54,7 @@ from ansible.errors import AnsibleError from ansible.module_utils.basic import is_executable from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.six.moves import shlex_quote -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index beb440eae3..e97867e58f 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -32,7 +32,7 @@ DOCUMENTATION = ''' import subprocess from ansible_collections.community.general.plugins.connection.jail import Connection as Jail -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.errors import AnsibleError from ansible.utils.display import Display diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index f5d787b62f..cee08ed8fd 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -38,7 +38,7 @@ import traceback from ansible.errors import AnsibleError from ansible.module_utils.six.moves import shlex_quote -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index 6512a87c6d..b18919efd3 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -43,7 +43,7 @@ except ImportError: pass from ansible import errors -from ansible.module_utils._text 
import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.plugins.connection import ConnectionBase diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index 58bb09906e..d523234449 100644 --- a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -46,7 +46,7 @@ from distutils.spawn import find_executable from subprocess import Popen, PIPE from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.plugins.connection import ConnectionBase diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index d3f934b601..ca221a7fac 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -39,7 +39,7 @@ DOCUMENTATION = ''' import subprocess -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.connection import ConnectionBase, ensure_connect from ansible.errors import AnsibleConnectionFailure from ansible.utils.display import Display diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index b101ec5cf3..b12cffe28d 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -33,7 +33,7 @@ import traceback from ansible.errors import AnsibleError from ansible.module_utils.six.moves import shlex_quote -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display diff --git a/plugins/filter/from_csv.py b/plugins/filter/from_csv.py index 13a18aa88a..b66d47699b 100644 --- a/plugins/filter/from_csv.py +++ b/plugins/filter/from_csv.py @@ -8,7 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type 
from ansible.errors import AnsibleFilterError -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, DialectNotAvailableError, diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py index 1550c41a4f..d9bc549ed6 100644 --- a/plugins/inventory/cobbler.py +++ b/plugins/inventory/cobbler.py @@ -72,7 +72,7 @@ from distutils.version import LooseVersion import socket from ansible.errors import AnsibleError -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils.common._collections_compat import MutableMapping from ansible.module_utils.six import iteritems from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py index daa3755875..ddf64cd626 100644 --- a/plugins/inventory/gitlab_runners.py +++ b/plugins/inventory/gitlab_runners.py @@ -82,7 +82,7 @@ keyed_groups: ''' from ansible.errors import AnsibleError, AnsibleParserError -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.plugins.inventory import BaseInventoryPlugin, Constructable try: diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index 06c620ac60..59bb8845ff 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -124,7 +124,7 @@ import time import os import socket from ansible.plugins.inventory import BaseInventoryPlugin -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.common.dict_transformations import dict_merge from ansible.module_utils.six import raise_from from 
ansible.errors import AnsibleError, AnsibleParserError diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 39a6ff3a67..05a83367af 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -56,7 +56,7 @@ from subprocess import Popen, PIPE from ansible import constants as C from ansible.errors import AnsibleParserError -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index f5a939b69d..2d305bb8d6 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -61,7 +61,7 @@ from sys import version as python_version from ansible.errors import AnsibleError from ansible.module_utils.urls import open_url from ansible.plugins.inventory import BaseInventoryPlugin -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.six.moves.urllib.parse import urljoin diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 843a006738..2e863a2531 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -100,7 +100,7 @@ from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link from ansible.module_utils.urls import open_url -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text import ansible.module_utils.six.moves.urllib.parse as urllib_parse diff --git 
a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 3827aa0d1a..827618131a 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -56,7 +56,7 @@ import os from subprocess import Popen, PIPE from ansible.errors import AnsibleParserError -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils.common._collections_compat import MutableMapping from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index d567b7f687..8b9e4e9102 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -106,7 +106,7 @@ import os from ansible.module_utils.six.moves.urllib.parse import urlparse from ansible.errors import AnsibleError, AnsibleAssertionError from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text try: import consul diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index f2a720a042..ec6e6fcb56 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -74,7 +74,7 @@ from subprocess import Popen from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase from ansible.parsing.splitter import parse_kv -from ansible.module_utils._text import to_bytes, to_text, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native from ansible.utils.display import Display display = Display() diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index 16e6bf4f69..b6c71954f0 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -152,7 +152,7 @@ RETURN = """ from ansible.errors import AnsibleError 
from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import socket try: diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py index 5252991c72..d52301e7fb 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -54,7 +54,7 @@ except ImportError: pass from ansible.errors import AnsibleError -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.plugins.lookup import LookupBase # ============================================================== diff --git a/plugins/lookup/etcd3.py b/plugins/lookup/etcd3.py index 333b8889d8..5b2c334c41 100644 --- a/plugins/lookup/etcd3.py +++ b/plugins/lookup/etcd3.py @@ -138,7 +138,7 @@ import re from ansible.plugins.lookup import LookupBase from ansible.utils.display import Display from ansible.module_utils.basic import missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.plugins.lookup import LookupBase from ansible.errors import AnsibleError, AnsibleLookupError diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index 40e449e600..06b89bf396 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -124,7 +124,7 @@ except ImportError: pass from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.utils.display import Display display = Display() diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index 899820191a..a4358f7b1e 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -63,7 +63,7 @@ import os from ansible.plugins.lookup import LookupBase from ansible.utils.cmd_functions import run_cmd -from ansible.module_utils._text import 
to_text +from ansible.module_utils.common.text.converters import to_text ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml') ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera') diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index e6137f4080..5e9f9907bd 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -39,7 +39,7 @@ RETURN = """ from subprocess import Popen, PIPE from ansible.errors import AnsibleError -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.plugins.lookup import LookupBase diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index 18a6a2ceac..a417874898 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -55,7 +55,7 @@ _raw: from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text HAVE_LMDB = True try: import lmdb diff --git a/plugins/lookup/nios_next_ip.py b/plugins/lookup/nios_next_ip.py index 21773cb53e..58e95c7d13 100644 --- a/plugins/lookup/nios_next_ip.py +++ b/plugins/lookup/nios_next_ip.py @@ -74,7 +74,7 @@ _list: from ansible.plugins.lookup import LookupBase from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.errors import AnsibleError diff --git a/plugins/lookup/nios_next_network.py b/plugins/lookup/nios_next_network.py index 2aa22ab704..c18c6ae993 100644 --- a/plugins/lookup/nios_next_network.py +++ b/plugins/lookup/nios_next_network.py @@ -84,7 +84,7 @@ _list: from ansible.plugins.lookup import LookupBase from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import 
WapiLookup -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.errors import AnsibleError diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index 715c337ffd..9f97a90e71 100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -103,7 +103,7 @@ from subprocess import Popen, PIPE from ansible.plugins.lookup import LookupBase from ansible.errors import AnsibleLookupError -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text class OnePass(object): diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 976dfb837e..9c545a1cb0 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -142,7 +142,7 @@ import yaml from distutils import util from ansible.errors import AnsibleError, AnsibleAssertionError -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.utils.display import Display from ansible.utils.encrypt import random_password from ansible.plugins.lookup import LookupBase diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py index 6a05cfd041..d67a75ed99 100644 --- a/plugins/lookup/random_string.py +++ b/plugins/lookup/random_string.py @@ -138,7 +138,7 @@ import string from ansible.errors import AnsibleLookupError from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text class LookupModule(LookupBase): diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index 074b9490bf..a1d5a381b2 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -80,7 +80,7 @@ try: except ImportError: pass -from ansible.module_utils._text import to_text +from 
ansible.module_utils.common.text.converters import to_text from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 808bb942b0..0067472513 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -36,7 +36,7 @@ import shelve from ansible.errors import AnsibleError, AnsibleAssertionError from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text class LookupModule(LookupBase): diff --git a/plugins/module_utils/_netapp.py b/plugins/module_utils/_netapp.py index d80506bb9a..81a50a336d 100644 --- a/plugins/module_utils/_netapp.py +++ b/plugins/module_utils/_netapp.py @@ -41,7 +41,7 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError from ansible.module_utils.urls import open_url from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native try: from ansible.module_utils.ansible_release import __version__ as ansible_version diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py index 426e2eb279..86c4694524 100644 --- a/plugins/module_utils/csv.py +++ b/plugins/module_utils/csv.py @@ -10,7 +10,7 @@ __metaclass__ = type import csv from io import BytesIO, StringIO -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.six import PY3 diff --git a/plugins/module_utils/gandi_livedns_api.py b/plugins/module_utils/gandi_livedns_api.py index 60e0761d26..2c785353ad 100644 --- a/plugins/module_utils/gandi_livedns_api.py +++ b/plugins/module_utils/gandi_livedns_api.py @@ -7,7 +7,7 @@ __metaclass__ = type import json 
-from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.urls import fetch_url diff --git a/plugins/module_utils/gitlab.py b/plugins/module_utils/gitlab.py index e13f38c099..5ddafa2b42 100644 --- a/plugins/module_utils/gitlab.py +++ b/plugins/module_utils/gitlab.py @@ -12,7 +12,7 @@ from distutils.version import StrictVersion from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native try: from urllib import quote_plus # Python 2.X diff --git a/plugins/module_utils/hwc_utils.py b/plugins/module_utils/hwc_utils.py index 05e0c1378d..c11cb7d4d2 100644 --- a/plugins/module_utils/hwc_utils.py +++ b/plugins/module_utils/hwc_utils.py @@ -21,7 +21,7 @@ except ImportError: from ansible.module_utils.basic import (AnsibleModule, env_fallback, missing_required_lib) -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class HwcModuleException(Exception): diff --git a/plugins/module_utils/ibm_sa_utils.py b/plugins/module_utils/ibm_sa_utils.py index c3ab4103a9..fdaa38a9fc 100644 --- a/plugins/module_utils/ibm_sa_utils.py +++ b/plugins/module_utils/ibm_sa_utils.py @@ -9,7 +9,7 @@ __metaclass__ = type import traceback from functools import wraps -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import missing_required_lib PYXCLI_INSTALLED = True diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index ae002a7c94..b11289a634 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -35,7 +35,7 @@ import traceback from 
ansible.module_utils.urls import open_url from ansible.module_utils.six.moves.urllib.parse import urlencode, quote from ansible.module_utils.six.moves.urllib.error import HTTPError -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text URL_REALMS = "{url}/admin/realms" URL_REALM = "{url}/admin/realms/{realm}" diff --git a/plugins/module_utils/ipa.py b/plugins/module_utils/ipa.py index b2b1a892cd..76fe6ca717 100644 --- a/plugins/module_utils/ipa.py +++ b/plugins/module_utils/ipa.py @@ -18,7 +18,7 @@ import socket import uuid import re -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils.six import PY3 from ansible.module_utils.six.moves.urllib.parse import quote from ansible.module_utils.urls import fetch_url, HAS_GSSAPI diff --git a/plugins/module_utils/ldap.py b/plugins/module_utils/ldap.py index 999d7e67ee..30dbaf7640 100644 --- a/plugins/module_utils/ldap.py +++ b/plugins/module_utils/ldap.py @@ -10,7 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type import traceback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native try: import ldap diff --git a/plugins/module_utils/lxd.py b/plugins/module_utils/lxd.py index e835a6abca..e393090799 100644 --- a/plugins/module_utils/lxd.py +++ b/plugins/module_utils/lxd.py @@ -20,7 +20,7 @@ import ssl from ansible.module_utils.urls import generic_urlparse from ansible.module_utils.six.moves.urllib.parse import urlparse from ansible.module_utils.six.moves import http_client -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text # httplib/http.client connection using unix domain socket HTTPConnection = http_client.HTTPConnection diff --git 
a/plugins/module_utils/net_tools/nios/api.py b/plugins/module_utils/net_tools/nios/api.py index 4a771e49af..cbb8b63f3b 100644 --- a/plugins/module_utils/net_tools/nios/api.py +++ b/plugins/module_utils/net_tools/nios/api.py @@ -14,9 +14,9 @@ __metaclass__ = type import os from functools import partial -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.six import iteritems -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.basic import env_fallback from ansible.module_utils.common.validation import check_type_dict diff --git a/plugins/module_utils/oneview.py b/plugins/module_utils/oneview.py index bfa5f09102..3ebb057ca7 100644 --- a/plugins/module_utils/oneview.py +++ b/plugins/module_utils/oneview.py @@ -27,7 +27,7 @@ except ImportError: from ansible.module_utils import six from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common._collections_compat import Mapping diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py index 610366d9ba..0b82dadf0e 100644 --- a/plugins/module_utils/oracle/oci_utils.py +++ b/plugins/module_utils/oracle/oci_utils.py @@ -38,7 +38,7 @@ except ImportError: HAS_OCI_PY_SDK = False -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.six import iteritems __version__ = "1.6.0-dev" diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index df7011a0b4..c39c02a42e 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -6,8 +6,8 @@ __metaclass__ = type import json from ansible.module_utils.urls 
import open_url -from ansible.module_utils._text import to_native -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.six.moves import http_client from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError from ansible.module_utils.six.moves.urllib.parse import urlparse diff --git a/plugins/module_utils/source_control/bitbucket.py b/plugins/module_utils/source_control/bitbucket.py index c17dcb1d9e..c24a25074a 100644 --- a/plugins/module_utils/source_control/bitbucket.py +++ b/plugins/module_utils/source_control/bitbucket.py @@ -7,7 +7,7 @@ __metaclass__ = type import json -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.basic import env_fallback from ansible.module_utils.urls import fetch_url, basic_auth_header diff --git a/plugins/module_utils/utm_utils.py b/plugins/module_utils/utm_utils.py index 591305a4b3..fd196dcbca 100644 --- a/plugins/module_utils/utm_utils.py +++ b/plugins/module_utils/utm_utils.py @@ -13,7 +13,7 @@ __metaclass__ = type import json -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url diff --git a/plugins/module_utils/vexata.py b/plugins/module_utils/vexata.py index e5c9bdb819..3d6fb7aaca 100644 --- a/plugins/module_utils/vexata.py +++ b/plugins/module_utils/vexata.py @@ -13,7 +13,7 @@ try: except ImportError: HAS_VEXATAPI = False -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import env_fallback VXOS_VERSION = None diff --git a/plugins/modules/cloud/atomic/atomic_container.py 
b/plugins/modules/cloud/atomic/atomic_container.py index 273cdc8931..ca63125661 100644 --- a/plugins/modules/cloud/atomic/atomic_container.py +++ b/plugins/modules/cloud/atomic/atomic_container.py @@ -95,7 +95,7 @@ msg: import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def do_install(module, mode, rootfs, container, image, values_list, backend): diff --git a/plugins/modules/cloud/atomic/atomic_host.py b/plugins/modules/cloud/atomic/atomic_host.py index d7164a9adb..85b00f917a 100644 --- a/plugins/modules/cloud/atomic/atomic_host.py +++ b/plugins/modules/cloud/atomic/atomic_host.py @@ -52,7 +52,7 @@ import os import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def core(module): diff --git a/plugins/modules/cloud/atomic/atomic_image.py b/plugins/modules/cloud/atomic/atomic_image.py index fd99bb3bf7..350ad4c2ae 100644 --- a/plugins/modules/cloud/atomic/atomic_image.py +++ b/plugins/modules/cloud/atomic/atomic_image.py @@ -69,7 +69,7 @@ msg: import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def do_upgrade(module, image): diff --git a/plugins/modules/cloud/dimensiondata/dimensiondata_network.py b/plugins/modules/cloud/dimensiondata/dimensiondata_network.py index 246b486d06..64cc8b118a 100644 --- a/plugins/modules/cloud/dimensiondata/dimensiondata_network.py +++ b/plugins/modules/cloud/dimensiondata/dimensiondata_network.py @@ -113,7 +113,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule -from ansible.module_utils._text import 
to_native +from ansible.module_utils.common.text.converters import to_native if HAS_LIBCLOUD: from libcloud.compute.base import NodeLocation diff --git a/plugins/modules/cloud/lxc/lxc_container.py b/plugins/modules/cloud/lxc/lxc_container.py index 636508dbda..18f1d02efe 100644 --- a/plugins/modules/cloud/lxc/lxc_container.py +++ b/plugins/modules/cloud/lxc/lxc_container.py @@ -433,7 +433,7 @@ else: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE from ansible.module_utils.six.moves import xrange -from ansible.module_utils._text import to_text, to_bytes +from ansible.module_utils.common.text.converters import to_text, to_bytes # LXC_COMPRESSION_MAP is a map of available compression types when creating diff --git a/plugins/modules/cloud/misc/cloud_init_data_facts.py b/plugins/modules/cloud/misc/cloud_init_data_facts.py index 5774fa6f39..1b44c50cbe 100644 --- a/plugins/modules/cloud/misc/cloud_init_data_facts.py +++ b/plugins/modules/cloud/misc/cloud_init_data_facts.py @@ -85,7 +85,7 @@ cloud_init_data_facts: import os from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text CLOUD_INIT_PATH = "/var/lib/cloud/data" diff --git a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py index 422c108c35..21817f10dc 100644 --- a/plugins/modules/cloud/misc/proxmox.py +++ b/plugins/modules/cloud/misc/proxmox.py @@ -364,7 +364,7 @@ except ImportError: HAS_PROXMOXER = False from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native VZ_TYPE = None diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 0fb486600c..939c72a126 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ 
b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -771,7 +771,7 @@ except ImportError: HAS_PROXMOXER = False from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_nextvmid(module, proxmox): diff --git a/plugins/modules/cloud/misc/proxmox_snap.py b/plugins/modules/cloud/misc/proxmox_snap.py index 17c6ef335a..4ee2d27893 100644 --- a/plugins/modules/cloud/misc/proxmox_snap.py +++ b/plugins/modules/cloud/misc/proxmox_snap.py @@ -119,7 +119,7 @@ except ImportError: HAS_PROXMOXER = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native VZ_TYPE = None diff --git a/plugins/modules/cloud/packet/packet_device.py b/plugins/modules/cloud/packet/packet_device.py index 5dc662a255..f939572656 100644 --- a/plugins/modules/cloud/packet/packet_device.py +++ b/plugins/modules/cloud/packet/packet_device.py @@ -275,7 +275,7 @@ import uuid import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PACKET_SDK = True try: diff --git a/plugins/modules/cloud/packet/packet_ip_subnet.py b/plugins/modules/cloud/packet/packet_ip_subnet.py index fbc12698a1..718de36f22 100644 --- a/plugins/modules/cloud/packet/packet_ip_subnet.py +++ b/plugins/modules/cloud/packet/packet_ip_subnet.py @@ -151,7 +151,7 @@ import uuid import re from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PACKET_SDK = True diff --git a/plugins/modules/cloud/packet/packet_project.py b/plugins/modules/cloud/packet/packet_project.py index 38d7ca7640..c6502c6ea6 100644 --- 
a/plugins/modules/cloud/packet/packet_project.py +++ b/plugins/modules/cloud/packet/packet_project.py @@ -122,7 +122,7 @@ id: ''' from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PACKET_SDK = True diff --git a/plugins/modules/cloud/packet/packet_volume.py b/plugins/modules/cloud/packet/packet_volume.py index 2966139a43..97c1e7498d 100644 --- a/plugins/modules/cloud/packet/packet_volume.py +++ b/plugins/modules/cloud/packet/packet_volume.py @@ -168,7 +168,7 @@ description: import uuid from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PACKET_SDK = True diff --git a/plugins/modules/cloud/packet/packet_volume_attachment.py b/plugins/modules/cloud/packet/packet_volume_attachment.py index 7cda16ce86..9044fbcffa 100644 --- a/plugins/modules/cloud/packet/packet_volume_attachment.py +++ b/plugins/modules/cloud/packet/packet_volume_attachment.py @@ -130,7 +130,7 @@ device_id: import uuid from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PACKET_SDK = True diff --git a/plugins/modules/cloud/profitbricks/profitbricks.py b/plugins/modules/cloud/profitbricks/profitbricks.py index c64151d68e..4c24d6408f 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks.py +++ b/plugins/modules/cloud/profitbricks/profitbricks.py @@ -198,7 +198,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import xrange -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native LOCATIONS = ['us/las', diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume.py 
b/plugins/modules/cloud/profitbricks/profitbricks_volume.py index 0e9523c664..5fff01d3d7 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_volume.py +++ b/plugins/modules/cloud/profitbricks/profitbricks_volume.py @@ -149,7 +149,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import xrange -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native uuid_match = re.compile( diff --git a/plugins/modules/cloud/pubnub/pubnub_blocks.py b/plugins/modules/cloud/pubnub/pubnub_blocks.py index 1dbe416b9c..c8de702597 100644 --- a/plugins/modules/cloud/pubnub/pubnub_blocks.py +++ b/plugins/modules/cloud/pubnub/pubnub_blocks.py @@ -247,7 +247,7 @@ except ImportError: exceptions = None from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text def pubnub_user(module): diff --git a/plugins/modules/cloud/rackspace/rax_cdb_user.py b/plugins/modules/cloud/rackspace/rax_cdb_user.py index 2034170f42..01c10950c4 100644 --- a/plugins/modules/cloud/rackspace/rax_cdb_user.py +++ b/plugins/modules/cloud/rackspace/rax_cdb_user.py @@ -77,7 +77,7 @@ except ImportError: HAS_PYRAX = False from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py index 48e2f10ef3..118883328a 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py +++ b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py @@ -133,7 +133,7 @@ data: import traceback from 
ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.basic import AnsibleModule, missing_required_lib try: diff --git a/plugins/modules/cloud/smartos/vmadm.py b/plugins/modules/cloud/smartos/vmadm.py index 63a4c21231..03a022423e 100644 --- a/plugins/modules/cloud/smartos/vmadm.py +++ b/plugins/modules/cloud/smartos/vmadm.py @@ -404,7 +404,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # While vmadm(1M) supports a -E option to return any errors in JSON, the # generated JSON does not play well with the JSON parsers of Python. diff --git a/plugins/modules/clustering/consul/consul_kv.py b/plugins/modules/clustering/consul/consul_kv.py index 01e9be2d05..d392228146 100644 --- a/plugins/modules/clustering/consul/consul_kv.py +++ b/plugins/modules/clustering/consul/consul_kv.py @@ -136,7 +136,7 @@ EXAMPLES = ''' state: acquire ''' -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text try: import consul diff --git a/plugins/modules/clustering/etcd3.py b/plugins/modules/clustering/etcd3.py index 0f87e32d13..28c5915693 100644 --- a/plugins/modules/clustering/etcd3.py +++ b/plugins/modules/clustering/etcd3.py @@ -119,7 +119,7 @@ old_value: import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native try: diff --git a/plugins/modules/clustering/nomad/nomad_job.py b/plugins/modules/clustering/nomad/nomad_job.py index 6c28579773..a5e1cd3755 100644 --- a/plugins/modules/clustering/nomad/nomad_job.py +++ 
b/plugins/modules/clustering/nomad/nomad_job.py @@ -84,7 +84,7 @@ EXAMPLES = ''' import json from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import_nomad = None try: diff --git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py index 5e9455f77b..d913ebeb61 100644 --- a/plugins/modules/clustering/nomad/nomad_job_info.py +++ b/plugins/modules/clustering/nomad/nomad_job_info.py @@ -270,7 +270,7 @@ import os import json from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import_nomad = None try: diff --git a/plugins/modules/clustering/znode.py b/plugins/modules/clustering/znode.py index e85537e6e8..8456a187ee 100644 --- a/plugins/modules/clustering/znode.py +++ b/plugins/modules/clustering/znode.py @@ -108,7 +108,7 @@ except ImportError: KAZOO_INSTALLED = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes def main(): diff --git a/plugins/modules/database/influxdb/influxdb_query.py b/plugins/modules/database/influxdb/influxdb_query.py index d9cf500727..bff6fa989b 100644 --- a/plugins/modules/database/influxdb/influxdb_query.py +++ b/plugins/modules/database/influxdb/influxdb_query.py @@ -64,7 +64,7 @@ query_results: ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py 
b/plugins/modules/database/influxdb/influxdb_retention_policy.py index 3ff48cbad0..a145f9e32b 100644 --- a/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ b/plugins/modules/database/influxdb/influxdb_retention_policy.py @@ -145,7 +145,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$') diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py index cb35ea7ce6..8746445335 100644 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ b/plugins/modules/database/influxdb/influxdb_user.py @@ -104,7 +104,7 @@ import json from ansible.module_utils.urls import ConnectionError from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import ansible_collections.community.general.plugins.module_utils.influxdb as influx diff --git a/plugins/modules/database/influxdb/influxdb_write.py b/plugins/modules/database/influxdb/influxdb_write.py index 0dc063a7b1..e34fe9c2cf 100644 --- a/plugins/modules/database/influxdb/influxdb_write.py +++ b/plugins/modules/database/influxdb/influxdb_write.py @@ -61,7 +61,7 @@ RETURN = r''' ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb diff --git a/plugins/modules/database/misc/odbc.py b/plugins/modules/database/misc/odbc.py index 313a7f7096..5d1cdf884b 100644 --- a/plugins/modules/database/misc/odbc.py +++ 
b/plugins/modules/database/misc/odbc.py @@ -78,7 +78,7 @@ row_count: ''' from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PYODBC = None try: diff --git a/plugins/modules/database/misc/redis.py b/plugins/modules/database/misc/redis.py index 602aaf6c74..960b072fea 100644 --- a/plugins/modules/database/misc/redis.py +++ b/plugins/modules/database/misc/redis.py @@ -143,7 +143,7 @@ else: from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.formatters import human_to_bytes -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import re diff --git a/plugins/modules/database/misc/redis_info.py b/plugins/modules/database/misc/redis_info.py index b615addbd2..9762b03c98 100644 --- a/plugins/modules/database/misc/redis_info.py +++ b/plugins/modules/database/misc/redis_info.py @@ -196,7 +196,7 @@ except ImportError: HAS_REDIS_PACKAGE = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def redis_client(**client_params): diff --git a/plugins/modules/database/saphana/hana_query.py b/plugins/modules/database/saphana/hana_query.py index ab147ef3fe..9b26134022 100644 --- a/plugins/modules/database/saphana/hana_query.py +++ b/plugins/modules/database/saphana/hana_query.py @@ -103,7 +103,7 @@ query_result: import csv from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import StringIO -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def csv_to_list(rawcsv): diff --git a/plugins/modules/database/vertica/vertica_configuration.py 
b/plugins/modules/database/vertica/vertica_configuration.py index 1d67a831d9..b210e3f6f0 100644 --- a/plugins/modules/database/vertica/vertica_configuration.py +++ b/plugins/modules/database/vertica/vertica_configuration.py @@ -76,7 +76,7 @@ else: pyodbc_found = True from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class NotSupportedError(Exception): diff --git a/plugins/modules/database/vertica/vertica_info.py b/plugins/modules/database/vertica/vertica_info.py index c0aa94be1e..feaebecbdc 100644 --- a/plugins/modules/database/vertica/vertica_info.py +++ b/plugins/modules/database/vertica/vertica_info.py @@ -74,7 +74,7 @@ else: pyodbc_found = True from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class NotSupportedError(Exception): diff --git a/plugins/modules/database/vertica/vertica_role.py b/plugins/modules/database/vertica/vertica_role.py index fc80907cc6..06dd218ed0 100644 --- a/plugins/modules/database/vertica/vertica_role.py +++ b/plugins/modules/database/vertica/vertica_role.py @@ -87,7 +87,7 @@ else: pyodbc_found = True from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class NotSupportedError(Exception): diff --git a/plugins/modules/database/vertica/vertica_schema.py b/plugins/modules/database/vertica/vertica_schema.py index 0c85e3e091..749234add0 100644 --- a/plugins/modules/database/vertica/vertica_schema.py +++ b/plugins/modules/database/vertica/vertica_schema.py @@ -109,7 +109,7 @@ else: pyodbc_found = True from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native 
+from ansible.module_utils.common.text.converters import to_native class NotSupportedError(Exception): diff --git a/plugins/modules/database/vertica/vertica_user.py b/plugins/modules/database/vertica/vertica_user.py index 791ef5fef9..fed3a2a56f 100644 --- a/plugins/modules/database/vertica/vertica_user.py +++ b/plugins/modules/database/vertica/vertica_user.py @@ -118,7 +118,7 @@ else: pyodbc_found = True from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class NotSupportedError(Exception): diff --git a/plugins/modules/files/filesize.py b/plugins/modules/files/filesize.py index 5b22fb4512..f073ff4119 100644 --- a/plugins/modules/files/filesize.py +++ b/plugins/modules/files/filesize.py @@ -224,7 +224,7 @@ import os import math from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # These are the multiplicative suffixes understood (or returned) by dd and diff --git a/plugins/modules/files/iso_create.py b/plugins/modules/files/iso_create.py index bf6359b14a..3fa456339e 100644 --- a/plugins/modules/files/iso_create.py +++ b/plugins/modules/files/iso_create.py @@ -153,7 +153,7 @@ except ImportError: HAS_PYCDLIB = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=None, use_joliet=None, use_udf=None): diff --git a/plugins/modules/files/read_csv.py b/plugins/modules/files/read_csv.py index c48efc7440..2d5644db2e 100644 --- a/plugins/modules/files/read_csv.py +++ b/plugins/modules/files/read_csv.py @@ -138,7 +138,7 @@ list: ''' from ansible.module_utils.basic import AnsibleModule -from 
ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, DialectNotAvailableError, diff --git a/plugins/modules/files/sapcar_extract.py b/plugins/modules/files/sapcar_extract.py index db0f5f9ea8..b6a76a1629 100644 --- a/plugins/modules/files/sapcar_extract.py +++ b/plugins/modules/files/sapcar_extract.py @@ -90,7 +90,7 @@ import os from tempfile import NamedTemporaryFile from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import open_url -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_list_of_files(dir_name): diff --git a/plugins/modules/files/xattr.py b/plugins/modules/files/xattr.py index 7691f30905..8578ed4c4e 100644 --- a/plugins/modules/files/xattr.py +++ b/plugins/modules/files/xattr.py @@ -94,7 +94,7 @@ import os # import module snippets from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_xattr_keys(module, path, follow): diff --git a/plugins/modules/files/xml.py b/plugins/modules/files/xml.py index e7c6ca3f1e..ffdb65400c 100644 --- a/plugins/modules/files/xml.py +++ b/plugins/modules/files/xml.py @@ -369,7 +369,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib from ansible.module_utils.six import iteritems, string_types -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.module_utils.common._collections_compat import MutableMapping _IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*" diff --git a/plugins/modules/identity/ipa/ipa_config.py b/plugins/modules/identity/ipa/ipa_config.py index 
49d46fb5b2..e8ee073d6e 100644 --- a/plugins/modules/identity/ipa/ipa_config.py +++ b/plugins/modules/identity/ipa/ipa_config.py @@ -194,7 +194,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class ConfigIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_dnsrecord.py b/plugins/modules/identity/ipa/ipa_dnsrecord.py index 635bf2ff91..73b6695698 100644 --- a/plugins/modules/identity/ipa/ipa_dnsrecord.py +++ b/plugins/modules/identity/ipa/ipa_dnsrecord.py @@ -151,7 +151,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class DNSRecordIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_dnszone.py b/plugins/modules/identity/ipa/ipa_dnszone.py index 1536866c29..3dabad8db8 100644 --- a/plugins/modules/identity/ipa/ipa_dnszone.py +++ b/plugins/modules/identity/ipa/ipa_dnszone.py @@ -71,7 +71,7 @@ zone: from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class DNSZoneIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_group.py b/plugins/modules/identity/ipa/ipa_group.py index 84ff443a59..f62d9f0a18 100644 --- a/plugins/modules/identity/ipa/ipa_group.py +++ b/plugins/modules/identity/ipa/ipa_group.py @@ -115,7 +115,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class GroupIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_hbacrule.py b/plugins/modules/identity/ipa/ipa_hbacrule.py index cb49fd53dd..5f0704d58b 100644 --- a/plugins/modules/identity/ipa/ipa_hbacrule.py +++ b/plugins/modules/identity/ipa/ipa_hbacrule.py @@ -153,7 +153,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class HBACRuleIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_host.py b/plugins/modules/identity/ipa/ipa_host.py index 80892c01c0..25c65f0b34 100644 --- a/plugins/modules/identity/ipa/ipa_host.py +++ b/plugins/modules/identity/ipa/ipa_host.py @@ -163,7 +163,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class HostIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_hostgroup.py b/plugins/modules/identity/ipa/ipa_hostgroup.py index ae1f1a6b33..9d5c6f99c7 100644 --- a/plugins/modules/identity/ipa/ipa_hostgroup.py +++ b/plugins/modules/identity/ipa/ipa_hostgroup.py @@ -86,7 +86,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class 
HostGroupIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_otpconfig.py b/plugins/modules/identity/ipa/ipa_otpconfig.py index 84a9e969cb..9a10baec0b 100644 --- a/plugins/modules/identity/ipa/ipa_otpconfig.py +++ b/plugins/modules/identity/ipa/ipa_otpconfig.py @@ -78,7 +78,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class OTPConfigIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_otptoken.py b/plugins/modules/identity/ipa/ipa_otptoken.py index f8f48d68a6..4027a1c459 100644 --- a/plugins/modules/identity/ipa/ipa_otptoken.py +++ b/plugins/modules/identity/ipa/ipa_otptoken.py @@ -168,7 +168,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule, sanitize_keys from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class OTPTokenIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_pwpolicy.py b/plugins/modules/identity/ipa/ipa_pwpolicy.py index 7c694f32ee..0f9b141b4c 100644 --- a/plugins/modules/identity/ipa/ipa_pwpolicy.py +++ b/plugins/modules/identity/ipa/ipa_pwpolicy.py @@ -127,7 +127,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class PwPolicyIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_role.py b/plugins/modules/identity/ipa/ipa_role.py index 589a6d5efe..c602614ef9 100644 --- a/plugins/modules/identity/ipa/ipa_role.py 
+++ b/plugins/modules/identity/ipa/ipa_role.py @@ -131,7 +131,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class RoleIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_service.py b/plugins/modules/identity/ipa/ipa_service.py index 088127e0c3..f85b80d44e 100644 --- a/plugins/modules/identity/ipa/ipa_service.py +++ b/plugins/modules/identity/ipa/ipa_service.py @@ -82,7 +82,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class ServiceIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_subca.py b/plugins/modules/identity/ipa/ipa_subca.py index 218951a071..3b0d3e8707 100644 --- a/plugins/modules/identity/ipa/ipa_subca.py +++ b/plugins/modules/identity/ipa/ipa_subca.py @@ -77,7 +77,7 @@ subca: from distutils.version import LooseVersion from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class SubCAIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_sudocmd.py b/plugins/modules/identity/ipa/ipa_sudocmd.py index aa09e0e44b..d75aff44ce 100644 --- a/plugins/modules/identity/ipa/ipa_sudocmd.py +++ b/plugins/modules/identity/ipa/ipa_sudocmd.py @@ -63,7 +63,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, 
ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class SudoCmdIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_sudocmdgroup.py b/plugins/modules/identity/ipa/ipa_sudocmdgroup.py index 96eb655930..65fdd4f75f 100644 --- a/plugins/modules/identity/ipa/ipa_sudocmdgroup.py +++ b/plugins/modules/identity/ipa/ipa_sudocmdgroup.py @@ -72,7 +72,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class SudoCmdGroupIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_sudorule.py b/plugins/modules/identity/ipa/ipa_sudorule.py index 4494122e8d..2054599f9d 100644 --- a/plugins/modules/identity/ipa/ipa_sudorule.py +++ b/plugins/modules/identity/ipa/ipa_sudorule.py @@ -178,7 +178,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class SudoRuleIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_user.py b/plugins/modules/identity/ipa/ipa_user.py index 847749f15e..8a7b3abea2 100644 --- a/plugins/modules/identity/ipa/ipa_user.py +++ b/plugins/modules/identity/ipa/ipa_user.py @@ -172,7 +172,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class UserIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_vault.py 
b/plugins/modules/identity/ipa/ipa_vault.py index 3376b8c4e7..7a6a601fa9 100644 --- a/plugins/modules/identity/ipa/ipa_vault.py +++ b/plugins/modules/identity/ipa/ipa_vault.py @@ -135,7 +135,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class VaultIPAClient(IPAClient): diff --git a/plugins/modules/identity/onepassword_info.py b/plugins/modules/identity/onepassword_info.py index 42a6311c0d..95ef7c12b7 100644 --- a/plugins/modules/identity/onepassword_info.py +++ b/plugins/modules/identity/onepassword_info.py @@ -163,7 +163,7 @@ import re from subprocess import Popen, PIPE -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/monitoring/bigpanda.py b/plugins/modules/monitoring/bigpanda.py index 8392c19536..c5fe61cbf6 100644 --- a/plugins/modules/monitoring/bigpanda.py +++ b/plugins/modules/monitoring/bigpanda.py @@ -130,7 +130,7 @@ import socket import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/monitoring/circonus_annotation.py b/plugins/modules/monitoring/circonus_annotation.py index 27d2316873..8543aa00fa 100644 --- a/plugins/modules/monitoring/circonus_annotation.py +++ b/plugins/modules/monitoring/circonus_annotation.py @@ -155,7 +155,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.six import PY3 -from ansible.module_utils._text import to_native +from 
ansible.module_utils.common.text.converters import to_native def check_requests_dep(module): diff --git a/plugins/modules/monitoring/datadog/datadog_event.py b/plugins/modules/monitoring/datadog/datadog_event.py index 3f6500f11f..6284b5bf23 100644 --- a/plugins/modules/monitoring/datadog/datadog_event.py +++ b/plugins/modules/monitoring/datadog/datadog_event.py @@ -123,7 +123,7 @@ except Exception: HAS_DATADOG = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/monitoring/datadog/datadog_monitor.py b/plugins/modules/monitoring/datadog/datadog_monitor.py index 8be71297f4..6c0f8cdb02 100644 --- a/plugins/modules/monitoring/datadog/datadog_monitor.py +++ b/plugins/modules/monitoring/datadog/datadog_monitor.py @@ -198,7 +198,7 @@ except Exception: HAS_DATADOG = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/monitoring/honeybadger_deployment.py b/plugins/modules/monitoring/honeybadger_deployment.py index 0b96af04a9..2e2198e1a3 100644 --- a/plugins/modules/monitoring/honeybadger_deployment.py +++ b/plugins/modules/monitoring/honeybadger_deployment.py @@ -67,7 +67,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/monitoring/rollbar_deployment.py b/plugins/modules/monitoring/rollbar_deployment.py index 161361b774..cea3bfdf51 100644 --- a/plugins/modules/monitoring/rollbar_deployment.py +++ 
b/plugins/modules/monitoring/rollbar_deployment.py @@ -84,7 +84,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/monitoring/sensu/sensu_check.py b/plugins/modules/monitoring/sensu/sensu_check.py index 71e8f07228..ec43b60abe 100644 --- a/plugins/modules/monitoring/sensu/sensu_check.py +++ b/plugins/modules/monitoring/sensu/sensu_check.py @@ -179,7 +179,7 @@ import json import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def sensu_check(module, path, name, state='present', backup=False): diff --git a/plugins/modules/monitoring/sensu/sensu_silence.py b/plugins/modules/monitoring/sensu/sensu_silence.py index 12dc5d2068..80a5216711 100644 --- a/plugins/modules/monitoring/sensu/sensu_silence.py +++ b/plugins/modules/monitoring/sensu/sensu_silence.py @@ -97,7 +97,7 @@ RETURN = ''' import json -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/monitoring/sensu/sensu_subscription.py b/plugins/modules/monitoring/sensu/sensu_subscription.py index 6316254d7b..947c6e0de5 100644 --- a/plugins/modules/monitoring/sensu/sensu_subscription.py +++ b/plugins/modules/monitoring/sensu/sensu_subscription.py @@ -66,7 +66,7 @@ import json import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def sensu_subscription(module, path, name, state='present', 
backup=False): diff --git a/plugins/modules/monitoring/spectrum_model_attrs.py b/plugins/modules/monitoring/spectrum_model_attrs.py index d6f3948254..231352acd6 100644 --- a/plugins/modules/monitoring/spectrum_model_attrs.py +++ b/plugins/modules/monitoring/spectrum_model_attrs.py @@ -142,7 +142,7 @@ changed_attrs: from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url from ansible.module_utils.six.moves.urllib.parse import quote import json diff --git a/plugins/modules/monitoring/stackdriver.py b/plugins/modules/monitoring/stackdriver.py index 8e2d19a9ab..8eacdbfe49 100644 --- a/plugins/modules/monitoring/stackdriver.py +++ b/plugins/modules/monitoring/stackdriver.py @@ -96,7 +96,7 @@ import json import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/monitoring/statusio_maintenance.py b/plugins/modules/monitoring/statusio_maintenance.py index 3a6124f8b0..10f733d4a8 100644 --- a/plugins/modules/monitoring/statusio_maintenance.py +++ b/plugins/modules/monitoring/statusio_maintenance.py @@ -177,7 +177,7 @@ import datetime import json from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import open_url diff --git a/plugins/modules/monitoring/uptimerobot.py b/plugins/modules/monitoring/uptimerobot.py index bb4e60faee..833a7f191e 100644 --- a/plugins/modules/monitoring/uptimerobot.py +++ b/plugins/modules/monitoring/uptimerobot.py @@ -56,7 +56,7 @@ import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse 
import urlencode from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text API_BASE = "https://api.uptimerobot.com/" diff --git a/plugins/modules/net_tools/cloudflare_dns.py b/plugins/modules/net_tools/cloudflare_dns.py index ffa4e55745..4e82e0af36 100644 --- a/plugins/modules/net_tools/cloudflare_dns.py +++ b/plugins/modules/net_tools/cloudflare_dns.py @@ -360,7 +360,7 @@ import json from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/net_tools/haproxy.py b/plugins/modules/net_tools/haproxy.py index a3320b45c5..f736036671 100644 --- a/plugins/modules/net_tools/haproxy.py +++ b/plugins/modules/net_tools/haproxy.py @@ -211,7 +211,7 @@ import time from string import Template from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock" diff --git a/plugins/modules/net_tools/ip_netns.py b/plugins/modules/net_tools/ip_netns.py index 50aec392c5..9854709e82 100644 --- a/plugins/modules/net_tools/ip_netns.py +++ b/plugins/modules/net_tools/ip_netns.py @@ -58,7 +58,7 @@ RETURN = ''' ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class Namespace(object): diff --git a/plugins/modules/net_tools/ipify_facts.py b/plugins/modules/net_tools/ipify_facts.py index dcdc5ef801..2ae0348cb1 100644 --- a/plugins/modules/net_tools/ipify_facts.py +++ b/plugins/modules/net_tools/ipify_facts.py @@ -62,7 +62,7 @@ 
import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class IpifyFacts(object): diff --git a/plugins/modules/net_tools/ldap/ldap_attrs.py b/plugins/modules/net_tools/ldap/ldap_attrs.py index ae5cb7fdae..c357a83087 100644 --- a/plugins/modules/net_tools/ldap/ldap_attrs.py +++ b/plugins/modules/net_tools/ldap/ldap_attrs.py @@ -166,7 +166,7 @@ modlist: import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native, to_bytes +from ansible.module_utils.common.text.converters import to_native, to_bytes from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs import re diff --git a/plugins/modules/net_tools/ldap/ldap_entry.py b/plugins/modules/net_tools/ldap/ldap_entry.py index ac1d63ac0e..2ef06b9693 100644 --- a/plugins/modules/net_tools/ldap/ldap_entry.py +++ b/plugins/modules/net_tools/ldap/ldap_entry.py @@ -104,7 +104,7 @@ RETURN = """ import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native, to_bytes +from ansible.module_utils.common.text.converters import to_native, to_bytes from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs LDAP_IMP_ERR = None diff --git a/plugins/modules/net_tools/ldap/ldap_search.py b/plugins/modules/net_tools/ldap/ldap_search.py index f4d02c1cd2..6b83321ff9 100644 --- a/plugins/modules/net_tools/ldap/ldap_search.py +++ b/plugins/modules/net_tools/ldap/ldap_search.py @@ -77,7 +77,7 @@ EXAMPLES = r""" import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from 
ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs LDAP_IMP_ERR = None diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 30f0537e70..657df3bd2a 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -650,7 +650,7 @@ RETURN = r"""# """ from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text import re diff --git a/plugins/modules/net_tools/nsupdate.py b/plugins/modules/net_tools/nsupdate.py index b110c6fe20..520d12e803 100644 --- a/plugins/modules/net_tools/nsupdate.py +++ b/plugins/modules/net_tools/nsupdate.py @@ -198,7 +198,7 @@ except ImportError: HAVE_DNSPYTHON = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class RecordManager(object): diff --git a/plugins/modules/net_tools/omapi_host.py b/plugins/modules/net_tools/omapi_host.py index 41c68a471a..4d65fcb95d 100644 --- a/plugins/modules/net_tools/omapi_host.py +++ b/plugins/modules/net_tools/omapi_host.py @@ -140,7 +140,7 @@ except ImportError: pureomapi_found = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native class OmapiHostManager: diff --git a/plugins/modules/net_tools/pritunl/pritunl_org.py b/plugins/modules/net_tools/pritunl/pritunl_org.py index 7fa7cbc124..35796ae361 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_org.py +++ b/plugins/modules/net_tools/pritunl/pritunl_org.py @@ -78,7 +78,7 @@ response: from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters 
import to_native from ansible.module_utils.common.dict_transformations import dict_merge from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( PritunlException, diff --git a/plugins/modules/net_tools/pritunl/pritunl_org_info.py b/plugins/modules/net_tools/pritunl/pritunl_org_info.py index e0c573fb19..a7e65c80d1 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_org_info.py +++ b/plugins/modules/net_tools/pritunl/pritunl_org_info.py @@ -75,7 +75,7 @@ organizations: """ from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.dict_transformations import dict_merge from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( PritunlException, diff --git a/plugins/modules/net_tools/pritunl/pritunl_user.py b/plugins/modules/net_tools/pritunl/pritunl_user.py index 3d1c7f338f..7ea4f18a44 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_user.py +++ b/plugins/modules/net_tools/pritunl/pritunl_user.py @@ -142,7 +142,7 @@ response: from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.dict_transformations import dict_merge from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( PritunlException, diff --git a/plugins/modules/net_tools/pritunl/pritunl_user_info.py b/plugins/modules/net_tools/pritunl/pritunl_user_info.py index c00da6dc23..e8cf5e2955 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_user_info.py +++ b/plugins/modules/net_tools/pritunl/pritunl_user_info.py @@ -93,7 +93,7 @@ users: """ from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import 
to_native from ansible.module_utils.common.dict_transformations import dict_merge from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( PritunlException, diff --git a/plugins/modules/net_tools/snmp_facts.py b/plugins/modules/net_tools/snmp_facts.py index 3918a3a1c0..221eda30f9 100644 --- a/plugins/modules/net_tools/snmp_facts.py +++ b/plugins/modules/net_tools/snmp_facts.py @@ -190,7 +190,7 @@ except Exception: HAS_PYSNMP = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class DefineOid(object): diff --git a/plugins/modules/notification/hipchat.py b/plugins/modules/notification/hipchat.py index 06c9fca4d2..76c1227af4 100644 --- a/plugins/modules/notification/hipchat.py +++ b/plugins/modules/notification/hipchat.py @@ -96,7 +96,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.six.moves.urllib.request import pathname2url -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/notification/irc.py b/plugins/modules/notification/irc.py index 1c050fc187..9b1b91f586 100644 --- a/plugins/modules/notification/irc.py +++ b/plugins/modules/notification/irc.py @@ -137,7 +137,7 @@ import ssl import time import traceback -from ansible.module_utils._text import to_native, to_bytes +from ansible.module_utils.common.text.converters import to_native, to_bytes from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/notification/jabber.py b/plugins/modules/notification/jabber.py index 68e2c5938b..9b6811b3fa 100644 --- a/plugins/modules/notification/jabber.py +++ b/plugins/modules/notification/jabber.py @@ -92,7 +92,7 @@ 
except ImportError: HAS_XMPP = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/notification/mail.py b/plugins/modules/notification/mail.py index 3b5936d134..2f03f8c239 100644 --- a/plugins/modules/notification/mail.py +++ b/plugins/modules/notification/mail.py @@ -204,7 +204,7 @@ from email.header import Header from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import PY3 -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/notification/mqtt.py b/plugins/modules/notification/mqtt.py index 0551ab203c..991114e8ae 100644 --- a/plugins/modules/notification/mqtt.py +++ b/plugins/modules/notification/mqtt.py @@ -136,7 +136,7 @@ except ImportError: HAS_PAHOMQTT = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # =========================================== diff --git a/plugins/modules/notification/sendgrid.py b/plugins/modules/notification/sendgrid.py index 02ab072270..4a63a03db7 100644 --- a/plugins/modules/notification/sendgrid.py +++ b/plugins/modules/notification/sendgrid.py @@ -136,7 +136,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/notification/syslogger.py b/plugins/modules/notification/syslogger.py index 7f4f899f8c..226126f5a9 100644 --- a/plugins/modules/notification/syslogger.py +++ 
b/plugins/modules/notification/syslogger.py @@ -98,7 +98,7 @@ import syslog import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_facility(facility): diff --git a/plugins/modules/packaging/language/maven_artifact.py b/plugins/modules/packaging/language/maven_artifact.py index 83833b0480..9e2f94190f 100644 --- a/plugins/modules/packaging/language/maven_artifact.py +++ b/plugins/modules/packaging/language/maven_artifact.py @@ -261,7 +261,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.six.moves.urllib.parse import urlparse from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text def split_pre_existing_dir(dirname): diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/packaging/language/npm.py index 5a48468970..283b8e0be7 100644 --- a/plugins/modules/packaging/language/npm.py +++ b/plugins/modules/packaging/language/npm.py @@ -141,7 +141,7 @@ import os import re from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class Npm(object): diff --git a/plugins/modules/packaging/language/pear.py b/plugins/modules/packaging/language/pear.py index fef04d325f..e8e36b3c56 100644 --- a/plugins/modules/packaging/language/pear.py +++ b/plugins/modules/packaging/language/pear.py @@ -111,7 +111,7 @@ EXAMPLES = r''' import os -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/packaging/language/pip_package_info.py 
b/plugins/modules/packaging/language/pip_package_info.py index b769afb866..cdcc9f51cc 100644 --- a/plugins/modules/packaging/language/pip_package_info.py +++ b/plugins/modules/packaging/language/pip_package_info.py @@ -89,7 +89,7 @@ packages: import json import os -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.facts.packages import CLIMgr diff --git a/plugins/modules/packaging/os/flatpak_remote.py b/plugins/modules/packaging/os/flatpak_remote.py index a7767621d7..e0e4170f47 100644 --- a/plugins/modules/packaging/os/flatpak_remote.py +++ b/plugins/modules/packaging/os/flatpak_remote.py @@ -119,7 +119,7 @@ stdout: ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native def add_remote(module, binary, name, flatpakrepo_url, method): diff --git a/plugins/modules/packaging/os/homebrew_cask.py b/plugins/modules/packaging/os/homebrew_cask.py index 498d0b8771..6c3de1c9ba 100644 --- a/plugins/modules/packaging/os/homebrew_cask.py +++ b/plugins/modules/packaging/os/homebrew_cask.py @@ -142,7 +142,7 @@ import re import tempfile from distutils import version -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems, string_types diff --git a/plugins/modules/packaging/os/mas.py b/plugins/modules/packaging/os/mas.py index bc3e6dfd66..dd394b7c43 100644 --- a/plugins/modules/packaging/os/mas.py +++ b/plugins/modules/packaging/os/mas.py @@ -96,7 +96,7 @@ EXAMPLES = ''' RETURN = r''' # ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import 
to_native from distutils.version import StrictVersion import os diff --git a/plugins/modules/packaging/os/pacman_key.py b/plugins/modules/packaging/os/pacman_key.py index 85896c211d..a40575b697 100644 --- a/plugins/modules/packaging/os/pacman_key.py +++ b/plugins/modules/packaging/os/pacman_key.py @@ -118,7 +118,7 @@ import os.path import tempfile from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class PacmanKey(object): diff --git a/plugins/modules/packaging/os/portage.py b/plugins/modules/packaging/os/portage.py index 1f0fdc682a..2a8679dbbd 100644 --- a/plugins/modules/packaging/os/portage.py +++ b/plugins/modules/packaging/os/portage.py @@ -229,7 +229,7 @@ import os import re from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def query_package(module, package, action): diff --git a/plugins/modules/packaging/os/redhat_subscription.py b/plugins/modules/packaging/os/redhat_subscription.py index c8b5e991a0..f3e5400900 100644 --- a/plugins/modules/packaging/os/redhat_subscription.py +++ b/plugins/modules/packaging/os/redhat_subscription.py @@ -277,7 +277,7 @@ import tempfile import json from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.six.moves import configparser diff --git a/plugins/modules/packaging/os/rhn_channel.py b/plugins/modules/packaging/os/rhn_channel.py index 63be03230c..f1954037fa 100644 --- a/plugins/modules/packaging/os/rhn_channel.py +++ b/plugins/modules/packaging/os/rhn_channel.py @@ -73,7 +73,7 @@ EXAMPLES = ''' ''' import ssl -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters 
import to_text from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import xmlrpc_client diff --git a/plugins/modules/packaging/os/yum_versionlock.py b/plugins/modules/packaging/os/yum_versionlock.py index 13319f6711..6dfb3d20ba 100644 --- a/plugins/modules/packaging/os/yum_versionlock.py +++ b/plugins/modules/packaging/os/yum_versionlock.py @@ -75,7 +75,7 @@ state: ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class YumVersionLock: diff --git a/plugins/modules/packaging/os/zypper.py b/plugins/modules/packaging/os/zypper.py index 9c9b12a1a5..367bd8d9a0 100644 --- a/plugins/modules/packaging/os/zypper.py +++ b/plugins/modules/packaging/os/zypper.py @@ -216,7 +216,7 @@ EXAMPLES = ''' import xml import re from xml.dom.minidom import parseString as parseXML -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # import module snippets from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/remote_management/cobbler/cobbler_sync.py b/plugins/modules/remote_management/cobbler/cobbler_sync.py index 3ce1c25564..157208216b 100644 --- a/plugins/modules/remote_management/cobbler/cobbler_sync.py +++ b/plugins/modules/remote_management/cobbler/cobbler_sync.py @@ -72,7 +72,7 @@ import ssl from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import xmlrpc_client -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text def main(): diff --git a/plugins/modules/remote_management/cobbler/cobbler_system.py b/plugins/modules/remote_management/cobbler/cobbler_system.py index 504369e56a..e97be01239 100644 --- a/plugins/modules/remote_management/cobbler/cobbler_system.py +++ b/plugins/modules/remote_management/cobbler/cobbler_system.py @@ -151,7 
+151,7 @@ import ssl from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems from ansible.module_utils.six.moves import xmlrpc_client -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text IFPROPS_MAPPING = dict( bondingopts='bonding_opts', diff --git a/plugins/modules/remote_management/hpilo/hpilo_info.py b/plugins/modules/remote_management/hpilo/hpilo_info.py index 0f204b4a15..f373b58639 100644 --- a/plugins/modules/remote_management/hpilo/hpilo_info.py +++ b/plugins/modules/remote_management/hpilo/hpilo_info.py @@ -128,7 +128,7 @@ except ImportError: HAS_HPILO = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # Suppress warnings from hpilo diff --git a/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py b/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py index d8966c6d64..1dbf4ad0b6 100644 --- a/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py +++ b/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py @@ -283,7 +283,7 @@ redfish_facts: ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_command.py b/plugins/modules/remote_management/redfish/idrac_redfish_command.py index a637d15631..5e02154ed8 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_command.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_command.py @@ -82,7 +82,7 @@ msg: import re from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class IdracRedfishUtils(RedfishUtils): diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/plugins/modules/remote_management/redfish/idrac_redfish_config.py index b16401311b..adea4b11a9 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_config.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_config.py @@ -150,7 +150,7 @@ from ansible.module_utils.common.validation import ( check_required_arguments ) from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class IdracRedfishUtils(RedfishUtils): diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/plugins/modules/remote_management/redfish/idrac_redfish_info.py index 0033db7384..cb1aa8f34f 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_info.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_info.py @@ -120,7 +120,7 @@ msg: from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class IdracRedfishUtils(RedfishUtils): diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index a2f290d16a..01f1fd771d 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -551,7 +551,7 @@ msg: from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # More will be added as module features are expanded diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py index e084c670f4..9b15a3e63e 100644 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ b/plugins/modules/remote_management/redfish/redfish_config.py @@ -204,7 +204,7 @@ msg: from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # More will be added as module features are expanded diff --git a/plugins/modules/remote_management/wakeonlan.py b/plugins/modules/remote_management/wakeonlan.py index 2f097fcf30..725e070cd8 100644 --- a/plugins/modules/remote_management/wakeonlan.py +++ b/plugins/modules/remote_management/wakeonlan.py @@ -65,7 +65,7 @@ import struct import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def wakeonlan(module, mac, broadcast, port): diff --git a/plugins/modules/source_control/github/github_release.py b/plugins/modules/source_control/github/github_release.py index 7813ba1d89..654dce5f98 100644 --- a/plugins/modules/source_control/github/github_release.py +++ b/plugins/modules/source_control/github/github_release.py @@ -135,7 +135,7 @@ except ImportError: HAS_GITHUB_API = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git 
a/plugins/modules/source_control/github/github_webhook.py b/plugins/modules/source_control/github/github_webhook.py index 2a737ef5a4..b1f0cb7a2b 100644 --- a/plugins/modules/source_control/github/github_webhook.py +++ b/plugins/modules/source_control/github/github_webhook.py @@ -148,7 +148,7 @@ except ImportError: HAS_GITHUB = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def _create_hook_config(module): diff --git a/plugins/modules/source_control/github/github_webhook_info.py b/plugins/modules/source_control/github/github_webhook_info.py index 2e7012e631..3936cbe37b 100644 --- a/plugins/modules/source_control/github/github_webhook_info.py +++ b/plugins/modules/source_control/github/github_webhook_info.py @@ -94,7 +94,7 @@ except ImportError: HAS_GITHUB = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def _munge_hook(hook_obj): diff --git a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py b/plugins/modules/source_control/gitlab/gitlab_deploy_key.py index a75aef4e48..45149e275c 100644 --- a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py +++ b/plugins/modules/source_control/gitlab/gitlab_deploy_key.py @@ -124,7 +124,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication diff --git a/plugins/modules/source_control/gitlab/gitlab_group.py b/plugins/modules/source_control/gitlab/gitlab_group.py index 0c61273363..42e1801a81 
100644 --- a/plugins/modules/source_control/gitlab/gitlab_group.py +++ b/plugins/modules/source_control/gitlab/gitlab_group.py @@ -131,7 +131,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication diff --git a/plugins/modules/source_control/gitlab/gitlab_hook.py b/plugins/modules/source_control/gitlab/gitlab_hook.py index bc4b6ecba4..5128fba9e1 100644 --- a/plugins/modules/source_control/gitlab/gitlab_hook.py +++ b/plugins/modules/source_control/gitlab/gitlab_hook.py @@ -174,7 +174,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication diff --git a/plugins/modules/source_control/gitlab/gitlab_project.py b/plugins/modules/source_control/gitlab/gitlab_project.py index 060d77ef6a..73def710c3 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project.py +++ b/plugins/modules/source_control/gitlab/gitlab_project.py @@ -181,7 +181,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, findProject, gitlabAuthentication diff --git a/plugins/modules/source_control/gitlab/gitlab_project_variable.py 
b/plugins/modules/source_control/gitlab/gitlab_project_variable.py index 2ca788a194..21821cd495 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project_variable.py +++ b/plugins/modules/source_control/gitlab/gitlab_project_variable.py @@ -129,7 +129,7 @@ project_variable: import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.six import string_types from ansible.module_utils.six import integer_types diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py index d38b4819a6..25490b00dd 100644 --- a/plugins/modules/source_control/gitlab/gitlab_runner.py +++ b/plugins/modules/source_control/gitlab/gitlab_runner.py @@ -169,7 +169,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication diff --git a/plugins/modules/source_control/gitlab/gitlab_user.py b/plugins/modules/source_control/gitlab/gitlab_user.py index 8770a041b4..c586cafd60 100644 --- a/plugins/modules/source_control/gitlab/gitlab_user.py +++ b/plugins/modules/source_control/gitlab/gitlab_user.py @@ -236,7 +236,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, 
gitlabAuthentication diff --git a/plugins/modules/source_control/hg.py b/plugins/modules/source_control/hg.py index 810b918bd6..572b036e1f 100644 --- a/plugins/modules/source_control/hg.py +++ b/plugins/modules/source_control/hg.py @@ -89,7 +89,7 @@ EXAMPLES = ''' import os from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class Hg(object): diff --git a/plugins/modules/storage/emc/emc_vnx_sg_member.py b/plugins/modules/storage/emc/emc_vnx_sg_member.py index b5b68d4ef4..2698f5327a 100644 --- a/plugins/modules/storage/emc/emc_vnx_sg_member.py +++ b/plugins/modules/storage/emc/emc_vnx_sg_member.py @@ -79,7 +79,7 @@ hluid: import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec LIB_IMP_ERR = None diff --git a/plugins/modules/system/crypttab.py b/plugins/modules/system/crypttab.py index 9841a786c1..8eeec56d3d 100644 --- a/plugins/modules/system/crypttab.py +++ b/plugins/modules/system/crypttab.py @@ -76,7 +76,7 @@ import os import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native def main(): diff --git a/plugins/modules/system/dpkg_divert.py b/plugins/modules/system/dpkg_divert.py index b7b57fd321..1033f70f14 100644 --- a/plugins/modules/system/dpkg_divert.py +++ b/plugins/modules/system/dpkg_divert.py @@ -161,7 +161,7 @@ import os from distutils.version import LooseVersion from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import 
to_bytes, to_native def diversion_state(module, command, path): diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/system/filesystem.py index 97fe2dc1ab..cbb0e5e95e 100644 --- a/plugins/modules/system/filesystem.py +++ b/plugins/modules/system/filesystem.py @@ -110,7 +110,7 @@ import re import stat from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class Device(object): diff --git a/plugins/modules/system/interfaces_file.py b/plugins/modules/system/interfaces_file.py index 618a472d91..c22c0ce29e 100644 --- a/plugins/modules/system/interfaces_file.py +++ b/plugins/modules/system/interfaces_file.py @@ -145,7 +145,7 @@ import re import tempfile from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes def lineDict(line): diff --git a/plugins/modules/system/iptables_state.py b/plugins/modules/system/iptables_state.py index 66ba2c9b20..1f35edc04b 100644 --- a/plugins/modules/system/iptables_state.py +++ b/plugins/modules/system/iptables_state.py @@ -232,7 +232,7 @@ import filecmp import shutil from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native IPTABLES = dict( diff --git a/plugins/modules/system/launchd.py b/plugins/modules/system/launchd.py index 919d8d7bd2..30a5ed02b2 100644 --- a/plugins/modules/system/launchd.py +++ b/plugins/modules/system/launchd.py @@ -114,7 +114,7 @@ from abc import ABCMeta, abstractmethod from time import sleep from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class ServiceState: diff --git a/plugins/modules/system/listen_ports_facts.py 
b/plugins/modules/system/listen_ports_facts.py index 27ecca8f50..c81977d7f4 100644 --- a/plugins/modules/system/listen_ports_facts.py +++ b/plugins/modules/system/listen_ports_facts.py @@ -137,7 +137,7 @@ ansible_facts: import re import platform -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/system/locale_gen.py b/plugins/modules/system/locale_gen.py index 9a5b84f071..c142da1ceb 100644 --- a/plugins/modules/system/locale_gen.py +++ b/plugins/modules/system/locale_gen.py @@ -40,7 +40,7 @@ import re from subprocess import Popen, PIPE, call from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native LOCALE_NORMALIZATION = { ".utf8": ".UTF-8", diff --git a/plugins/modules/system/nosh.py b/plugins/modules/system/nosh.py index 0f7de471d3..4fe3020393 100644 --- a/plugins/modules/system/nosh.py +++ b/plugins/modules/system/nosh.py @@ -315,7 +315,7 @@ import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.service import fail_if_missing -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def run_sys_ctl(module, args): diff --git a/plugins/modules/system/openwrt_init.py b/plugins/modules/system/openwrt_init.py index 817ed9f4b5..afc3c3a956 100644 --- a/plugins/modules/system/openwrt_init.py +++ b/plugins/modules/system/openwrt_init.py @@ -72,7 +72,7 @@ RETURN = ''' import os import glob from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native module = None init_script = None diff --git a/plugins/modules/system/pam_limits.py b/plugins/modules/system/pam_limits.py index bde41d44f1..17b1ea1304 
100644 --- a/plugins/modules/system/pam_limits.py +++ b/plugins/modules/system/pam_limits.py @@ -138,7 +138,7 @@ import re import tempfile from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def _assert_is_valid_value(module, item, value, prefix=''): diff --git a/plugins/modules/system/runit.py b/plugins/modules/system/runit.py index 30cd611b29..053c77ff07 100644 --- a/plugins/modules/system/runit.py +++ b/plugins/modules/system/runit.py @@ -84,7 +84,7 @@ import os import re from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class Sv(object): diff --git a/plugins/modules/system/sefcontext.py b/plugins/modules/system/sefcontext.py index 457e2e236b..73c79662bc 100644 --- a/plugins/modules/system/sefcontext.py +++ b/plugins/modules/system/sefcontext.py @@ -102,7 +102,7 @@ RETURN = r''' import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native SELINUX_IMP_ERR = None try: diff --git a/plugins/modules/system/selinux_permissive.py b/plugins/modules/system/selinux_permissive.py index 0d1f9f5985..7289705192 100644 --- a/plugins/modules/system/selinux_permissive.py +++ b/plugins/modules/system/selinux_permissive.py @@ -63,7 +63,7 @@ except ImportError: SEOBJECT_IMP_ERR = traceback.format_exc() from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/system/selogin.py b/plugins/modules/system/selogin.py index 7036dad958..53b077f954 100644 --- a/plugins/modules/system/selogin.py +++ b/plugins/modules/system/selogin.py @@ 
-113,7 +113,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''): diff --git a/plugins/modules/system/seport.py b/plugins/modules/system/seport.py index 71df8d6be9..c2eee25ae1 100644 --- a/plugins/modules/system/seport.py +++ b/plugins/modules/system/seport.py @@ -109,7 +109,7 @@ except ImportError: HAVE_SEOBJECT = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_runtime_status(ignore_selinux_state=False): diff --git a/plugins/modules/system/ssh_config.py b/plugins/modules/system/ssh_config.py index be177baaaf..49525849f1 100644 --- a/plugins/modules/system/ssh_config.py +++ b/plugins/modules/system/ssh_config.py @@ -157,7 +157,7 @@ except ImportError: STORM_IMP_ERR = traceback.format_exc() from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class SSHConfig(): diff --git a/plugins/modules/system/svc.py b/plugins/modules/system/svc.py index e921567074..f49f904d93 100644 --- a/plugins/modules/system/svc.py +++ b/plugins/modules/system/svc.py @@ -91,7 +91,7 @@ import re import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def _load_dist_subclass(cls, *args, **kwargs): diff --git a/plugins/modules/web_infrastructure/deploy_helper.py b/plugins/modules/web_infrastructure/deploy_helper.py index a07281819b..f879594bc3 100644 --- a/plugins/modules/web_infrastructure/deploy_helper.py +++ 
b/plugins/modules/web_infrastructure/deploy_helper.py @@ -274,7 +274,7 @@ import time import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class DeployHelper(object): diff --git a/plugins/modules/web_infrastructure/htpasswd.py b/plugins/modules/web_infrastructure/htpasswd.py index 6ff041316f..b15a946448 100644 --- a/plugins/modules/web_infrastructure/htpasswd.py +++ b/plugins/modules/web_infrastructure/htpasswd.py @@ -97,7 +97,7 @@ import tempfile import traceback from distutils.version import LooseVersion from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native PASSLIB_IMP_ERR = None try: diff --git a/plugins/modules/web_infrastructure/jenkins_build.py b/plugins/modules/web_infrastructure/jenkins_build.py index 68f64f7a7b..43dc667ace 100644 --- a/plugins/modules/web_infrastructure/jenkins_build.py +++ b/plugins/modules/web_infrastructure/jenkins_build.py @@ -127,7 +127,7 @@ except ImportError: python_jenkins_installed = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class JenkinsBuild: diff --git a/plugins/modules/web_infrastructure/jenkins_job.py b/plugins/modules/web_infrastructure/jenkins_job.py index 6fb775d22a..9993a996e0 100644 --- a/plugins/modules/web_infrastructure/jenkins_job.py +++ b/plugins/modules/web_infrastructure/jenkins_job.py @@ -167,7 +167,7 @@ except ImportError: python_jenkins_installed = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class JenkinsJob(object): diff --git 
a/plugins/modules/web_infrastructure/jenkins_job_info.py b/plugins/modules/web_infrastructure/jenkins_job_info.py index c927e5b954..9dcf5776c9 100644 --- a/plugins/modules/web_infrastructure/jenkins_job_info.py +++ b/plugins/modules/web_infrastructure/jenkins_job_info.py @@ -146,7 +146,7 @@ except ImportError: HAS_JENKINS = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_jenkins_connection(module): diff --git a/plugins/modules/web_infrastructure/jenkins_plugin.py b/plugins/modules/web_infrastructure/jenkins_plugin.py index be335fcfd3..20fd8554bc 100644 --- a/plugins/modules/web_infrastructure/jenkins_plugin.py +++ b/plugins/modules/web_infrastructure/jenkins_plugin.py @@ -273,7 +273,7 @@ from ansible.module_utils.six.moves import http_cookiejar as cookiejar from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url, url_argument_spec from ansible.module_utils.six import text_type, binary_type -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import base64 import hashlib import io diff --git a/plugins/modules/web_infrastructure/jenkins_script.py b/plugins/modules/web_infrastructure/jenkins_script.py index 68f06c2758..6d3b3d2253 100644 --- a/plugins/modules/web_infrastructure/jenkins_script.py +++ b/plugins/modules/web_infrastructure/jenkins_script.py @@ -107,7 +107,7 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import http_cookiejar as cookiejar from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def is_csrf_protection_enabled(module): diff --git 
a/plugins/modules/web_infrastructure/jira.py b/plugins/modules/web_infrastructure/jira.py index 4c10974126..9cd86341a1 100644 --- a/plugins/modules/web_infrastructure/jira.py +++ b/plugins/modules/web_infrastructure/jira.py @@ -390,7 +390,7 @@ import traceback from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper, cause_changes from ansible.module_utils.six.moves.urllib.request import pathname2url -from ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/web_infrastructure/nginx_status_info.py b/plugins/modules/web_infrastructure/nginx_status_info.py index a13a57a637..ada6881714 100644 --- a/plugins/modules/web_infrastructure/nginx_status_info.py +++ b/plugins/modules/web_infrastructure/nginx_status_info.py @@ -94,7 +94,7 @@ data: import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class NginxStatusInfo(object): diff --git a/plugins/modules/web_infrastructure/rundeck_acl_policy.py b/plugins/modules/web_infrastructure/rundeck_acl_policy.py index 8c2043d22c..6356f5a166 100644 --- a/plugins/modules/web_infrastructure/rundeck_acl_policy.py +++ b/plugins/modules/web_infrastructure/rundeck_acl_policy.py @@ -123,7 +123,7 @@ after: # import module snippets from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url, url_argument_spec -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text import json import re diff --git a/plugins/modules/web_infrastructure/rundeck_project.py b/plugins/modules/web_infrastructure/rundeck_project.py index 5c84648207..ef78299596 100644 --- 
a/plugins/modules/web_infrastructure/rundeck_project.py +++ b/plugins/modules/web_infrastructure/rundeck_project.py @@ -103,7 +103,7 @@ after: # import module snippets from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url, url_argument_spec import json diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py index b4aca155dc..70a0a78fd8 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py @@ -188,7 +188,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py index 6d230c1a71..88356a2e54 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py @@ -101,7 +101,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py index e940f4168e..81dffe223b 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py @@ -132,7 +132,7 @@ result: """ from 
ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py index ad315df9a0..02542532f7 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py @@ -79,7 +79,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py b/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py index 1f080abfa0..76d463ccba 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py @@ -128,7 +128,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py index ecf08871fc..a8b3cc1f2b 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py @@ -108,7 +108,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from 
ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py index c1d0f7d880..3f623d5a86 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py @@ -75,7 +75,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py index caa0085c25..0dd460509a 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py @@ -307,7 +307,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py index ed241af1fe..6d606abf89 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py @@ -204,7 +204,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git 
a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py index 8dba3640db..a738bfab6b 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py @@ -234,7 +234,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py index 450bd16168..263b976045 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py @@ -120,7 +120,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py index 7c4bc8b6cf..99d56030be 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py @@ -178,7 +178,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py index 1125c4fada..afc0f5efcd 100644 --- 
a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py @@ -101,7 +101,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/taiga_issue.py b/plugins/modules/web_infrastructure/taiga_issue.py index ae8f31c0ef..f05550276e 100644 --- a/plugins/modules/web_infrastructure/taiga_issue.py +++ b/plugins/modules/web_infrastructure/taiga_issue.py @@ -117,7 +117,7 @@ import traceback from os import getenv from os.path import isfile from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native TAIGA_IMP_ERR = None try: diff --git a/tests/unit/mock/loader.py b/tests/unit/mock/loader.py index 907ec9b928..756d532e68 100644 --- a/tests/unit/mock/loader.py +++ b/tests/unit/mock/loader.py @@ -9,7 +9,7 @@ import os from ansible.errors import AnsibleParserError from ansible.parsing.dataloader import DataLoader -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text class DictDataLoader(DataLoader): diff --git a/tests/unit/mock/procenv.py b/tests/unit/mock/procenv.py index 616a75bbd3..5673863e16 100644 --- a/tests/unit/mock/procenv.py +++ b/tests/unit/mock/procenv.py @@ -13,7 +13,7 @@ from contextlib import contextmanager from io import BytesIO, StringIO from ansible_collections.community.general.tests.unit.compat import unittest from ansible.module_utils.six import PY3 -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes @contextmanager diff --git a/tests/unit/mock/vault_helper.py 
b/tests/unit/mock/vault_helper.py index b54629da49..6bd2db9c32 100644 --- a/tests/unit/mock/vault_helper.py +++ b/tests/unit/mock/vault_helper.py @@ -3,7 +3,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.parsing.vault import VaultSecret diff --git a/tests/unit/plugins/module_utils/conftest.py b/tests/unit/plugins/module_utils/conftest.py index 8bc13c4d55..61ed0acd27 100644 --- a/tests/unit/plugins/module_utils/conftest.py +++ b/tests/unit/plugins/module_utils/conftest.py @@ -12,7 +12,7 @@ import pytest import ansible.module_utils.basic from ansible.module_utils.six import PY3, string_types -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.common._collections_compat import MutableMapping diff --git a/tests/unit/plugins/modules/conftest.py b/tests/unit/plugins/modules/conftest.py index a7d1e0475f..9d8c52e6c5 100644 --- a/tests/unit/plugins/modules/conftest.py +++ b/tests/unit/plugins/modules/conftest.py @@ -9,7 +9,7 @@ import json import pytest from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.common._collections_compat import MutableMapping diff --git a/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py b/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py index b64a20f8ae..b380e857b1 100644 --- a/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py +++ b/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py @@ -11,7 +11,7 @@ import uuid from urllib3.response import HTTPResponse from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible.module_utils._text import to_bytes +from 
ansible.module_utils.common.text.converters import to_bytes from ansible_collections.community.general.plugins.modules.monitoring import circonus_annotation from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 8724bd4f60..911ffd1217 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -8,7 +8,7 @@ import json import pytest -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible_collections.community.general.plugins.modules.net_tools import nmcli pytestmark = pytest.mark.usefixtures('patch_ansible_module') diff --git a/tests/unit/plugins/modules/packaging/os/test_rhn_register.py b/tests/unit/plugins/modules/packaging/os/test_rhn_register.py index ae2f44aeda..9dde4bae7d 100644 --- a/tests/unit/plugins/modules/packaging/os/test_rhn_register.py +++ b/tests/unit/plugins/modules/packaging/os/test_rhn_register.py @@ -8,7 +8,7 @@ import os from ansible_collections.community.general.tests.unit.compat.mock import mock_open from ansible.module_utils import basic -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import ansible.module_utils.six from ansible.module_utils.six.moves import xmlrpc_client from ansible_collections.community.general.plugins.modules.packaging.os import rhn_register diff --git a/tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py b/tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py index 38a6652fb1..418474c578 100644 --- a/tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py +++ 
b/tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py @@ -8,7 +8,7 @@ from ansible_collections.community.general.tests.unit.compat import mock from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible_collections.community.general.tests.unit.compat import unittest from ansible.module_utils import basic -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes import ansible_collections.community.general.plugins.modules.remote_management.lenovoxcc.xcc_redfish_command as module from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json diff --git a/tests/unit/plugins/modules/system/test_ufw.py b/tests/unit/plugins/modules/system/test_ufw.py index 3374c49322..44882e0e93 100644 --- a/tests/unit/plugins/modules/system/test_ufw.py +++ b/tests/unit/plugins/modules/system/test_ufw.py @@ -6,7 +6,7 @@ __metaclass__ = type from ansible_collections.community.general.tests.unit.compat import unittest from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible.module_utils import basic -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes import ansible_collections.community.general.plugins.modules.system.ufw as module import json diff --git a/tests/unit/plugins/modules/utils.py b/tests/unit/plugins/modules/utils.py index 1a28072be1..6a00fd25fc 100644 --- a/tests/unit/plugins/modules/utils.py +++ b/tests/unit/plugins/modules/utils.py @@ -8,7 +8,7 @@ import json from ansible_collections.community.general.tests.unit.compat import unittest from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible.module_utils import basic -from ansible.module_utils._text import 
to_bytes +from ansible.module_utils.common.text.converters import to_bytes def set_module_args(args): diff --git a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py index 3774871329..687ef0f766 100644 --- a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py +++ b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py @@ -6,7 +6,7 @@ __metaclass__ = type from ansible_collections.community.general.tests.unit.compat import unittest from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible.module_utils import basic -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible_collections.community.general.plugins.modules.web_infrastructure import jenkins_build import json From acf7b106c93dd7a2de3ce609b5fda512c352e528 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 27 Jun 2021 19:39:08 +1200 Subject: [PATCH 0162/2828] _mount module utils - fixed sanity checks (#2883) * updated _mount.py, removed ignore lines * added changelog fragment --- changelogs/fragments/2883-_mount-fixed-sanity-checks.yml | 2 ++ plugins/module_utils/_mount.py | 4 ++++ tests/sanity/ignore-2.10.txt | 2 -- tests/sanity/ignore-2.11.txt | 2 -- tests/sanity/ignore-2.12.txt | 2 -- tests/sanity/ignore-2.9.txt | 2 -- 6 files changed, 6 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2883-_mount-fixed-sanity-checks.yml diff --git a/changelogs/fragments/2883-_mount-fixed-sanity-checks.yml b/changelogs/fragments/2883-_mount-fixed-sanity-checks.yml new file mode 100644 index 0000000000..35496e1233 --- /dev/null +++ b/changelogs/fragments/2883-_mount-fixed-sanity-checks.yml @@ -0,0 +1,2 @@ +bugfixes: + - _mount module utils - fixed the sanity checks (https://github.com/ansible-collections/community.general/pull/2883). 
diff --git a/plugins/module_utils/_mount.py b/plugins/module_utils/_mount.py index 62feb354cc..58be0e8b68 100644 --- a/plugins/module_utils/_mount.py +++ b/plugins/module_utils/_mount.py @@ -48,6 +48,10 @@ # agrees to be bound by the terms and conditions of this License # Agreement. +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + import os diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index c9d750f417..bdb3ca4e9a 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -1,6 +1,4 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time -plugins/module_utils/_mount.py future-import-boilerplate -plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 1311638dbc..34889a2651 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -1,5 +1,3 @@ -plugins/module_utils/_mount.py future-import-boilerplate -plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index f5b7d772fc..6e14759c9d 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -1,5 +1,3 @@ -plugins/module_utils/_mount.py future-import-boilerplate -plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py 
validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index c8c5ff0d25..33f3d183d4 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -1,6 +1,4 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time -plugins/module_utils/_mount.py future-import-boilerplate -plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/rackspace/rax.py use-argspec-type-path From 1b94d092099be2b882a83b750c87d7c3438b28ab Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 27 Jun 2021 09:57:51 +0200 Subject: [PATCH 0163/2828] Add option type validation. (#2878) --- .../fragments/2878-validate-certs-bool.yml | 2 ++ plugins/callback/nrdp.py | 33 +++++++++++-------- 2 files changed, 21 insertions(+), 14 deletions(-) create mode 100644 changelogs/fragments/2878-validate-certs-bool.yml diff --git a/changelogs/fragments/2878-validate-certs-bool.yml b/changelogs/fragments/2878-validate-certs-bool.yml new file mode 100644 index 0000000000..e636f4981b --- /dev/null +++ b/changelogs/fragments/2878-validate-certs-bool.yml @@ -0,0 +1,2 @@ +minor_changes: +- "nrdp callback plugin - parameters are now converted to strings, except ``validate_certs`` which is converted to boolean (https://github.com/ansible-collections/community.general/pull/2878)." 
diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py index f17785a92f..744c2d2ed4 100644 --- a/plugins/callback/nrdp.py +++ b/plugins/callback/nrdp.py @@ -10,22 +10,23 @@ DOCUMENTATION = ''' name: nrdp type: notification author: "Remi VERCHERE (@rverchere)" - short_description: post task result to a nagios server through nrdp + short_description: Post task results to a Nagios server through nrdp description: - - this callback send playbook result to nagios - - nagios shall use NRDP to recive passive events - - the passive check is sent to a dedicated host/service for ansible + - This callback send playbook result to Nagios. + - Nagios shall use NRDP to recive passive events. + - The passive check is sent to a dedicated host/service for Ansible. options: url: - description: url of the nrdp server - required: True + description: URL of the nrdp server. + required: true env: - name : NRDP_URL ini: - section: callback_nrdp key: url + type: string validate_certs: - description: (bool) validate the SSL certificate of the nrdp server. (For HTTPS url) + description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs.) env: - name: NRDP_VALIDATE_CERTS ini: @@ -33,32 +34,36 @@ DOCUMENTATION = ''' key: validate_nrdp_certs - section: callback_nrdp key: validate_certs - default: False + type: boolean + default: false aliases: [ validate_nrdp_certs ] token: - description: token to be allowed to push nrdp events - required: True + description: Token to be allowed to push nrdp events. + required: true env: - name: NRDP_TOKEN ini: - section: callback_nrdp key: token + type: string hostname: - description: hostname where the passive check is linked to - required: True + description: Hostname where the passive check is linked to. 
+ required: true env: - name : NRDP_HOSTNAME ini: - section: callback_nrdp key: hostname + type: string servicename: - description: service where the passive check is linked to - required: True + description: Service where the passive check is linked to. + required: true env: - name : NRDP_SERVICENAME ini: - section: callback_nrdp key: servicename + type: string ''' import os From 147425ef936faf2c7c20a0565cdf61781fac08df Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 27 Jun 2021 10:00:01 +0200 Subject: [PATCH 0164/2828] ini_file: fix regression reported in #2578 (#2875) * Add regression test. * Add more Unicode tests. * Add fix. * Add changelog. * Work completely with Unicode. * Update plugins/modules/files/ini_file.py Co-authored-by: quidame Co-authored-by: quidame --- .../fragments/2875-ini_file-unicode.yml | 2 + plugins/modules/files/ini_file.py | 51 +++++++++++-------- .../targets/ini_file/tasks/main.yml | 40 +++++++++++++++ 3 files changed, 73 insertions(+), 20 deletions(-) create mode 100644 changelogs/fragments/2875-ini_file-unicode.yml diff --git a/changelogs/fragments/2875-ini_file-unicode.yml b/changelogs/fragments/2875-ini_file-unicode.yml new file mode 100644 index 0000000000..eaf1ff9ffb --- /dev/null +++ b/changelogs/fragments/2875-ini_file-unicode.yml @@ -0,0 +1,2 @@ +bugfixes: +- "ini_file - fix Unicode processing for Python 2 (https://github.com/ansible-collections/community.general/pull/2875)." 
\ No newline at end of file diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py index d318d04d57..7d6a988e85 100644 --- a/plugins/modules/files/ini_file.py +++ b/plugins/modules/files/ini_file.py @@ -112,6 +112,7 @@ import tempfile import traceback from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_text def match_opt(option, line): @@ -128,6 +129,13 @@ def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False, create=True, allow_no_value=False): + if section is not None: + section = to_text(section) + if option is not None: + option = to_text(option) + if value is not None: + value = to_text(value) + diff = dict( before='', after='', @@ -144,33 +152,33 @@ def do_ini(module, filename, section=None, option=None, value=None, ini_lines = [] else: with io.open(filename, 'r', encoding="utf-8-sig") as ini_file: - ini_lines = ini_file.readlines() + ini_lines = [to_text(line) for line in ini_file.readlines()] if module._diff: - diff['before'] = ''.join(ini_lines) + diff['before'] = u''.join(ini_lines) changed = False # ini file could be empty if not ini_lines: - ini_lines.append('\n') + ini_lines.append(u'\n') # last line of file may not contain a trailing newline - if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n': - ini_lines[-1] += '\n' + if ini_lines[-1] == u"" or ini_lines[-1][-1] != u'\n': + ini_lines[-1] += u'\n' changed = True # append fake section lines to simplify the logic # At top: # Fake random section to do not match any other in the file # Using commit hash as fake section name - fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5" + fake_section_name = u"ad01e11446efb704fcdbdb21f2c43757423d91c5" # Insert it at the beginning - ini_lines.insert(0, '[%s]' % fake_section_name) + ini_lines.insert(0, u'[%s]' % fake_section_name) # At bottom: - ini_lines.append('[') + ini_lines.append(u'[') 
# If no section is defined, fake section is used if not section: @@ -180,21 +188,23 @@ def do_ini(module, filename, section=None, option=None, value=None, section_start = 0 msg = 'OK' if no_extra_spaces: - assignment_format = '%s=%s\n' + assignment_format = u'%s=%s\n' else: - assignment_format = '%s = %s\n' + assignment_format = u'%s = %s\n' + + non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$')) for index, line in enumerate(ini_lines): - if line.startswith('[%s]' % section): + if line.startswith(u'[%s]' % section): within_section = True section_start = index - elif line.startswith('['): + elif line.startswith(u'['): if within_section: if state == 'present': # insert missing option line at the end of the section for i in range(index, 0, -1): # search backwards for previous non-blank or non-comment line - if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]): + if not non_blank_non_comment_pattern.match(ini_lines[i - 1]): if option and value: ini_lines.insert(i, assignment_format % (option, value)) msg = 'option added' @@ -216,7 +226,7 @@ def do_ini(module, filename, section=None, option=None, value=None, # change the existing option line if match_opt(option, line): if not value and allow_no_value: - newline = '%s\n' % option + newline = u'%s\n' % option else: newline = assignment_format % (option, value) option_changed = ini_lines[index] != newline @@ -229,7 +239,7 @@ def do_ini(module, filename, section=None, option=None, value=None, index = index + 1 while index < len(ini_lines): line = ini_lines[index] - if line.startswith('['): + if line.startswith(u'['): break if match_active_opt(option, line): del ini_lines[index] @@ -249,28 +259,29 @@ def do_ini(module, filename, section=None, option=None, value=None, del ini_lines[-1:] if not within_section and state == 'present': - ini_lines.append('[%s]\n' % section) + ini_lines.append(u'[%s]\n' % section) msg = 'section and option added' if option and value is not None: 
ini_lines.append(assignment_format % (option, value)) elif option and value is None and allow_no_value: - ini_lines.append('%s\n' % option) + ini_lines.append(u'%s\n' % option) else: msg = 'only section added' changed = True if module._diff: - diff['after'] = ''.join(ini_lines) + diff['after'] = u''.join(ini_lines) backup_file = None if changed and not module.check_mode: if backup: backup_file = module.backup_local(filename) + encoded_ini_lines = [to_bytes(line) for line in ini_lines] try: tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir) - f = os.fdopen(tmpfd, 'w') - f.writelines(ini_lines) + f = os.fdopen(tmpfd, 'wb') + f.writelines(encoded_ini_lines) f.close() except IOError: module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc()) diff --git a/tests/integration/targets/ini_file/tasks/main.yml b/tests/integration/targets/ini_file/tasks/main.yml index be5835669b..210dafe2ca 100644 --- a/tests/integration/targets/ini_file/tasks/main.yml +++ b/tests/integration/targets/ini_file/tasks/main.yml @@ -514,3 +514,43 @@ assert: that: - content16 == expected16 + +# Regression test for https://github.com/ansible-collections/community.general/pull/2578#issuecomment-868092282 +- name: Create UTF-8 test file + copy: + content: !!binary | + W2FwcDptYWluXQphdmFpbGFibGVfbGFuZ3VhZ2VzID0gZW4gZnIgZXMgZGUgcHQgamEgbHQgemhf + VFcgaWQgZGEgcHRfQlIgcnUgc2wgaXQgbmxfTkwgdWsgdGEgc2kgY3MgbmIgaHUKIyBGdWxsIGxh + bmd1YWdlIG5hbWVzIGluIG5hdGl2ZSBsYW5ndWFnZSAoY29tbWEgc2VwYXJhdGVkKQphdmFpbGFi + bGVfbGFuZ3VhZ2VzX2Z1bGwgPSBFbmdsaXNoLCBGcmFuw6dhaXMsIEVzcGHDsW9sLCBEZXV0c2No + LCBQb3J0dWd1w6pzLCDml6XmnKzoqp4sIExpZXR1dm9zLCDkuK3mlocsIEluZG9uZXNpYSwgRGFu + c2ssIFBvcnR1Z3XDqnMgKEJyYXNpbCksINCg0YPRgdGB0LrQuNC5LCBTbG92ZW7FocSNaW5hLCBJ + dGFsaWFubywgTmVkZXJsYW5kcywg0KPQutGA0LDRl9C90YHRjNC60LAsIOCupOCuruCuv+CutOCv + jSwg4LeD4LeS4LaC4LeE4La9LCDEjGVza3ksIEJva23DpWwsIE1hZ3lhcgo= + dest: '{{ output_file }}' +- name: Add entries + ini_file: + section: "{{ item.section }}" + 
option: "{{ item.option }}" + value: "{{ item.value }}" + path: '{{ output_file }}' + create: true + loop: + - section: app:main + option: sqlalchemy.url + value: postgresql://app:secret@database/app + - section: handler_filelog + option: args + value: (sys.stderr,) + - section: handler_filelog + option: class + value: StreamHandler + - section: handler_exc_handler + option: args + value: (sys.stderr,) + - section: båz + option: fföø + value: ḃâŗ + - section: båz + option: fföø + value: bar From 9dd91f949a9cf8ac5becc12d8b079bbc6ccb9dc7 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 27 Jun 2021 22:38:04 +1200 Subject: [PATCH 0165/2828] terraform - added check_destroy (#2874) * added check_destroy This is based on the work done in PR 398. Authors from that PR: Co-authored-by: effaamponsah Co-authored-by: m-yosefpor Co-authored-by: Felix Fontein Co-authored-by: Andrew Klychkov * added changelog fragment * Update plugins/modules/cloud/misc/terraform.py Co-authored-by: Ajpantuso * Update changelogs/fragments/2874-terraform-check-destroy.yml Co-authored-by: Amin Vakil Co-authored-by: effaamponsah Co-authored-by: Felix Fontein Co-authored-by: Andrew Klychkov Co-authored-by: Ajpantuso Co-authored-by: Amin Vakil --- .../2874-terraform-check-destroy.yml | 2 ++ plugins/modules/cloud/misc/terraform.py | 26 ++++++++++++++----- 2 files changed, 21 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/2874-terraform-check-destroy.yml diff --git a/changelogs/fragments/2874-terraform-check-destroy.yml b/changelogs/fragments/2874-terraform-check-destroy.yml new file mode 100644 index 0000000000..e41d1aebc0 --- /dev/null +++ b/changelogs/fragments/2874-terraform-check-destroy.yml @@ -0,0 +1,2 @@ +minor_changes: + - terraform - add ``check_destroy`` optional parameter to check for deletion of resources before it is applied (https://github.com/ansible-collections/community.general/pull/2874). 
diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py index 86521ed264..c5619b8eb0 100644 --- a/plugins/modules/cloud/misc/terraform.py +++ b/plugins/modules/cloud/misc/terraform.py @@ -130,6 +130,13 @@ options: default: false type: bool version_added: '1.3.0' + check_destroy: + description: + - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions, + but not "destroy and re-create" actions. This option is ignored when I(state=absent). + type: bool + default: false + version_added: '3.3.0' notes: - To just run a `terraform plan`, use check mode. requirements: [ "terraform" ] @@ -313,7 +320,7 @@ def build_plan(command, project_path, variables_args, state_file, targets, state plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path] - for t in (module.params.get('targets') or []): + for t in targets: plan_command.extend(['-target', t]) plan_command.extend(_state_args(state_file)) @@ -340,21 +347,22 @@ def main(): project_path=dict(required=True, type='path'), binary_path=dict(type='path'), plugin_paths=dict(type='list', elements='path'), - workspace=dict(required=False, type='str', default='default'), + workspace=dict(type='str', default='default'), purge_workspace=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent', 'planned']), variables=dict(type='dict'), - variables_files=dict(aliases=['variables_file'], type='list', elements='path', default=None), + variables_files=dict(aliases=['variables_file'], type='list', elements='path'), plan_file=dict(type='path'), state_file=dict(type='path'), targets=dict(type='list', elements='str', default=[]), lock=dict(type='bool', default=True), lock_timeout=dict(type='int',), force_init=dict(type='bool', default=False), - backend_config=dict(type='dict', default=None), - backend_config_files=dict(type='list', elements='path', default=None), - 
init_reconfigure=dict(required=False, type='bool', default=False), + backend_config=dict(type='dict'), + backend_config_files=dict(type='list', elements='path'), + init_reconfigure=dict(type='bool', default=False), overwrite_init=dict(type='bool', default=True), + check_destroy=dict(type='bool', default=False), ), required_if=[('state', 'planned', ['plan_file'])], supports_check_mode=True, @@ -375,6 +383,7 @@ def main(): backend_config_files = module.params.get('backend_config_files') init_reconfigure = module.params.get('init_reconfigure') overwrite_init = module.params.get('overwrite_init') + check_destroy = module.params.get('check_destroy') if bin_path is not None: command = [bin_path] @@ -444,9 +453,12 @@ def main(): else: plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file, module.params.get('targets'), state, plan_file) + if state == 'present' and check_destroy and '- destroy' in out: + module.fail_json(msg="Aborting command because it would destroy some resources. 
" + "Consider switching the 'check_destroy' to false to suppress this error") command.append(plan_file) - if needs_application and not module.check_mode and not state == 'planned': + if needs_application and not module.check_mode and state != 'planned': rc, out, err = module.run_command(command, check_rc=False, cwd=project_path) if rc != 0: if workspace_ctx["current"] != workspace: From 2fb08775775b7ec3b8a4d4cad2781ee3b6a06263 Mon Sep 17 00:00:00 2001 From: Stef Graces Date: Sun, 27 Jun 2021 14:01:06 +0200 Subject: [PATCH 0166/2828] Fix/gitlab project user workspace (#2881) * Add ability to create project under a user * Add changelog * Change minor_changes in changelog As suggested in this comment https://github.com/ansible-collections/community.general/pull/2824#discussion_r653411741 * Fix user's namespace * Delete changelog * Add changelog * Fix changelog Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil * Change user_group_id to namespace_group_id Co-authored-by: Felix Fontein * Change to namespace_id Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- .../2881-gitlab_project-fix_workspace_user.yaml | 3 +++ .../source_control/gitlab/gitlab_project.py | 14 +++++++------- 2 files changed, 10 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/2881-gitlab_project-fix_workspace_user.yaml diff --git a/changelogs/fragments/2881-gitlab_project-fix_workspace_user.yaml b/changelogs/fragments/2881-gitlab_project-fix_workspace_user.yaml new file mode 100644 index 0000000000..0de8368b7f --- /dev/null +++ b/changelogs/fragments/2881-gitlab_project-fix_workspace_user.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - gitlab_project - user projects are created using namespace ID now, instead of user ID (https://github.com/ansible-collections/community.general/pull/2881). 
diff --git a/plugins/modules/source_control/gitlab/gitlab_project.py b/plugins/modules/source_control/gitlab/gitlab_project.py index 73def710c3..61d1ac0cb1 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project.py +++ b/plugins/modules/source_control/gitlab/gitlab_project.py @@ -345,22 +345,22 @@ def main(): gitlab_project = GitLabProject(module, gitlab_instance) namespace = None - user_group_id = None + namespace_id = None if group_identifier: group = findGroup(gitlab_instance, group_identifier) if group is None: module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier) - user_group_id = group.id + namespace_id = group.id else: - user = gitlab_instance.users.list(username=gitlab_instance.user.username)[0] - user_group_id = user.id + namespace = gitlab_instance.namespaces.list(search=gitlab_instance.user.username)[0] + namespace_id = namespace.id - if not user_group_id: - module.fail_json(msg="Failed to find the user/group id which required to find namespace") + if not namespace_id: + module.fail_json(msg="Failed to find the namespace or group ID which is required to look up the namespace") try: - namespace = gitlab_instance.namespaces.get(user_group_id) + namespace = gitlab_instance.namespaces.get(namespace_id) except gitlab.exceptions.GitlabGetError as e: module.fail_json(msg="Failed to find the namespace for the given user: %s" % to_native(e)) From 199ead85d0bfed2242cda1bb086f097603dce9c2 Mon Sep 17 00:00:00 2001 From: quidame Date: Sun, 27 Jun 2021 14:56:43 +0200 Subject: [PATCH 0167/2828] java_keystore: fix keystore type (#2516) * fix keystore type; update unit tests * add changelog fragment * document new param 'keystore_type' * add keystore_type support (backward compatible) * check JKS format with magic bytes * update integration tests * revert first changes in unit tests * update changelog fragment * fix magic bytes for python2/python3 * fix integration tests (irrelevant check_mode) * fix unit test (keystore 
removed before failure => changed=true) * fix typo * fix spelling * shorten a branch * mock is_jks_or_pkcs12 * fix function path in unit tests * Apply suggestions from code review (spelling) Co-authored-by: Ajpantuso * rename a method (module + unit tests) * move ArgumentSpec class content to main() * refactor create() to not loose existing keystore in case of error * update unit tests * add integration test (error handling) * fix keystore backup cleanup Co-authored-by: Ajpantuso --- .../2516_fix_2515_keystore_type_jks.yml | 4 + plugins/modules/system/java_keystore.py | 144 +++++++++++++----- .../targets/java_keystore/tasks/tests.yml | 115 ++++++++++++++ .../modules/system/test_java_keystore.py | 114 ++++++++++---- 4 files changed, 308 insertions(+), 69 deletions(-) create mode 100644 changelogs/fragments/2516_fix_2515_keystore_type_jks.yml diff --git a/changelogs/fragments/2516_fix_2515_keystore_type_jks.yml b/changelogs/fragments/2516_fix_2515_keystore_type_jks.yml new file mode 100644 index 0000000000..767081dac9 --- /dev/null +++ b/changelogs/fragments/2516_fix_2515_keystore_type_jks.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - "java_keystore - add parameter ``keystore_type`` to control output file format and override ``keytool``'s + default, which depends on Java version (https://github.com/ansible-collections/community.general/issues/2515)." 
diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py index 8293801f1b..5cc2e9258a 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/system/java_keystore.py @@ -1,8 +1,8 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright: (c) 2016, Guillaume Grossetie # Copyright: (c) 2021, quidame +# Copyright: (c) 2016, Guillaume Grossetie # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) @@ -97,6 +97,24 @@ options: - openssl - cryptography version_added: 3.1.0 + keystore_type: + description: + - Type of the Java keystore. + - When this option is omitted and the keystore doesn't already exist, the + behavior follows C(keytool)'s default store type which depends on + Java version; C(pkcs12) since Java 9 and C(jks) prior (may also + be C(pkcs12) if new default has been backported to this version). + - When this option is omitted and the keystore already exists, the current + type is left untouched, unless another option leads to overwrite the + keystore (in that case, this option behaves like for keystore creation). + - When I(keystore_type) is set, the keystore is created with this type if + it doesn't already exist, or is overwritten to match the given type in + case of mismatch. + type: str + choices: + - jks + - pkcs12 + version_added: 3.3.0 requirements: - openssl in PATH (when I(ssl_backend=openssl)) - keytool in PATH @@ -107,12 +125,16 @@ author: extends_documentation_fragment: - files seealso: + - module: community.crypto.openssl_pkcs12 - module: community.general.java_cert notes: - I(certificate) and I(private_key) require that their contents are available on the controller (either inline in a playbook, or with the C(file) lookup), while I(certificate_path) and I(private_key_path) require that the files are available on the target host. 
+ - By design, any change of a value of options I(keystore_type), I(name) or + I(password), as well as changes of key or certificate materials will cause + the existing I(dest) to be overwritten. ''' EXAMPLES = ''' @@ -156,6 +178,12 @@ msg: type: str sample: "Unable to find the current certificate fingerprint in ..." +err: + description: Output from stderr of keytool/openssl command after error of given command. + returned: failure + type: str + sample: "Keystore password is too short - must be at least 6 characters\n" + rc: description: keytool/openssl command execution return value returned: changed and failure @@ -176,7 +204,7 @@ import tempfile from ansible.module_utils.six import PY2 from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native try: from cryptography.hazmat.primitives.serialization.pkcs12 import serialize_key_and_certificates @@ -201,6 +229,7 @@ except ImportError: class JavaKeystore: def __init__(self, module): self.module = module + self.result = dict() self.keytool_bin = module.get_bin_path('keytool', True) @@ -211,6 +240,7 @@ class JavaKeystore: self.password = module.params['password'] self.private_key = module.params['private_key'] self.ssl_backend = module.params['ssl_backend'] + self.keystore_type = module.params['keystore_type'] if self.ssl_backend == 'openssl': self.openssl_bin = module.get_bin_path('openssl', True) @@ -312,6 +342,9 @@ class JavaKeystore: rc=rc ) + if self.keystore_type not in (None, self.current_type()): + return "keystore type mismatch" + stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out) if not stored_certificate_match: return self.module.fail_json( @@ -322,6 +355,14 @@ class JavaKeystore: return stored_certificate_match.group(1) + def current_type(self): + magic_bytes = b'\xfe\xed\xfe\xed' + with 
open(self.keystore_path, 'rb') as fd: + header = fd.read(4) + if header == magic_bytes: + return 'jks' + return 'pkcs12' + def cert_changed(self): current_certificate_fingerprint = self.read_certificate_fingerprint() stored_certificate_fingerprint = self.read_stored_certificate_fingerprint() @@ -389,6 +430,8 @@ class JavaKeystore: with open(keystore_p12_path, 'wb') as p12_file: p12_file.write(pkcs12_bundle) + self.result.update(msg="PKCS#12 bundle created by cryptography backend") + def openssl_create_pkcs12_bundle(self, keystore_p12_path): export_p12_cmd = [self.openssl_bin, "pkcs12", "-export", "-name", self.name, "-in", self.certificate_path, "-inkey", self.private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] @@ -401,19 +444,22 @@ class JavaKeystore: cmd_stdin = "%s\n" % self.keypass cmd_stdin += "%s\n%s" % (self.password, self.password) - (rc, export_p12_out, dummy) = self.module.run_command( + (rc, export_p12_out, export_p12_err) = self.module.run_command( export_p12_cmd, data=cmd_stdin, environ_update=None, check_rc=False ) + self.result = dict(msg=export_p12_out, cmd=export_p12_cmd, rc=rc) if rc != 0: - self.module.fail_json(msg=export_p12_out, cmd=export_p12_cmd, rc=rc) + self.result['err'] = export_p12_err + self.module.fail_json(**self.result) def create(self): + """Create the keystore, or replace it with a rollback in case of + keytool failure. + """ if self.module.check_mode: - return {'changed': True} - - if os.path.exists(self.keystore_path): - os.remove(self.keystore_path) + self.result['changed'] = True + return self.result keystore_p12_path = create_path() self.module.add_cleanup_file(keystore_p12_path) @@ -423,6 +469,13 @@ class JavaKeystore: else: self.openssl_create_pkcs12_bundle(keystore_p12_path) + if self.keystore_type == 'pkcs12': + # Preserve properties of the destination file, if any. 
+ self.module.atomic_move(keystore_p12_path, self.keystore_path) + self.update_permissions() + self.result['changed'] = True + return self.result + import_keystore_cmd = [self.keytool_bin, "-importkeystore", "-destkeystore", self.keystore_path, "-srckeystore", keystore_p12_path, @@ -430,19 +483,38 @@ class JavaKeystore: "-alias", self.name, "-noprompt"] - (rc, import_keystore_out, dummy) = self.module.run_command( + if self.keystore_type == 'jks': + keytool_help = self.module.run_command([self.keytool_bin, '-importkeystore', '-help']) + if '-deststoretype' in keytool_help[1] + keytool_help[2]: + import_keystore_cmd.insert(4, "-deststoretype") + import_keystore_cmd.insert(5, self.keystore_type) + + keystore_backup = None + if self.exists(): + keystore_backup = self.keystore_path + '.tmpbak' + # Preserve properties of the source file + self.module.preserved_copy(self.keystore_path, keystore_backup) + os.remove(self.keystore_path) + + (rc, import_keystore_out, import_keystore_err) = self.module.run_command( import_keystore_cmd, data='%s\n%s\n%s' % (self.password, self.password, self.password), check_rc=False ) - if rc != 0: - return self.module.fail_json(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc) + + self.result = dict(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc) + + # keytool may return 0 whereas the keystore has not been created. 
+ if rc != 0 or not self.exists(): + if keystore_backup is not None: + self.module.preserved_copy(keystore_backup, self.keystore_path) + os.remove(keystore_backup) + self.result['err'] = import_keystore_err + return self.module.fail_json(**self.result) self.update_permissions() - return { - 'changed': True, - 'msg': import_keystore_out, - 'cmd': import_keystore_cmd, - 'rc': rc - } + if keystore_backup is not None: + os.remove(keystore_backup) + self.result['changed'] = True + return self.result def exists(self): return os.path.exists(self.keystore_path) @@ -465,15 +537,15 @@ def create_file(content): def hex_decode(s): if PY2: return s.decode('hex') - else: - return s.hex() + return s.hex() -class ArgumentSpec(object): - def __init__(self): - self.supports_check_mode = True - self.add_file_common_args = True - argument_spec = dict( +def main(): + choose_between = (['certificate', 'certificate_path'], + ['private_key', 'private_key_path']) + + module = AnsibleModule( + argument_spec=dict( name=dict(type='str', required=True), dest=dict(type='path', required=True), certificate=dict(type='str', no_log=True), @@ -483,25 +555,13 @@ class ArgumentSpec(object): private_key_passphrase=dict(type='str', no_log=True), password=dict(type='str', required=True, no_log=True), ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']), + keystore_type=dict(type='str', choices=['jks', 'pkcs12']), force=dict(type='bool', default=False), - ) - choose_between = ( - ['certificate', 'certificate_path'], - ['private_key', 'private_key_path'], - ) - self.argument_spec = argument_spec - self.required_one_of = choose_between - self.mutually_exclusive = choose_between - - -def main(): - spec = ArgumentSpec() - module = AnsibleModule( - argument_spec=spec.argument_spec, - required_one_of=spec.required_one_of, - mutually_exclusive=spec.mutually_exclusive, - supports_check_mode=spec.supports_check_mode, - add_file_common_args=spec.add_file_common_args, + ), + 
required_one_of=choose_between, + mutually_exclusive=choose_between, + supports_check_mode=True, + add_file_common_args=True, ) module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') diff --git a/tests/integration/targets/java_keystore/tasks/tests.yml b/tests/integration/targets/java_keystore/tasks/tests.yml index b892dd1d29..8510a64165 100644 --- a/tests/integration/targets/java_keystore/tasks/tests.yml +++ b/tests/integration/targets/java_keystore/tasks/tests.yml @@ -24,6 +24,7 @@ private_key_passphrase: "{{ item.passphrase | d(omit) }}" password: changeit ssl_backend: "{{ ssl_backend }}" + keystore_type: "{{ item.keystore_type | d(omit) }}" loop: "{{ java_keystore_certs }}" check_mode: yes register: result_check @@ -91,6 +92,98 @@ loop: "{{ java_keystore_new_certs }}" register: result_pw_change + +- name: Create a Java keystore for the given certificates (force keystore type pkcs12, check mode) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + keystore_type: pkcs12 + loop: "{{ java_keystore_new_certs }}" + check_mode: yes + register: result_type_pkcs12_check + +- name: Create a Java keystore for the given certificates (force keystore type jks, check mode) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + keystore_type: jks + loop: "{{ java_keystore_new_certs }}" + check_mode: yes + register: result_type_jks_check + +- name: Create a Java keystore for the given certificates (force keystore type jks) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + keystore_type: jks + loop: "{{ java_keystore_new_certs }}" + register: result_type_jks + + +- name: Stat keystore (before failure) + ansible.builtin.stat: + path: "{{ output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + loop: "{{ java_keystore_new_certs }}" + register: result_stat_before + +- name: Fail to create a Java keystore for the 
given certificates (password too short) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: short + keystore_type: jks + loop: "{{ java_keystore_new_certs }}" + register: result_fail_jks + ignore_errors: true + +- name: Stat keystore (after failure) + ansible.builtin.stat: + path: "{{ output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + loop: "{{ java_keystore_new_certs }}" + register: result_stat_after + + +- name: Create a Java keystore for the given certificates (keystore type changed, check mode) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + keystore_type: pkcs12 + loop: "{{ java_keystore_new_certs }}" + check_mode: yes + register: result_type_change_check + +- name: Create a Java keystore for the given certificates (keystore type changed) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + keystore_type: pkcs12 + loop: "{{ java_keystore_new_certs }}" + register: result_type_change + + +- name: Create a Java keystore for the given certificates (omit keystore type, check mode) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + loop: "{{ java_keystore_new_certs }}" + check_mode: yes + register: result_type_omit_check + +- name: Create a Java keystore for the given certificates (omit keystore type) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + loop: "{{ java_keystore_new_certs }}" + register: result_type_omit + + - name: Check that the remote certificates have not been removed ansible.builtin.file: path: "{{ output_dir ~ '/' ~ item.name ~ '.pem' }}" @@ -118,3 +211,25 @@ - result_alias_change_check is changed - result_pw_change is changed - result_pw_change_check is changed + + # We don't know if we start from jks or pkcs12 format, anyway check mode + # and actual mode must return the same 'changed' state, and 'jks' 
and + # 'pkcs12' must give opposite results on a same host. + - result_type_jks_check.changed != result_type_pkcs12_check.changed + - result_type_jks_check.changed == result_type_jks.changed + + - result_type_change is changed + - result_type_change_check is changed + - result_type_omit is not changed + - result_type_omit_check is not changed + + # keystore properties must remain the same after failure + - result_fail_jks is failed + - result_stat_before.results[0].stat.uid == result_stat_after.results[0].stat.uid + - result_stat_before.results[1].stat.uid == result_stat_after.results[1].stat.uid + - result_stat_before.results[0].stat.gid == result_stat_after.results[0].stat.gid + - result_stat_before.results[1].stat.gid == result_stat_after.results[1].stat.gid + - result_stat_before.results[0].stat.mode == result_stat_after.results[0].stat.mode + - result_stat_before.results[1].stat.mode == result_stat_after.results[1].stat.mode + - result_stat_before.results[0].stat.checksum == result_stat_after.results[0].stat.checksum + - result_stat_before.results[1].stat.checksum == result_stat_after.results[1].stat.checksum diff --git a/tests/unit/plugins/modules/system/test_java_keystore.py b/tests/unit/plugins/modules/system/test_java_keystore.py index 7d582a3e99..7d078ac0f9 100644 --- a/tests/unit/plugins/modules/system/test_java_keystore.py +++ b/tests/unit/plugins/modules/system/test_java_keystore.py @@ -14,7 +14,25 @@ from ansible_collections.community.general.tests.unit.plugins.modules.utils impo from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible_collections.community.general.tests.unit.compat.mock import Mock from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.modules.system.java_keystore import JavaKeystore, ArgumentSpec +from ansible_collections.community.general.plugins.modules.system.java_keystore import JavaKeystore + + +module_argument_spec = dict( + 
name=dict(type='str', required=True), + dest=dict(type='path', required=True), + certificate=dict(type='str', no_log=True), + certificate_path=dict(type='path'), + private_key=dict(type='str', no_log=True), + private_key_path=dict(type='path', no_log=False), + private_key_passphrase=dict(type='str', no_log=True), + password=dict(type='str', required=True, no_log=True), + ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']), + keystore_type=dict(type='str', choices=['jks', 'pkcs12']), + force=dict(type='bool', default=False), +) +module_supports_check_mode = True +module_choose_between = (['certificate', 'certificate_path'], + ['private_key', 'private_key_path']) class TestCreateJavaKeystore(ModuleTestCase): @@ -25,11 +43,13 @@ class TestCreateJavaKeystore(ModuleTestCase): super(TestCreateJavaKeystore, self).setUp() orig_exists = os.path.exists - self.spec = ArgumentSpec() self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file') self.mock_create_path = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_path') + self.mock_current_type = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.JavaKeystore.current_type') self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.mock_preserved_copy = patch('ansible.module_utils.basic.AnsibleModule.preserved_copy') + self.mock_atomic_move = patch('ansible.module_utils.basic.AnsibleModule.atomic_move') self.mock_os_path_exists = patch('os.path.exists', side_effect=lambda path: True if path == '/path/to/keystore.jks' else orig_exists(path)) self.mock_selinux_context = patch('ansible.module_utils.basic.AnsibleModule.selinux_context', @@ -38,8 +58,11 @@ class TestCreateJavaKeystore(ModuleTestCase): side_effect=lambda path: (False, None)) 
self.run_command = self.mock_run_command.start() self.get_bin_path = self.mock_get_bin_path.start() + self.preserved_copy = self.mock_preserved_copy.start() + self.atomic_move = self.mock_atomic_move.start() self.create_file = self.mock_create_file.start() self.create_path = self.mock_create_path.start() + self.current_type = self.mock_current_type.start() self.selinux_context = self.mock_selinux_context.start() self.is_special_selinux_path = self.mock_is_special_selinux_path.start() self.os_path_exists = self.mock_os_path_exists.start() @@ -49,8 +72,11 @@ class TestCreateJavaKeystore(ModuleTestCase): super(TestCreateJavaKeystore, self).tearDown() self.mock_create_file.stop() self.mock_create_path.stop() + self.mock_current_type.stop() self.mock_run_command.stop() self.mock_get_bin_path.stop() + self.mock_preserved_copy.stop() + self.mock_atomic_move.stop() self.mock_selinux_context.stop() self.mock_is_special_selinux_path.stop() self.mock_os_path_exists.stop() @@ -65,8 +91,10 @@ class TestCreateJavaKeystore(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) with patch('os.remove', return_value=True): @@ -96,8 +124,10 @@ class TestCreateJavaKeystore(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) module.exit_json = Mock() @@ -106,7 +136,7 @@ class TestCreateJavaKeystore(ModuleTestCase): with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', 
''] - self.run_command.side_effect = [(1, '', ''), (0, '', '')] + self.run_command.side_effect = [(1, '', 'Oops'), (0, '', '')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] jks = JavaKeystore(module) jks.create() @@ -118,6 +148,7 @@ class TestCreateJavaKeystore(ModuleTestCase): "-passout", "stdin", "-passin", "stdin"], msg='', + err='Oops', rc=1 ) @@ -131,8 +162,10 @@ class TestCreateJavaKeystore(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) module.exit_json = Mock() @@ -141,7 +174,7 @@ class TestCreateJavaKeystore(ModuleTestCase): with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', ''] - self.run_command.side_effect = [(1, '', ''), (0, '', '')] + self.run_command.side_effect = [(1, '', 'Oops'), (0, '', '')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] jks = JavaKeystore(module) jks.create() @@ -152,6 +185,7 @@ class TestCreateJavaKeystore(ModuleTestCase): "-out", "/tmp/tmp1cyp12xa", "-passout", "stdin"], msg='', + err='Oops', rc=1 ) @@ -165,8 +199,10 @@ class TestCreateJavaKeystore(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) module.exit_json = Mock() @@ -175,7 +211,7 @@ class TestCreateJavaKeystore(ModuleTestCase): with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp', ''] - 
self.run_command.side_effect = [(0, '', ''), (1, '', '')] + self.run_command.side_effect = [(0, '', ''), (1, '', 'Oops')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] jks = JavaKeystore(module) jks.create() @@ -185,6 +221,7 @@ class TestCreateJavaKeystore(ModuleTestCase): "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", "-noprompt"], msg='', + err='Oops', rc=1 ) @@ -195,20 +232,28 @@ class TestCertChanged(ModuleTestCase): def setUp(self): """Setup.""" super(TestCertChanged, self).setUp() - self.spec = ArgumentSpec() self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file') + self.mock_current_type = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.JavaKeystore.current_type') self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.mock_preserved_copy = patch('ansible.module_utils.basic.AnsibleModule.preserved_copy') + self.mock_atomic_move = patch('ansible.module_utils.basic.AnsibleModule.atomic_move') self.run_command = self.mock_run_command.start() self.create_file = self.mock_create_file.start() self.get_bin_path = self.mock_get_bin_path.start() + self.current_type = self.mock_current_type.start() + self.preserved_copy = self.mock_preserved_copy.start() + self.atomic_move = self.mock_atomic_move.start() def tearDown(self): """Teardown.""" super(TestCertChanged, self).tearDown() self.mock_create_file.stop() + self.mock_current_type.stop() self.mock_run_command.stop() self.mock_get_bin_path.stop() + self.mock_preserved_copy.stop() + self.mock_atomic_move.stop() def test_cert_unchanged_same_fingerprint(self): set_module_args(dict( @@ -220,14 +265,17 @@ class TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - 
supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) with patch('os.remove', return_value=True): self.create_file.side_effect = ['/tmp/placeholder', ''] self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + self.current_type.side_effect = ['jks'] jks = JavaKeystore(module) result = jks.cert_changed() self.assertFalse(result, 'Fingerprint is identical') @@ -242,19 +290,22 @@ class TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) with patch('os.remove', return_value=True): self.create_file.side_effect = ['/tmp/placeholder', ''] self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + self.current_type.side_effect = ['jks'] jks = JavaKeystore(module) result = jks.cert_changed() self.assertTrue(result, 'Fingerprint mismatch') - def test_cert_changed_fail_alias_does_not_exist(self): + def test_cert_changed_alias_does_not_exist(self): set_module_args(dict( certificate='cert-foo', private_key='private-foo', @@ -264,8 +315,10 @@ class TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) with patch('os.remove', return_value=True): @@ -287,8 +340,10 @@ class 
TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) with patch('os.remove', return_value=True): @@ -310,8 +365,10 @@ class TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) module.exit_json = Mock() @@ -321,6 +378,7 @@ class TestCertChanged(ModuleTestCase): self.create_file.side_effect = ['/tmp/tmpdj6bvvme', ''] self.run_command.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + self.current_type.side_effect = ['jks'] jks = JavaKeystore(module) jks.cert_changed() module.fail_json.assert_called_once_with( @@ -340,8 +398,10 @@ class TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) module.exit_json = Mock() From c7cf6f2eb7b3795d6e9401104b9f7b3cbd46359d Mon Sep 17 00:00:00 2001 From: Stef Graces Date: Sun, 27 Jun 2021 16:09:41 +0200 Subject: [PATCH 0168/2828] gitlab_project - Add ability to create project under a user (#2824) * Add ability to create project under a user * Add changelog * Add username option * Update changelogs/fragments/2824-gitlab_project-project-under-user.yml Co-authored-by: Felix Fontein * Make group and username mutually exclusive Co-authored-by: Felix 
Fontein --- .../2824-gitlab_project-project-under-user.yml | 3 +++ .../modules/source_control/gitlab/gitlab_project.py | 13 ++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2824-gitlab_project-project-under-user.yml diff --git a/changelogs/fragments/2824-gitlab_project-project-under-user.yml b/changelogs/fragments/2824-gitlab_project-project-under-user.yml new file mode 100644 index 0000000000..7fa18941a0 --- /dev/null +++ b/changelogs/fragments/2824-gitlab_project-project-under-user.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - gitlab_project - projects can be created under other user's namespaces with the new ``username`` option (https://github.com/ansible-collections/community.general/pull/2824). diff --git a/plugins/modules/source_control/gitlab/gitlab_project.py b/plugins/modules/source_control/gitlab/gitlab_project.py index 61d1ac0cb1..c916246b78 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project.py +++ b/plugins/modules/source_control/gitlab/gitlab_project.py @@ -109,6 +109,11 @@ options: required: false default: false version_added: "2.0.0" + username: + description: + - Used to create a personal project under a user's name. 
+ type: str + version_added: "3.3.0" ''' EXAMPLES = r''' @@ -302,6 +307,7 @@ def main(): import_url=dict(type='str'), state=dict(type='str', default="present", choices=["absent", "present"]), lfs_enabled=dict(default=False, type='bool'), + username=dict(type='str'), )) module = AnsibleModule( @@ -309,6 +315,7 @@ def main(): mutually_exclusive=[ ['api_username', 'api_token'], ['api_password', 'api_token'], + ['group', 'username'], ], required_together=[ ['api_username', 'api_password'], @@ -332,6 +339,7 @@ def main(): import_url = module.params['import_url'] state = module.params['state'] lfs_enabled = module.params['lfs_enabled'] + username = module.params['username'] if not HAS_GITLAB_PACKAGE: module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) @@ -353,7 +361,10 @@ def main(): namespace_id = group.id else: - namespace = gitlab_instance.namespaces.list(search=gitlab_instance.user.username)[0] + if username: + namespace = gitlab_instance.namespaces.list(search=username)[0] + else: + namespace = gitlab_instance.namespaces.list(search=gitlab_instance.user.username)[0] namespace_id = namespace.id if not namespace_id: From 0a9cf3811880bd3e7640b637ffef2978df8f5429 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Sun, 27 Jun 2021 19:10:49 +0430 Subject: [PATCH 0169/2828] yum_versionlock: fix idempotency when using wildcard (asterisk) (#2787) * Check idempotency on yum_versionlock * Lock packages wildcard * fix formatting Co-authored-by: Felix Fontein * Fix formatting in asserts * little closer but not still there * Import fnmatch * Change check_mode logic * Add check_mode for add * Add changelog Co-authored-by: Felix Fontein --- ...ck-fix_idempotency_when_using_wildcard.yml | 3 +++ .../modules/packaging/os/yum_versionlock.py | 21 +++++++++--------- .../targets/yum_versionlock/tasks/main.yml | 22 +++++++++++++++---- 3 files changed, 32 insertions(+), 14 deletions(-) create mode 100644 
changelogs/fragments/2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml diff --git a/changelogs/fragments/2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml b/changelogs/fragments/2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml new file mode 100644 index 0000000000..9fb569ec42 --- /dev/null +++ b/changelogs/fragments/2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - yum_versionlock - fix idempotency when using wildcard (asterisk) in ``name`` option (https://github.com/ansible-collections/community.general/issues/2761). diff --git a/plugins/modules/packaging/os/yum_versionlock.py b/plugins/modules/packaging/os/yum_versionlock.py index 6dfb3d20ba..62110bf00a 100644 --- a/plugins/modules/packaging/os/yum_versionlock.py +++ b/plugins/modules/packaging/os/yum_versionlock.py @@ -76,6 +76,7 @@ state: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native +from fnmatch import fnmatch class YumVersionLock: @@ -125,23 +126,23 @@ def main(): if state in ('present'): command = 'add' for single_pkg in packages: - if single_pkg not in versionlock_packages: - if module.check_mode: - changed = True - continue + if not any(fnmatch(pkg.split(":", 1)[-1], single_pkg) for pkg in versionlock_packages.split()): packages_list.append(single_pkg) if packages_list: - changed = yum_v.ensure_state(packages_list, command) + if module.check_mode: + changed = True + else: + changed = yum_v.ensure_state(packages_list, command) elif state in ('absent'): command = 'delete' for single_pkg in packages: - if single_pkg in versionlock_packages: - if module.check_mode: - changed = True - continue + if any(fnmatch(pkg, single_pkg) for pkg in versionlock_packages.split()): packages_list.append(single_pkg) if packages_list: - changed = yum_v.ensure_state(packages_list, command) + if module.check_mode: + changed = True + else: + changed = 
yum_v.ensure_state(packages_list, command) module.exit_json( changed=changed, diff --git a/tests/integration/targets/yum_versionlock/tasks/main.yml b/tests/integration/targets/yum_versionlock/tasks/main.yml index d1a1522087..2e551b48ca 100644 --- a/tests/integration/targets/yum_versionlock/tasks/main.yml +++ b/tests/integration/targets/yum_versionlock/tasks/main.yml @@ -29,6 +29,18 @@ state: present register: lock_all_packages + - name: Lock all packages again + community.general.yum_versionlock: + name: "{{ yum_updates.results | map(attribute='name') | list }}" + state: present + register: lock_all_packages_again + + - name: Lock packages wildcard + community.general.yum_versionlock: + name: "nss*" + state: present + register: lock_nss_wildcard + # This should fail when it needs user interaction and missing -y is on purpose. - name: Update all packages (not really) command: yum update --setopt=obsoletes=0 @@ -54,10 +66,12 @@ - name: Assert everything is fine assert: that: - - "{{ lock_all_packages.changed }}" - - "{{ not update_all_locked_packages.changed }}" - - "{{ unlock_all_packages.changed }}" - - "{{ update_all_packages.changed }}" + - lock_all_packages is changed + - lock_all_packages_again is not changed + - lock_nss_wildcard is not changed + - update_all_locked_packages is not changed + - unlock_all_packages is changed + - update_all_packages is changed when: yum_updates.results | length != 0 - name: Remove installed packages in case it was not installed From debb15efbe7b819a5dc01e6053aa3b2b964c0260 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 27 Jun 2021 18:47:11 +0200 Subject: [PATCH 0170/2828] pacman: Descriptive state documentation (#2894) * pacman: Descriptive state documentation * Update plugins/modules/packaging/os/pacman.py Co-authored-by: Felix Fontein Co-authored-by: Martin Rys Co-authored-by: Felix Fontein --- plugins/modules/packaging/os/pacman.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git 
a/plugins/modules/packaging/os/pacman.py b/plugins/modules/packaging/os/pacman.py index 859c90a6c4..372d13cd49 100644 --- a/plugins/modules/packaging/os/pacman.py +++ b/plugins/modules/packaging/os/pacman.py @@ -30,9 +30,12 @@ options: state: description: - - Desired state of the package. + - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package. + - C(present) and C(installed) will simply ensure that a desired package is installed. + - C(latest) will update the specified package if it is not of the latest available version. + - C(absent) and C(removed) will remove the specified package. default: present - choices: [ absent, latest, present, installed, removed ] + choices: [ absent, installed, latest, present, removed ] type: str force: From 2d1527a56423d950ce2f12e66d37253dbfdf02e5 Mon Sep 17 00:00:00 2001 From: omula Date: Mon, 28 Jun 2021 20:46:44 +0200 Subject: [PATCH 0171/2828] [nmcli] add connection.slave-type for teamed devices (#2827) * [nmcli] add connection.slave-type for teamed devices * [nmcli] add fragment with changes for #2827 * [nmcli] add tests for network team * [nmcli] fix testing Co-authored-by: Oriol MULA VALLS --- .../fragments/2827-nmcli_fix_team_slave.yml | 2 + plugins/modules/net_tools/nmcli.py | 4 + .../plugins/modules/net_tools/test_nmcli.py | 145 ++++++++++++++++++ 3 files changed, 151 insertions(+) create mode 100644 changelogs/fragments/2827-nmcli_fix_team_slave.yml diff --git a/changelogs/fragments/2827-nmcli_fix_team_slave.yml b/changelogs/fragments/2827-nmcli_fix_team_slave.yml new file mode 100644 index 0000000000..02f001c4f5 --- /dev/null +++ b/changelogs/fragments/2827-nmcli_fix_team_slave.yml @@ -0,0 +1,2 @@ +bugfixes: + - nmcli - fixes team-slave configuration by adding connection.slave-type (https://github.com/ansible-collections/community.general/issues/766). 
diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 657df3bd2a..7ed515fc75 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -833,6 +833,10 @@ class Nmcli(object): 'bridge-port.hairpin-mode': self.hairpin, 'bridge-port.priority': self.slavepriority, }) + elif self.type == 'team-slave': + options.update({ + 'connection.slave-type': 'team', + }) elif self.tunnel_conn_type: options.update({ 'ip-tunnel.local': self.ip_tunnel_local, diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 911ffd1217..ba526b1d65 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -257,6 +257,50 @@ bridge-port.hairpin-mode: yes bridge-port.priority: 32 """ +TESTCASE_TEAM = [ + { + 'type': 'team', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'team0_non_existant', + 'state': 'present', + '_ansible_check_mode': False, + } +] + +TESTCASE_TEAM_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_device +connection.interface-name: team0_non_existant +connection.autoconnect: yes +connection.type: team +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no +""" + +TESTCASE_TEAM_SLAVE = [ + { + 'type': 'team-slave', + 'conn_name': 'non_existent_nw_slaved_device', + 'ifname': 'generic_slaved_non_existant', + 'master': 'team0_non_existant', + 'state': 'present', + '_ansible_check_mode': False, + } +] + +TESTCASE_TEAM_SLAVE_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_slaved_device +connection.interface-name: generic_slaved_non_existant +connection.autoconnect: yes +connection.master: team0_non_existant +connection.slave-type: team +802-3-ethernet.mtu: auto +""" + TESTCASE_VLAN = [ { 'type': 'vlan', @@ -495,6 +539,20 @@ def 
mocked_bridge_slave_unchanged(mocker): execute_return=(0, TESTCASE_BRIDGE_SLAVE_SHOW_OUTPUT, "")) +@pytest.fixture +def mocked_team_connection_unchanged(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=(0, TESTCASE_TEAM_SHOW_OUTPUT, "")) + + +@pytest.fixture +def mocked_team_slave_connection_unchanged(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=(0, TESTCASE_TEAM_SLAVE_SHOW_OUTPUT, "")) + + @pytest.fixture def mocked_vlan_connection_unchanged(mocker): mocker_set(mocker, @@ -952,6 +1010,93 @@ def test_bridge_slave_unchanged(mocked_bridge_slave_unchanged, capfd): assert not results['changed'] +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM, indirect=['patch_ansible_module']) +def test_team_connection_create(mocked_generic_connection_create, capfd): + """ + Test : Team connection created + """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'add' + assert args[0][3] == 'type' + assert args[0][4] == 'team' + assert args[0][5] == 'con-name' + assert args[0][6] == 'non_existent_nw_device' + + for param in ['connection.autoconnect', 'connection.interface-name', 'team0_non_existant']: + assert param in args[0] + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM, indirect=['patch_ansible_module']) +def test_team_connection_unchanged(mocked_team_connection_unchanged, capfd): + """ + Test : Team connection unchanged + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert not results['changed'] + + 
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_SLAVE, indirect=['patch_ansible_module']) +def test_create_team_slave(mocked_generic_connection_create, capfd): + """ + Test if Team_slave created + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'add' + assert args[0][3] == 'type' + assert args[0][4] == 'team-slave' + assert args[0][5] == 'con-name' + assert args[0][6] == 'non_existent_nw_slaved_device' + + for param in ['connection.autoconnect', 'connection.interface-name', 'connection.master', 'team0_non_existant', 'connection.slave-type']: + assert param in args[0] + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_SLAVE, indirect=['patch_ansible_module']) +def test_team_slave_connection_unchanged(mocked_team_slave_connection_unchanged, capfd): + """ + Test : Team slave connection unchanged + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert not results['changed'] + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module']) def test_create_vlan_con(mocked_generic_connection_create, capfd): """ From 9c7b539ef62b4b18e31f3b8aeb1243fd77c9404b Mon Sep 17 00:00:00 2001 From: NivKeidan <51288016+NivKeidan@users.noreply.github.com> Date: Tue, 29 Jun 2021 08:56:59 +0300 Subject: [PATCH 0172/2828] Add fallback url for jenkins plugin (#1334) * uncoupled updates_url from plugin download urls added new parameters: versioned_plugins_url, latest_plugins_url * parameters updates_url, latest_plugins_url and versioned_plugins_url changed 
type to list of strings to implement fallback URLs usage added type conversion if they are string (backward compatibility) * removed type conversion this is handled by ansible validation fix: dont fail if first url fails * added fallback: if installation from plugin manager fails, try downloading the plugin manually * fixed test failures * PEP8 indent fix * changelog fragment * added debug outputs for new url fallback behavior * added version_added in description for latest_plugins_url Co-authored-by: Felix Fontein * added version_added in description for versioned_plugins_url Co-authored-by: Felix Fontein * Update changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml Co-authored-by: Felix Fontein * improve backwards-compatibility add optional arg to allow custom update-center.json targets * pep8 fixes * fix inconsistency in argument documentation * Apply suggestions from code review Co-authored-by: Amin Vakil * add unit tests * fix pep8 * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- .../1334-jenkins-plugin-fallback-urls.yaml | 2 + .../web_infrastructure/jenkins_plugin.py | 211 +++++++++++++----- .../web_infrastructure/test_jenkins_plugin.py | 37 +++ 3 files changed, 188 insertions(+), 62 deletions(-) create mode 100644 changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml diff --git a/changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml b/changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml new file mode 100644 index 0000000000..be0a86fa5b --- /dev/null +++ b/changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml @@ -0,0 +1,2 @@ +minor_changes: + - jenkins_plugin - add fallback url(s) for failure of plugin installation/download (https://github.com/ansible-collections/community.general/pull/1334). 
diff --git a/plugins/modules/web_infrastructure/jenkins_plugin.py b/plugins/modules/web_infrastructure/jenkins_plugin.py index 20fd8554bc..a280b50aa6 100644 --- a/plugins/modules/web_infrastructure/jenkins_plugin.py +++ b/plugins/modules/web_infrastructure/jenkins_plugin.py @@ -66,12 +66,33 @@ options: C(latest) is specified. default: 86400 updates_url: - type: str + type: list + elements: str description: - - URL of the Update Centre. - - Used as the base URL to download the plugins and the - I(update-center.json) JSON file. - default: https://updates.jenkins.io + - A list of base URL(s) to retrieve I(update-center.json), and direct plugin files from. + - This can be a list since community.general 3.3.0. + default: ['https://updates.jenkins.io', 'http://mirrors.jenkins.io'] + update_json_url_segment: + type: list + elements: str + description: + - A list of URL segment(s) to retrieve the update center json file from. + default: ['update-center.json', 'updates/update-center.json'] + version_added: 3.3.0 + latest_plugins_url_segments: + type: list + elements: str + description: + - Path inside the I(updates_url) to get latest plugins from. + default: ['latest'] + version_added: 3.3.0 + versioned_plugins_url_segments: + type: list + elements: str + description: + - Path inside the I(updates_url) to get specific version of plugins from. + default: ['download/plugins', 'plugins'] + version_added: 3.3.0 url: type: str description: @@ -283,6 +304,10 @@ import tempfile import time +class FailedInstallingWithPluginManager(Exception): + pass + + class JenkinsPlugin(object): def __init__(self, module): # To be able to call fail_json @@ -330,9 +355,42 @@ class JenkinsPlugin(object): return json_data + def _get_urls_data(self, urls, what=None, msg_status=None, msg_exception=None, **kwargs): + # Compose default messages + if msg_status is None: + msg_status = "Cannot get %s" % what + + if msg_exception is None: + msg_exception = "Retrieval of %s failed." 
% what + + errors = {} + for url in urls: + err_msg = None + try: + self.module.debug("fetching url: %s" % url) + response, info = fetch_url( + self.module, url, timeout=self.timeout, cookies=self.cookies, + headers=self.crumb, **kwargs) + + if info['status'] == 200: + return response + else: + err_msg = ("%s. fetching url %s failed. response code: %s" % (msg_status, url, info['status'])) + if info['status'] > 400: # extend error message + err_msg = "%s. response body: %s" % (err_msg, info['body']) + except Exception as e: + err_msg = "%s. fetching url %s failed. error msg: %s" % (msg_status, url, to_native(e)) + finally: + if err_msg is not None: + self.module.debug(err_msg) + errors[url] = err_msg + + # failed on all urls + self.module.fail_json(msg=msg_exception, details=errors) + def _get_url_data( self, url, what=None, msg_status=None, msg_exception=None, - **kwargs): + dont_fail=False, **kwargs): # Compose default messages if msg_status is None: msg_status = "Cannot get %s" % what @@ -347,9 +405,15 @@ class JenkinsPlugin(object): headers=self.crumb, **kwargs) if info['status'] != 200: - self.module.fail_json(msg=msg_status, details=info['msg']) + if dont_fail: + raise FailedInstallingWithPluginManager(info['msg']) + else: + self.module.fail_json(msg=msg_status, details=info['msg']) except Exception as e: - self.module.fail_json(msg=msg_exception, details=to_native(e)) + if dont_fail: + raise FailedInstallingWithPluginManager(e) + else: + self.module.fail_json(msg=msg_exception, details=to_native(e)) return response @@ -394,6 +458,39 @@ class JenkinsPlugin(object): break + def _install_with_plugin_manager(self): + if not self.module.check_mode: + # Install the plugin (with dependencies) + install_script = ( + 'd = Jenkins.instance.updateCenter.getPlugin("%s")' + '.deploy(); d.get();' % self.params['name']) + + if self.params['with_dependencies']: + install_script = ( + 'Jenkins.instance.updateCenter.getPlugin("%s")' + 
'.getNeededDependencies().each{it.deploy()}; %s' % ( + self.params['name'], install_script)) + + script_data = { + 'script': install_script + } + data = urlencode(script_data) + + # Send the installation request + r = self._get_url_data( + "%s/scriptText" % self.url, + msg_status="Cannot install plugin.", + msg_exception="Plugin installation has failed.", + data=data, + dont_fail=True) + + hpi_file = '%s/plugins/%s.hpi' % ( + self.params['jenkins_home'], + self.params['name']) + + if os.path.isfile(hpi_file): + os.remove(hpi_file) + def install(self): changed = False plugin_file = ( @@ -402,39 +499,13 @@ class JenkinsPlugin(object): self.params['name'])) if not self.is_installed and self.params['version'] in [None, 'latest']: - if not self.module.check_mode: - # Install the plugin (with dependencies) - install_script = ( - 'd = Jenkins.instance.updateCenter.getPlugin("%s")' - '.deploy(); d.get();' % self.params['name']) + try: + self._install_with_plugin_manager() + changed = True + except FailedInstallingWithPluginManager: # Fallback to manually downloading the plugin + pass - if self.params['with_dependencies']: - install_script = ( - 'Jenkins.instance.updateCenter.getPlugin("%s")' - '.getNeededDependencies().each{it.deploy()}; %s' % ( - self.params['name'], install_script)) - - script_data = { - 'script': install_script - } - data = urlencode(script_data) - - # Send the installation request - r = self._get_url_data( - "%s/scriptText" % self.url, - msg_status="Cannot install plugin.", - msg_exception="Plugin installation has failed.", - data=data) - - hpi_file = '%s/plugins/%s.hpi' % ( - self.params['jenkins_home'], - self.params['name']) - - if os.path.isfile(hpi_file): - os.remove(hpi_file) - - changed = True - else: + if not changed: # Check if the plugin directory exists if not os.path.isdir(self.params['jenkins_home']): self.module.fail_json( @@ -449,26 +520,17 @@ class JenkinsPlugin(object): if self.params['version'] in [None, 'latest']: # Take latest 
version - plugin_url = ( - "%s/latest/%s.hpi" % ( - self.params['updates_url'], - self.params['name'])) + plugin_urls = self._get_latest_plugin_urls() else: # Take specific version - plugin_url = ( - "{0}/download/plugins/" - "{1}/{2}/{1}.hpi".format( - self.params['updates_url'], - self.params['name'], - self.params['version'])) - + plugin_urls = self._get_versioned_plugin_urls() if ( self.params['updates_expiration'] == 0 or self.params['version'] not in [None, 'latest'] or checksum_old is None): # Download the plugin file directly - r = self._download_plugin(plugin_url) + r = self._download_plugin(plugin_urls) # Write downloaded plugin into file if checksums don't match if checksum_old is None: @@ -498,7 +560,7 @@ class JenkinsPlugin(object): # If the latest version changed, download it if checksum_old != to_bytes(plugin_data['sha1']): if not self.module.check_mode: - r = self._download_plugin(plugin_url) + r = self._download_plugin(plugin_urls) self._write_file(plugin_file, r) changed = True @@ -521,6 +583,27 @@ class JenkinsPlugin(object): return changed + def _get_latest_plugin_urls(self): + urls = [] + for base_url in self.params['updates_url']: + for update_segment in self.params['latest_plugins_url_segments']: + urls.append("{0}/{1}/{2}.hpi".format(base_url, update_segment, self.params['name'])) + return urls + + def _get_versioned_plugin_urls(self): + urls = [] + for base_url in self.params['updates_url']: + for versioned_segment in self.params['versioned_plugins_url_segments']: + urls.append("{0}/{1}/{2}/{3}/{2}.hpi".format(base_url, versioned_segment, self.params['name'], self.params['version'])) + return urls + + def _get_update_center_urls(self): + urls = [] + for base_url in self.params['updates_url']: + for update_json in self.params['update_json_url_segment']: + urls.append("{0}/{1}".format(base_url, update_json)) + return urls + def _download_updates(self): updates_filename = 'jenkins-plugin-cache.json' updates_dir = 
os.path.expanduser('~/.ansible/tmp') @@ -540,11 +623,11 @@ class JenkinsPlugin(object): # Download the updates file if needed if download_updates: - url = "%s/update-center.json" % self.params['updates_url'] + urls = self._get_update_center_urls() # Get the data - r = self._get_url_data( - url, + r = self._get_urls_data( + urls, msg_status="Remote updates not found.", msg_exception="Updates download failed.") @@ -602,15 +685,14 @@ class JenkinsPlugin(object): return data['plugins'][self.params['name']] - def _download_plugin(self, plugin_url): + def _download_plugin(self, plugin_urls): # Download the plugin - r = self._get_url_data( - plugin_url, + + return self._get_urls_data( + plugin_urls, msg_status="Plugin not found.", msg_exception="Plugin download failed.") - return r - def _write_file(self, f, data): # Store the plugin into a temp file and then move it tmp_f_fd, tmp_f = tempfile.mkstemp() @@ -721,7 +803,12 @@ def main(): default='present'), timeout=dict(default=30, type="int"), updates_expiration=dict(default=86400, type="int"), - updates_url=dict(default='https://updates.jenkins.io'), + updates_url=dict(type="list", elements="str", default=['https://updates.jenkins.io', + 'http://mirrors.jenkins.io']), + update_json_url_segment=dict(type="list", elements="str", default=['update-center.json', + 'updates/update-center.json']), + latest_plugins_url_segments=dict(type="list", elements="str", default=['latest']), + versioned_plugins_url_segments=dict(type="list", elements="str", default=['download/plugins', 'plugins']), url=dict(default='http://localhost:8080'), url_password=dict(no_log=True), version=dict(), diff --git a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py index ccfeb24536..b928ad824c 100644 --- a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py +++ b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py @@ -151,3 +151,40 
@@ def test__get_json_data(mocker): 'CSRF') assert isinstance(json_data, Mapping) + + +def test__new_fallback_urls(mocker): + "test generation of new fallback URLs" + + params = { + "url": "http://fake.jenkins.server", + "timeout": 30, + "name": "test-plugin", + "version": "1.2.3", + "updates_url": ["https://some.base.url"], + "latest_plugins_url_segments": ["test_latest"], + "versioned_plugins_url_segments": ["ansible", "versioned_plugins"], + "update_json_url_segment": ["unreachable", "updates/update-center.json"], + } + module = mocker.Mock() + module.params = params + + JenkinsPlugin._csrf_enabled = pass_function + JenkinsPlugin._get_installed_plugins = pass_function + + jenkins_plugin = JenkinsPlugin(module) + + latest_urls = jenkins_plugin._get_latest_plugin_urls() + assert isInList(latest_urls, "https://some.base.url/test_latest/test-plugin.hpi") + versioned_urls = jenkins_plugin._get_versioned_plugin_urls() + assert isInList(versioned_urls, "https://some.base.url/versioned_plugins/test-plugin/1.2.3/test-plugin.hpi") + json_urls = jenkins_plugin._get_update_center_urls() + assert isInList(json_urls, "https://some.base.url/updates/update-center.json") + + +def isInList(l, i): + print("checking if %s in %s" % (i, l)) + for item in l: + if item == i: + return True + return False From 677e88b2574b3f859aeb0dd3fbecaa9aa0d04638 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 29 Jun 2021 12:38:45 +0200 Subject: [PATCH 0173/2828] The next release will be 3.4.0. 
--- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index c559415eb2..640f4151d3 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.3.0 +version: 3.4.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 0e829e6a23548ee948bfccc456498554832a56af Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Wed, 30 Jun 2021 15:01:17 +0200 Subject: [PATCH 0174/2828] Fix bug when 2 identical executions in same auth flow (#2904) * Fix bug when 2 identical executions in same auth flow * Add changelog fragment * Fix unit tests * Update changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...when-2-identical-executions-in-same-auth-flow.yml | 3 +++ .../identity/keycloak/keycloak_authentication.py | 12 ++++++------ .../keycloak/test_keycloak_authentication.py | 6 +++--- 3 files changed, 12 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml diff --git a/changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml b/changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml new file mode 100644 index 0000000000..21fde3eb58 --- /dev/null +++ b/changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml @@ -0,0 +1,3 @@ +bugfixes: + - keycloak_authentication - fix bug when two identical executions are in the same authentication flow + (https://github.com/ansible-collections/community.general/pull/2904). 
diff --git a/plugins/modules/identity/keycloak/keycloak_authentication.py b/plugins/modules/identity/keycloak/keycloak_authentication.py index 98b6378dac..9fd04eb70b 100644 --- a/plugins/modules/identity/keycloak/keycloak_authentication.py +++ b/plugins/modules/identity/keycloak/keycloak_authentication.py @@ -200,11 +200,11 @@ def create_or_update_executions(kc, config, realm='master'): try: changed = False if "authenticationExecutions" in config: + # Get existing executions on the Keycloak server for this alias + existing_executions = kc.get_executions_representation(config, realm=realm) for new_exec_index, new_exec in enumerate(config["authenticationExecutions"], start=0): if new_exec["index"] is not None: new_exec_index = new_exec["index"] - # Get existing executions on the Keycloak server for this alias - existing_executions = kc.get_executions_representation(config, realm=realm) exec_found = False # Get flowalias parent if given if new_exec["flowAlias"] is not None: @@ -222,6 +222,9 @@ def create_or_update_executions(kc, config, realm='master'): # Compare the executions to see if it need changes if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index: changed = True + id_to_update = existing_executions[exec_index]["id"] + # Remove exec from list in case 2 exec with same name + existing_executions[exec_index].clear() elif new_exec["providerId"] is not None: kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm) changed = True @@ -229,13 +232,10 @@ def create_or_update_executions(kc, config, realm='master'): kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm) changed = True if changed: - # Get existing executions on the Keycloak server for this alias - existing_executions = kc.get_executions_representation(config, realm=realm) - exec_index = find_exec_in_executions(new_exec, existing_executions) if exec_index != -1: # Update the existing execution updated_exec = { - 
"id": existing_executions[exec_index]["id"] + "id": id_to_update } # add the execution configuration if new_exec["authenticationConfig"] is not None: diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py index 91e34eea7b..db0168aa83 100644 --- a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py @@ -343,7 +343,7 @@ class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected @@ -434,7 +434,7 @@ class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected @@ -611,7 +611,7 @@ class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + 
self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 1) # Verify that the module's changed status matches what is expected From a97d82be88407384f977f50270bf314b1f76d22f Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Wed, 30 Jun 2021 17:36:56 +0430 Subject: [PATCH 0175/2828] Add integration tests for snap (#2907) * Add integration tests for snap * Also test on fedora and remove snapd if it was not installed * disable test for now --- tests/integration/targets/snap/aliases | 6 ++ tests/integration/targets/snap/tasks/main.yml | 72 +++++++++++++++++++ 2 files changed, 78 insertions(+) create mode 100644 tests/integration/targets/snap/aliases create mode 100644 tests/integration/targets/snap/tasks/main.yml diff --git a/tests/integration/targets/snap/aliases b/tests/integration/targets/snap/aliases new file mode 100644 index 0000000000..d7f5ce60c5 --- /dev/null +++ b/tests/integration/targets/snap/aliases @@ -0,0 +1,6 @@ +shippable/posix/group1 +skip/aix +skip/freebsd +skip/osx +skip/macos +disabled #FIXME 2609 diff --git a/tests/integration/targets/snap/tasks/main.yml b/tests/integration/targets/snap/tasks/main.yml new file mode 100644 index 0000000000..e015122ff2 --- /dev/null +++ b/tests/integration/targets/snap/tasks/main.yml @@ -0,0 +1,72 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: install snapd + apt: + name: snapd + state: present + register: snapd_install_ubuntu + when: ansible_distribution == 'Ubuntu' + +- name: install snapd + dnf: + name: snapd + state: present + register: snapd_install_fedora + when: ansible_distribution == 'Fedora' + +- block: + - name: install package + community.general.snap: + name: hello-world + state: 
present + register: install + + - name: install package again + community.general.snap: + name: hello-world + state: present + register: install_again + + - name: Assert package has been installed just once + assert: + that: + - install is changed + - install_again is not changed + + - name: check package has been installed correctly + command: hello-world + + - name: remove package + community.general.snap: + name: hello-world + state: absent + register: remove + + - name: remove package again + community.general.snap: + name: hello-world + state: absent + register: remove_again + + - name: Assert package has been removed just once + assert: + that: + - remove is changed + - remove_again is not changed + when: ansible_distribution in ['Ubuntu','Fedora'] + +- name: Remove snapd in case it was not installed + apt: + name: snapd + state: absent + when: snapd_install_ubuntu is changed and ansible_distribution == 'Ubuntu' + +- name: Remove snapd in case it was not installed + dnf: + name: snapd + state: absent + when: snapd_install_fedora is changed and ansible_distribution == 'Fedora' From c63dc624b77a45585b323c67096d2980e687b9c4 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 1 Jul 2021 18:53:48 +0200 Subject: [PATCH 0176/2828] Fix snap module, and module helper behavior on rc != 0 in output (#2912) * Try some snap fixes. * Fix logic. * Try to run tests privileged. * Prevent failure on rc != 0. * Fix formatting. * Revert "Try to run tests privileged." This reverts commit 77ca91f5020233304a8ef6d9f504663de8e3136c. * Try to run tests on RHEL instead. * Make sure that snapd is running. * Add changelog fragment. * str -> to_native. * Make sure that installed binary is actually found. * Add check mode tests. * Mention #2835 in changelog fragment. 
--- .../fragments/2912-snap-module-helper.yml | 3 + plugins/module_utils/mh/base.py | 5 +- plugins/modules/packaging/os/snap.py | 26 +++--- tests/integration/targets/snap/aliases | 2 +- .../targets/snap/defaults/main.yml | 4 + .../targets/snap/handlers/main.yml | 5 ++ tests/integration/targets/snap/meta/main.yml | 3 + .../integration/targets/snap/tasks/Debian.yml | 1 + .../integration/targets/snap/tasks/Fedora.yml | 1 + .../integration/targets/snap/tasks/RedHat.yml | 1 + .../targets/snap/tasks/default.yml | 15 ++++ tests/integration/targets/snap/tasks/main.yml | 88 ++++++++++++------- .../targets/snap/tasks/nothing.yml | 2 + 13 files changed, 111 insertions(+), 45 deletions(-) create mode 100644 changelogs/fragments/2912-snap-module-helper.yml create mode 100644 tests/integration/targets/snap/defaults/main.yml create mode 100644 tests/integration/targets/snap/handlers/main.yml create mode 100644 tests/integration/targets/snap/meta/main.yml create mode 120000 tests/integration/targets/snap/tasks/Debian.yml create mode 120000 tests/integration/targets/snap/tasks/Fedora.yml create mode 120000 tests/integration/targets/snap/tasks/RedHat.yml create mode 100644 tests/integration/targets/snap/tasks/default.yml create mode 100644 tests/integration/targets/snap/tasks/nothing.yml diff --git a/changelogs/fragments/2912-snap-module-helper.yml b/changelogs/fragments/2912-snap-module-helper.yml new file mode 100644 index 0000000000..cb9935a5e4 --- /dev/null +++ b/changelogs/fragments/2912-snap-module-helper.yml @@ -0,0 +1,3 @@ +bugfixes: + - module_helper module utils - avoid failing when non-zero ``rc`` is present on regular exit (https://github.com/ansible-collections/community.general/pull/2912). 
+ - snap - fix various bugs which prevented the module from working at all, and which resulted in ``state=absent`` fail on absent snaps (https://github.com/ansible-collections/community.general/issues/2835, https://github.com/ansible-collections/community.general/issues/2906, https://github.com/ansible-collections/community.general/pull/2912). diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py index e0de7f2fdd..a120c2556e 100644 --- a/plugins/module_utils/mh/base.py +++ b/plugins/module_utils/mh/base.py @@ -59,4 +59,7 @@ class ModuleHelperBase(object): self.__init_module__() self.__run__() self.__quit_module__() - self.module.exit_json(changed=self.has_changed(), **self.output) + output = self.output + if 'failed' not in output: + output['failed'] = False + self.module.exit_json(changed=self.has_changed(), **output) diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py index fab2558ccf..8051b90445 100644 --- a/plugins/modules/packaging/os/snap.py +++ b/plugins/modules/packaging/os/snap.py @@ -107,6 +107,8 @@ snaps_removed: import re +from ansible.module_utils.common.text.converters import to_native + from ansible_collections.community.general.plugins.module_utils.module_helper import ( CmdStateModuleHelper, ArgFormat, ModuleHelperException ) @@ -123,7 +125,7 @@ __state_map = dict( def _state_map(value): - return __state_map[value] + return [__state_map[value]] class Snap(CmdStateModuleHelper): @@ -163,20 +165,20 @@ class Snap(CmdStateModuleHelper): results[i].append(output[i]) return [ - '; '.join(results[0]), + '; '.join([to_native(x) for x in results[0]]), self._first_non_zero(results[1]), '\n'.join(results[2]), '\n'.join(results[3]), ] def snap_exists(self, snap_name): - return 0 == self.run_command(params=[{'state': 'info'}, {'name': [snap_name]}])[0] + return 0 == self.run_command(params=[{'state': 'info'}, {'name': snap_name}])[0] def is_snap_installed(self, snap_name): - return 0 == 
self.run_command(params=[{'state': 'list'}, {'name': [snap_name]}])[0] + return 0 == self.run_command(params=[{'state': 'list'}, {'name': snap_name}])[0] def is_snap_enabled(self, snap_name): - rc, out, err = self.run_command(params=[{'state': 'list'}, {'name': [snap_name]}]) + rc, out, err = self.run_command(params=[{'state': 'list'}, {'name': snap_name}]) if rc != 0: return None result = out.splitlines()[1] @@ -196,7 +198,7 @@ class Snap(CmdStateModuleHelper): self.validate_input_snaps() # if snap doesnt exist, it will explode when trying to install self.vars.meta('classic').set(output=True) self.vars.meta('channel').set(output=True) - actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)] + actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)] if not actionable_snaps: return self.changed = True @@ -207,9 +209,9 @@ class Snap(CmdStateModuleHelper): has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable' has_multiple_snaps = len(actionable_snaps) > 1 if has_one_pkg_params and has_multiple_snaps: - commands = [params + [s] for s in actionable_snaps] + commands = [params + [{'actionable_snaps': [s]}] for s in actionable_snaps] else: - commands = [params + actionable_snaps] + commands = [params + [{'actionable_snaps': actionable_snaps}]] self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) if rc == 0: return @@ -227,7 +229,7 @@ class Snap(CmdStateModuleHelper): def state_absent(self): self.validate_input_snaps() # if snap doesnt exist, it will be absent by definition - actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)] + actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)] if not actionable_snaps: return self.changed = True @@ -235,7 +237,7 @@ class Snap(CmdStateModuleHelper): if self.module.check_mode: return params = ['classic', 'channel', 'state'] # get base cmd parts - commands = [params + actionable_snaps] + commands = [params + 
[{'actionable_snaps': actionable_snaps}]] self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) if rc == 0: return @@ -253,7 +255,7 @@ class Snap(CmdStateModuleHelper): if self.module.check_mode: return params = ['classic', 'channel', 'state'] # get base cmd parts - commands = [params + actionable_snaps] + commands = [params + [{'actionable_snaps': actionable_snaps}]] self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) if rc == 0: return @@ -271,7 +273,7 @@ class Snap(CmdStateModuleHelper): if self.module.check_mode: return params = ['classic', 'channel', 'state'] # get base cmd parts - commands = [params + actionable_snaps] + commands = [params + [{'actionable_snaps': actionable_snaps}]] self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) if rc == 0: return diff --git a/tests/integration/targets/snap/aliases b/tests/integration/targets/snap/aliases index d7f5ce60c5..ee303bf346 100644 --- a/tests/integration/targets/snap/aliases +++ b/tests/integration/targets/snap/aliases @@ -3,4 +3,4 @@ skip/aix skip/freebsd skip/osx skip/macos -disabled #FIXME 2609 +skip/docker diff --git a/tests/integration/targets/snap/defaults/main.yml b/tests/integration/targets/snap/defaults/main.yml new file mode 100644 index 0000000000..2290001f7e --- /dev/null +++ b/tests/integration/targets/snap/defaults/main.yml @@ -0,0 +1,4 @@ +has_snap: false + +snap_packages: + - snapd diff --git a/tests/integration/targets/snap/handlers/main.yml b/tests/integration/targets/snap/handlers/main.yml new file mode 100644 index 0000000000..a80cc98e49 --- /dev/null +++ b/tests/integration/targets/snap/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Remove snapd + package: + name: "{{ snap_packages }}" + state: absent diff --git a/tests/integration/targets/snap/meta/main.yml b/tests/integration/targets/snap/meta/main.yml new file mode 100644 index 0000000000..0e51c36ebd --- /dev/null +++ b/tests/integration/targets/snap/meta/main.yml @@ -0,0 +1,3 @@ 
+dependencies: + - setup_pkg_mgr + - setup_epel diff --git a/tests/integration/targets/snap/tasks/Debian.yml b/tests/integration/targets/snap/tasks/Debian.yml new file mode 120000 index 0000000000..0abaec1677 --- /dev/null +++ b/tests/integration/targets/snap/tasks/Debian.yml @@ -0,0 +1 @@ +default.yml \ No newline at end of file diff --git a/tests/integration/targets/snap/tasks/Fedora.yml b/tests/integration/targets/snap/tasks/Fedora.yml new file mode 120000 index 0000000000..0abaec1677 --- /dev/null +++ b/tests/integration/targets/snap/tasks/Fedora.yml @@ -0,0 +1 @@ +default.yml \ No newline at end of file diff --git a/tests/integration/targets/snap/tasks/RedHat.yml b/tests/integration/targets/snap/tasks/RedHat.yml new file mode 120000 index 0000000000..0abaec1677 --- /dev/null +++ b/tests/integration/targets/snap/tasks/RedHat.yml @@ -0,0 +1 @@ +default.yml \ No newline at end of file diff --git a/tests/integration/targets/snap/tasks/default.yml b/tests/integration/targets/snap/tasks/default.yml new file mode 100644 index 0000000000..4cc38f7bf2 --- /dev/null +++ b/tests/integration/targets/snap/tasks/default.yml @@ -0,0 +1,15 @@ +--- +- name: Install snapd + package: + name: "{{ snap_packages }}" + state: present + notify: Remove snapd + +- name: Make sure that snapd is running + service: + name: snapd + state: started + +- name: Inform that snap is installed + set_fact: + has_snap: true diff --git a/tests/integration/targets/snap/tasks/main.yml b/tests/integration/targets/snap/tasks/main.yml index e015122ff2..73604d3895 100644 --- a/tests/integration/targets/snap/tasks/main.yml +++ b/tests/integration/targets/snap/tasks/main.yml @@ -4,28 +4,46 @@ # and should not be used as examples of how to write Ansible roles # #################################################################### -- name: install snapd - apt: - name: snapd - state: present - register: snapd_install_ubuntu - when: ansible_distribution == 'Ubuntu' - -- name: install snapd - dnf: - name: snapd - 
state: present - register: snapd_install_fedora - when: ansible_distribution == 'Fedora' +- name: Include distribution specific tasks + include_tasks: "{{ lookup('first_found', params) }}" + vars: + params: + files: + - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml" + - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml" + - "{{ ansible_facts.distribution }}.yml" + - "{{ ansible_facts.os_family }}.yml" + - "nothing.yml" + paths: + - "{{ role_path }}/tasks" - block: - - name: install package + - name: Make sure package is not installed + community.general.snap: + name: hello-world + state: absent + + - name: Install package (check mode) + community.general.snap: + name: hello-world + state: present + register: install_check + check_mode: true + + - name: Install package community.general.snap: name: hello-world state: present register: install - - name: install package again + - name: Install package again (check mode) + community.general.snap: + name: hello-world + state: present + register: install_again_check + check_mode: true + + - name: Install package again community.general.snap: name: hello-world state: present @@ -35,18 +53,36 @@ assert: that: - install is changed + - install_check is changed - install_again is not changed + - install_again_check is not changed - - name: check package has been installed correctly + - name: Check package has been installed correctly command: hello-world + environment: + PATH: /var/lib/snapd/snap/bin/ - - name: remove package + - name: Remove package (check mode) + community.general.snap: + name: hello-world + state: absent + register: remove_check + check_mode: true + + - name: Remove package community.general.snap: name: hello-world state: absent register: remove - - name: remove package again + - name: Remove package again (check mode) + community.general.snap: + name: hello-world + state: absent + register: remove_again_check + check_mode: true + + - 
name: Remove package again community.general.snap: name: hello-world state: absent @@ -56,17 +92,7 @@ assert: that: - remove is changed + - remove_check is changed - remove_again is not changed - when: ansible_distribution in ['Ubuntu','Fedora'] - -- name: Remove snapd in case it was not installed - apt: - name: snapd - state: absent - when: snapd_install_ubuntu is changed and ansible_distribution == 'Ubuntu' - -- name: Remove snapd in case it was not installed - dnf: - name: snapd - state: absent - when: snapd_install_fedora is changed and ansible_distribution == 'Fedora' + - remove_again_check is not changed + when: has_snap diff --git a/tests/integration/targets/snap/tasks/nothing.yml b/tests/integration/targets/snap/tasks/nothing.yml new file mode 100644 index 0000000000..11642d1fcd --- /dev/null +++ b/tests/integration/targets/snap/tasks/nothing.yml @@ -0,0 +1,2 @@ +--- +# Do nothing From 00aa1250eec0582953e8a39faab8228b4ffd6cab Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Fri, 2 Jul 2021 15:22:00 +0430 Subject: [PATCH 0177/2828] Add integration test for classic snap (#2920) * Add integration test for classic snap * Add comments and check remove without classic * Comment new tests for now --- tests/integration/targets/snap/tasks/main.yml | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/tests/integration/targets/snap/tasks/main.yml b/tests/integration/targets/snap/tasks/main.yml index 73604d3895..6e877cd0de 100644 --- a/tests/integration/targets/snap/tasks/main.yml +++ b/tests/integration/targets/snap/tasks/main.yml @@ -95,4 +95,51 @@ - remove_check is changed - remove_again is not changed - remove_again_check is not changed + +# - name: Make sure package from classic snap is not installed +# community.general.snap: +# name: nvim +# state: absent +# +# - name: Install package from classic snap +# community.general.snap: +# name: nvim +# state: present +# classic: true +# register: classic_install +# +# # testing classic idempotency +# - 
name: Install package from classic snap again +# community.general.snap: +# name: nvim +# state: present +# classic: true +# register: classic_install_again +# +# - name: Assert package has been installed just once +# assert: +# that: +# - classic_install is changed +# - classic_install_again is not changed +# +# # this is just testing if a package which has been installed +# # with true classic can be removed without setting classic to true +# - name: Remove package from classic snap without setting classic to true +# community.general.snap: +# name: nvim +# state: absent +# register: classic_remove_without_true_classic +# +# - name: Remove package from classic snap with setting classic to true +# community.general.snap: +# name: nvim +# state: absent +# classic: true +# register: classic_remove_with_true_classic +# +# - name: Assert package has been removed without setting classic to true +# assert: +# that: +# - classic_remove_without_ture_classic is changed +# - classic_remove_with_ture_classic is not changed when: has_snap From ffe505a798a774473d7f5a200a8f1153c853fa3e Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Fri, 2 Jul 2021 15:30:40 -0400 Subject: [PATCH 0178/2828] archive - fix removal failures for nested files with tar archives (#2923) * Initial commit * Adding changelog fragment --- .../fragments/2923-archive-remove-bugfix.yml | 4 +++ plugins/modules/files/archive.py | 15 ++++---- .../targets/archive/tasks/remove.yml | 34 ++++++++++++++++++- 3 files changed, 45 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2923-archive-remove-bugfix.yml diff --git a/changelogs/fragments/2923-archive-remove-bugfix.yml b/changelogs/fragments/2923-archive-remove-bugfix.yml new file mode 100644 index 0000000000..4bef5ef459 --- /dev/null +++ b/changelogs/fragments/2923-archive-remove-bugfix.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - archive - fixed task failure when using the ``remove`` option with a ``path`` containing nested files for + ``format``s 
other than ``zip`` (https://github.com/ansible-collections/community.general/issues/2919). diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 5cdd6630d1..a2d3376613 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -399,13 +399,14 @@ class Archive(object): def remove_targets(self): for path in self.successes: - try: - if os.path.isdir(path): - shutil.rmtree(path) - else: - os.remove(path) - except OSError: - self.errors.append(_to_native(path)) + if os.path.exists(path): + try: + if os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + except OSError: + self.errors.append(_to_native(path)) for path in self.paths: try: if os.path.isdir(path): diff --git a/tests/integration/targets/archive/tasks/remove.yml b/tests/integration/targets/archive/tasks/remove.yml index 9600eb9f6d..9f085e901a 100644 --- a/tests/integration/targets/archive/tasks/remove.yml +++ b/tests/integration/targets/archive/tasks/remove.yml @@ -148,7 +148,39 @@ - name: verify that excluded sub file is still present file: path={{ output_dir }}/tmpdir/sub/subfile.txt state=file -- name: remove temporary directory +- name: prep our files in tmpdir again + copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} + with_items: + - foo.txt + - bar.txt + - empty.txt + - sub + - sub/subfile.txt + +- name: archive using gz and remove src directory + archive: + path: + - "{{ output_dir }}/tmpdir/" + dest: "{{ output_dir }}/archive_remove_05.gz" + format: gz + remove: yes + exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + register: archive_remove_result_05 + +- name: verify that the files archived + file: path={{ output_dir }}/archive_remove_05.gz state=file + +- name: Verify source files were removed file: path: "{{ output_dir }}/tmpdir" state: absent + register: archive_source_file_removal_05 + +- name: Verify that task status is success + assert: + that: + - archive_remove_result_05 is success + - 
archive_source_file_removal_05 is not changed + +- name: remove our gz + file: path="{{ output_dir }}/archive_remove_05.gz" state=absent From a0915036f9e98976061f4b65dfa5dd361e18ff0e Mon Sep 17 00:00:00 2001 From: Shahar Mor Date: Fri, 2 Jul 2021 22:42:50 +0300 Subject: [PATCH 0179/2828] npm - fix installing from package.json (#2924) correctly handle cases where a dependency does not have a `version` property because it is either missing or invalid --- .../fragments/2924-npm-fix-package-json.yml | 3 +++ plugins/modules/packaging/language/npm.py | 5 +++-- .../modules/packaging/language/test_npm.py | 19 +++++++++++++++++++ 3 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2924-npm-fix-package-json.yml diff --git a/changelogs/fragments/2924-npm-fix-package-json.yml b/changelogs/fragments/2924-npm-fix-package-json.yml new file mode 100644 index 0000000000..ce4a416cf7 --- /dev/null +++ b/changelogs/fragments/2924-npm-fix-package-json.yml @@ -0,0 +1,3 @@ +bugfixes: + - npm - correctly handle cases where a dependency does not have a ``version`` property because it is either missing or invalid + (https://github.com/ansible-collections/community.general/issues/2917). 
diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/packaging/language/npm.py index 283b8e0be7..de316d397f 100644 --- a/plugins/modules/packaging/language/npm.py +++ b/plugins/modules/packaging/language/npm.py @@ -216,7 +216,6 @@ class Npm(object): self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e)) if 'dependencies' in data: for dep, props in data['dependencies'].items(): - dep_version = dep + '@' + str(props['version']) if 'missing' in props and props['missing']: missing.append(dep) @@ -224,7 +223,9 @@ class Npm(object): missing.append(dep) else: installed.append(dep) - installed.append(dep_version) + if 'version' in props and props['version']: + dep_version = dep + '@' + str(props['version']) + installed.append(dep_version) if self.name_version and self.name_version not in installed: missing.append(self.name) # Named dependency not installed diff --git a/tests/unit/plugins/modules/packaging/language/test_npm.py b/tests/unit/plugins/modules/packaging/language/test_npm.py index abdacc6aef..89de549915 100644 --- a/tests/unit/plugins/modules/packaging/language/test_npm.py +++ b/tests/unit/plugins/modules/packaging/language/test_npm.py @@ -52,6 +52,25 @@ class NPMModuleTestCase(ModuleTestCase): call(['/testbin/npm', 'install', '--global', 'coffee-script'], check_rc=True, cwd=None), ]) + def test_present_missing(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'present', + }) + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"missing" : true}}}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 'install', '--global', 'coffee-script'], check_rc=True, cwd=None), + ]) + def test_present_version(self): set_module_args({ 'name': 
'coffee-script', From 9b02230477391cd4bda861bb3e6a5d2640407363 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 4 Jul 2021 02:31:30 +1200 Subject: [PATCH 0180/2828] snap - fixed param order (#2918) * fixed param order * added changelog fragment * rebased and uncommented tests per PR * added /snap link in RH * typo in tests * Update tests/integration/targets/snap/tasks/default.yml Co-authored-by: Felix Fontein --- .../fragments/2918-snap-param-order.yml | 2 + plugins/modules/packaging/os/snap.py | 8 +- .../targets/snap/tasks/default.yml | 6 ++ tests/integration/targets/snap/tasks/main.yml | 92 +++++++++---------- 4 files changed, 58 insertions(+), 50 deletions(-) create mode 100644 changelogs/fragments/2918-snap-param-order.yml diff --git a/changelogs/fragments/2918-snap-param-order.yml b/changelogs/fragments/2918-snap-param-order.yml new file mode 100644 index 0000000000..85b907f8b6 --- /dev/null +++ b/changelogs/fragments/2918-snap-param-order.yml @@ -0,0 +1,2 @@ +bugfixes: + - snap - fixed the order of the ``--classic`` parameter in the command line invocation (https://github.com/ansible-collections/community.general/issues/2916). 
diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py index 8051b90445..6da8b0e766 100644 --- a/plugins/modules/packaging/os/snap.py +++ b/plugins/modules/packaging/os/snap.py @@ -133,10 +133,10 @@ class Snap(CmdStateModuleHelper): module = dict( argument_spec={ 'name': dict(type='list', elements='str', required=True), - 'state': dict(type='str', required=False, default='present', + 'state': dict(type='str', default='present', choices=['absent', 'present', 'enabled', 'disabled']), - 'classic': dict(type='bool', required=False, default=False), - 'channel': dict(type='str', required=False, default='stable'), + 'classic': dict(type='bool', default=False), + 'channel': dict(type='str', default='stable'), }, supports_check_mode=True, ) @@ -205,7 +205,7 @@ class Snap(CmdStateModuleHelper): self.vars.snaps_installed = actionable_snaps if self.module.check_mode: return - params = ['classic', 'channel', 'state'] # get base cmd parts + params = ['state', 'classic', 'channel'] # get base cmd parts has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable' has_multiple_snaps = len(actionable_snaps) > 1 if has_one_pkg_params and has_multiple_snaps: diff --git a/tests/integration/targets/snap/tasks/default.yml b/tests/integration/targets/snap/tasks/default.yml index 4cc38f7bf2..938addc33a 100644 --- a/tests/integration/targets/snap/tasks/default.yml +++ b/tests/integration/targets/snap/tasks/default.yml @@ -10,6 +10,12 @@ name: snapd state: started +- name: Create link /snap + file: + src: /var/lib/snapd/snap + dest: /snap + state: link + - name: Inform that snap is installed set_fact: has_snap: true diff --git a/tests/integration/targets/snap/tasks/main.yml b/tests/integration/targets/snap/tasks/main.yml index 6e877cd0de..0f8c9b4c26 100644 --- a/tests/integration/targets/snap/tasks/main.yml +++ b/tests/integration/targets/snap/tasks/main.yml @@ -96,50 +96,50 @@ - remove_again is not changed - remove_again_check is not 
changed -# - name: Make sure package from classic snap is not installed -# community.general.snap: -# name: nvim -# state: absent -# -# - name: Install package from classic snap -# community.general.snap: -# name: nvim -# state: present -# classic: true -# register: classic_install -# -# # testing classic idempotency -# - name: Install package from classic snap again -# community.general.snap: -# name: nvim -# state: present -# classic: true -# register: classic_install_again -# -# - name: Assert package has been installed just once -# assert: -# that: -# - classic_install is changed -# - classic_install_again is not changed -# -# # this is just testing if a package which has been installed -# # with true classic can be removed without setting classic to true -# - name: Remove package from classic snap without setting classic to true -# community.general.snap: -# name: nvim -# state: absent -# register: classic_remove_without_true_classic -# -# - name: Remove package from classic snap with setting classic to true -# community.general.snap: -# name: nvim -# state: absent -# classic: true -# register: classic_remove_with_true_classic -# -# - name: Assert package has been removed without setting classic to true -# assert: -# that: -# - classic_remove_without_ture_classic is changed -# - classic_remove_with_ture_classic is not changed + - name: Make sure package from classic snap is not installed + community.general.snap: + name: nvim + state: absent + + - name: Install package from classic snap + community.general.snap: + name: nvim + state: present + classic: true + register: classic_install + + # testing classic idempotency + - name: Install package from classic snap again + community.general.snap: + name: nvim + state: present + classic: true + register: classic_install_again + + - name: Assert package has been installed just once + assert: + that: + - classic_install is changed + - classic_install_again is not changed + + # this is just testing if a package which 
has been installed + # with true classic can be removed without setting classic to true + - name: Remove package from classic snap without setting classic to true + community.general.snap: + name: nvim + state: absent + register: classic_remove_without_true_classic + + - name: Remove package from classic snap with setting classic to true + community.general.snap: + name: nvim + state: absent + classic: true + register: classic_remove_with_true_classic + + - name: Assert package has been removed without setting classic to true + assert: + that: + - classic_remove_without_true_classic is changed + - classic_remove_with_true_classic is not changed when: has_snap From b2b487753264417b161f9299d7386d71a1f857cf Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Mon, 5 Jul 2021 23:16:19 +0430 Subject: [PATCH 0181/2828] lvol: honor check_mode on thinpool (#2935) * lvol: support check_mode on thinpool * add changelog * Add %s when needed * correct changelog sentence Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2935-lvol-support_check_mode_thinpool.yml | 3 +++ plugins/modules/system/lvol.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2935-lvol-support_check_mode_thinpool.yml diff --git a/changelogs/fragments/2935-lvol-support_check_mode_thinpool.yml b/changelogs/fragments/2935-lvol-support_check_mode_thinpool.yml new file mode 100644 index 0000000000..3efbe59860 --- /dev/null +++ b/changelogs/fragments/2935-lvol-support_check_mode_thinpool.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - lvol - honor ``check_mode`` on thinpool (https://github.com/ansible-collections/community.general/issues/2934). 
diff --git a/plugins/modules/system/lvol.py b/plugins/modules/system/lvol.py index fafa7db38a..e8b0ab838b 100644 --- a/plugins/modules/system/lvol.py +++ b/plugins/modules/system/lvol.py @@ -471,9 +471,9 @@ def main(): if size_opt == 'l': module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.") size_opt = 'V' - cmd = "%s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool) + cmd = "%s %s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool) elif thinpool and not lv: - cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool) + cmd = "%s %s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, opts, vg, thinpool) else: cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs) rc, dummy, err = module.run_command(cmd) From c0740ca3985d827e309d9bff868374a5fb86bce7 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Wed, 7 Jul 2021 00:36:36 +0430 Subject: [PATCH 0182/2828] pacman: fix changed status when ignorepkg has been defined (#2936) * pacman: fix returned code when ignorepkg has been defined * add changelog * make ignored check preciser --- ...status_when_ignorepkg_has_been_defined.yml | 3 +++ plugins/modules/packaging/os/pacman.py | 21 ++++++++++++------- 2 files changed, 17 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml diff --git a/changelogs/fragments/2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml b/changelogs/fragments/2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml new file mode 100644 index 0000000000..815ffa4aee --- /dev/null +++ b/changelogs/fragments/2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml @@ -0,0 +1,3 @@ +--- 
+bugfixes: + - pacman - fix changed status when ignorepkg has been defined (https://github.com/ansible-collections/community.general/issues/1758). diff --git a/plugins/modules/packaging/os/pacman.py b/plugins/modules/packaging/os/pacman.py index 372d13cd49..ea138fa614 100644 --- a/plugins/modules/packaging/os/pacman.py +++ b/plugins/modules/packaging/os/pacman.py @@ -254,16 +254,23 @@ def upgrade(module, pacman_path): # e.g., "ansible 2.7.1-1 -> 2.7.2-1" regex = re.compile(r'([\w+\-.@]+) (\S+-\S+) -> (\S+-\S+)') for p in data: - m = regex.search(p) - packages.append(m.group(1)) - if module._diff: - diff['before'] += "%s-%s\n" % (m.group(1), m.group(2)) - diff['after'] += "%s-%s\n" % (m.group(1), m.group(3)) + if '[ignored]' not in p: + m = regex.search(p) + packages.append(m.group(1)) + if module._diff: + diff['before'] += "%s-%s\n" % (m.group(1), m.group(2)) + diff['after'] += "%s-%s\n" % (m.group(1), m.group(3)) if module.check_mode: - module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff) + if packages: + module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff) + else: + module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages) rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False) if rc == 0: - module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff) + if packages: + module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff) + else: + module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages) else: module.fail_json(msg="Could not upgrade") else: From 56acd4356fcd88186f8f6766ae072258012706ad Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 8 Jul 2021 17:20:01 +1200 Subject: [PATCH 0183/2828] MH - dicts and lists change-tracking is fixed (#2951) * dicts and lists change-tracking is fixed * added 
changelog fragment * fixed sanity check --- .../fragments/2951-mh-vars-deepcopy.yml | 2 ++ plugins/module_utils/mh/mixins/vars.py | 6 ++-- .../module_utils/test_module_helper.py | 28 +++++++++++++++++++ 3 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2951-mh-vars-deepcopy.yml diff --git a/changelogs/fragments/2951-mh-vars-deepcopy.yml b/changelogs/fragments/2951-mh-vars-deepcopy.yml new file mode 100644 index 0000000000..339cca3aa7 --- /dev/null +++ b/changelogs/fragments/2951-mh-vars-deepcopy.yml @@ -0,0 +1,2 @@ +bugfixes: + - module_helper module utils - fixed change-tracking for dictionaries and lists (https://github.com/ansible-collections/community.general/pull/2951). diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py index 7c936e04ac..a11110ed60 100644 --- a/plugins/module_utils/mh/mixins/vars.py +++ b/plugins/module_utils/mh/mixins/vars.py @@ -6,6 +6,8 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type +import copy + class VarMeta(object): NOTHING = object() @@ -30,11 +32,11 @@ class VarMeta(object): if fact is not None: self.fact = fact if initial_value is not self.NOTHING: - self.initial_value = initial_value + self.initial_value = copy.deepcopy(initial_value) def set_value(self, value): if not self.init: - self.initial_value = value + self.initial_value = copy.deepcopy(value) self.init = True self.value = value return self diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index 6452784182..f40a0f10ee 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -151,17 +151,45 @@ def test_vardict(): assert vd.meta('a').diff is False assert vd.meta('a').change is False vd['b'] = 456 + assert vd.meta('b').output is True + assert vd.meta('b').diff is False + assert vd.meta('b').change is False 
vd.set_meta('a', diff=True, change=True) vd.set_meta('b', diff=True, output=False) vd['c'] = 789 + assert vd.has_changed('c') is False vd['a'] = 'new_a' + assert vd.has_changed('a') is True vd['c'] = 'new_c' + assert vd.has_changed('c') is False + vd['b'] = 'new_b' + assert vd.has_changed('b') is False assert vd.a == 'new_a' assert vd.c == 'new_c' assert vd.output() == {'a': 'new_a', 'c': 'new_c'} assert vd.diff() == {'before': {'a': 123}, 'after': {'a': 'new_a'}}, "diff={0}".format(vd.diff()) +def test_variable_meta_change(): + vd = VarDict() + vd.set('a', 123, change=True) + vd.set('b', [4, 5, 6], change=True) + vd.set('c', {'m': 7, 'n': 8, 'o': 9}, change=True) + vd.set('d', {'a1': {'a11': 33, 'a12': 34}}, change=True) + + vd.a = 1234 + assert vd.has_changed('a') is True + vd.b.append(7) + assert vd.b == [4, 5, 6, 7] + assert vd.has_changed('b') + vd.c.update({'p': 10}) + assert vd.c == {'m': 7, 'n': 8, 'o': 9, 'p': 10} + assert vd.has_changed('c') + vd.d['a1'].update({'a13': 35}) + assert vd.d == {'a1': {'a11': 33, 'a12': 34, 'a13': 35}} + assert vd.has_changed('d') + + class MockMH(object): changed = None From 518ace25621ee7ed08debd4d87509b0b2be33f2c Mon Sep 17 00:00:00 2001 From: John R Barker Date: Thu, 8 Jul 2021 13:10:33 +0100 Subject: [PATCH 0184/2828] Update commit-rights.md aminvakil is no longer involved with the Ansible Community due to United States export controls and economic sanctions laws apply to U.S. persons, entities, and controlled software and technology that is of U.S. origin or that enters the U.S., including open source software. 
--- commit-rights.md | 1 - 1 file changed, 1 deletion(-) diff --git a/commit-rights.md b/commit-rights.md index 9b39d47b2c..58743e5048 100644 --- a/commit-rights.md +++ b/commit-rights.md @@ -68,7 +68,6 @@ Individuals who have been asked to become a part of this group have generally be | Name | GitHub ID | IRC Nick | Other | | ------------------- | -------------------- | ------------------ | -------------------- | | Alexei Znamensky | russoz | russoz | | -| Amin Vakil | aminvakil | aminvakil | | | Andrew Klychkov | andersson007 | andersson007_ | | | Felix Fontein | felixfontein | felixfontein | | | John R Barker | gundalow | gundalow | | From d97a9b5961a9008c4d404810590cee7cb3e3703a Mon Sep 17 00:00:00 2001 From: Tong He <68936428+unnecessary-username@users.noreply.github.com> Date: Fri, 9 Jul 2021 02:32:46 -0400 Subject: [PATCH 0185/2828] jenkins_job_info: Remove necessities of password or token. (#2948) * Remove necessities on password or token. * Upper case letter -> Lower case letter Co-authored-by: Amin Vakil * Documentation update. * C -> I Co-authored-by: Amin Vakil --- ..._info-remove_necessities_on_password_or_token.yml | 2 ++ .../modules/web_infrastructure/jenkins_job_info.py | 12 +++++++----- 2 files changed, 9 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2948-jenkins_job_info-remove_necessities_on_password_or_token.yml diff --git a/changelogs/fragments/2948-jenkins_job_info-remove_necessities_on_password_or_token.yml b/changelogs/fragments/2948-jenkins_job_info-remove_necessities_on_password_or_token.yml new file mode 100644 index 0000000000..99259d6301 --- /dev/null +++ b/changelogs/fragments/2948-jenkins_job_info-remove_necessities_on_password_or_token.yml @@ -0,0 +1,2 @@ +minor_changes: + - jenkins_job_info - the ``password`` and ``token`` parameters can also be omitted to retrieve only public information (https://github.com/ansible-collections/community.general/pull/2948). 
diff --git a/plugins/modules/web_infrastructure/jenkins_job_info.py b/plugins/modules/web_infrastructure/jenkins_job_info.py index 9dcf5776c9..fc079857a6 100644 --- a/plugins/modules/web_infrastructure/jenkins_job_info.py +++ b/plugins/modules/web_infrastructure/jenkins_job_info.py @@ -33,12 +33,12 @@ options: type: str description: - Password to authenticate with the Jenkins server. - - This is a required parameter, if C(token) is not provided. + - This is mutually exclusive with I(token). token: type: str description: - API token used to authenticate with the Jenkins server. - - This is a required parameter, if C(password) is not provided. + - This is mutually exclusive with I(password). url: type: str description: @@ -59,6 +59,11 @@ author: ''' EXAMPLES = ''' +# Get all Jenkins jobs anonymously +- community.general.jenkins_job_info: + user: admin + register: my_jenkins_job_info + # Get all Jenkins jobs using basic auth - community.general.jenkins_job_info: user: admin @@ -232,9 +237,6 @@ def main(): ['password', 'token'], ['name', 'glob'], ], - required_one_of=[ - ['password', 'token'], - ], supports_check_mode=True, ) From 1b80a9c5879f343a915b281da0cffaff79c2ca22 Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Fri, 9 Jul 2021 07:33:35 +0100 Subject: [PATCH 0186/2828] Add option to the keycloak_client module (#2949) * Add authentication_flow_binding_overrides option to the keycloak_client module * Add changelog fragment * Update changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml Co-authored-by: Amin Vakil * Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Amin Vakil * Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Amin Vakil * Add unit test authentication_flow_binding_overrides feature on keycloak_client module Co-authored-by: Amin Vakil --- ...ntication-flow-binding_keycloak-client.yml | 3 + .../identity/keycloak/keycloak_client.py | 
11 ++ .../identity/keycloak/test_keycloak_client.py | 150 ++++++++++++++++++ 3 files changed, 164 insertions(+) create mode 100644 changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml create mode 100644 tests/unit/plugins/modules/identity/keycloak/test_keycloak_client.py diff --git a/changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml b/changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml new file mode 100644 index 0000000000..cdc0d4ae69 --- /dev/null +++ b/changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - keycloak_client - add ``authentication_flow_binding_overrides`` option (https://github.com/ansible-collections/community.general/pull/2949). diff --git a/plugins/modules/identity/keycloak/keycloak_client.py b/plugins/modules/identity/keycloak/keycloak_client.py index e3e39fc173..e37997e752 100644 --- a/plugins/modules/identity/keycloak/keycloak_client.py +++ b/plugins/modules/identity/keycloak/keycloak_client.py @@ -318,6 +318,14 @@ options: aliases: - authorizationSettings + authentication_flow_binding_overrides: + description: + - Override realm authentication flow bindings. + type: dict + aliases: + - authenticationFlowBindingOverrides + version_added: 3.4.0 + protocol_mappers: description: - a list of dicts defining protocol mappers for this client. 
@@ -593,6 +601,8 @@ EXAMPLES = ''' default_roles: - test01 - test02 + authentication_flow_binding_overrides: + browser: 4c90336b-bf1d-4b87-916d-3677ba4e5fbb protocol_mappers: - config: access.token.claim: True @@ -745,6 +755,7 @@ def main(): use_template_config=dict(type='bool', aliases=['useTemplateConfig']), use_template_scope=dict(type='bool', aliases=['useTemplateScope']), use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']), + authentication_flow_binding_overrides=dict(type='dict', aliases=['authenticationFlowBindingOverrides']), protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), authorization_settings=dict(type='dict', aliases=['authorizationSettings']), ) diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client.py new file mode 100644 index 0000000000..e017a5985c --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, \ + ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_client + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_client_by_clientid=None, get_client_by_id=None, update_client=None, create_client=None, + 
delete_client=None): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + obj = keycloak_client.KeycloakAPI + with patch.object(obj, 'get_client_by_clientid', side_effect=get_client_by_clientid) as mock_get_client_by_clientid: + with patch.object(obj, 'get_client_by_id', side_effect=get_client_by_id) as mock_get_client_by_id: + with patch.object(obj, 'create_client', side_effect=create_client) as mock_create_client: + with patch.object(obj, 'update_client', side_effect=update_client) as mock_update_client: + with patch.object(obj, 'delete_client', side_effect=delete_client) as mock_delete_client: + yield mock_get_client_by_clientid, mock_get_client_by_id, mock_create_client, mock_update_client, mock_delete_client + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. 
+ Without this function, the StringIO is empty for the second call. + """ + + def _create_wrapper(): + return StringIO(text_as_string) + + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper( + '{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakRealm(ModuleTestCase): + def setUp(self): + super(TestKeycloakRealm, self).setUp() + self.module = keycloak_client + + def test_authentication_flow_binding_overrides_feature(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'https: // auth.example.com / auth', + 'token': '{{ access_token }}', + 'state': 'present', + 'realm': 'master', + 'client_id': 'test', + 'authentication_flow_binding_overrides': { + 'browser': '4c90336b-bf1d-4b87-916d-3677ba4e5fbb' + } + } + return_value_get_client_by_clientid = [ + None, + { + "authenticationFlowBindingOverrides": { + "browser": "f9502b6d-d76a-4efe-8331-2ddd853c9f9c" + }, + "clientId": "onboardingid", + "enabled": "true", + "protocol": "openid-connect", + "redirectUris": [ + "*" + ] + } + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_client_by_clientid=return_value_get_client_by_clientid) \ + as (mock_get_client_by_clientid, mock_get_client_by_id, mock_create_client, mock_update_client, mock_delete_client): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_client_by_clientid.call_count, 2) + self.assertEqual(mock_get_client_by_id.call_count, 0) + self.assertEqual(mock_create_client.call_count, 1) + self.assertEqual(mock_update_client.call_count, 0) + self.assertEqual(mock_delete_client.call_count, 0) + + 
# Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 288fe1cfc6a64165521840e72edbb9c513d62eb2 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sat, 10 Jul 2021 06:58:30 -0400 Subject: [PATCH 0187/2828] archive - adding dest_state return value and enhancing integration tests. (#2913) * Initial commit * Adding changelog fragment * fixing changelog fragment * Updating documentation * Applying review suggestions --- .../fragments/2913-archive-dest_state.yml | 4 + plugins/modules/files/archive.py | 11 + .../targets/archive/tasks/broken-link.yml | 22 - .../targets/archive/tasks/main.yml | 434 ++---------------- .../targets/archive/tasks/remove.yml | 186 -------- .../targets/archive/tests/broken-link.yml | 31 ++ .../targets/archive/tests/core.yml | 188 ++++++++ .../targets/archive/tests/exclusions.yml | 40 ++ .../targets/archive/tests/remove.yml | 207 +++++++++ 9 files changed, 521 insertions(+), 602 deletions(-) create mode 100644 changelogs/fragments/2913-archive-dest_state.yml delete mode 100644 tests/integration/targets/archive/tasks/broken-link.yml delete mode 100644 tests/integration/targets/archive/tasks/remove.yml create mode 100644 tests/integration/targets/archive/tests/broken-link.yml create mode 100644 tests/integration/targets/archive/tests/core.yml create mode 100644 tests/integration/targets/archive/tests/exclusions.yml create mode 100644 tests/integration/targets/archive/tests/remove.yml diff --git a/changelogs/fragments/2913-archive-dest_state.yml b/changelogs/fragments/2913-archive-dest_state.yml new file mode 100644 index 0000000000..9e9e67434e --- /dev/null +++ b/changelogs/fragments/2913-archive-dest_state.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - archive - added ``dest_state`` return value to describe final state of ``dest`` after successful task execution + 
(https://github.com/ansible-collections/community.general/pull/2913). diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index a2d3376613..822ea1cd9d 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -137,6 +137,16 @@ state: The state of the input C(path). type: str returned: always +dest_state: + description: + - The state of the I(dest) file. + - C(absent) when the file does not exist. + - C(archive) when the file is an archive. + - C(compress) when the file is compressed, but not an archive. + - C(incomplete) when the file is an archive, but some files under I(path) were not found. + type: str + returned: success + version_added: 3.4.0 missing: description: Any files that were missing from the source. type: list @@ -435,6 +445,7 @@ class Archive(object): return { 'archived': [_to_native(p) for p in self.successes], 'dest': _to_native(self.destination), + 'dest_state': self.destination_state, 'changed': self.changed, 'arcroot': _to_native(self.root), 'missing': [_to_native(p) for p in self.not_found], diff --git a/tests/integration/targets/archive/tasks/broken-link.yml b/tests/integration/targets/archive/tasks/broken-link.yml deleted file mode 100644 index b1e0fb752b..0000000000 --- a/tests/integration/targets/archive/tasks/broken-link.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: Create broken link - file: - src: /nowhere - dest: "{{ output_dir }}/nowhere.txt" - state: link - force: yes - -- name: Archive broken link (tar.gz) - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_broken_link.tar.gz" - -- name: Archive broken link (tar.bz2) - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_broken_link.tar.bz2" - -- name: Archive broken link (zip) - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_broken_link.zip" diff --git a/tests/integration/targets/archive/tasks/main.yml 
b/tests/integration/targets/archive/tasks/main.yml index 35a8f1edf3..e0757b0ead 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -22,6 +22,7 @@ # along with Ansible. If not, see . # Make sure we start fresh +# Test setup - name: Ensure zip is present to create test archive (yum) yum: name=zip state=latest when: ansible_facts.pkg_mgr == 'yum' @@ -82,400 +83,45 @@ - sub - sub/subfile.txt -- name: archive using gz - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_01.gz" - format: gz - register: archive_gz_result_01 - -- debug: msg="{{ archive_gz_result_01 }}" - -- name: verify that the files archived - file: path={{output_dir}}/archive_01.gz state=file - -- name: check if gz file exists and includes all text files - assert: - that: - - "{{ archive_gz_result_01.changed }}" - - "{{ 'archived' in archive_gz_result_01 }}" - - "{{ archive_gz_result_01['archived'] | length }} == 3" - -- name: archive using zip - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_01.zip" - format: zip - register: archive_zip_result_01 - -- debug: msg="{{ archive_zip_result_01 }}" - -- name: verify that the files archived - file: path={{output_dir}}/archive_01.zip state=file - -- name: check if zip file exists - assert: - that: - - "{{ archive_zip_result_01.changed }}" - - "{{ 'archived' in archive_zip_result_01 }}" - - "{{ archive_zip_result_01['archived'] | length }} == 3" - -- name: archive using bz2 - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_01.bz2" - format: bz2 - register: archive_bz2_result_01 - -- debug: msg="{{ archive_bz2_result_01 }}" - -- name: verify that the files archived - file: path={{output_dir}}/archive_01.bz2 state=file - -- name: check if bzip file exists - assert: - that: - - "{{ archive_bz2_result_01.changed }}" - - "{{ 'archived' in archive_bz2_result_01 }}" - - "{{ archive_bz2_result_01['archived'] | length }} 
== 3" - -- name: archive using xz - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_01.xz" - format: xz - register: archive_xz_result_01 - -- debug: msg="{{ archive_xz_result_01 }}" - -- name: verify that the files archived - file: path={{output_dir}}/archive_01.xz state=file - -- name: check if xz file exists - assert: - that: - - "{{ archive_xz_result_01.changed }}" - - "{{ 'archived' in archive_xz_result_01 }}" - - "{{ archive_xz_result_01['archived'] | length }} == 3" - -- name: archive and set mode to 0600 - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_02.gz" - format: gz - mode: "u+rwX,g-rwx,o-rwx" - register: archive_bz2_result_02 - -- name: Test that the file modes were changed - stat: - path: "{{ output_dir }}/archive_02.gz" - register: archive_02_gz_stat - -- debug: msg="{{ archive_02_gz_stat}}" - -- name: Test that the file modes were changed - assert: - that: - - archive_02_gz_stat is not changed - - "archive_02_gz_stat.stat.mode == '0600'" - - "'archived' in archive_bz2_result_02" - - "{{ archive_bz2_result_02['archived']| length}} == 3" - -- name: remove our gz - file: path="{{ output_dir }}/archive_02.gz" state=absent - - -- name: archive and set mode to 0600 - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_02.zip" - format: zip - mode: "u+rwX,g-rwx,o-rwx" - register: archive_zip_result_02 - -- name: Test that the file modes were changed - stat: - path: "{{ output_dir }}/archive_02.zip" - register: archive_02_zip_stat - -- name: Test that the file modes were changed - assert: - that: - - archive_02_zip_stat is not changed - - "archive_02_zip_stat.stat.mode == '0600'" - - "'archived' in archive_zip_result_02" - - "{{ archive_zip_result_02['archived']| length}} == 3" - -- name: remove our zip - file: path="{{ output_dir }}/archive_02.zip" state=absent - - -- name: archive and set mode to 0600 - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir 
}}/archive_02.bz2" - format: bz2 - mode: "u+rwX,g-rwx,o-rwx" - register: archive_bz2_result_02 - -- name: Test that the file modes were changed - stat: - path: "{{ output_dir }}/archive_02.bz2" - register: archive_02_bz2_stat - -- name: Test that the file modes were changed - assert: - that: - - archive_02_bz2_stat is not changed - - "archive_02_bz2_stat.stat.mode == '0600'" - - "'archived' in archive_bz2_result_02" - - "{{ archive_bz2_result_02['archived']| length}} == 3" - -- name: remove our bz2 - file: path="{{ output_dir }}/archive_02.bz2" state=absent - -- name: archive and set mode to 0600 - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_02.xz" - format: xz - mode: "u+rwX,g-rwx,o-rwx" - register: archive_xz_result_02 - -- name: Test that the file modes were changed - stat: - path: "{{ output_dir }}/archive_02.xz" - register: archive_02_xz_stat - -- name: Test that the file modes were changed - assert: - that: - - archive_02_xz_stat is not changed - - "archive_02_xz_stat.stat.mode == '0600'" - - "'archived' in archive_xz_result_02" - - "{{ archive_xz_result_02['archived']| length}} == 3" - -- name: remove our xz - file: path="{{ output_dir }}/archive_02.xz" state=absent - -- name: archive multiple files as list - archive: - path: - - "{{ output_dir }}/empty.txt" - - "{{ output_dir }}/foo.txt" - - "{{ output_dir }}/bar.txt" - dest: "{{ output_dir }}/archive_list.gz" - format: gz - register: archive_gz_list_result - -- name: verify that the files archived - file: path={{output_dir}}/archive_list.gz state=file - -- name: check if gz file exists and includes all text files - assert: - that: - - "{{ archive_gz_list_result.changed }}" - - "{{ 'archived' in archive_gz_list_result }}" - - "{{ archive_gz_list_result['archived'] | length }} == 3" - -- name: remove our gz - file: path="{{ output_dir }}/archive_list.gz" state=absent - -- name: test that gz archive that contains non-ascii filenames - archive: - path: "{{ output_dir }}/*.txt" - 
dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz" - format: gz - register: nonascii_result_0 - -- name: Check that file is really there - stat: - path: "{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz" - register: nonascii_stat0 - -- name: Assert that nonascii tests succeeded - assert: - that: - - nonascii_result_0 is changed - - "nonascii_stat0.stat.exists == true" - -- name: remove nonascii test - file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz" state=absent - -- name: test that bz2 archive that contains non-ascii filenames - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.bz2" - format: bz2 - register: nonascii_result_1 - -- name: Check that file is really there - stat: - path: "{{ output_dir }}/test-archive-nonascii-くらとみ.bz2" - register: nonascii_stat_1 - -- name: Assert that nonascii tests succeeded - assert: - that: - - nonascii_result_1 is changed - - "nonascii_stat_1.stat.exists == true" - -- name: remove nonascii test - file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.bz2" state=absent - -- name: test that xz archive that contains non-ascii filenames - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.xz" - format: xz - register: nonascii_result_1 - -- name: Check that file is really there - stat: - path: "{{ output_dir }}/test-archive-nonascii-くらとみ.xz" - register: nonascii_stat_1 - -- name: Assert that nonascii tests succeeded - assert: - that: - - nonascii_result_1 is changed - - "nonascii_stat_1.stat.exists == true" - -- name: remove nonascii test - file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.xz" state=absent - -- name: test that zip archive that contains non-ascii filenames - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.zip" - format: zip - register: nonascii_result_2 - -- name: Check that file is really there - stat: - path: "{{ output_dir 
}}/test-archive-nonascii-くらとみ.zip" - register: nonascii_stat_2 - -- name: Assert that nonascii tests succeeded - assert: - that: - - nonascii_result_2 is changed - - "nonascii_stat_2.stat.exists == true" - -- name: remove nonascii test - file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.zip" state=absent - -- name: Test exclusion_patterns option - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/test-archive-exclusion-patterns.tgz" - exclusion_patterns: b?r.* - register: exclusion_patterns_result - -- name: Assert that exclusion_patterns only archives included files - assert: - that: - - exclusion_patterns_result is changed - - "'bar.txt' not in exclusion_patterns_result.archived" - -- name: Test that excluded paths do not influence archive root - archive: - path: - - "{{ output_dir }}/sub/subfile.txt" - - "{{ output_dir }}" - exclude_path: - - "{{ output_dir }}" - dest: "{{ output_dir }}/test-archive-root.tgz" - register: archive_root_result - -- name: Assert that excluded paths do not influence archive root - assert: - that: - - archive_root_result.arcroot != output_dir - -- name: Remove archive root test - file: - path: "{{ output_dir }}/test-archive-root.tgz" - state: absent - -- name: Test Single Target with format={{ item }} - archive: - path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/test-single-target.{{ item }}" - format: "{{ item }}" - register: "single_target_test" - loop: - - zip - - tar - - gz - - bz2 - - xz - -# Dummy tests until ``dest_state`` result value can be implemented -- name: Assert that single target tests are effective - assert: - that: - - single_target_test.results[0] is changed - - single_target_test.results[1] is changed - - single_target_test.results[2] is changed - - single_target_test.results[3] is changed - - single_target_test.results[4] is changed - -- name: Retrieve contents of single target archives - ansible.builtin.unarchive: - src: "{{ output_dir }}/test-single-target.zip" - dest: . 
- list_files: true - check_mode: true - ignore_errors: true - register: single_target_test_contents - -- name: Assert that file names in single-file zip archives are preserved - assert: - that: - - "'oo.txt' not in single_target_test_contents.files" - - "'foo.txt' in single_target_test_contents.files" - # ``unarchive`` fails for RHEL and FreeBSD on ansible 2.x - when: single_target_test_contents is success and single_target_test_contents is not skipped - -- name: Remove single target test with format={{ item }} - file: - path: "{{ output_dir }}/test-single-target.{{ item }}" - state: absent - loop: - - zip - - tar - - gz - - bz2 - - xz - -- name: Test that missing files result in incomplete state - archive: - path: - - "{{ output_dir }}/*.txt" - - "{{ output_dir }}/dne.txt" - exclude_path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/test-incomplete-archive.tgz" - register: incomplete_archive_result - -- name: Assert that incomplete archive has incomplete state - assert: - that: - - incomplete_archive_result is changed - - "'{{ output_dir }}/dne.txt' in incomplete_archive_result.missing" - - "'{{ output_dir }}/foo.txt' not in incomplete_archive_result.missing" - -- name: Remove incomplete archive - file: - path: "{{ output_dir }}/test-incomplete-archive.tgz" - state: absent - +- name: Define formats to test + set_fact: + formats: + - tar + - zip + - gz + - bz2 + - xz + +# Run tests +- name: Run core tests + include_tasks: + file: ../tests/core.yml + loop: "{{ formats }}" + loop_control: + loop_var: format + +- name: Run exclusions tests + include_tasks: + file: ../tests/exclusions.yml + loop: "{{ formats }}" + loop_control: + loop_var: format + +- name: Run remove tests + include_tasks: + file: ../tests/remove.yml + loop: "{{ formats }}" + loop_control: + loop_var: format + +- name: Run broken link tests + include_tasks: + file: ../tests/broken-link.yml + loop: "{{ formats }}" + loop_control: + loop_var: format + +# Test cleanup - name: Remove 
backports.lzma if previously installed (pip) pip: name=backports.lzma state=absent when: backports_lzma_pip is changed - -- name: import remove tests - import_tasks: remove.yml - -- name: import broken-link tests - import_tasks: broken-link.yml diff --git a/tests/integration/targets/archive/tasks/remove.yml b/tests/integration/targets/archive/tasks/remove.yml deleted file mode 100644 index 9f085e901a..0000000000 --- a/tests/integration/targets/archive/tasks/remove.yml +++ /dev/null @@ -1,186 +0,0 @@ ---- -- name: archive using gz and remove src files - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_remove_01.gz" - format: gz - remove: yes - register: archive_remove_result_01 - -- debug: msg="{{ archive_remove_result_01 }}" - -- name: verify that the files archived - file: path={{ output_dir }}/archive_remove_01.gz state=file - -- name: check if gz file exists and includes all text files and src files has been removed - assert: - that: - - "{{ archive_remove_result_01.changed }}" - - "{{ 'archived' in archive_remove_result_01 }}" - - "{{ archive_remove_result_01['archived'] | length }} == 3" - -- name: remove our gz - file: path="{{ output_dir }}/archive_remove_01.gz" state=absent - -- name: check if src files has been removed - assert: - that: - - "'{{ output_dir }}/{{ item }}' is not exists" - with_items: - - foo.txt - - bar.txt - - empty.txt - -- name: prep our files again - copy: src={{ item }} dest={{ output_dir }}/{{ item }} - with_items: - - foo.txt - - bar.txt - - empty.txt - -- name: create a temporary directory to be check if it will be removed - file: - path: "{{ output_dir }}/tmpdir" - state: directory - -- name: prep our files in tmpdir - copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} - with_items: - - foo.txt - - bar.txt - - empty.txt - -- name: archive using gz and remove src directory - archive: - path: "{{ output_dir }}/tmpdir" - dest: "{{ output_dir }}/archive_remove_02.gz" - format: gz - remove: yes - 
register: archive_remove_result_02 - -- debug: msg="{{ archive_remove_result_02 }}" - -- name: verify that the files archived - file: path={{ output_dir }}/archive_remove_02.gz state=file - -- name: check if gz file exists and includes all text files - assert: - that: - - "{{ archive_remove_result_02.changed }}" - - "{{ 'archived' in archive_remove_result_02 }}" - - "{{ archive_remove_result_02['archived'] | length }} == 3" - -- name: remove our gz - file: path="{{ output_dir }}/archive_remove_02.gz" state=absent - -- name: check if src folder has been removed - assert: - that: - - "'{{ output_dir }}/tmpdir' is not exists" - -- name: create temporary directory again - file: - path: "{{ output_dir }}/tmpdir" - state: directory - -- name: prep our files in tmpdir again - copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} - with_items: - - foo.txt - - bar.txt - - empty.txt - -- name: archive using gz and remove src directory excluding one file - archive: - path: "{{ output_dir }}/tmpdir/*" - dest: "{{ output_dir }}/archive_remove_03.gz" - format: gz - remove: yes - exclude_path: "{{ output_dir }}/tmpdir/empty.txt" - register: archive_remove_result_03 - -- debug: msg="{{ archive_remove_result_03 }}" - -- name: verify that the files archived - file: path={{ output_dir }}/archive_remove_03.gz state=file - -- name: check if gz file exists and includes all text files - assert: - that: - - "{{ archive_remove_result_03.changed }}" - - "{{ 'archived' in archive_remove_result_03 }}" - - "{{ archive_remove_result_03['archived'] | length }} == 2" - -- name: remove our gz - file: path="{{ output_dir }}/archive_remove_03.gz" state=absent - -- name: verify that excluded file is still present - file: path={{ output_dir }}/tmpdir/empty.txt state=file - -- name: prep our files in tmpdir again - copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} - with_items: - - foo.txt - - bar.txt - - empty.txt - - sub - - sub/subfile.txt - -- name: archive using gz and remove src 
directory - archive: - path: - - "{{ output_dir }}/tmpdir/*.txt" - - "{{ output_dir }}/tmpdir/sub/*" - dest: "{{ output_dir }}/archive_remove_04.gz" - format: gz - remove: yes - exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" - register: archive_remove_result_04 - -- debug: msg="{{ archive_remove_result_04 }}" - -- name: verify that the files archived - file: path={{ output_dir }}/archive_remove_04.gz state=file - -- name: remove our gz - file: path="{{ output_dir }}/archive_remove_04.gz" state=absent - -- name: verify that excluded sub file is still present - file: path={{ output_dir }}/tmpdir/sub/subfile.txt state=file - -- name: prep our files in tmpdir again - copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} - with_items: - - foo.txt - - bar.txt - - empty.txt - - sub - - sub/subfile.txt - -- name: archive using gz and remove src directory - archive: - path: - - "{{ output_dir }}/tmpdir/" - dest: "{{ output_dir }}/archive_remove_05.gz" - format: gz - remove: yes - exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" - register: archive_remove_result_05 - -- name: verify that the files archived - file: path={{ output_dir }}/archive_remove_05.gz state=file - -- name: Verify source files were removed - file: - path: "{{ output_dir }}/tmpdir" - state: absent - register: archive_source_file_removal_05 - -- name: Verify that task status is success - assert: - that: - - archive_remove_result_05 is success - - archive_source_file_removal_05 is not changed - -- name: remove our gz - file: path="{{ output_dir }}/archive_remove_05.gz" state=absent diff --git a/tests/integration/targets/archive/tests/broken-link.yml b/tests/integration/targets/archive/tests/broken-link.yml new file mode 100644 index 0000000000..cc1e07aaf1 --- /dev/null +++ b/tests/integration/targets/archive/tests/broken-link.yml @@ -0,0 +1,31 @@ +--- +- block: + - name: Create link - broken link ({{ format }}) + file: + src: /nowhere + dest: "{{ output_dir }}/nowhere.txt" + state: 
link + force: yes + + - name: Archive - broken link ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_broken_link.{{ format }}" + format: "{{ format }}" + + - name: Verify archive exists - broken link ({{ format }}) + file: + path: "{{ output_dir }}/archive_broken_link.{{ format }}" + state: file + + - name: Remove archive - broken link ({{ format }}) + file: + path: "{{ output_dir }}/archive_broken_link.{{ format }}" + state: absent + + - name: Remove link - broken link ({{ format }}) + file: + path: "{{ output_dir }}/nowhere.txt" + state: absent + # 'zip' does not support symlinks + when: format != 'zip' diff --git a/tests/integration/targets/archive/tests/core.yml b/tests/integration/targets/archive/tests/core.yml new file mode 100644 index 0000000000..f12e5083cc --- /dev/null +++ b/tests/integration/targets/archive/tests/core.yml @@ -0,0 +1,188 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Test code for the archive module. +# (c) 2017, Abhijeet Kasurde + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. 
+# Make sure we start fresh + +# Core functionality tests +- name: Archive - no options ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_no_opts.{{ format }}" + format: "{{ format }}" + register: archive_no_options + +- name: Verify that archive exists - no options ({{ format }}) + file: + path: "{{output_dir}}/archive_no_opts.{{ format }}" + state: file + +- name: Verify that archive result is changed and includes all files - no options ({{ format }}) + assert: + that: + - archive_no_options is changed + - "archive_no_options.dest_state == 'archive'" + - "{{ archive_no_options.archived | length }} == 3" + +- name: Remove the archive - no options ({{ format }}) + file: + path: "{{ output_dir }}/archive_no_opts.{{ format }}" + state: absent + +- name: Archive - file options ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_file_options.{{ format }}" + format: "{{ format }}" + mode: "u+rwX,g-rwx,o-rwx" + register: archive_file_options + +- name: Retrieve archive file information - file options ({{ format }}) + stat: + path: "{{ output_dir }}/archive_file_options.{{ format }}" + register: archive_file_options_stat + +- name: Test that the file modes were changed + assert: + that: + - archive_file_options_stat is not changed + - "archive_file_options.mode == '0600'" + - "{{ archive_file_options.archived | length }} == 3" + +- name: Remove the archive - file options ({{ format }}) + file: + path: "{{ output_dir }}/archive_file_options.{{ format }}" + state: absent + +- name: Archive - non-ascii ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + format: "{{ format }}" + register: archive_nonascii + +- name: Retrieve archive file information - non-ascii ({{ format }}) + stat: + path: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + register: archive_nonascii_stat + +- name: Test that archive exists - 
non-ascii ({{ format }}) + assert: + that: + - archive_nonascii is changed + - archive_nonascii_stat.stat.exists == true + +- name: Remove the archive - non-ascii ({{ format }}) + file: + path: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + state: absent + +- name: Archive - single target ({{ format }}) + archive: + path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/archive_single_target.{{ format }}" + format: "{{ format }}" + register: archive_single_target + +- name: Assert archive has correct state - single target ({{ format }}) + assert: + that: + - archive_single_target.dest_state == state_map[format] + vars: + state_map: + tar: archive + zip: archive + gz: compress + bz2: compress + xz: compress + +- block: + - name: Retrieve contents of archive - single target ({{ format }}) + ansible.builtin.unarchive: + src: "{{ output_dir }}/archive_single_target.{{ format }}" + dest: . + list_files: true + check_mode: true + ignore_errors: true + register: archive_single_target_contents + + - name: Assert that file names are preserved - single target ({{ format }}) + assert: + that: + - "'oo.txt' not in archive_single_target_contents.files" + - "'foo.txt' in archive_single_target_contents.files" + # ``unarchive`` fails for RHEL and FreeBSD on ansible 2.x + when: archive_single_target_contents is success and archive_single_target_contents is not skipped + when: "format == 'zip'" + +- name: Remove archive - single target ({{ format }}) + file: + path: "{{ output_dir }}/archive_single_target.{{ format }}" + state: absent + +- name: Archive - path list ({{ format }}) + archive: + path: + - "{{ output_dir }}/empty.txt" + - "{{ output_dir }}/foo.txt" + - "{{ output_dir }}/bar.txt" + dest: "{{ output_dir }}/archive_path_list.{{ format }}" + format: "{{ format }}" + register: archive_path_list + +- name: Verify that archive exists - path list ({{ format }}) + file: + path: "{{output_dir}}/archive_path_list.{{ format }}" + state: file + +- name: Assert that 
archive contains all files - path list ({{ format }}) + assert: + that: + - archive_path_list is changed + - "{{ archive_path_list.archived | length }} == 3" + +- name: Remove archive - path list ({{ format }}) + file: + path: "{{ output_dir }}/archive_path_list.{{ format }}" + state: absent + +- name: Archive - missing paths ({{ format }}) + archive: + path: + - "{{ output_dir }}/*.txt" + - "{{ output_dir }}/dne.txt" + exclude_path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/archive_missing_paths.{{ format }}" + format: "{{ format }}" + register: archive_missing_paths + +- name: Assert that incomplete archive has incomplete state - missing paths ({{ format }}) + assert: + that: + - archive_missing_paths is changed + - "archive_missing_paths.dest_state == 'incomplete'" + - "'{{ output_dir }}/dne.txt' in archive_missing_paths.missing" + - "'{{ output_dir }}/foo.txt' not in archive_missing_paths.missing" + +- name: Remove archive - missing paths ({{ format }}) + file: + path: "{{ output_dir }}/archive_missing_paths.{{ format }}" + state: absent diff --git a/tests/integration/targets/archive/tests/exclusions.yml b/tests/integration/targets/archive/tests/exclusions.yml new file mode 100644 index 0000000000..0b65f85851 --- /dev/null +++ b/tests/integration/targets/archive/tests/exclusions.yml @@ -0,0 +1,40 @@ +--- +- name: Archive - exclusion patterns ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_exclusion_patterns.{{ format }}" + format: "{{ format }}" + exclusion_patterns: b?r.* + register: archive_exclusion_patterns + +- name: Assert that only included files are archived - exclusion patterns ({{ format }}) + assert: + that: + - archive_exclusion_patterns is changed + - "'bar.txt' not in archive_exclusion_patterns.archived" + +- name: Remove archive - exclusion patterns ({{ format }}) + file: + path: "{{ output_dir }}/archive_exclusion_patterns.{{ format }}" + state: absent + +- name: Archive - exclude path ({{ 
format }}) + archive: + path: + - "{{ output_dir }}/sub/subfile.txt" + - "{{ output_dir }}" + exclude_path: + - "{{ output_dir }}" + dest: "{{ output_dir }}/archive_exclude_paths.{{ format }}" + format: "{{ format }}" + register: archive_excluded_paths + +- name: Assert that excluded paths do not influence archive root - exclude path ({{ format }}) + assert: + that: + - archive_excluded_paths.arcroot != output_dir + +- name: Remove archive - exclude path ({{ format }}) + file: + path: "{{ output_dir }}/archive_exclude_paths.{{ format }}" + state: absent diff --git a/tests/integration/targets/archive/tests/remove.yml b/tests/integration/targets/archive/tests/remove.yml new file mode 100644 index 0000000000..26849ac850 --- /dev/null +++ b/tests/integration/targets/archive/tests/remove.yml @@ -0,0 +1,207 @@ +--- +- name: Archive - remove source files ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + format: "{{ format }}" + remove: yes + register: archive_remove_source_files + +- name: Verify archive exists - remove source files ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + state: file + +- name: Verify all files were archived - remove source files ({{ format }}) + assert: + that: + - archive_remove_source_files is changed + - "{{ archive_remove_source_files.archived | length }} == 3" + +- name: Remove Archive - remove source files ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + state: absent + +- name: Assert that source files were removed - remove source files ({{ format }}) + assert: + that: + - "'{{ output_dir }}/{{ item }}' is not exists" + with_items: + - foo.txt + - bar.txt + - empty.txt + +- name: Copy source files - remove source directory ({{ format }}) + copy: + src: "{{ item }}" + dest: "{{ output_dir }}/{{ item }}" + with_items: + - foo.txt + - bar.txt + - empty.txt + +- name: Create 
temporary directory - remove source directory ({{ format }}) + file: + path: "{{ output_dir }}/tmpdir" + state: directory + +- name: Copy source files to temporary directory - remove source directory ({{ format }}) + copy: + src: "{{ item }}" + dest: "{{ output_dir }}/tmpdir/{{ item }}" + with_items: + - foo.txt + - bar.txt + - empty.txt + +- name: Archive - remove source directory ({{ format }}) + archive: + path: "{{ output_dir }}/tmpdir" + dest: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + format: "{{ format }}" + remove: yes + register: archive_remove_source_directory + +- name: Verify archive exists - remove source directory ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + state: file + +- name: Verify archive contains all files - remove source directory ({{ format }}) + assert: + that: + - archive_remove_source_directory is changed + - "{{ archive_remove_source_directory.archived | length }} == 3" + +- name: Remove archive - remove source directory ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + state: absent + +- name: Verify source directory was removed - remove source directory ({{ format }}) + assert: + that: + - "'{{ output_dir }}/tmpdir' is not exists" + +- name: Create temporary directory - remove source excluding path ({{ format }}) + file: + path: "{{ output_dir }}/tmpdir" + state: directory + +- name: Copy source files to temporary directory - remove source excluding path ({{ format }}) + copy: + src: "{{ item }}" + dest: "{{ output_dir }}/tmpdir/{{ item }}" + with_items: + - foo.txt + - bar.txt + - empty.txt + +- name: Archive - remove source excluding path ({{ format }}) + archive: + path: "{{ output_dir }}/tmpdir/*" + dest: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + format: "{{ format }}" + remove: yes + exclude_path: "{{ output_dir }}/tmpdir/empty.txt" + register: archive_remove_source_excluding_path + 
+- name: Verify archive exists - remove source excluding path ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + state: file + +- name: Verify all files except excluded are archived - remove source excluding path ({{ format }}) + assert: + that: + - archive_remove_source_excluding_path is changed + - "{{ archive_remove_source_excluding_path.archived | length }} == 2" + +- name: Remove archive - remove source excluding path ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + state: absent + +- name: Verify that excluded file still exists - remove source excluding path ({{ format }}) + file: + path: "{{ output_dir }}/tmpdir/empty.txt" + state: file + +- name: Copy source files to temporary directory - remove source excluding sub path ({{ format }}) + copy: + src: "{{ item }}" + dest: "{{ output_dir }}/tmpdir/{{ item }}" + with_items: + - foo.txt + - bar.txt + - empty.txt + - sub + - sub/subfile.txt + +- name: Archive - remove source excluding sub path ({{ format }}) + archive: + path: + - "{{ output_dir }}/tmpdir/*.txt" + - "{{ output_dir }}/tmpdir/sub/*" + dest: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + format: "{{ format }}" + remove: yes + exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + register: archive_remove_source_excluding_sub_path + +- name: Verify archive exists - remove source excluding sub path ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + state: file + +- name: Remove archive - remove source excluding sub path ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + state: absent + +- name: Verify that sub path still exists - remove source excluding sub path ({{ format }}) + file: + path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + state: file + +- name: Copy source files to temporary directory - remove 
source with nested paths ({{ format }}) + copy: + src: "{{ item }}" + dest: "{{ output_dir }}/tmpdir/{{ item }}" + with_items: + - foo.txt + - bar.txt + - empty.txt + - sub + - sub/subfile.txt + +- name: Archive - remove source with nested paths ({{ format }}) + archive: + path: "{{ output_dir }}/tmpdir/" + dest: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + format: "{{ format }}" + remove: yes + register: archive_remove_nested_paths + +- name: Verify archive exists - remove source with nested paths ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + state: file + +- name: Verify source files were removed - remove source with nested paths ({{ format }}) + file: + path: "{{ output_dir }}/tmpdir" + state: absent + register: archive_remove_nested_paths_status + +- name: Assert tasks status - remove source with nested paths ({{ format }}) + assert: + that: + - archive_remove_nested_paths is success + - archive_remove_nested_paths_status is not changed + +- name: Remove archive - remove source with nested paths ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + state: absent From ad8c4e4de6cbbf6d2f6a366cb70451682e5a2684 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 10 Jul 2021 23:01:16 +1200 Subject: [PATCH 0188/2828] added comments to the ignore files (#2972) --- tests/sanity/ignore-2.10.txt | 20 ++++++++++---------- tests/sanity/ignore-2.11.txt | 20 ++++++++++---------- tests/sanity/ignore-2.12.txt | 20 ++++++++++---------- tests/sanity/ignore-2.9.txt | 18 +++++++++--------- 4 files changed, 39 insertions(+), 39 deletions(-) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index bdb3ca4e9a..6c60a4c6f8 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -18,17 +18,17 @@ plugins/modules/cloud/univention/udm_user.py 
validate-modules:parameter-list-no- plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/notification/grove.py validate-modules:invalid-argument-name +plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid -plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid -plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid +plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error -plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid -plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid +plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/urpmi.py 
validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc @@ -47,7 +47,7 @@ plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc +plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 34889a2651..e3785767b1 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -17,17 +17,17 @@ plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no- plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/notification/grove.py validate-modules:invalid-argument-name +plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 
plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid -plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid -plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid +plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error -plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid -plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid +plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc @@ -46,7 +46,7 @@ 
plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc +plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 6e14759c9d..197868474b 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -17,17 +17,17 @@ plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no- plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/notification/grove.py validate-modules:invalid-argument-name +plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid -plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid -plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid 
+plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error -plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid -plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid +plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc @@ -46,7 +46,7 @@ plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc +plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed 
in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 33f3d183d4..e21faf2ce3 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -12,15 +12,15 @@ plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undo plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid -plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid -plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid +plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error -plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid 
-plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid +plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc @@ -65,7 +65,7 @@ plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc +plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # deprecated param - removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path From 1990f79d8a69ae6fef7457d853c54d9348aad2fa Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 10 Jul 2021 23:03:41 +1200 Subject: [PATCH 0189/2828] launchd - fixed validation check (#2960) * replaced use of expanduser() with value from HOME var * fixed sanity check * added changelog fragment --- changelogs/fragments/2960-launchd-validation-check.yaml | 2 ++ plugins/modules/system/launchd.py | 2 +- tests/sanity/ignore-2.10.txt | 1 - tests/sanity/ignore-2.11.txt | 1 - tests/sanity/ignore-2.12.txt | 
1 - tests/sanity/ignore-2.9.txt | 1 - 6 files changed, 3 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2960-launchd-validation-check.yaml diff --git a/changelogs/fragments/2960-launchd-validation-check.yaml b/changelogs/fragments/2960-launchd-validation-check.yaml new file mode 100644 index 0000000000..15cb3c3fa5 --- /dev/null +++ b/changelogs/fragments/2960-launchd-validation-check.yaml @@ -0,0 +1,2 @@ +bugfixes: + - launchd - fixed sanity check in the module's code (https://github.com/ansible-collections/community.general/pull/2960). diff --git a/plugins/modules/system/launchd.py b/plugins/modules/system/launchd.py index 30a5ed02b2..e8d82ff318 100644 --- a/plugins/modules/system/launchd.py +++ b/plugins/modules/system/launchd.py @@ -159,7 +159,7 @@ class Plist: """Finds the plist file associated with a service""" launchd_paths = [ - os.path.expanduser('~/Library/LaunchAgents'), + os.path.join(os.getenv('HOME'), 'Library/LaunchAgents'), '/Library/LaunchAgents', '/Library/LaunchDaemons', '/System/Library/LaunchAgents', diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 6c60a4c6f8..f313df3620 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -41,7 +41,6 @@ plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:par plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter -plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index e3785767b1..6858d92104 
100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -40,7 +40,6 @@ plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:par plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter -plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 197868474b..9b0e047d57 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -40,7 +40,6 @@ plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:par plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter -plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index e21faf2ce3..65611001b1 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -62,7 +62,6 @@ plugins/modules/net_tools/nios/nios_zone.py validate-modules:deprecation-mismatc plugins/modules/net_tools/nios/nios_zone.py validate-modules:invalid-documentation plugins/modules/source_control/github/github_deploy_key.py 
validate-modules:parameter-invalid plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter -plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # deprecated param - removed in 4.0.0 From 0e90ff48b5e0d3c3b51542fc843d3d873d04a2d5 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 10 Jul 2021 23:05:20 +1200 Subject: [PATCH 0190/2828] rax_mon_notification_plan - fixed validation check (#2955) * fixed validation-modules for plugins/modules/cloud/rackspace/rax_mon_notification_plan.py * fixed sanity check * added changelog fragment --- ..._notification_plan-added-elements-to-list-params.yaml | 2 ++ .../modules/cloud/rackspace/rax_mon_notification_plan.py | 9 ++++++--- tests/sanity/ignore-2.10.txt | 1 - tests/sanity/ignore-2.11.txt | 1 - tests/sanity/ignore-2.12.txt | 1 - 5 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/2955-rax_mon_notification_plan-added-elements-to-list-params.yaml diff --git a/changelogs/fragments/2955-rax_mon_notification_plan-added-elements-to-list-params.yaml b/changelogs/fragments/2955-rax_mon_notification_plan-added-elements-to-list-params.yaml new file mode 100644 index 0000000000..9ff6f01f7d --- /dev/null +++ b/changelogs/fragments/2955-rax_mon_notification_plan-added-elements-to-list-params.yaml @@ -0,0 +1,2 @@ +minor_changes: + - rax_mon_notification_plan - fixed validation checks by specifying type ``str`` as the ``elements`` of parameters ``ok_state``, ``warning_state`` and ``critical_state`` (https://github.com/ansible-collections/community.general/pull/2955). 
diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py index a4b8920dc7..d5294cd509 100644 --- a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py +++ b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py @@ -32,16 +32,19 @@ options: required: true critical_state: type: list + elements: str description: - Notification list to use when the alarm state is CRITICAL. Must be an array of valid rax_mon_notification ids. warning_state: type: list + elements: str description: - Notification list to use when the alarm state is WARNING. Must be an array of valid rax_mon_notification ids. ok_state: type: list + elements: str description: - Notification list to use when the alarm state is OK. Must be an array of valid rax_mon_notification ids. @@ -150,9 +153,9 @@ def main(): dict( state=dict(default='present', choices=['present', 'absent']), label=dict(required=True), - critical_state=dict(type='list'), - warning_state=dict(type='list'), - ok_state=dict(type='list') + critical_state=dict(type='list', elements='str'), + warning_state=dict(type='list', elements='str'), + ok_state=dict(type='list', elements='str'), ) ) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index f313df3620..d01c3762dc 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -5,7 +5,6 @@ plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-cho plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path -plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values 
plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 6858d92104..2106c993d3 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -4,7 +4,6 @@ plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-cho plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path -plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 9b0e047d57..a30ff2e4ed 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -4,7 +4,6 @@ plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-cho plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path -plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py 
validate-modules:parameter-type-not-in-doc From 4ae392e5de059fcb2cd4a5d6a6127d23d4417f6c Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 10 Jul 2021 13:31:54 +0200 Subject: [PATCH 0191/2828] Temporarily disable passwordstore lookup tests on macOS and OSX. (#2979) --- tests/integration/targets/lookup_passwordstore/aliases | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/targets/lookup_passwordstore/aliases b/tests/integration/targets/lookup_passwordstore/aliases index 8b108917a0..7cc72b73d4 100644 --- a/tests/integration/targets/lookup_passwordstore/aliases +++ b/tests/integration/targets/lookup_passwordstore/aliases @@ -3,3 +3,5 @@ destructive skip/aix skip/rhel skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller +skip/osx # FIXME https://github.com/ansible-collections/community.general/issues/2978 +skip/macos # FIXME https://github.com/ansible-collections/community.general/issues/2978 From 9023d4dba1c635e9839448e975a8c0c0fdf1fdff Mon Sep 17 00:00:00 2001 From: quidame Date: Sat, 10 Jul 2021 16:37:31 +0200 Subject: [PATCH 0192/2828] filesystem: extend support for FreeBSD (#2902) * extend support for FreeBSD * Check if FS exists with `fstyp` if `blkid` fails to find FS signature (fix a potential data loss) * Add support for FreeBSD special devices (character devices). * Add support for FreeBSD native fstype (UFS). * Update DOCUMENTATION accordingly. * add/update integration tests * Add tests for `fstype=ufs` on FreeBSD. * Run `remove_fs` tests (`state=absent`) on FreeBSD. * Run `overwrite_another_fs` tests on FreeBSD. 
* add a changelog fragment * fix indentation * restrict new tests to regular files * fix typo * fix searching of providersize (block count) * add '-y' option to growfs command * remove references to versions older than the collection itself * bump version adding new feats to 3.4.0 * reformat *collection* and *version added* for better DOCUMENTATION parsing * skip tests for FreeBSD < 12.2 * run tests for FreeBSD >= 12.2 * re-enable tests for FreeBSD < 12.2 and give it a try with group1 * util-linux not available on FreeBSD < 12.2 --- ...2902-filesystem_extend_freebsd_support.yml | 6 ++ plugins/modules/system/filesystem.py | 93 ++++++++++++++----- tests/integration/targets/filesystem/aliases | 2 +- .../targets/filesystem/defaults/main.yml | 6 ++ .../filesystem/tasks/create_device.yml | 21 ++++- .../targets/filesystem/tasks/create_fs.yml | 21 +++-- .../filesystem/tasks/freebsd_setup.yml | 10 ++ .../targets/filesystem/tasks/main.yml | 25 +++++ .../filesystem/tasks/overwrite_another_fs.yml | 12 +-- .../targets/filesystem/tasks/remove_fs.yml | 12 +-- 10 files changed, 162 insertions(+), 46 deletions(-) create mode 100644 changelogs/fragments/2902-filesystem_extend_freebsd_support.yml create mode 100644 tests/integration/targets/filesystem/tasks/freebsd_setup.yml diff --git a/changelogs/fragments/2902-filesystem_extend_freebsd_support.yml b/changelogs/fragments/2902-filesystem_extend_freebsd_support.yml new file mode 100644 index 0000000000..1518d0190f --- /dev/null +++ b/changelogs/fragments/2902-filesystem_extend_freebsd_support.yml @@ -0,0 +1,6 @@ +--- +minor_changes: + - filesystem - extend support for FreeBSD. Avoid potential data loss by checking + existence of a filesystem with ``fstyp`` (native command) if ``blkid`` (foreign + command) doesn't find one. Add support for character devices and ``ufs`` filesystem + type (https://github.com/ansible-collections/community.general/pull/2902). 
diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/system/filesystem.py index cbb0e5e95e..4f1d6ee0d1 100644 --- a/plugins/modules/system/filesystem.py +++ b/plugins/modules/system/filesystem.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright: (c) 2021, quidame # Copyright: (c) 2013, Alexander Bulimov # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -12,6 +13,7 @@ DOCUMENTATION = ''' --- author: - Alexander Bulimov (@abulimov) + - quidame (@quidame) module: filesystem short_description: Makes a filesystem description: @@ -30,25 +32,22 @@ options: default: present version_added: 1.3.0 fstype: - choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ] + choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ] description: - Filesystem type to be created. This option is required with C(state=present) (or if I(state) is omitted). - - reiserfs support was added in 2.2. - - lvm support was added in 2.5. - - since 2.5, I(dev) can be an image file. - - vfat support was added in 2.5 - - ocfs2 support was added in 2.6 - - f2fs support was added in 2.7 - - swap support was added in 2.8 + - ufs support has been added in community.general 3.4.0. type: str aliases: [type] dev: description: - - Target path to block device or regular file. - - On systems not using block devices but character devices instead (as - FreeBSD), this module only works when applying to regular files, aka - disk images. + - Target path to block device (Linux) or character device (FreeBSD) or + regular file (both). + - When setting Linux-specific filesystem types on FreeBSD, this module + only works when applying to regular files, aka disk images. + - Currently C(lvm) (Linux-only) and C(ufs) (FreeBSD-only) don't support + a regular file as their target I(dev). + - Support for character devices on FreeBSD has been added in community.general 3.4.0. 
type: path required: yes aliases: [device] @@ -60,7 +59,7 @@ options: resizefs: description: - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. - - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs) and C(vfat) filesystems. + - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems. Attempts to resize other filesystem types will fail. - XFS Will only grow if mounted. Currently, the module is based on commands from C(util-linux) package to perform operations, so resizing of XFS is @@ -73,16 +72,24 @@ options: - List of options to be passed to mkfs command. type: str requirements: - - Uses tools related to the I(fstype) (C(mkfs)) and the C(blkid) command. - - When I(resizefs) is enabled, C(blockdev) command is required too. + - Uses specific tools related to the I(fstype) for creating or resizing a + filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on). + - Uses generic tools mostly related to the Operating System (Linux or + FreeBSD) or available on both, as C(blkid). + - On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required. notes: - - Potential filesystem on I(dev) are checked using C(blkid). In case C(blkid) - isn't able to detect an existing filesystem, this filesystem is overwritten - even if I(force) is C(no). - - On FreeBSD systems, either C(e2fsprogs) or C(util-linux) packages provide - a C(blkid) command that is compatible with this module, when applied to - regular files. + - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid) + is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also + unable to detect a filesystem), this filesystem is overwritten even if + I(force) is C(no). + - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide + a C(blkid) command that is compatible with this module. 
However, these + packages conflict with each other, and only the C(util-linux) package + provides the command required to not fail when I(state=absent). - This module supports I(check_mode). +seealso: + - module: community.general.filesize + - module: ansible.posix.mount ''' EXAMPLES = ''' @@ -101,6 +108,11 @@ EXAMPLES = ''' community.general.filesystem: dev: /dev/sdb1 state: absent + +- name: Create a filesystem on top of a regular file + community.general.filesystem: + dev: /path/to/disk.img + fstype: vfat ''' from distutils.version import LooseVersion @@ -125,6 +137,10 @@ class Device(object): blockdev_cmd = self.module.get_bin_path("blockdev", required=True) dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) devsize_in_bytes = int(out) + elif stat.S_ISCHR(statinfo.st_mode) and platform.system() == 'FreeBSD': + diskinfo_cmd = self.module.get_bin_path("diskinfo", required=True) + dummy, out, dummy = self.module.run_command([diskinfo_cmd, self.path], check_rc=True) + devsize_in_bytes = int(out.split()[2]) elif os.path.isfile(self.path): devsize_in_bytes = os.path.getsize(self.path) else: @@ -423,6 +439,31 @@ class Swap(Filesystem): MKFS_FORCE_FLAGS = ['-f'] +class UFS(Filesystem): + MKFS = 'newfs' + INFO = 'dumpfs' + GROW = 'growfs' + GROW_MAX_SPACE_FLAGS = ['-y'] + + def get_fs_size(self, dev): + """Get providersize and fragment size and return their product.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) + + fragmentsize = providersize = None + for line in out.splitlines(): + if line.startswith('fsize'): + fragmentsize = int(line.split()[1]) + elif 'providersize' in line: + providersize = int(line.split()[-1]) + if None not in (fragmentsize, providersize): + break + else: + raise ValueError(out) + + return fragmentsize * providersize + + FILESYSTEMS = { 'ext2': Ext2, 'ext3': Ext3, @@ -436,6 
+477,7 @@ FILESYSTEMS = { 'ocfs2': Ocfs2, 'LVM2_member': LVM, 'swap': Swap, + 'ufs': UFS, } @@ -484,11 +526,16 @@ def main(): dev = Device(module, dev) + # In case blkid/fstyp isn't able to identify an existing filesystem, device + # is considered as empty, then this existing filesystem would be overwritten + # even if force isn't enabled. cmd = module.get_bin_path('blkid', required=True) rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)]) - # In case blkid isn't able to identify an existing filesystem, device is considered as empty, - # then this existing filesystem would be overwritten even if force isn't enabled. fs = raw_fs.strip() + if not fs and platform.system() == 'FreeBSD': + cmd = module.get_bin_path('fstyp', required=True) + rc, raw_fs, err = module.run_command([cmd, str(dev)]) + fs = raw_fs.strip() if state == "present": if fstype in friendly_names: diff --git a/tests/integration/targets/filesystem/aliases b/tests/integration/targets/filesystem/aliases index 1c80472f94..1ef4c3619a 100644 --- a/tests/integration/targets/filesystem/aliases +++ b/tests/integration/targets/filesystem/aliases @@ -1,5 +1,5 @@ destructive -shippable/posix/group3 +shippable/posix/group1 skip/aix skip/osx skip/macos diff --git a/tests/integration/targets/filesystem/defaults/main.yml b/tests/integration/targets/filesystem/defaults/main.yml index 15ef85aa0e..27672bbea6 100644 --- a/tests/integration/targets/filesystem/defaults/main.yml +++ b/tests/integration/targets/filesystem/defaults/main.yml @@ -23,3 +23,9 @@ tested_filesystems: f2fs: {fssize: '{{ f2fs_fssize|default(60) }}', grow: 'f2fs_version is version("1.10.0", ">=")'} lvm: {fssize: 20, grow: True} swap: {fssize: 10, grow: False} # grow not implemented + ufs: {fssize: 10, grow: True} + + +get_uuid_any: "blkid -c /dev/null -o value -s UUID {{ dev }}" +get_uuid_ufs: "dumpfs {{ dev }} | awk -v sb=superblock -v id=id '$1 == sb && $4 == id {print $6$7}'" +get_uuid_cmd: "{{ 
get_uuid_ufs if fstype == 'ufs' else get_uuid_any }}" diff --git a/tests/integration/targets/filesystem/tasks/create_device.yml b/tests/integration/targets/filesystem/tasks/create_device.yml index 30fd62e33a..ae314221a5 100644 --- a/tests/integration/targets/filesystem/tasks/create_device.yml +++ b/tests/integration/targets/filesystem/tasks/create_device.yml @@ -19,6 +19,17 @@ ansible.builtin.set_fact: dev: "{{ loop_device_cmd.stdout }}" + - when: fstype == 'ufs' + block: + - name: 'Create a memory disk for UFS' + ansible.builtin.command: + cmd: 'mdconfig -a -f {{ dev }}' + register: memory_disk_cmd + + - name: 'Switch to memory disk target for further tasks' + ansible.builtin.set_fact: + dev: "/dev/{{ memory_disk_cmd.stdout }}" + - include_tasks: '{{ action }}.yml' always: @@ -28,10 +39,16 @@ removes: '{{ dev }}' when: fstype == 'lvm' - - name: 'Clean correct device for LVM' + - name: 'Detach memory disk used for UFS' + ansible.builtin.command: + cmd: 'mdconfig -d -u {{ dev }}' + removes: '{{ dev }}' + when: fstype == 'ufs' + + - name: 'Clean correct device for LVM and UFS' ansible.builtin.set_fact: dev: '{{ image_file }}' - when: fstype == 'lvm' + when: fstype in ['lvm', 'ufs'] - name: 'Remove disk image file' ansible.builtin.file: diff --git a/tests/integration/targets/filesystem/tasks/create_fs.yml b/tests/integration/targets/filesystem/tasks/create_fs.yml index de1a9f18a0..3c92197c0a 100644 --- a/tests/integration/targets/filesystem/tasks/create_fs.yml +++ b/tests/integration/targets/filesystem/tasks/create_fs.yml @@ -12,8 +12,8 @@ - 'fs_result is success' - name: "Get UUID of created filesystem" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: uuid @@ -24,8 +24,8 @@ register: fs2_result - name: "Get UUID of the filesystem" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ 
get_uuid_cmd }}" changed_when: false register: uuid2 @@ -44,8 +44,8 @@ register: fs3_result - name: "Get UUID of the new filesystem" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: uuid3 @@ -71,6 +71,11 @@ cmd: 'losetup -c {{ dev }}' when: fstype == 'lvm' + - name: "Resize memory disk for UFS" + ansible.builtin.command: + cmd: 'mdconfig -r -u {{ dev }} -s {{ fssize | int + 1 }}M' + when: fstype == 'ufs' + - name: "Expand filesystem" community.general.filesystem: dev: '{{ dev }}' @@ -79,8 +84,8 @@ register: fs4_result - name: "Get UUID of the filesystem" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: uuid4 diff --git a/tests/integration/targets/filesystem/tasks/freebsd_setup.yml b/tests/integration/targets/filesystem/tasks/freebsd_setup.yml new file mode 100644 index 0000000000..e08beca4a8 --- /dev/null +++ b/tests/integration/targets/filesystem/tasks/freebsd_setup.yml @@ -0,0 +1,10 @@ +--- +- name: "Uninstall e2fsprogs" + ansible.builtin.package: + name: e2fsprogs + state: absent + +- name: "Install util-linux" + ansible.builtin.package: + name: util-linux + state: present diff --git a/tests/integration/targets/filesystem/tasks/main.yml b/tests/integration/targets/filesystem/tasks/main.yml index d836c8a15d..4b2c5bdc2a 100644 --- a/tests/integration/targets/filesystem/tasks/main.yml +++ b/tests/integration/targets/filesystem/tasks/main.yml @@ -35,6 +35,10 @@ # Available on FreeBSD but not on testbed (util-linux conflicts with e2fsprogs): wipefs, mkfs.minix - 'not (ansible_system == "FreeBSD" and item.1 in ["overwrite_another_fs", "remove_fs"])' + # Linux limited support + # Not available: ufs (this is FreeBSD's native fs) + - 'not (ansible_system == "Linux" and item.0.key == "ufs")' + # Other limitations and corner cases # f2fs-tools 
and reiserfs-utils packages not available with RHEL/CentOS on CI @@ -59,3 +63,24 @@ item.0.key == "xfs" and ansible_python.version.major == 2)' loop: "{{ query('dict', tested_filesystems)|product(['create_fs', 'overwrite_another_fs', 'remove_fs'])|list }}" + + +# With FreeBSD extended support (util-linux is not available before 12.2) + +- include_tasks: freebsd_setup.yml + when: + - 'ansible_system == "FreeBSD"' + - 'ansible_distribution_version is version("12.2", ">=")' + +- include_tasks: create_device.yml + vars: + image_file: '{{ remote_tmp_dir }}/img' + fstype: '{{ item.0.key }}' + fssize: '{{ item.0.value.fssize }}' + grow: '{{ item.0.value.grow }}' + action: '{{ item.1 }}' + when: + - 'ansible_system == "FreeBSD"' + - 'ansible_distribution_version is version("12.2", ">=")' + - 'item.0.key in ["xfs", "vfat"]' + loop: "{{ query('dict', tested_filesystems)|product(['create_fs', 'overwrite_another_fs', 'remove_fs'])|list }}" diff --git a/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml b/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml index 4bf92836bb..83a623fa75 100644 --- a/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml +++ b/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml @@ -10,8 +10,8 @@ cmd: 'mkfs.minix {{ dev }}' - name: 'Get UUID of the new filesystem' - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: uuid @@ -23,8 +23,8 @@ ignore_errors: True - name: 'Get UUID of the filesystem' - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: uuid2 @@ -42,8 +42,8 @@ register: fs_result2 - name: 'Get UUID of the new filesystem' - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" 
changed_when: false register: uuid3 diff --git a/tests/integration/targets/filesystem/tasks/remove_fs.yml b/tests/integration/targets/filesystem/tasks/remove_fs.yml index 338d439d60..3127dce559 100644 --- a/tests/integration/targets/filesystem/tasks/remove_fs.yml +++ b/tests/integration/targets/filesystem/tasks/remove_fs.yml @@ -7,8 +7,8 @@ fstype: '{{ fstype }}' - name: "Get filesystem UUID with 'blkid'" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: blkid_ref @@ -27,8 +27,8 @@ check_mode: yes - name: "Get filesystem UUID with 'blkid' (should remain the same)" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: blkid @@ -46,8 +46,8 @@ register: wipefs - name: "Get filesystem UUID with 'blkid' (should be empty)" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false failed_when: false register: blkid From 111c5de55006cf3af43599eea60edb15b9f66954 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sat, 10 Jul 2021 10:39:51 -0400 Subject: [PATCH 0193/2828] proxmox inventory - fix parsing for offline nodes (#2967) * Initial commit * Adding changelog fragment * Applying initial review suggestions --- .../2967-proxmox_inventory-offline-node-fix.yml | 3 +++ plugins/inventory/proxmox.py | 3 +++ tests/unit/plugins/inventory/test_proxmox.py | 12 +++++++++--- 3 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2967-proxmox_inventory-offline-node-fix.yml diff --git a/changelogs/fragments/2967-proxmox_inventory-offline-node-fix.yml b/changelogs/fragments/2967-proxmox_inventory-offline-node-fix.yml new file mode 100644 index 0000000000..d52fef4d8a --- /dev/null +++ 
b/changelogs/fragments/2967-proxmox_inventory-offline-node-fix.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - proxmox inventory plugin - fixed parsing failures when some cluster nodes are offline (https://github.com/ansible-collections/community.general/issues/2931). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index be3ecd4365..c99962bcdd 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -369,6 +369,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if node['type'] == 'node': self.inventory.add_child(nodes_group, node['node']) + if node['status'] == 'offline': + continue + # get node IP address if self.get_option("want_proxmox_nodes_ansible_host"): ip = self._get_node_ip(node['node']) diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index c2b0408138..87d47a3cff 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ b/tests/unit/plugins/inventory/test_proxmox.py @@ -9,7 +9,6 @@ __metaclass__ = type import pytest -from ansible.errors import AnsibleError, AnsibleParserError from ansible.inventory.data import InventoryData from ansible_collections.community.general.plugins.inventory.proxmox import InventoryModule @@ -52,7 +51,12 @@ def get_json(url): "disk": 1000, "maxmem": 1000, "uptime": 10000, - "level": ""}] + "level": ""}, + {"type": "node", + "node": "testnode2", + "id": "node/testnode2", + "status": "offline", + "ssl_fingerprint": "yy"}] elif url == "https://localhost:8006/api2/json/pools": # _get_pools return [{"poolid": "test"}] @@ -554,7 +558,6 @@ def test_populate(inventory, mocker): host_qemu_multi_nic = inventory.inventory.get_host('test-qemu-multi-nic') host_qemu_template = inventory.inventory.get_host('test-qemu-template') host_lxc = inventory.inventory.get_host('test-lxc') - host_node = inventory.inventory.get_host('testnode') # check if qemu-test is in the proxmox_pool_test group assert 'proxmox_pool_test' in 
inventory.inventory.groups @@ -584,3 +587,6 @@ def test_populate(inventory, mocker): # check if qemu template is not present assert host_qemu_template is None + + # check that offline node is in inventory + assert inventory.inventory.get_host('testnode2') From 7a41833e599e04d1f24ad90c17843ad5aec8a958 Mon Sep 17 00:00:00 2001 From: Tyler Schwend Date: Sat, 10 Jul 2021 13:24:09 -0400 Subject: [PATCH 0194/2828] feat: support datadog_monitor composite type (#2958) * feat: support datadog_monitor composite type * docs: note support for composite types * lint * lint: line lengths * doc: changelog frag --- .../2958-datadog_monitor_support_composites.yml | 3 +++ .../modules/monitoring/datadog/datadog_monitor.py | 15 +++++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2958-datadog_monitor_support_composites.yml diff --git a/changelogs/fragments/2958-datadog_monitor_support_composites.yml b/changelogs/fragments/2958-datadog_monitor_support_composites.yml new file mode 100644 index 0000000000..394a589994 --- /dev/null +++ b/changelogs/fragments/2958-datadog_monitor_support_composites.yml @@ -0,0 +1,3 @@ +minor_changes: + - datadog_monitor - allow creation of composite datadog monitors + (https://github.com/ansible-collections/community.general/issues/2956). diff --git a/plugins/modules/monitoring/datadog/datadog_monitor.py b/plugins/modules/monitoring/datadog/datadog_monitor.py index 6c0f8cdb02..ab25777ecd 100644 --- a/plugins/modules/monitoring/datadog/datadog_monitor.py +++ b/plugins/modules/monitoring/datadog/datadog_monitor.py @@ -51,7 +51,17 @@ options: description: - The type of the monitor. - The types C(query alert), C(trace-analytics alert) and C(rum alert) were added in community.general 2.1.0. - choices: ['metric alert', 'service check', 'event alert', 'process alert', 'log alert', 'query alert', 'trace-analytics alert', 'rum alert'] + - The type C(composite) was added in community.general 3.4.0. 
+ choices: + - metric alert + - service check + - event alert + - process alert + - log alert + - query alert + - trace-analytics alert + - rum alert + - composite type: str query: description: @@ -209,7 +219,8 @@ def main(): app_key=dict(required=True, no_log=True), state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']), type=dict(choices=['metric alert', 'service check', 'event alert', 'process alert', - 'log alert', 'query alert', 'trace-analytics alert', 'rum alert']), + 'log alert', 'query alert', 'trace-analytics alert', + 'rum alert', 'composite']), name=dict(required=True), query=dict(), notification_message=dict(no_log=True), From c5cbe2943be0665ba1297c588b51d4d275c73ef4 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 11 Jul 2021 11:43:40 +1200 Subject: [PATCH 0195/2828] =?UTF-8?q?module=5Fhelper=20cmd=20-=20added=20f?= =?UTF-8?q?eature=20flag=20to=20control=20whether=20CmdMixin=20adds=20rc,?= =?UTF-8?q?=20out=20and=20err=20t=E2=80=A6=20(#2922)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * added feature flag to control whether CmdMixin adds rc, out and err to the result of the module * added changelog fragment * changed from a global flag to parameters in run_command * updated changelog * fixed brainless copy-paste of yours truly --- .../2922-mh-cmd-output-feature-flag.yml | 2 ++ plugins/module_utils/mh/mixins/cmd.py | 16 ++++++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2922-mh-cmd-output-feature-flag.yml diff --git a/changelogs/fragments/2922-mh-cmd-output-feature-flag.yml b/changelogs/fragments/2922-mh-cmd-output-feature-flag.yml new file mode 100644 index 0000000000..e071e3413b --- /dev/null +++ b/changelogs/fragments/2922-mh-cmd-output-feature-flag.yml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper module utils - added feature flag parameters to ``CmdMixin`` to control 
whether ``rc``, ``out`` and ``err`` are automatically added to the module output (https://github.com/ansible-collections/community.general/pull/2922). diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py index 0367b6173c..aed4174c4f 100644 --- a/plugins/module_utils/mh/mixins/cmd.py +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -152,7 +152,14 @@ class CmdMixin(object): def process_command_output(self, rc, out, err): return rc, out, err - def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs): + def run_command(self, + extra_params=None, + params=None, + process_output=None, + publish_rc=True, + publish_out=True, + publish_err=True, + *args, **kwargs): self.vars.cmd_args = self._calculate_args(extra_params, params) options = dict(self.run_command_fixed_options) options['check_rc'] = options.get('check_rc', self.check_rc) @@ -166,7 +173,12 @@ class CmdMixin(object): self.update_output(force_lang=self.force_lang) options['environ_update'] = env_update rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options) - self.update_output(rc=rc, stdout=out, stderr=err) + if publish_rc: + self.update_output(rc=rc) + if publish_out: + self.update_output(stdout=out) + if publish_err: + self.update_output(stderr=err) if process_output is None: _process = self.process_command_output else: From d56d34bce6249e4b3cbb7ddd9eb13602b2557fec Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 12 Jul 2021 02:34:59 +1200 Subject: [PATCH 0196/2828] added missing copyright notes to MH integration tests (#2990) --- tests/integration/targets/module_helper/tasks/main.yml | 3 +++ tests/integration/targets/module_helper/tasks/mdepfail.yml | 3 +++ tests/integration/targets/module_helper/tasks/msimple.yml | 3 +++ tests/integration/targets/module_helper/tasks/mstate.yml | 3 +++ 4 files changed, 12 insertions(+) diff --git 
a/tests/integration/targets/module_helper/tasks/main.yml b/tests/integration/targets/module_helper/tasks/main.yml index 05c41c2a38..8ac7c8ae60 100644 --- a/tests/integration/targets/module_helper/tasks/main.yml +++ b/tests/integration/targets/module_helper/tasks/main.yml @@ -1,3 +1,6 @@ +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + - include_tasks: msimple.yml - include_tasks: mdepfail.yml - include_tasks: mstate.yml diff --git a/tests/integration/targets/module_helper/tasks/mdepfail.yml b/tests/integration/targets/module_helper/tasks/mdepfail.yml index d22738a778..ad8fc5d57d 100644 --- a/tests/integration/targets/module_helper/tasks/mdepfail.yml +++ b/tests/integration/targets/module_helper/tasks/mdepfail.yml @@ -1,3 +1,6 @@ +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + - name: test failing dependency mdepfail: a: 123 diff --git a/tests/integration/targets/module_helper/tasks/msimple.yml b/tests/integration/targets/module_helper/tasks/msimple.yml index deb386f2b5..4f032fd177 100644 --- a/tests/integration/targets/module_helper/tasks/msimple.yml +++ b/tests/integration/targets/module_helper/tasks/msimple.yml @@ -1,3 +1,6 @@ +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + - name: test msimple 1 msimple: a: 80 diff --git a/tests/integration/targets/module_helper/tasks/mstate.yml b/tests/integration/targets/module_helper/tasks/mstate.yml index 53329a3c70..c4dfdb9a0e 100644 --- a/tests/integration/targets/module_helper/tasks/mstate.yml +++ b/tests/integration/targets/module_helper/tasks/mstate.yml @@ -1,3 +1,6 @@ +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + - name: test mstate 1 mstate: a: 80 From 5079ef0e82c1fb7acfd23e0dbb82b1ecdfee858d Mon Sep 17 
00:00:00 2001 From: Mark Chappell Date: Sun, 11 Jul 2021 16:38:58 +0200 Subject: [PATCH 0197/2828] feature request template - replace ansible-core with community.general - looks like a C&P error (#2992) --- .github/ISSUE_TEMPLATE/feature_request.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 5f89dec77a..e676ff25ef 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -21,7 +21,7 @@ body: placeholder: >- I am trying to do X with the collection from the main branch on GitHub and I think that implementing a feature Y would be very helpful for me and - every other user of ansible-core because of Z. + every other user of community.general because of Z. validations: required: true From 3fc97bf80aae8c695288e8920df3ea48b151a369 Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Tue, 13 Jul 2021 05:57:16 +0100 Subject: [PATCH 0198/2828] Keycloak: Improve diff mode on keycloak_authentication module (#2963) * Fix diff mode when updating authentication flow with keycloak_authentication module * Add changelog fragment * Fix unit test * Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Ajpantuso * Update changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml Co-authored-by: Ajpantuso * Update documentation of create_or_update_executions function (return tuple instead of dict) * Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Ajpantuso * Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Ajpantuso Co-authored-by: Ajpantuso --- ...e-diff-mode-on-keycloak_authentication.yml | 3 +++ .../keycloak/keycloak_authentication.py | 25 ++++++++++++++----- .../keycloak/test_keycloak_authentication.py | 6 ++--- 3 files changed, 25 insertions(+), 9 deletions(-) 
create mode 100644 changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml diff --git a/changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml b/changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml new file mode 100644 index 0000000000..fa5f133d7d --- /dev/null +++ b/changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- keycloak_authentication - enhanced diff mode to also return before and after state when the authentication flow is updated (https://github.com/ansible-collections/community.general/pull/2963). diff --git a/plugins/modules/identity/keycloak/keycloak_authentication.py b/plugins/modules/identity/keycloak/keycloak_authentication.py index 9fd04eb70b..8a33409b58 100644 --- a/plugins/modules/identity/keycloak/keycloak_authentication.py +++ b/plugins/modules/identity/keycloak/keycloak_authentication.py @@ -196,9 +196,15 @@ def create_or_update_executions(kc, config, realm='master'): :param config: Representation of the authentication flow including it's executions. :param realm: Realm :return: True if executions have been modified. False otherwise. 
+ :return: tuple (changed, dict(before, after) + WHERE + bool changed indicates if changes have been made + dict(str, str) shows state before and after creation/update """ try: changed = False + after = "" + before = "" if "authenticationExecutions" in config: # Get existing executions on the Keycloak server for this alias existing_executions = kc.get_executions_representation(config, realm=realm) @@ -221,17 +227,21 @@ def create_or_update_executions(kc, config, realm='master'): exclude_key.append(key) # Compare the executions to see if it need changes if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index: - changed = True + exec_found = True + before += str(existing_executions[exec_index]) + '\n' id_to_update = existing_executions[exec_index]["id"] # Remove exec from list in case 2 exec with same name existing_executions[exec_index].clear() elif new_exec["providerId"] is not None: kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm) - changed = True + exec_found = True + after += str(new_exec) + '\n' elif new_exec["displayName"] is not None: kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm) + exec_found = True + after += str(new_exec) + '\n' + if exec_found: changed = True - if changed: if exec_index != -1: # Update the existing execution updated_exec = { @@ -248,7 +258,8 @@ def create_or_update_executions(kc, config, realm='master'): kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm) diff = exec_index - new_exec_index kc.change_execution_priority(updated_exec["id"], diff, realm=realm) - return changed + after += str(kc.get_executions_representation(config, realm=realm)[new_exec_index]) + '\n' + return changed, dict(before=before, after=after) except Exception as e: kc.module.fail_json(msg='Could not create or update executions for authentication flow %s in realm %s: %s' % (config["alias"], realm, str(e))) @@ -358,8 +369,10 @@ def 
main(): # Configure the executions for the flow if module.check_mode: module.exit_json(**result) - if create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm): - result['changed'] = True + changed, diff = create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) + result['changed'] |= changed + if module._diff: + result['diff'] = diff # Get executions created exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) if exec_repr is not None: diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py index db0168aa83..91e34eea7b 100644 --- a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py @@ -343,7 +343,7 @@ class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected @@ -434,7 +434,7 @@ class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected @@ -611,7 +611,7 @@ 
class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 1) # Verify that the module's changed status matches what is expected From 9ffc1ef393b56d35e65f8ea48723b903da3d1fe8 Mon Sep 17 00:00:00 2001 From: omula Date: Wed, 14 Jul 2021 08:24:27 +0200 Subject: [PATCH 0199/2828] [nmcli] add runner and runner-hwaddr-policy for network teaming (#2901) * [nmcli] add runner and runner-hwaddr-policy for network teaming * [nmcli] delete extra space * Update plugins/modules/net_tools/nmcli.py * Update plugins/modules/net_tools/nmcli.py * [nmcli] add changelog fragment * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Amin Vakil Co-authored-by: Oriol MULA VALLS Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Amin Vakil --- changelogs/fragments/2901-nmcli_teaming.yml | 2 ++ plugins/modules/net_tools/nmcli.py | 32 ++++++++++++++++++- .../plugins/modules/net_tools/test_nmcli.py | 26 +++++++++++++++ 3 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2901-nmcli_teaming.yml diff --git a/changelogs/fragments/2901-nmcli_teaming.yml b/changelogs/fragments/2901-nmcli_teaming.yml new file mode 100644 index 0000000000..4178b9c6f5 --- /dev/null +++ b/changelogs/fragments/2901-nmcli_teaming.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - add ``runner`` and ``runner_hwaddr_policy`` options (https://github.com/ansible-collections/community.general/issues/2901). 
diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 7ed515fc75..1750f9f99f 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -57,7 +57,7 @@ options: choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] mode: description: - - This is the type of device or network connection that you wish to create for a bond, team or bridge. + - This is the type of device or network connection that you wish to create for a bond or bridge. type: str choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ] default: balance-rr @@ -265,6 +265,20 @@ options: frame was received on. type: bool default: yes + runner: + description: + - This is the type of device or network connection that you wish to create for a team. + type: str + choices: [ broadcast, roundrobin, activebackup, loadbalance, lacp ] + default: roundrobin + version_added: 3.4.0 + runner_hwaddr_policy: + description: + - This defines the policy of how hardware addresses of team device and port devices + should be set during the team lifetime. + type: str + choices: [ same_all, by_active, only_active ] + version_added: 3.4.0 vlanid: description: - This is only used with VLAN - VLAN ID in range <0-4095>. 
@@ -719,6 +733,8 @@ class Nmcli(object): self.hairpin = module.params['hairpin'] self.path_cost = module.params['path_cost'] self.mac = module.params['mac'] + self.runner = module.params['runner'] + self.runner_hwaddr_policy = module.params['runner_hwaddr_policy'] self.vlanid = module.params['vlanid'] self.vlandev = module.params['vlandev'] self.flags = module.params['flags'] @@ -826,6 +842,11 @@ class Nmcli(object): 'bridge.priority': self.priority, 'bridge.stp': self.stp, }) + elif self.type == 'team': + options.update({ + 'team.runner': self.runner, + 'team.runner-hwaddr-policy': self.runner_hwaddr_policy, + }) elif self.type == 'bridge-slave': options.update({ 'connection.slave-type': 'bridge', @@ -1214,6 +1235,11 @@ def main(): ageingtime=dict(type='int', default=300), hairpin=dict(type='bool', default=True), path_cost=dict(type='int', default=100), + # team specific vars + runner=dict(type='str', default='roundrobin', + choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']), + # team active-backup runner specific options + runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']), # vlan specific vars vlanid=dict(type='int'), vlandev=dict(type='str'), @@ -1245,6 +1271,10 @@ def main(): # check for issues if nmcli.conn_name is None: nmcli.module.fail_json(msg="Please specify a name for the connection") + # team checks + if nmcli.type == "team": + if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup": + nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup") # team-slave checks if nmcli.type == 'team-slave': if nmcli.master is None: diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index ba526b1d65..63ec60537c 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -279,8 +279,20 @@ ipv4.may-fail: yes ipv6.method: auto 
ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no +team.runner: roundrobin """ +TESTCASE_TEAM_HWADDR_POLICY_FAILS = [ + { + 'type': 'team', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'team0_non_existant', + 'runner_hwaddr_policy': 'by_active', + 'state': 'present', + '_ansible_check_mode': False, + } +] + TESTCASE_TEAM_SLAVE = [ { 'type': 'team-slave', @@ -1053,6 +1065,20 @@ def test_team_connection_unchanged(mocked_team_connection_unchanged, capfd): assert not results['changed'] +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_HWADDR_POLICY_FAILS, indirect=['patch_ansible_module']) +def test_team_connection_create_hwaddr_policy_fails(mocked_generic_connection_create, capfd): + """ + Test : Team connection created + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert results.get('failed') + assert results['msg'] == "Runner-hwaddr-policy is only allowed for runner activebackup" + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_SLAVE, indirect=['patch_ansible_module']) def test_create_team_slave(mocked_generic_connection_create, capfd): """ From 28193b699ba0fc207fc4352b5cf99e46e5d3f707 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Wed, 14 Jul 2021 09:26:12 +0300 Subject: [PATCH 0200/2828] Update README.md (#3003) --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 6f13fe150c..26a63ab4b2 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,9 @@ See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_gui ## Contributing to this collection -The content of this collection is made by good people like you, a community of individuals collaborating on making the world better through developing automation software. 
+The content of this collection is made by good people just like you, a community of individuals collaborating on making the world better through developing automation software. + +We are actively accepting new contributors. All types of contributions are very welcome. From a3a40f6de316716acc1c61f94683a546202aede1 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 14 Jul 2021 23:04:35 +1200 Subject: [PATCH 0201/2828] pamd - fixed single line issue (#2989) * fixed pamd single line issue * added changelog fragment * supported case for 0 lines, improved test --- .../fragments/2989-pamd-single-line.yaml | 2 + plugins/modules/system/pamd.py | 13 +++-- tests/integration/targets/pamd/aliases | 5 ++ tests/integration/targets/pamd/tasks/main.yml | 56 +++++++++++++++++++ 4 files changed, 72 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2989-pamd-single-line.yaml create mode 100644 tests/integration/targets/pamd/aliases create mode 100644 tests/integration/targets/pamd/tasks/main.yml diff --git a/changelogs/fragments/2989-pamd-single-line.yaml b/changelogs/fragments/2989-pamd-single-line.yaml new file mode 100644 index 0000000000..359e160785 --- /dev/null +++ b/changelogs/fragments/2989-pamd-single-line.yaml @@ -0,0 +1,2 @@ +bugfixes: + - pamd - fixed problem with files containing only one or two lines (https://github.com/ansible-collections/community.general/issues/2925). 
diff --git a/plugins/modules/system/pamd.py b/plugins/modules/system/pamd.py index 39b3f32e44..738a23ee43 100644 --- a/plugins/modules/system/pamd.py +++ b/plugins/modules/system/pamd.py @@ -733,14 +733,19 @@ class PamdService(object): lines = [] current_line = self._head + mark = "# Updated by Ansible - %s" % datetime.now().isoformat() while current_line is not None: lines.append(str(current_line)) current_line = current_line.next - if lines[1].startswith("# Updated by Ansible"): - lines.pop(1) - - lines.insert(1, "# Updated by Ansible - " + datetime.now().isoformat()) + if len(lines) <= 1: + lines.insert(0, "") + lines.insert(1, mark) + else: + if lines[1].startswith("# Updated by Ansible"): + lines[1] = mark + else: + lines.insert(1, mark) return '\n'.join(lines) + '\n' diff --git a/tests/integration/targets/pamd/aliases b/tests/integration/targets/pamd/aliases new file mode 100644 index 0000000000..abe0a21e22 --- /dev/null +++ b/tests/integration/targets/pamd/aliases @@ -0,0 +1,5 @@ +shippable/posix/group1 +skip/aix +skip/freebsd +skip/osx +skip/macos diff --git a/tests/integration/targets/pamd/tasks/main.yml b/tests/integration/targets/pamd/tasks/main.yml new file mode 100644 index 0000000000..3e0fb4ee32 --- /dev/null +++ b/tests/integration/targets/pamd/tasks/main.yml @@ -0,0 +1,56 @@ +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: Set value for temp limit configuration + set_fact: + test_pamd_file: "/tmp/pamd_file" + +- name: Copy temporary pam.d file + copy: + content: "session required pam_lastlog.so silent showfailed" + dest: "{{ test_pamd_file }}" + +- name: Test working on a single-line file works (2925) + community.general.pamd: + path: /tmp + name: pamd_file + type: session + control: required + module_path: pam_lastlog.so + module_arguments: silent + state: args_absent + register: pamd_file_output + +- name: Check if changes made + assert: + that: + - 
pamd_file_output is changed + +- name: Copy temporary pam.d file + copy: + content: "" + dest: "{{ test_pamd_file }}" + +# This test merely demonstrates that, as-is, module will not perform any changes on an empty file +# All the existing values for "state" will first search for a rule matching type, control, module_path +# and will not perform any change whatsoever if no existing rules match. +- name: Test working on a empty file works (2925) + community.general.pamd: + path: /tmp + name: pamd_file + type: session + control: required + module_path: pam_lastlog.so + module_arguments: silent + register: pamd_file_output_empty + +- name: Read back the file + slurp: + src: "{{ test_pamd_file }}" + register: pamd_file_slurp + +- name: Check if changes made + assert: + that: + - pamd_file_output_empty is not changed + - pamd_file_slurp.content|b64decode == '' From ea822c7bdd9cbeccc4541c2f95280442c6f213ab Mon Sep 17 00:00:00 2001 From: Scott Seekamp Date: Fri, 16 Jul 2021 11:02:34 -0600 Subject: [PATCH 0202/2828] Redfish Bootoverride Disable behaves incorrectly (#3006) * https://github.com/ansible-collections/community.general/issues/3005 Bypass the boot device argument check when the command is: DisableBootOverride as it isn't needed to perform this operation. 
* Add changelog fragment --- ...dfish_command-bootoverride-argument-check.yaml | 3 +++ plugins/module_utils/redfish_utils.py | 15 ++++++++------- 2 files changed, 11 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/3006-redfish_command-bootoverride-argument-check.yaml diff --git a/changelogs/fragments/3006-redfish_command-bootoverride-argument-check.yaml b/changelogs/fragments/3006-redfish_command-bootoverride-argument-check.yaml new file mode 100644 index 0000000000..680d3dea83 --- /dev/null +++ b/changelogs/fragments/3006-redfish_command-bootoverride-argument-check.yaml @@ -0,0 +1,3 @@ +bugfixes: + - redfish_command - fix extraneous error caused by missing ``bootdevice`` argument + when using the ``DisableBootOverride`` sub-command (https://github.com/ansible-collections/community.general/issues/3005). diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index c39c02a42e..8d293f0056 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1582,13 +1582,14 @@ class RedfishUtils(object): boot = data[key] - annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues' - if annotation in boot: - allowable_values = boot[annotation] - if isinstance(allowable_values, list) and bootdevice not in allowable_values: - return {'ret': False, - 'msg': "Boot device %s not in list of allowable values (%s)" % - (bootdevice, allowable_values)} + if override_enabled != 'Disabled': + annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues' + if annotation in boot: + allowable_values = boot[annotation] + if isinstance(allowable_values, list) and bootdevice not in allowable_values: + return {'ret': False, + 'msg': "Boot device %s not in list of allowable values (%s)" % + (bootdevice, allowable_values)} # read existing values cur_enabled = boot.get('BootSourceOverrideEnabled') From 9b1c6f0743f87264ce658b5d548e358dfe9af740 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 
16 Jul 2021 19:29:00 +0200 Subject: [PATCH 0203/2828] Enable tests (#3015) * Enable tests. * Fix error message check. * Fix boolean tests. * Adjust to Python version. --- tests/integration/targets/filter_groupby/aliases | 2 +- .../integration/targets/filter_groupby/tasks/main.yml | 2 +- tests/integration/targets/module_helper/aliases | 2 +- .../targets/module_helper/tasks/mdepfail.yml | 4 ++-- .../targets/module_helper/tasks/msimple.yml | 10 +++++----- .../integration/targets/module_helper/tasks/mstate.yml | 10 +++++----- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/tests/integration/targets/filter_groupby/aliases b/tests/integration/targets/filter_groupby/aliases index 6e79abdd02..3e81d77f98 100644 --- a/tests/integration/targets/filter_groupby/aliases +++ b/tests/integration/targets/filter_groupby/aliases @@ -1,2 +1,2 @@ -shippable/posix/group4 +shippable/posix/group3 skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/filter_groupby/tasks/main.yml b/tests/integration/targets/filter_groupby/tasks/main.yml index 29036a3bc5..219e047d4d 100644 --- a/tests/integration/targets/filter_groupby/tasks/main.yml +++ b/tests/integration/targets/filter_groupby/tasks/main.yml @@ -42,4 +42,4 @@ - assert: that: - - result.msg == "Multiple sequence entries have attribute value 'a'" + - result.msg == "Multiple sequence entries have attribute value 'a'" or result.msg == "Multiple sequence entries have attribute value u'a'" diff --git a/tests/integration/targets/module_helper/aliases b/tests/integration/targets/module_helper/aliases index 3005e4b26d..765b70da79 100644 --- a/tests/integration/targets/module_helper/aliases +++ b/tests/integration/targets/module_helper/aliases @@ -1 +1 @@ -shippable/posix/group4 +shippable/posix/group2 diff --git a/tests/integration/targets/module_helper/tasks/mdepfail.yml b/tests/integration/targets/module_helper/tasks/mdepfail.yml index 
ad8fc5d57d..01523513a3 100644 --- a/tests/integration/targets/module_helper/tasks/mdepfail.yml +++ b/tests/integration/targets/module_helper/tasks/mdepfail.yml @@ -10,8 +10,8 @@ - name: assert failing dependency assert: that: - - result.failed is true + - result is failed - '"Failed to import" in result.msg' - '"nopackagewiththisname" in result.msg' - - '"ModuleNotFoundError:" in result.exception' + - '"ModuleNotFoundError:" in result.exception or "ImportError:" in result.exception' - '"nopackagewiththisname" in result.exception' diff --git a/tests/integration/targets/module_helper/tasks/msimple.yml b/tests/integration/targets/module_helper/tasks/msimple.yml index 4f032fd177..4d2ff9b798 100644 --- a/tests/integration/targets/module_helper/tasks/msimple.yml +++ b/tests/integration/targets/module_helper/tasks/msimple.yml @@ -11,7 +11,7 @@ that: - simple1.a == 80 - simple1.abc == "abc" - - simple1.changed is false + - simple1 is not changed - simple1.value is none - name: test msimple 2 @@ -26,8 +26,8 @@ - simple2.a == 101 - 'simple2.msg == "Module failed with exception: a >= 100"' - simple2.abc == "abc" - - simple2.failed is true - - simple2.changed is false + - simple2 is failed + - simple2 is not changed - simple2.value is none - name: test msimple 3 @@ -42,7 +42,7 @@ - simple3.a == 2 - simple3.b == "potatoespotatoes" - simple3.c == "NoneNone" - - simple3.changed is false + - simple3 is not changed - name: test msimple 4 msimple: @@ -54,4 +54,4 @@ that: - simple4.c == "abc change" - simple4.abc == "changed abc" - - simple4.changed is true + - simple4 is changed diff --git a/tests/integration/targets/module_helper/tasks/mstate.yml b/tests/integration/targets/module_helper/tasks/mstate.yml index c4dfdb9a0e..6476f76429 100644 --- a/tests/integration/targets/module_helper/tasks/mstate.yml +++ b/tests/integration/targets/module_helper/tasks/mstate.yml @@ -16,7 +16,7 @@ - state1.b == "banana" - state1.c == "cashew" - state1.result == "abc" - - state1.changed is false + - 
state1 is not changed - name: test mstate 2 mstate: @@ -32,7 +32,7 @@ - state2.b == "banana" - state2.c == "cashew" - state2.result == "80bananacashew" - - state2.changed is true + - state2 is changed - name: test mstate 3 mstate: @@ -47,7 +47,7 @@ - state3.a == 3 - state3.b == "banana" - state3.result == "bananabananabanana" - - state3.changed is true + - state3 is changed - name: test mstate 4 mstate: @@ -62,7 +62,7 @@ - state4.a == 4 - state4.c == "cashew" - state4.result == "cashewcashewcashewcashew" - - state4.changed is true + - state4 is changed - name: test mstate 5 mstate: @@ -79,4 +79,4 @@ - state5.b == "foo" - state5.c == "bar" - state5.result == "foobarfoobarfoobarfoobarfoobar" - - state5.changed is true + - state5 is changed From 27ba98a68eafe5f1563cafb7b720f02d3f7c1f12 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 16 Jul 2021 19:52:09 +0200 Subject: [PATCH 0204/2828] Check targets (#3019) * Add extra sanity test to check aliases files. * Remove invalid target name. 
--- tests/integration/targets/mqtt/aliases | 1 - tests/sanity/extra/aliases.json | 11 +++++ tests/sanity/extra/aliases.py | 63 ++++++++++++++++++++++++++ 3 files changed, 74 insertions(+), 1 deletion(-) create mode 100644 tests/sanity/extra/aliases.json create mode 100755 tests/sanity/extra/aliases.py diff --git a/tests/integration/targets/mqtt/aliases b/tests/integration/targets/mqtt/aliases index 0a4db0379e..9a30a5a281 100644 --- a/tests/integration/targets/mqtt/aliases +++ b/tests/integration/targets/mqtt/aliases @@ -1,4 +1,3 @@ -notification/mqtt shippable/posix/group1 skip/aix skip/osx diff --git a/tests/sanity/extra/aliases.json b/tests/sanity/extra/aliases.json new file mode 100644 index 0000000000..dabdcd6a1d --- /dev/null +++ b/tests/sanity/extra/aliases.json @@ -0,0 +1,11 @@ +{ + "include_symlinks": false, + "prefixes": [ + ".azure-pipelines/azure-pipelines.yml", + "tests/integration/targets/" + ], + "output": "path-message", + "requirements": [ + "PyYAML" + ] +} diff --git a/tests/sanity/extra/aliases.py b/tests/sanity/extra/aliases.py new file mode 100755 index 0000000000..8791238f5f --- /dev/null +++ b/tests/sanity/extra/aliases.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Check extra collection docs with antsibull-lint.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + +import yaml + + +def main(): + """Main entry point.""" + paths = sys.argv[1:] or sys.stdin.read().splitlines() + paths = [path for path in paths if path.endswith('/aliases')] + + with open('.azure-pipelines/azure-pipelines.yml', 'rb') as f: + azp = yaml.safe_load(f) + + allowed_targets = set(['shippable/cloud/group1']) + for stage in azp['stages']: + if stage['stage'].startswith(('Sanity', 'Unit', 'Cloud', 'Summary')): + continue + for job in stage['jobs']: + for group in 
job['parameters']['groups']: + allowed_targets.add('shippable/posix/group{0}'.format(group)) + + for path in paths: + targets = [] + skip = False + with open(path, 'r') as f: + for line in f: + if '#' in line: + line = line[:line.find('#')] + line = line.strip() + if line.startswith('needs/'): + continue + if line.startswith('skip/'): + continue + if line.startswith('cloud/'): + continue + if line in ('unsupported', 'disabled', 'hidden'): + skip = True + if line in ('destructive', ): + continue + if '/' not in line: + continue + targets.append(line) + if skip: + continue + if not targets: + if 'targets/setup_' in path: + continue + print('%s: %s' % (path, 'found no targets')) + for target in targets: + if target not in allowed_targets: + print('%s: %s' % (path, 'found invalid target "{0}"'.format(target))) + + +if __name__ == '__main__': + main() From 7734430f23a8c2472583543d4e4919aa37bf632f Mon Sep 17 00:00:00 2001 From: Werner Dijkerman Date: Sat, 17 Jul 2021 08:49:09 +0200 Subject: [PATCH 0205/2828] Added module for creating protected branches (#2781) * Added module for creating protected branches * Applied some changes due to comments and added a test that currently fails * Changing no_access to nobody due to comment on PR * Changing the description to clarify it a bit more * Added working tests for module 'gitlab_protected_branch' * Fixing lint issues * Added doc that minimum of v2.3.0 is needed to work correctly * Fixed the requirements notation * Check the version of the module * Hopefully fixed the tests by skipping it when lower version of 2.3.0 is installed * Fix lint issues * Applying changes due to comments in PR * Remove commented code * Removing the trailing dot ... 
Co-authored-by: jenkins-x-bot Co-authored-by: Werner Dijkerman --- plugins/modules/gitlab_protected_branch.py | 1 + .../gitlab/gitlab_protected_branch.py | 201 ++++++++++++++++++ .../modules/source_control/gitlab/gitlab.py | 38 +++- .../gitlab/test_gitlab_protected_branch.py | 81 +++++++ 4 files changed, 319 insertions(+), 2 deletions(-) create mode 120000 plugins/modules/gitlab_protected_branch.py create mode 100644 plugins/modules/source_control/gitlab/gitlab_protected_branch.py create mode 100644 tests/unit/plugins/modules/source_control/gitlab/test_gitlab_protected_branch.py diff --git a/plugins/modules/gitlab_protected_branch.py b/plugins/modules/gitlab_protected_branch.py new file mode 120000 index 0000000000..7af5b500ce --- /dev/null +++ b/plugins/modules/gitlab_protected_branch.py @@ -0,0 +1 @@ +source_control/gitlab/gitlab_protected_branch.py \ No newline at end of file diff --git a/plugins/modules/source_control/gitlab/gitlab_protected_branch.py b/plugins/modules/source_control/gitlab/gitlab_protected_branch.py new file mode 100644 index 0000000000..f61f2b9fa1 --- /dev/null +++ b/plugins/modules/source_control/gitlab/gitlab_protected_branch.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: gitlab_protected_branch +short_description: (un)Marking existing branches for protection +version_added: 3.4.0 +description: + - (un)Marking existing branches for protection. +author: + - "Werner Dijkerman (@dj-wasabi)" +requirements: + - python >= 2.7 + - python-gitlab >= 2.3.0 +extends_documentation_fragment: +- community.general.auth_basic + +options: + state: + description: + - Create or delete proteced branch. 
+ default: present + type: str + choices: ["present", "absent"] + api_token: + description: + - GitLab access token with API permissions. + required: true + type: str + project: + description: + - The path and name of the project. + required: true + type: str + name: + description: + - The name of the branch that needs to be protected. + - Can make use a wildcard charachter for like C(production/*) or just have C(main) or C(develop) as value. + required: true + type: str + merge_access_levels: + description: + - Access levels allowed to merge. + default: maintainer + type: str + choices: ["maintainer", "developer", "nobody"] + push_access_level: + description: + - Access levels allowed to push. + default: maintainer + type: str + choices: ["maintainer", "developer", "nobody"] +''' + + +EXAMPLES = ''' +- name: Create protected branch on main + community.general.gitlab_protected_branch: + api_url: https://gitlab.com + api_token: secret_access_token + project: "dj-wasabi/collection.general" + name: main + merge_access_levels: maintainer + push_access_level: nobody + +''' + +RETURN = ''' +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.api import basic_auth_argument_spec +from distutils.version import LooseVersion + +GITLAB_IMP_ERR = None +try: + import gitlab + HAS_GITLAB_PACKAGE = True +except Exception: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_GITLAB_PACKAGE = False + +from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication + + +class GitlabProtectedBranch(object): + + def __init__(self, module, project, gitlab_instance): + self.repo = gitlab_instance + self._module = module + self.project = self.get_project(project) + self.ACCESS_LEVEL = { + 'nobody': gitlab.NO_ACCESS, + 'developer': gitlab.DEVELOPER_ACCESS, + 'maintainer': gitlab.MAINTAINER_ACCESS + } + + def get_project(self, project_name): + return 
self.repo.projects.get(project_name) + + def protected_branch_exist(self, name): + try: + return self.project.protectedbranches.get(name) + except Exception as e: + return False + + def create_protected_branch(self, name, merge_access_levels, push_access_level): + if self._module.check_mode: + return True + merge = self.ACCESS_LEVEL[merge_access_levels] + push = self.ACCESS_LEVEL[push_access_level] + self.project.protectedbranches.create({ + 'name': name, + 'merge_access_level': merge, + 'push_access_level': push + }) + + def compare_protected_branch(self, name, merge_access_levels, push_access_level): + configured_merge = self.ACCESS_LEVEL[merge_access_levels] + configured_push = self.ACCESS_LEVEL[push_access_level] + current = self.protected_branch_exist(name=name) + current_merge = current.merge_access_levels[0]['access_level'] + current_push = current.push_access_levels[0]['access_level'] + if current: + if current.name == name and current_merge == configured_merge and current_push == configured_push: + return True + return False + + def delete_protected_branch(self, name): + if self._module.check_mode: + return True + return self.project.protectedbranches.delete(name) + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update( + api_token=dict(type='str', required=True, no_log=True), + project=dict(type='str', required=True), + name=dict(type='str', required=True), + merge_access_levels=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), + push_access_level=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_password', 'api_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token'] + ], + 
supports_check_mode=True + ) + + project = module.params['project'] + name = module.params['name'] + merge_access_levels = module.params['merge_access_levels'] + push_access_level = module.params['push_access_level'] + state = module.params['state'] + + if not HAS_GITLAB_PACKAGE: + module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + + gitlab_version = gitlab.__version__ + if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): + module.fail_json(msg="community.general.gitlab_proteched_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." + " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version) + + gitlab_instance = gitlabAuthentication(module) + this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance) + + p_branch = this_gitlab.protected_branch_exist(name=name) + if not p_branch and state == "present": + this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level) + module.exit_json(changed=True, msg="Created the proteched branch.") + elif p_branch and state == "present": + if not this_gitlab.compare_protected_branch(name, merge_access_levels, push_access_level): + this_gitlab.delete_protected_branch(name=name) + this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level) + module.exit_json(changed=True, msg="Recreated the proteched branch.") + elif p_branch and state == "absent": + this_gitlab.delete_protected_branch(name=name) + module.exit_json(changed=True, msg="Deleted the proteched branch.") + module.exit_json(changed=False, msg="No changes are needed.") + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/source_control/gitlab/gitlab.py b/tests/unit/plugins/modules/source_control/gitlab/gitlab.py index eb8099d37b..5feff78b43 100644 --- 
a/tests/unit/plugins/modules/source_control/gitlab/gitlab.py +++ b/tests/unit/plugins/modules/source_control/gitlab/gitlab.py @@ -13,7 +13,7 @@ from httmock import urlmatch # noqa from ansible_collections.community.general.tests.unit.compat import unittest -from gitlab import Gitlab +import gitlab class FakeAnsibleModule(object): @@ -33,7 +33,7 @@ class GitlabModuleTestCase(unittest.TestCase): self.mock_module = FakeAnsibleModule() - self.gitlab_instance = Gitlab("http://localhost", private_token="private_token", api_version=4) + self.gitlab_instance = gitlab.Gitlab("http://localhost", private_token="private_token", api_version=4) # Python 2.7+ is needed for python-gitlab @@ -45,6 +45,14 @@ def python_version_match_requirement(): return sys.version_info >= GITLAB_MINIMUM_PYTHON_VERSION +def python_gitlab_module_version(): + return gitlab.__version__ + + +def python_gitlab_version_match_requirement(): + return "2.3.0" + + # Skip unittest test case if python version don't match requirement def unitest_python_version_check_requirement(unittest_testcase): if not python_version_match_requirement(): @@ -467,6 +475,32 @@ def resp_delete_project(url, request): return response(204, content, headers, None, 5, request) +@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="get") +def resp_get_protected_branch(url, request): + headers = {'content-type': 'application/json'} + content = ('{"id": 1, "name": "master", "push_access_levels": [{"access_level": 40, "access_level_description": "Maintainers"}],' + '"merge_access_levels": [{"access_level": 40, "access_level_description": "Maintainers"}],' + '"allow_force_push":false, "code_owner_approval_required": false}') + content = content.encode("utf-8") + return response(200, content, headers, None, 5, request) + + +@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="get") +def resp_get_protected_branch_not_exist(url, request): 
+ headers = {'content-type': 'application/json'} + content = ('') + content = content.encode("utf-8") + return response(404, content, headers, None, 5, request) + + +@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="delete") +def resp_delete_protected_branch(url, request): + headers = {'content-type': 'application/json'} + content = ('') + content = content.encode("utf-8") + return response(204, content, headers, None, 5, request) + + ''' HOOK API ''' diff --git a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_protected_branch.py b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_protected_branch.py new file mode 100644 index 0000000000..026efb19d8 --- /dev/null +++ b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_protected_branch.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +from distutils.version import LooseVersion + +from ansible_collections.community.general.plugins.modules.source_control.gitlab.gitlab_protected_branch import GitlabProtectedBranch + + +def _dummy(x): + """Dummy function. 
Only used as a placeholder for toplevel definitions when the test is going + to be skipped anyway""" + return x + + +pytestmark = [] +try: + from .gitlab import (GitlabModuleTestCase, + python_version_match_requirement, python_gitlab_module_version, + python_gitlab_version_match_requirement, + resp_get_protected_branch, resp_get_project_by_name, + resp_get_protected_branch_not_exist, + resp_delete_protected_branch, resp_get_user) + + # GitLab module requirements + if python_version_match_requirement(): + from gitlab.v4.objects import Project + gitlab_req_version = python_gitlab_version_match_requirement() + gitlab_module_version = python_gitlab_module_version() + if LooseVersion(gitlab_module_version) < LooseVersion(gitlab_req_version): + pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing (Wrong version)")) +except ImportError: + pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing")) + +# Unit tests requirements +try: + from httmock import with_httmock # noqa +except ImportError: + pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing")) + with_httmock = _dummy + + +class TestGitlabProtectedBranch(GitlabModuleTestCase): + @with_httmock(resp_get_project_by_name) + @with_httmock(resp_get_user) + def setUp(self): + super(TestGitlabProtectedBranch, self).setUp() + + self.gitlab_instance.user = self.gitlab_instance.users.get(1) + self.moduleUtil = GitlabProtectedBranch(module=self.mock_module, project="foo-bar/diaspora-client", gitlab_instance=self.gitlab_instance) + + @with_httmock(resp_get_protected_branch) + def test_protected_branch_exist(self): + rvalue = self.moduleUtil.protected_branch_exist(name="master") + self.assertEqual(rvalue.name, "master") + + @with_httmock(resp_get_protected_branch_not_exist) + def test_protected_branch_exist_not_exist(self): + rvalue = self.moduleUtil.protected_branch_exist(name="master") + self.assertEqual(rvalue, False) + + 
@with_httmock(resp_get_protected_branch) + def test_compare_protected_branch(self): + rvalue = self.moduleUtil.compare_protected_branch(name="master", merge_access_levels="maintainer", push_access_level="maintainer") + self.assertEqual(rvalue, True) + + @with_httmock(resp_get_protected_branch) + def test_compare_protected_branch_different_settings(self): + rvalue = self.moduleUtil.compare_protected_branch(name="master", merge_access_levels="developer", push_access_level="maintainer") + self.assertEqual(rvalue, False) + + @with_httmock(resp_get_protected_branch) + @with_httmock(resp_delete_protected_branch) + def test_delete_protected_branch(self): + rvalue = self.moduleUtil.delete_protected_branch(name="master") + self.assertEqual(rvalue, None) From 7b9687f75885f168de52f935dc36f1d35b8b7ed7 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 19 Jul 2021 03:36:59 +0200 Subject: [PATCH 0206/2828] Fix snap's channel option. (#3028) --- changelogs/fragments/3028-snap-channel.yml | 2 ++ plugins/modules/packaging/os/snap.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3028-snap-channel.yml diff --git a/changelogs/fragments/3028-snap-channel.yml b/changelogs/fragments/3028-snap-channel.yml new file mode 100644 index 0000000000..c3aea4b5a0 --- /dev/null +++ b/changelogs/fragments/3028-snap-channel.yml @@ -0,0 +1,2 @@ +bugfixes: +- "snap - fix formatting of ``--channel`` argument when the ``channel`` option is used (https://github.com/ansible-collections/community.general/pull/3028)." 
diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py index 6da8b0e766..de6fedccdc 100644 --- a/plugins/modules/packaging/os/snap.py +++ b/plugins/modules/packaging/os/snap.py @@ -145,7 +145,7 @@ class Snap(CmdStateModuleHelper): actionable_snaps=dict(fmt=lambda v: v), state=dict(fmt=_state_map), classic=dict(fmt="--classic", style=ArgFormat.BOOLEAN), - channel=dict(fmt=lambda v: [] if v == 'stable' else ['--channel', '{0}']), + channel=dict(fmt=lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)]), ) check_rc = False From 9fd2ba60df27b3ae2b194f4e0372c438957be572 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Mon, 19 Jul 2021 02:14:23 -0400 Subject: [PATCH 0207/2828] archive - staging idempotency fix (#2987) * Initial Commit * Fixing PY26 filter * Adding changelog fragment * Removing checksum related code * Removing list comparisons due to Jinja errors * Applying review suggestions * Applying review suggestions - typos --- .../2987-archive-stage-idempotency-fix.yml | 4 + plugins/modules/files/archive.py | 44 +++--- .../targets/archive/tasks/main.yml | 7 + .../targets/archive/tests/core.yml | 2 +- .../targets/archive/tests/idempotency.yml | 141 ++++++++++++++++++ 5 files changed, 179 insertions(+), 19 deletions(-) create mode 100644 changelogs/fragments/2987-archive-stage-idempotency-fix.yml create mode 100644 tests/integration/targets/archive/tests/idempotency.yml diff --git a/changelogs/fragments/2987-archive-stage-idempotency-fix.yml b/changelogs/fragments/2987-archive-stage-idempotency-fix.yml new file mode 100644 index 0000000000..5c9e980935 --- /dev/null +++ b/changelogs/fragments/2987-archive-stage-idempotency-fix.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - archive - refactoring prior to fix for idempotency checks. The fix will be a breaking change and only appear + in community.general 4.0.0 (https://github.com/ansible-collections/community.general/pull/2987). 
diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 822ea1cd9d..91a8f688f5 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -298,6 +298,8 @@ class Archive(object): msg='Error, must specify "dest" when archiving multiple files or trees' ) + self.original_size = self.destination_size() + def add(self, path, archive_name): try: self._add(_to_native_ascii(path), _to_native(archive_name)) @@ -315,7 +317,7 @@ class Archive(object): self.destination_state = STATE_ARCHIVED else: try: - f_out = self._open_compressed_file(_to_native_ascii(self.destination)) + f_out = self._open_compressed_file(_to_native_ascii(self.destination), 'wb') with open(path, 'rb') as f_in: shutil.copyfileobj(f_in, f_out) f_out.close() @@ -368,9 +370,15 @@ class Archive(object): msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors)) ) + def compare_with_original(self): + self.changed |= self.original_size != self.destination_size() + def destination_exists(self): return self.destination and os.path.exists(self.destination) + def destination_readable(self): + return self.destination and os.access(self.destination, os.R_OK) + def destination_size(self): return os.path.getsize(self.destination) if self.destination_exists() else 0 @@ -407,6 +415,15 @@ class Archive(object): def has_unfound_targets(self): return bool(self.not_found) + def remove_single_target(self, path): + try: + os.remove(path) + except OSError as e: + self.module.fail_json( + path=_to_native(path), + msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc() + ) + def remove_targets(self): for path in self.successes: if os.path.exists(path): @@ -453,14 +470,14 @@ class Archive(object): 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths], } - def _open_compressed_file(self, path): + def _open_compressed_file(self, path, mode): f = None if self.format == 'gz': - f = 
gzip.open(path, 'wb') + f = gzip.open(path, mode) elif self.format == 'bz2': - f = bz2.BZ2File(path, 'wb') + f = bz2.BZ2File(path, mode) elif self.format == 'xz': - f = lzma.LZMAFile(path, 'wb') + f = lzma.LZMAFile(path, mode) else: self.module.fail_json(msg="%s is not a valid format" % self.format) @@ -542,7 +559,7 @@ class TarArchive(Archive): return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo def py26_filter(path): - return matches_exclusion_patterns(path, self.exclusion_patterns) + return legacy_filter(path, self.exclusion_patterns) if PY27: self.file.add(path, archive_name, recursive=False, filter=py27_filter) @@ -580,7 +597,6 @@ def main(): check_mode = module.check_mode archive = get_archive(module) - size = archive.destination_size() archive.find_targets() if not archive.has_targets(): @@ -592,10 +608,9 @@ def main(): else: archive.add_targets() archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED + archive.compare_with_original() if archive.remove: archive.remove_targets() - if archive.destination_size() != size: - archive.changed = True else: if check_mode: if not archive.destination_exists(): @@ -603,16 +618,9 @@ def main(): else: path = archive.paths[0] archive.add_single_target(path) - if archive.destination_size() != size: - archive.changed = True + archive.compare_with_original() if archive.remove: - try: - os.remove(path) - except OSError as e: - module.fail_json( - path=_to_native(path), - msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc() - ) + archive.remove_single_target(path) if archive.destination_exists(): archive.update_permissions() diff --git a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml index e0757b0ead..1e2c9f9c27 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -121,6 +121,13 @@ loop_control: 
loop_var: format +- name: Run Idempotency tests + include_tasks: + file: ../tests/idempotency.yml + loop: "{{ formats }}" + loop_control: + loop_var: format + # Test cleanup - name: Remove backports.lzma if previously installed (pip) pip: name=backports.lzma state=absent diff --git a/tests/integration/targets/archive/tests/core.yml b/tests/integration/targets/archive/tests/core.yml index f12e5083cc..d008e9c122 100644 --- a/tests/integration/targets/archive/tests/core.yml +++ b/tests/integration/targets/archive/tests/core.yml @@ -41,7 +41,7 @@ - archive_no_options is changed - "archive_no_options.dest_state == 'archive'" - "{{ archive_no_options.archived | length }} == 3" - - + - name: Remove the archive - no options ({{ format }}) file: path: "{{ output_dir }}/archive_no_options.{{ format }}" diff --git a/tests/integration/targets/archive/tests/idempotency.yml b/tests/integration/targets/archive/tests/idempotency.yml new file mode 100644 index 0000000000..f53f768164 --- /dev/null +++ b/tests/integration/targets/archive/tests/idempotency.yml @@ -0,0 +1,141 @@ +--- +- name: Archive - file content idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + format: "{{ format }}" + register: file_content_idempotency_before + +- name: Modify file - file content idempotency ({{ format }}) + lineinfile: + line: bar.txt + regexp: "^foo.txt$" + path: "{{ output_dir }}/foo.txt" + +- name: Archive second time - file content idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + format: "{{ format }}" + register: file_content_idempotency_after + +# After idempotency fix result will be reliably changed for all formats +- name: Assert task status is changed - file content idempotency ({{ format }}) + assert: + that: + - file_content_idempotency_after is not changed + when: "format in ('tar', 'zip')" + +- 
name: Remove archive - file content idempotency ({{ format }}) + file: + path: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + state: absent + +- name: Modify file back - file content idempotency ({{ format }}) + lineinfile: + line: foo.txt + regexp: "^bar.txt$" + path: "{{ output_dir }}/foo.txt" + +- name: Archive - file name idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + format: "{{ format }}" + register: file_name_idempotency_before + +- name: Rename file - file name idempotency ({{ format }}) + command: "mv {{ output_dir}}/foo.txt {{ output_dir }}/fii.txt" + +- name: Archive again - file name idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + format: "{{ format }}" + register: file_name_idempotency_after + +# After idempotency fix result will be reliably changed for all formats +- name: Check task status - file name idempotency ({{ format }}) + assert: + that: + - file_name_idempotency_after is not changed + when: "format in ('tar', 'zip')" + +- name: Remove archive - file name idempotency ({{ format }}) + file: + path: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + state: absent + +- name: Rename file back - file name idempotency ({{ format }}) + command: "mv {{ output_dir }}/fii.txt {{ output_dir }}/foo.txt" + +- name: Archive - single file content idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + format: "{{ format }}" + register: single_file_content_idempotency_before + +- name: Modify file - single file content idempotency ({{ format }}) + lineinfile: + line: bar.txt + regexp: "^foo.txt$" + path: "{{ output_dir }}/foo.txt" + +- name: Archive second time - single file content idempotency ({{ format }}) + archive: + path: "{{ 
output_dir }}/foo.txt" + dest: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + format: "{{ format }}" + register: single_file_content_idempotency_after + +# After idempotency fix result will be reliably changed for all formats +- name: Assert task status is changed - single file content idempotency ({{ format }}) + assert: + that: + - single_file_content_idempotency_after is not changed + when: "format in ('tar', 'zip')" + +- name: Remove archive - single file content idempotency ({{ format }}) + file: + path: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + state: absent + +- name: Modify file back - single file content idempotency ({{ format }}) + lineinfile: + line: foo.txt + regexp: "^bar.txt$" + path: "{{ output_dir }}/foo.txt" + +- name: Archive - single file name idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + format: "{{ format }}" + register: single_file_name_idempotency_before + +- name: Rename file - single file name idempotency ({{ format }}) + command: "mv {{ output_dir}}/foo.txt {{ output_dir }}/fii.txt" + +- name: Archive again - single file name idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/fii.txt" + dest: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + format: "{{ format }}" + register: single_file_name_idempotency_after + + +# After idempotency fix result will be reliably changed for all formats +- name: Check task status - single file name idempotency ({{ format }}) + assert: + that: + - single_file_name_idempotency_after is not changed + when: "format in ('tar', 'zip')" + +- name: Remove archive - single file name idempotency ({{ format }}) + file: + path: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + state: absent + +- name: Rename file back - single file name idempotency ({{ format }}) + command: "mv {{ output_dir }}/fii.txt 
{{ output_dir }}/foo.txt" From a3607a745e4856117173b9115de65336d4175a4b Mon Sep 17 00:00:00 2001 From: suukit Date: Mon, 19 Jul 2021 11:52:32 +0200 Subject: [PATCH 0208/2828] Feature/gitlab project configuration (#3002) * added - only_allow_merge_if_all_discussions_are_resolved - only_allow_merge_if_all_discussions_are_resolved - only_allow_merge_if_pipeline_succeeds - only_allow_merge_if_pipeline_succeeds - packages_enabled - remove_source_branch_after_merge - squash_option * minor fix * added changelog * Fixedlinter findings * changed version_added to 3.4 -> check requires to do so * Update changelogs/fragments/3001-enhance_gitlab_module.yml Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * rework due to review of felixfontein: - changed option description to full sentences - change default behaviour of new properties * Requested changes Co-authored-by: Max Bidlingmaier Co-authored-by: Felix Fontein --- .../fragments/3001-enhance_gitlab_module.yml | 2 + .../source_control/gitlab/gitlab_project.py | 72 ++++++++++++++++++- 2 files changed, 73 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3001-enhance_gitlab_module.yml diff --git a/changelogs/fragments/3001-enhance_gitlab_module.yml b/changelogs/fragments/3001-enhance_gitlab_module.yml new file mode 100644 index 0000000000..e39985530e --- /dev/null +++ b/changelogs/fragments/3001-enhance_gitlab_module.yml @@ -0,0 +1,2 @@ +minor_changes: + - 
gitlab_project - add new options ``allow_merge_on_skipped_pipeline``, ``only_allow_merge_if_all_discussions_are_resolved``, ``only_allow_merge_if_pipeline_succeeds``, ``packages_enabled``, ``remove_source_branch_after_merge``, ``squash_option`` (https://github.com/ansible-collections/community.general/pull/3002). diff --git a/plugins/modules/source_control/gitlab/gitlab_project.py b/plugins/modules/source_control/gitlab/gitlab_project.py index c916246b78..b3a6ca2064 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project.py +++ b/plugins/modules/source_control/gitlab/gitlab_project.py @@ -114,6 +114,38 @@ options: - Used to create a personal project under a user's name. type: str version_added: "3.3.0" + allow_merge_on_skipped_pipeline: + description: + - Allow merge when skipped pipelines exist. + type: bool + version_added: "3.4.0" + only_allow_merge_if_all_discussions_are_resolved: + description: + - All discussions on a merge request (MR) have to be resolved. + type: bool + version_added: "3.4.0" + only_allow_merge_if_pipeline_succeeds: + description: + - Only allow merges if pipeline succeeded. + type: bool + version_added: "3.4.0" + packages_enabled: + description: + - Enable GitLab package repository. + type: bool + version_added: "3.4.0" + remove_source_branch_after_merge: + description: + - Remove the source branch after merge. + type: bool + version_added: "3.4.0" + squash_option: + description: + - Squash commits when merging. 
+ type: str + choices: ["never", "always", "default_off", "default_on"] + version_added: "3.4.0" + ''' EXAMPLES = r''' @@ -214,6 +246,12 @@ class GitLabProject(object): 'snippets_enabled': options['snippets_enabled'], 'visibility': options['visibility'], 'lfs_enabled': options['lfs_enabled'], + 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'], + 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'], + 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'], + 'packages_enabled': options['packages_enabled'], + 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], + 'squash_option': options['squash_option'], } # Because we have already call userExists in main() if self.projectObject is None: @@ -221,6 +259,7 @@ class GitLabProject(object): 'path': options['path'], 'import_url': options['import_url'], }) + project_options = self.getOptionsWithValue(project_options) project = self.createProject(namespace, project_options) changed = True else: @@ -254,6 +293,17 @@ class GitLabProject(object): return project + ''' + @param arguments Attributes of the project + ''' + def getOptionsWithValue(self, arguments): + ret_arguments = dict() + for arg_key, arg_value in arguments.items(): + if arguments[arg_key] is not None: + ret_arguments[arg_key] = arg_value + + return ret_arguments + ''' @param project Project Object @param arguments Attributes of the project @@ -308,6 +358,12 @@ def main(): state=dict(type='str', default="present", choices=["absent", "present"]), lfs_enabled=dict(default=False, type='bool'), username=dict(type='str'), + allow_merge_on_skipped_pipeline=dict(type='bool'), + only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), + only_allow_merge_if_pipeline_succeeds=dict(type='bool'), + packages_enabled=dict(type='bool'), + remove_source_branch_after_merge=dict(type='bool'), + 
squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), )) module = AnsibleModule( @@ -340,6 +396,12 @@ def main(): state = module.params['state'] lfs_enabled = module.params['lfs_enabled'] username = module.params['username'] + allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline'] + only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved'] + only_allow_merge_if_pipeline_succeeds = module.params['only_allow_merge_if_pipeline_succeeds'] + packages_enabled = module.params['packages_enabled'] + remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] + squash_option = module.params['squash_option'] if not HAS_GITLAB_PACKAGE: module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) @@ -386,6 +448,7 @@ def main(): module.exit_json(changed=False, msg="Project deleted or does not exists") if state == 'present': + if gitlab_project.createOrUpdateProject(project_name, namespace, { "path": project_path, "description": project_description, @@ -396,7 +459,14 @@ def main(): "snippets_enabled": snippets_enabled, "visibility": visibility, "import_url": import_url, - "lfs_enabled": lfs_enabled}): + "lfs_enabled": lfs_enabled, + "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, + "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved, + "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds, + "packages_enabled": packages_enabled, + "remove_source_branch_after_merge": remove_source_branch_after_merge, + "squash_option": squash_option, + }): module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs) module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.projectObject._attrs) From 
d7c6ba89f89cf7bf56a2a798d30f1c450d5a6611 Mon Sep 17 00:00:00 2001 From: Laurent Paumier <30328363+laurpaum@users.noreply.github.com> Date: Mon, 19 Jul 2021 23:17:39 +0200 Subject: [PATCH 0209/2828] Add Keycloak roles module (#2930) * implement simple realm and client role * fix documentation * code cleanup * separate realm and client roles functions * remove blank lines * add tests * fix linefeeds * fix indentation * fix error message * fix documentation * fix documentation * keycloak_role integration tests * keycloak_role integration tests * remove extra blank line * add version_added tag Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../identity/keycloak/keycloak.py | 195 +++++++++- .../identity/keycloak/keycloak_role.py | 363 ++++++++++++++++++ plugins/modules/keycloak_role.py | 1 + .../integration/targets/keycloak_role/aliases | 1 + .../targets/keycloak_role/tasks/main.yml | 246 ++++++++++++ .../targets/keycloak_role/vars/main.yml | 10 + .../identity/keycloak/test_keycloak_role.py | 326 ++++++++++++++++ 7 files changed, 1141 insertions(+), 1 deletion(-) create mode 100644 plugins/modules/identity/keycloak/keycloak_role.py create mode 120000 plugins/modules/keycloak_role.py create mode 100644 tests/integration/targets/keycloak_role/aliases create mode 100644 tests/integration/targets/keycloak_role/tasks/main.yml create mode 100644 tests/integration/targets/keycloak_role/vars/main.yml create mode 100644 tests/unit/plugins/modules/identity/keycloak/test_keycloak_role.py diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index b11289a634..8521650f16 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -43,8 +43,14 @@ URL_REALM = "{url}/admin/realms/{realm}" URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token" URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}" URL_CLIENTS = 
"{url}/admin/realms/{realm}/clients" + URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles" +URL_CLIENT_ROLE = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}" +URL_CLIENT_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}/composites" + URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles" +URL_REALM_ROLE = "{url}/admin/realms/{realm}/roles/{name}" +URL_REALM_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/roles/{name}/composites" URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}" URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates" @@ -632,10 +638,197 @@ class KeycloakAPI(object): try: return open_url(group_url, method='DELETE', headers=self.restheaders, validate_certs=self.validate_certs) - except Exception as e: self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e))) + def get_realm_roles(self, realm='master'): + """ Obtains role representations for roles in a realm + + :param realm: realm to be queried + :return: list of dicts of role representations + """ + rolelist_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) + try: + return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for realm %s: %s' + % (realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain list of roles for realm %s: %s' + % (realm, str(e))) + + def get_realm_role(self, name, realm='master'): + """ Fetch a keycloak role from the provided realm using the role's name. + + If the role does not exist, None is returned. + :param name: Name of the role to fetch. + :param realm: Realm in which the role resides; default 'master'. 
+ """ + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=name) + try: + return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' + % (name, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' + % (name, realm, str(e))) + + def create_realm_role(self, rolerep, realm='master'): + """ Create a Keycloak realm role. + + :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name. + :return: HTTPResponse object on success + """ + roles_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) + try: + return open_url(roles_url, method='POST', headers=self.restheaders, + data=json.dumps(rolerep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not create role %s in realm %s: %s' + % (rolerep['name'], realm, str(e))) + + def update_realm_role(self, rolerep, realm='master'): + """ Update an existing realm role. + + :param rolerep: A RoleRepresentation of the updated role. + :return HTTPResponse object on success + """ + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=rolerep['name']) + try: + return open_url(role_url, method='PUT', headers=self.restheaders, + data=json.dumps(rolerep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not update role %s in realm %s: %s' + % (rolerep['name'], realm, str(e))) + + def delete_realm_role(self, name, realm='master'): + """ Delete a realm role. + + :param name: The name of the role. + :param realm: The realm in which this role resides, default "master". 
+ """ + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=name) + try: + return open_url(role_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Unable to delete role %s in realm %s: %s' + % (name, realm, str(e))) + + def get_client_roles(self, clientid, realm='master'): + """ Obtains role representations for client roles in a specific client + + :param clientid: Client id to be queried + :param realm: Realm to be queried + :return: List of dicts of role representations + """ + cid = self.get_client_id(clientid, realm=realm) + if cid is None: + self.module.fail_json(msg='Could not find client %s in realm %s' + % (clientid, realm)) + rolelist_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) + try: + return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for client %s in realm %s: %s' + % (clientid, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain list of roles for client %s in realm %s: %s' + % (clientid, realm, str(e))) + + def get_client_role(self, name, clientid, realm='master'): + """ Fetch a keycloak client role from the provided realm using the role's name. + + :param name: Name of the role to fetch. + :param clientid: Client id for the client role + :param realm: Realm in which the role resides + :return: Dict of role representation + If the role does not exist, None is returned. 
+ """ + cid = self.get_client_id(clientid, realm=realm) + if cid is None: + self.module.fail_json(msg='Could not find client %s in realm %s' + % (clientid, realm)) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=name) + try: + return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg='Could not fetch role %s in client %s of realm %s: %s' + % (name, clientid, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not fetch role %s for client %s in realm %s: %s' + % (name, clientid, realm, str(e))) + + def create_client_role(self, rolerep, clientid, realm='master'): + """ Create a Keycloak client role. + + :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name. + :param clientid: Client id for the client role + :param realm: Realm in which the role resides + :return: HTTPResponse object on success + """ + cid = self.get_client_id(clientid, realm=realm) + if cid is None: + self.module.fail_json(msg='Could not find client %s in realm %s' + % (clientid, realm)) + roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) + try: + return open_url(roles_url, method='POST', headers=self.restheaders, + data=json.dumps(rolerep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not create role %s for client %s in realm %s: %s' + % (rolerep['name'], clientid, realm, str(e))) + + def update_client_role(self, rolerep, clientid, realm="master"): + """ Update an existing client role. + + :param rolerep: A RoleRepresentation of the updated role. 
+ :param clientid: Client id for the client role + :param realm: Realm in which the role resides + :return HTTPResponse object on success + """ + cid = self.get_client_id(clientid, realm=realm) + if cid is None: + self.module.fail_json(msg='Could not find client %s in realm %s' + % (clientid, realm)) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=rolerep['name']) + try: + return open_url(role_url, method='PUT', headers=self.restheaders, + data=json.dumps(rolerep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not update role %s for client %s in realm %s: %s' + % (rolerep['name'], clientid, realm, str(e))) + + def delete_client_role(self, name, clientid, realm="master"): + """ Delete a client role by name. + + :param name: The name of the role. + :param clientid: Client id for the client role + :param realm: Realm in which the role resides + """ + cid = self.get_client_id(clientid, realm=realm) + if cid is None: + self.module.fail_json(msg='Could not find client %s in realm %s' + % (clientid, realm)) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=name) + try: + return open_url(role_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Unable to delete role %s for client %s in realm %s: %s' + % (name, clientid, realm, str(e))) + def get_authentication_flow_by_alias(self, alias, realm='master'): """ Get an authentication flow by it's alias diff --git a/plugins/modules/identity/keycloak/keycloak_role.py b/plugins/modules/identity/keycloak/keycloak_role.py new file mode 100644 index 0000000000..23ed7cfeed --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_role.py @@ -0,0 +1,363 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Adam Goossens +# GNU General Public License v3.0+ (see COPYING or
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_role + +short_description: Allows administration of Keycloak roles via Keycloak API + +version_added: 3.4.0 + +description: + - This module allows you to add, remove or modify Keycloak roles via the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will + be returned that way by this module. You may pass single values for attributes when calling the module, + and this will be translated into a list suitable for the API. + + +options: + state: + description: + - State of the role. + - On C(present), the role will be created if it does not yet exist, or updated with the parameters you provide. + - On C(absent), the role will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + name: + type: str + required: true + description: + - Name of the role. + - This parameter is required. + + description: + type: str + description: + - The role description. + + realm: + type: str + description: + - The Keycloak realm under which this role resides. + default: 'master' + + client_id: + type: str + description: + - If the role is a client role, the client id under which it resides. + - If this parameter is absent, the role is considered a realm role. 
+ + attributes: + type: dict + description: + - A dict of key/value pairs to set as custom attributes for the role. + - Values may be single values (e.g. a string) or a list of strings. + +extends_documentation_fragment: +- community.general.keycloak + + +author: + - Laurent Paumier (@laurpaum) +''' + +EXAMPLES = ''' +- name: Create a Keycloak realm role, authentication with credentials + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a Keycloak realm role, authentication with token + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + +- name: Create a Keycloak client role + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + client_id: MyClient + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Delete a Keycloak role + community.general.keycloak_role: + name: my-role-for-deletion + state: absent + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a keycloak role with some custom attributes + community.general.keycloak_role: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + name: my-new-role + attributes: + attrib1: value1 + attrib2: value2 + attrib3: + - with + - numerous + - individual + - list + - items + delegate_to: localhost +''' + 
+RETURN = ''' +msg: + description: Message as to what action was taken + returned: always + type: str + sample: "Role myrole has been updated" + +proposed: + description: Role representation of proposed changes to role + returned: always + type: dict + sample: { + "description": "My updated test description" + } +existing: + description: Role representation of existing role + returned: always + type: dict + sample: { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" + } +end_state: + description: Role representation of role after module execution (sample is truncated) + returned: always + type: dict + sample: { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My updated client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + description=dict(type='str'), + realm=dict(type='str', default='master'), + client_id=dict(type='str'), + attributes=dict(type='dict'), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, 
existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + clientid = module.params.get('client_id') + name = module.params.get('name') + state = module.params.get('state') + + # attributes in Keycloak have their values returned as lists + # via the API. attributes is a dict, so we'll transparently convert + # the values to lists. + if module.params.get('attributes') is not None: + for key, val in module.params['attributes'].items(): + module.params['attributes'][key] = [val] if not isinstance(val, list) else val + + # convert module parameters to client representation parameters (if they belong in there) + role_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id', 'composites'] and + module.params.get(x) is not None] + + # does the role already exist? + if clientid is None: + before_role = kc.get_realm_role(name, realm) + else: + before_role = kc.get_client_role(name, clientid, realm) + + if before_role is None: + before_role = dict() + + # build a changeset + changeset = dict() + + for param in role_params: + new_param_value = module.params.get(param) + old_value = before_role[param] if param in before_role else None + if new_param_value != old_value: + changeset[camel(param)] = new_param_value + + # prepare the new role + updated_role = before_role.copy() + updated_role.update(changeset) + + result['proposed'] = changeset + result['existing'] = before_role + + # if before_role is none, the role doesn't exist. + if before_role == dict(): + if state == 'absent': + # nothing to do. + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = dict() + result['msg'] = 'Role does not exist; doing nothing.' 
+ module.exit_json(**result) + + # for 'present', create a new role. + result['changed'] = True + + if name is None: + module.fail_json(msg='name must be specified when creating a new role') + + if module._diff: + result['diff'] = dict(before='', after=updated_role) + + if module.check_mode: + module.exit_json(**result) + + # do it for real! + if clientid is None: + kc.create_realm_role(updated_role, realm) + after_role = kc.get_realm_role(name, realm) + else: + kc.create_client_role(updated_role, clientid, realm) + after_role = kc.get_client_role(name, clientid, realm) + + result['end_state'] = after_role + + result['msg'] = 'Role {name} has been created'.format(name=name) + module.exit_json(**result) + + else: + if state == 'present': + # no changes + if updated_role == before_role: + result['changed'] = False + result['end_state'] = updated_role + result['msg'] = "No changes required to role {name}.".format(name=name) + module.exit_json(**result) + + # update the existing role + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_role, after=updated_role) + + if module.check_mode: + module.exit_json(**result) + + # do the update + if clientid is None: + kc.update_realm_role(updated_role, realm) + after_role = kc.get_realm_role(name, realm) + else: + kc.update_client_role(updated_role, clientid, realm) + after_role = kc.get_client_role(name, clientid, realm) + + result['end_state'] = after_role + + result['msg'] = "Role {name} has been updated".format(name=name) + module.exit_json(**result) + + elif state == 'absent': + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_role, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete for real + if clientid is None: + kc.delete_realm_role(name, realm) + else: + kc.delete_client_role(name, clientid, realm) + + result['end_state'] = dict() + + result['msg'] = "Role {name} has been deleted".format(name=name) + 
module.exit_json(**result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_role.py b/plugins/modules/keycloak_role.py new file mode 120000 index 0000000000..48554b3a5f --- /dev/null +++ b/plugins/modules/keycloak_role.py @@ -0,0 +1 @@ +./identity/keycloak/keycloak_role.py \ No newline at end of file diff --git a/tests/integration/targets/keycloak_role/aliases b/tests/integration/targets/keycloak_role/aliases new file mode 100644 index 0000000000..ad7ccf7ada --- /dev/null +++ b/tests/integration/targets/keycloak_role/aliases @@ -0,0 +1 @@ +unsupported diff --git a/tests/integration/targets/keycloak_role/tasks/main.yml b/tests/integration/targets/keycloak_role/tasks/main.yml new file mode 100644 index 0000000000..683cfc8677 --- /dev/null +++ b/tests/integration/targets/keycloak_role/tasks/main.yml @@ -0,0 +1,246 @@ +--- +- name: Create realm + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + id: "{{ realm }}" + realm: "{{ realm }}" + state: present + +- name: Create client + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + register: client + +- name: Create new realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ description_1 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert realm role created + assert: + that: + - result is changed + - result.existing == {} + - result.end_state.name == "{{ role }}" + - result.end_state.containerId == 
"{{ realm }}" + +- name: Create existing realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ description_1 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert realm role unchanged + assert: + that: + - result is not changed + +- name: Update realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ description_2 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert realm role updated + assert: + that: + - result is changed + - result.existing.description == "{{ description_1 }}" + - result.end_state.description == "{{ description_2 }}" + +- name: Delete existing realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert realm role deleted + assert: + that: + - result is changed + - result.end_state == {} + +- name: Delete absent realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert realm role unchanged + assert: + that: + - result is not changed + - result.end_state == {} + +- name: Create new client role + community.general.keycloak_role: + auth_keycloak_url: 
"{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + name: "{{ role }}" + description: "{{ description_1 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert client role created + assert: + that: + - result is changed + - result.existing == {} + - result.end_state.name == "{{ role }}" + - result.end_state.containerId == "{{ client.end_state.id }}" + +- name: Create existing client role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + name: "{{ role }}" + description: "{{ description_1 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert client role unchanged + assert: + that: + - result is not changed + +- name: Update client role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + name: "{{ role }}" + description: "{{ description_2 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert client role updated + assert: + that: + - result is changed + - result.existing.description == "{{ description_1 }}" + - result.end_state.description == "{{ description_2 }}" + +- name: Delete existing client role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert client role 
deleted + assert: + that: + - result is changed + - result.end_state == {} + +- name: Delete absent client role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert client role unchanged + assert: + that: + - result is not changed + - result.end_state == {} diff --git a/tests/integration/targets/keycloak_role/vars/main.yml b/tests/integration/targets/keycloak_role/vars/main.yml new file mode 100644 index 0000000000..0a725dc4a6 --- /dev/null +++ b/tests/integration/targets/keycloak_role/vars/main.yml @@ -0,0 +1,10 @@ +--- +url: http://localhost:8080/auth +admin_realm: master +admin_user: admin +admin_password: password +realm: myrealm +client_id: myclient +role: myrole +description_1: desc 1 +description_2: desc 2 diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_role.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_role.py new file mode 100644 index 0000000000..cffae17807 --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_role.py @@ -0,0 +1,326 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +from 
ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_role + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_realm_role, create_realm_role=None, update_realm_role=None, delete_realm_role=None): + """Mock context manager for patching the KeycloakAPI methods that contact the Keycloak server + + Patches the `get_realm_role`, `create_realm_role`, `update_realm_role` and `delete_realm_role` methods + + Each keyword argument is used as the side effect of the mock that replaces the method of the same name + + A keyword argument left as None produces a mock whose calls simply return None + + Example:: + + with patch_keycloak_api(get_realm_role=[None]) as (mock_get, mock_create, mock_update, mock_delete): + ... + """ + + obj = keycloak_role.KeycloakAPI + with patch.object(obj, 'get_realm_role', side_effect=get_realm_role) as mock_get_realm_role: + with patch.object(obj, 'create_realm_role', side_effect=create_realm_role) as mock_create_realm_role: + with patch.object(obj, 'update_realm_role', side_effect=update_realm_role) as mock_update_realm_role: + with patch.object(obj, 'delete_realm_role', side_effect=delete_realm_role) as mock_delete_realm_role: + yield mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return
get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakRealmRole(ModuleTestCase): + def setUp(self): + super(TestKeycloakRealmRole, self).setUp() + self.module = keycloak_role + + def test_create_when_absent(self): + """Add a new realm role""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'name': 'role-name', + 'description': 'role-description', + } + return_value_absent = [ + None, + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + } + ] + return_value_created = [None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_absent, create_realm_role=return_value_created) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_role.mock_calls), 2) + 
self.assertEqual(len(mock_create_realm_role.mock_calls), 1) + self.assertEqual(len(mock_update_realm_role.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present_with_change(self): + """Update with change a realm role""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'name': 'role-name', + 'description': 'new-role-description', + } + return_value_present = [ + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + }, + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "new-role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + } + ] + return_value_updated = [None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_role.mock_calls), 2) + self.assertEqual(len(mock_create_realm_role.mock_calls), 0) + self.assertEqual(len(mock_update_realm_role.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present_no_change(self): + """Update without change a realm role""" + + module_args = { + 'auth_keycloak_url': 
'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'name': 'role-name', + 'description': 'role-description', + } + return_value_present = [ + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + }, + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + } + ] + return_value_updated = [None] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_role.mock_calls), 1) + self.assertEqual(len(mock_create_realm_role.mock_calls), 0) + self.assertEqual(len(mock_update_realm_role.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_absent(self): + """Remove an absent realm role""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'name': 'role-name', + 'state': 'absent' + } + return_value_absent = [None] + return_value_deleted = [None] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with 
patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_role.mock_calls), 1) + self.assertEqual(len(mock_delete_realm_role.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_present(self): + """Remove a present realm role""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'name': 'role-name', + 'state': 'absent' + } + return_value_absent = [ + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + } + ] + return_value_deleted = [None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_role.mock_calls), 1) + self.assertEqual(len(mock_delete_realm_role.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 4a392372a873c349bcba020f4ba1eb6d28e27a4e Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> 
Date: Mon, 19 Jul 2021 22:39:02 +0100 Subject: [PATCH 0210/2828] Keycloak: add clientscope management (#2905) * Add new keycloak_clienscope module * Add description and protocol parameter + Indentation Fix * Add protocolMappers parameter * Add documentation and Fix updatating of protocolMappers * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Add sanitize_cr(clientscoperep) function to sanitize the clientscope representation * Add unit tests for clientscope Keycloak module * Apply suggestions from code review Co-authored-by: Felix Fontein --- .../identity/keycloak/keycloak.py | 238 +++++++ .../identity/keycloak/keycloak_clientscope.py | 492 ++++++++++++++ plugins/modules/keycloak_clientscope.py | 1 + .../keycloak/test_keycloak_clientscope.py | 614 ++++++++++++++++++ 4 files changed, 1345 insertions(+) create mode 100644 plugins/modules/identity/keycloak/keycloak_clientscope.py create mode 120000 plugins/modules/keycloak_clientscope.py create mode 100644 tests/unit/plugins/modules/identity/keycloak/test_keycloak_clientscope.py diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 8521650f16..75ef2bba02 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -57,6 +57,11 @@ URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates" URL_GROUPS = "{url}/admin/realms/{realm}/groups" URL_GROUP = 
"{url}/admin/realms/{realm}/groups/{groupid}" +URL_CLIENTSCOPES = "{url}/admin/realms/{realm}/client-scopes" +URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}" +URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models" +URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}" + URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows" URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}" URL_AUTHENTICATION_FLOW_COPY = "{url}/admin/realms/{realm}/authentication/flows/{copyfrom}/copy" @@ -511,6 +516,239 @@ class KeycloakAPI(object): self.module.fail_json(msg='Could not delete client template %s in realm %s: %s' % (id, realm, str(e))) + def get_clientscopes(self, realm="master"): + """ Fetch the name and ID of all clientscopes on the Keycloak server. + + To fetch the full data of the group, make a subsequent call to + get_clientscope_by_clientscopeid, passing in the ID of the group you wish to return. + + :param realm: Realm in which the clientscope resides; default 'master'. + :return The clientscopes of this realm (default "master") + """ + clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm) + try: + return json.loads(to_native(open_url(clientscopes_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.module.fail_json(msg="Could not fetch list of clientscopes in realm %s: %s" + % (realm, str(e))) + + def get_clientscope_by_clientscopeid(self, cid, realm="master"): + """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID. + + If the clientscope does not exist, None is returned. + + gid is a UUID provided by the Keycloak API + :param cid: UUID of the clientscope to be returned + :param realm: Realm in which the clientscope resides; default 'master'. 
+ """ + clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=cid) + try: + return json.loads(to_native(open_url(clientscope_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s" + % (cid, realm, str(e))) + except Exception as e: + self.module.fail_json(msg="Could not clientscope group %s in realm %s: %s" + % (cid, realm, str(e))) + + def get_clientscope_by_name(self, name, realm="master"): + """ Fetch a keycloak clientscope within a realm based on its name. + + The Keycloak API does not allow filtering of the clientscopes resource by name. + As a result, this method first retrieves the entire list of clientscopes - name and ID - + then performs a second query to fetch the group. + + If the clientscope does not exist, None is returned. + :param name: Name of the clientscope to fetch. + :param realm: Realm in which the clientscope resides; default 'master' + """ + try: + all_clientscopes = self.get_clientscopes(realm=realm) + + for clientscope in all_clientscopes: + if clientscope['name'] == name: + return self.get_clientscope_by_clientscopeid(clientscope['id'], realm=realm) + + return None + + except Exception as e: + self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s" + % (name, realm, str(e))) + + def create_clientscope(self, clientscoperep, realm="master"): + """ Create a Keycloak clientscope. + + :param clientscoperep: a ClientScopeRepresentation of the clientscope to be created. Must contain at minimum the field name. 
+ :return: HTTPResponse object on success + """ + clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm) + try: + return open_url(clientscopes_url, method='POST', headers=self.restheaders, + data=json.dumps(clientscoperep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg="Could not create clientscope %s in realm %s: %s" + % (clientscoperep['name'], realm, str(e))) + + def update_clientscope(self, clientscoperep, realm="master"): + """ Update an existing clientscope. + + :param grouprep: A GroupRepresentation of the updated group. + :return HTTPResponse object on success + """ + clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep['id']) + + try: + return open_url(clientscope_url, method='PUT', headers=self.restheaders, + data=json.dumps(clientscoperep), validate_certs=self.validate_certs) + + except Exception as e: + self.module.fail_json(msg='Could not update clientscope %s in realm %s: %s' + % (clientscoperep['name'], realm, str(e))) + + def delete_clientscope(self, name=None, cid=None, realm="master"): + """ Delete a clientscope. One of name or cid must be provided. + + Providing the clientscope ID is preferred as it avoids a second lookup to + convert a clientscope name to an ID. + + :param name: The name of the clientscope. A lookup will be performed to retrieve the clientscope ID. + :param cid: The ID of the clientscope (preferred to name). + :param realm: The realm in which this group resides, default "master". + """ + + if cid is None and name is None: + # prefer an exception since this is almost certainly a programming error in the module itself. + raise Exception("Unable to delete group - one of group ID or name must be provided.") + + # only lookup the name if cid isn't provided. + # in the case that both are provided, prefer the ID, since it's one + # less lookup. 
+ if cid is None and name is not None: + for clientscope in self.get_clientscopes(realm=realm): + if clientscope['name'] == name: + cid = clientscope['id'] + break + + # if the group doesn't exist - no problem, nothing to delete. + if cid is None: + return None + + # should have a good cid by here. + clientscope_url = URL_CLIENTSCOPE.format(realm=realm, id=cid, url=self.baseurl) + try: + return open_url(clientscope_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + + except Exception as e: + self.module.fail_json(msg="Unable to delete clientscope %s: %s" % (cid, str(e))) + + def get_clientscope_protocolmappers(self, cid, realm="master"): + """ Fetch the name and ID of all clientscopes on the Keycloak server. + + To fetch the full data of the group, make a subsequent call to + get_clientscope_by_clientscopeid, passing in the ID of the group you wish to return. + + :param cid: id of clientscope (not name). + :param realm: Realm in which the clientscope resides; default 'master'. + :return The protocolmappers of this realm (default "master") + """ + protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(id=cid, url=self.baseurl, realm=realm) + try: + return json.loads(to_native(open_url(protocolmappers_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.module.fail_json(msg="Could not fetch list of protocolmappers in realm %s: %s" + % (realm, str(e))) + + def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"): + """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID. + + If the clientscope does not exist, None is returned. + + gid is a UUID provided by the Keycloak API + + :param cid: UUID of the protocolmapper to be returned + :param cid: UUID of the clientscope to be returned + :param realm: Realm in which the clientscope resides; default 'master'. 
+ """ + protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=pid) + try: + return json.loads(to_native(open_url(protocolmapper_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" + % (pid, realm, str(e))) + except Exception as e: + self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" + % (cid, realm, str(e))) + + def get_clientscope_protocolmapper_by_name(self, cid, name, realm="master"): + """ Fetch a keycloak clientscope within a realm based on its name. + + The Keycloak API does not allow filtering of the clientscopes resource by name. + As a result, this method first retrieves the entire list of clientscopes - name and ID - + then performs a second query to fetch the group. + + If the clientscope does not exist, None is returned. + :param cid: Id of the clientscope (not name). + :param name: Name of the protocolmapper to fetch. + :param realm: Realm in which the clientscope resides; default 'master' + """ + try: + all_protocolmappers = self.get_clientscope_protocolmappers(cid, realm=realm) + + for protocolmapper in all_protocolmappers: + if protocolmapper['name'] == name: + return self.get_clientscope_protocolmapper_by_protocolmapperid(protocolmapper['id'], cid, realm=realm) + + return None + + except Exception as e: + self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" + % (name, realm, str(e))) + + def create_clientscope_protocolmapper(self, cid, mapper_rep, realm="master"): + """ Create a Keycloak clientscope protocolmapper. + + :param cid: Id of the clientscope. + :param mapper_rep: a ProtocolMapperRepresentation of the protocolmapper to be created. Must contain at minimum the field name. 
+ :return: HTTPResponse object on success + """ + protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(url=self.baseurl, id=cid, realm=realm) + try: + return open_url(protocolmappers_url, method='POST', headers=self.restheaders, + data=json.dumps(mapper_rep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg="Could not create protocolmapper %s in realm %s: %s" + % (mapper_rep['name'], realm, str(e))) + + def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"): + """ Update an existing clientscope. + + :param cid: Id of the clientscope. + :param mapper_rep: A ProtocolMapperRepresentation of the updated protocolmapper. + :return HTTPResponse object on success + """ + protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep['id']) + + try: + return open_url(protocolmapper_url, method='PUT', headers=self.restheaders, + data=json.dumps(mapper_rep), validate_certs=self.validate_certs) + + except Exception as e: + self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s' + % (mapper_rep, realm, str(e))) + def get_groups(self, realm="master"): """ Fetch the name and ID of all groups on the Keycloak server. 
diff --git a/plugins/modules/identity/keycloak/keycloak_clientscope.py b/plugins/modules/identity/keycloak/keycloak_clientscope.py new file mode 100644 index 0000000000..c05050aae5 --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_clientscope.py @@ -0,0 +1,492 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_clientscope + +short_description: Allows administration of Keycloak client_scopes via Keycloak API + +version_added: 3.4.0 + +description: + - This module allows you to add, remove or modify Keycloak client_scopes via the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will + be returned that way by this module. You may pass single values for attributes when calling the module, + and this will be translated into a list suitable for the API. + + - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup + to the API to translate the name into the client_scope ID. + + +options: + state: + description: + - State of the client_scope. + - On C(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide. 
+ - On C(absent), the client_scope will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + name: + type: str + description: + - Name of the client_scope. + - This parameter is required only when creating or updating the client_scope. + + realm: + type: str + description: + - They Keycloak realm under which this client_scope resides. + default: 'master' + + id: + type: str + description: + - The unique identifier for this client_scope. + - This parameter is not required for updating or deleting a client_scope but + providing it will reduce the number of API calls required. + + description: + type: str + description: + - Description for this client_scope. + - This parameter is not required for updating or deleting a client_scope. + + protocol: + description: + - Type of client. + choices: ['openid-connect', 'saml', 'wsfed'] + type: str + + protocol_mappers: + description: + - A list of dicts defining protocol mappers for this client. + - This is 'protocolMappers' in the Keycloak REST API. + aliases: + - protocolMappers + type: list + elements: dict + suboptions: + protocol: + description: + - This specifies for which protocol this protocol mapper + - is active. + choices: ['openid-connect', 'saml', 'wsfed'] + type: str + + protocolMapper: + description: + - "The Keycloak-internal name of the type of this protocol-mapper. 
While an exhaustive list is + impossible to provide since this may be extended through SPIs by the user of Keycloak, + by default Keycloak as of 3.4 ships with at least:" + - C(docker-v2-allow-all-mapper) + - C(oidc-address-mapper) + - C(oidc-full-name-mapper) + - C(oidc-group-membership-mapper) + - C(oidc-hardcoded-claim-mapper) + - C(oidc-hardcoded-role-mapper) + - C(oidc-role-name-mapper) + - C(oidc-script-based-protocol-mapper) + - C(oidc-sha256-pairwise-sub-mapper) + - C(oidc-usermodel-attribute-mapper) + - C(oidc-usermodel-client-role-mapper) + - C(oidc-usermodel-property-mapper) + - C(oidc-usermodel-realm-role-mapper) + - C(oidc-usersessionmodel-note-mapper) + - C(saml-group-membership-mapper) + - C(saml-hardcode-attribute-mapper) + - C(saml-hardcode-role-mapper) + - C(saml-role-list-mapper) + - C(saml-role-name-mapper) + - C(saml-user-attribute-mapper) + - C(saml-user-property-mapper) + - C(saml-user-session-note-mapper) + - An exhaustive list of available mappers on your installation can be obtained on + the admin console by going to Server Info -> Providers and looking under + 'protocol-mapper'. + type: str + + name: + description: + - The name of this protocol mapper. + type: str + + id: + description: + - Usually a UUID specifying the internal ID of this protocol mapper instance. + type: str + + config: + description: + - Dict specifying the configuration options for the protocol mapper; the + contents differ depending on the value of I(protocolMapper) and are not documented + other than by the source of the mappers and its parent class(es). An example is given + below. It is easiest to obtain valid config values by dumping an already-existing + protocol mapper configuration through check-mode in the C(existing) return value. + type: dict + + attributes: + type: dict + description: + - A dict of key/value pairs to set as custom attributes for the client_scope. + - Values may be single values (for example a string) or a list of strings. 
+ +extends_documentation_fragment: +- community.general.keycloak + + +author: + - Gaëtan Daubresse (@Gaetan2907) +''' + +EXAMPLES = ''' +- name: Create a Keycloak client_scopes, authentication with credentials + community.general.keycloak_clientscope: + name: my-new-kc-clientscope + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a Keycloak client_scopes, authentication with token + community.general.keycloak_clientscope: + name: my-new-kc-clientscope + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + +- name: Delete a keycloak client_scopes + community.general.keycloak_clientscope: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + state: absent + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Delete a Keycloak client_scope based on name + community.general.keycloak_clientscope: + name: my-clientscope-for-deletion + state: absent + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Update the name of a Keycloak client_scope + community.general.keycloak_clientscope: + id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' + name: an-updated-kc-clientscope-name + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a Keycloak client_scope with some custom attributes + community.general.keycloak_clientscope: + auth_client_id: admin-cli + 
auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + name: my-new_clientscope + description: description-of-clientscope + protocol: openid-connect + protocol_mappers: + - config: + access.token.claim: True + claim.name: "family_name" + id.token.claim: True + jsonType.label: String + user.attribute: lastName + userinfo.token.claim: True + name: family name + protocol: openid-connect + protocolMapper: oidc-usermodel-property-mapper + - config: + attribute.name: Role + attribute.nameformat: Basic + single: false + name: role list + protocol: saml + protocolMapper: saml-role-list-mapper + attributes: + attrib1: value1 + attrib2: value2 + attrib3: + - with + - numerous + - individual + - list + - items + delegate_to: localhost +''' + +RETURN = ''' +msg: + description: Message as to what action was taken + returned: always + type: str + sample: "Client_scope testclientscope has been updated" + +proposed: + description: client_scope representation of proposed changes to client_scope + returned: always + type: dict + sample: { + clientId: "test" + } +existing: + description: client_scope representation of existing client_scope (sample is truncated) + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +end_state: + description: client_scope representation of client_scope after module execution (sample is truncated) + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible.module_utils.basic import AnsibleModule + + +def sanitize_cr(clientscoperep): + """ Removes probably sensitive 
details from a clientscoperep representation + + :param clientscoperep: the clientscoperep dict to be sanitized + :return: sanitized clientrep dict + """ + result = clientscoperep.copy() + if 'secret' in result: + result['secret'] = 'no_log' + if 'attributes' in result: + if 'saml.signing.private.key' in result['attributes']: + result['attributes']['saml.signing.private.key'] = 'no_log' + return result + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + protmapper_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), + protocolMapper=dict(type='str'), + config=dict(type='dict'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + id=dict(type='str'), + name=dict(type='str'), + description=dict(type='str'), + protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']), + attributes=dict(type='dict'), + protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'name'], + ['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + cid = module.params.get('id') + name = module.params.get('name') + protocol_mappers = module.params.get('protocol_mappers') + + before_clientscope = None # current state of the clientscope, 
for merging. + + # does the clientscope already exist? + if cid is None: + before_clientscope = kc.get_clientscope_by_name(name, realm=realm) + else: + before_clientscope = kc.get_clientscope_by_clientscopeid(cid, realm=realm) + + before_clientscope = {} if before_clientscope is None else before_clientscope + + clientscope_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and + module.params.get(x) is not None] + + # Build a proposed changeset from parameters given to this module + changeset = dict() + + for clientscope_param in clientscope_params: + new_param_value = module.params.get(clientscope_param) + + # some lists in the Keycloak API are sorted, some are not. + if isinstance(new_param_value, list): + if clientscope_param in ['attributes']: + try: + new_param_value = sorted(new_param_value) + except TypeError: + pass + # Unfortunately, the ansible argument spec checker introduces variables with null values when + # they are not specified + if clientscope_param == 'protocol_mappers': + new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value] + changeset[camel(clientscope_param)] = new_param_value + + # prepare the new clientscope + updated_clientscope = before_clientscope.copy() + updated_clientscope.update(changeset) + + # if before_clientscope is none, the clientscope doesn't exist. + if before_clientscope == {}: + if state == 'absent': + # nothing to do. + if module._diff: + result['diff'] = dict(before='', after='') + result['msg'] = 'Clientscope does not exist; doing nothing.' + result['end_state'] = dict() + module.exit_json(**result) + + # for 'present', create a new clientscope. 
+ result['changed'] = True + if name is None: + module.fail_json(msg='name must be specified when creating a new clientscope') + + if module._diff: + result['diff'] = dict(before='', after=sanitize_cr(updated_clientscope)) + + if module.check_mode: + module.exit_json(**result) + + # do it for real! + kc.create_clientscope(updated_clientscope, realm=realm) + after_clientscope = kc.get_clientscope_by_name(name, realm) + + result['end_state'] = sanitize_cr(after_clientscope) + result['msg'] = 'Clientscope {name} has been created with ID {id}'.format(name=after_clientscope['name'], + id=after_clientscope['id']) + + else: + if state == 'present': + # no changes + if updated_clientscope == before_clientscope: + result['changed'] = False + result['end_state'] = sanitize_cr(updated_clientscope) + result['msg'] = "No changes required to clientscope {name}.".format(name=before_clientscope['name']) + module.exit_json(**result) + + # update the existing clientscope + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(updated_clientscope)) + + if module.check_mode: + module.exit_json(**result) + + # do the clientscope update + kc.update_clientscope(updated_clientscope, realm=realm) + + # do the protocolmappers update + if protocol_mappers is not None: + for protocol_mapper in protocol_mappers: + # update if protocolmapper exist + current_protocolmapper = kc.get_clientscope_protocolmapper_by_name(updated_clientscope['id'], protocol_mapper['name'], realm=realm) + if current_protocolmapper is not None: + protocol_mapper['id'] = current_protocolmapper['id'] + kc.update_clientscope_protocolmappers(updated_clientscope['id'], protocol_mapper, realm=realm) + # create otherwise + else: + kc.create_clientscope_protocolmapper(updated_clientscope['id'], protocol_mapper, realm=realm) + + after_clientscope = kc.get_clientscope_by_clientscopeid(updated_clientscope['id'], realm=realm) + + result['end_state'] = 
after_clientscope + result['msg'] = "Clientscope {id} has been updated".format(id=after_clientscope['id']) + + module.exit_json(**result) + + elif state == 'absent': + result['end_state'] = dict() + + if module._diff: + result['diff'] = dict(before=sanitize_cr(before_clientscope), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete for real + cid = before_clientscope['id'] + kc.delete_clientscope(cid=cid, realm=realm) + + result['changed'] = True + result['msg'] = "Clientscope {name} has been deleted".format(name=before_clientscope['name']) + + module.exit_json(**result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py new file mode 120000 index 0000000000..01468a5c8e --- /dev/null +++ b/plugins/modules/keycloak_clientscope.py @@ -0,0 +1 @@ +identity/keycloak/keycloak_clientscope.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_clientscope.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_clientscope.py new file mode 100644 index 0000000000..0954562d95 --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_clientscope.py @@ -0,0 +1,614 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, \ + ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.keycloak import 
keycloak_clientscope + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_clientscope_by_name=None, get_clientscope_by_clientscopeid=None, create_clientscope=None, + update_clientscope=None, get_clientscope_protocolmapper_by_name=None, + update_clientscope_protocolmappers=None, create_clientscope_protocolmapper=None, + delete_clientscope=None): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + """ + get_clientscope_by_clientscopeid + delete_clientscope + """ + + obj = keycloak_clientscope.KeycloakAPI + with patch.object(obj, 'get_clientscope_by_name', side_effect=get_clientscope_by_name) \ + as mock_get_clientscope_by_name: + with patch.object(obj, 'get_clientscope_by_clientscopeid', side_effect=get_clientscope_by_clientscopeid) \ + as mock_get_clientscope_by_clientscopeid: + with patch.object(obj, 'create_clientscope', side_effect=create_clientscope) \ + as mock_create_clientscope: + with patch.object(obj, 'update_clientscope', return_value=update_clientscope) \ + as mock_update_clientscope: + with patch.object(obj, 'get_clientscope_protocolmapper_by_name', + side_effect=get_clientscope_protocolmapper_by_name) \ + as mock_get_clientscope_protocolmapper_by_name: + with patch.object(obj, 'update_clientscope_protocolmappers', + side_effect=update_clientscope_protocolmappers) \ + as mock_update_clientscope_protocolmappers: + with patch.object(obj, 'create_clientscope_protocolmapper', + side_effect=create_clientscope_protocolmapper) \ + as mock_create_clientscope_protocolmapper: + with patch.object(obj, 'delete_clientscope', 
side_effect=delete_clientscope) \ + as mock_delete_clientscope: + yield mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, \ + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, mock_update_clientscope_protocolmappers, \ + mock_create_clientscope_protocolmapper, mock_delete_clientscope + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. 
+ """ + + def _create_wrapper(): + return StringIO(text_as_string) + + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper( + '{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakAuthentication(ModuleTestCase): + def setUp(self): + super(TestKeycloakAuthentication, self).setUp() + self.module = keycloak_clientscope + + def test_create_clientscope(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'present', + 'name': 'my-new-kc-clientscope' + } + return_value_get_clientscope_by_name = [ + None, + { + "attributes": {}, + "id": "73fec1d2-f032-410c-8177-583104d01305", + "name": "my-new-kc-clientscope" + }] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 2) + self.assertEqual(mock_create_clientscope.call_count, 1) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0) + self.assertEqual(mock_update_clientscope.call_count, 0) + 
self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + self.assertEqual(mock_delete_clientscope.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_clientscope_idempotency(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'present', + 'name': 'my-new-kc-clientscope' + } + return_value_get_clientscope_by_name = [{ + "attributes": {}, + "id": "73fec1d2-f032-410c-8177-583104d01305", + "name": "my-new-kc-clientscope" + }] + + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 1) + self.assertEqual(mock_create_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0) + self.assertEqual(mock_update_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + 
self.assertEqual(mock_delete_clientscope.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_clientscope(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'absent', + 'name': 'my-new-kc-clientscope' + } + return_value_get_clientscope_by_name = [{ + "attributes": {}, + "id": "73fec1d2-f032-410c-8177-583104d01305", + "name": "my-new-kc-clientscope" + }] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 1) + self.assertEqual(mock_create_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0) + self.assertEqual(mock_update_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + self.assertEqual(mock_delete_clientscope.call_count, 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def 
test_delete_clientscope_idempotency(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'absent', + 'name': 'my-new-kc-clientscope' + } + return_value_get_clientscope_by_name = [None] + + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 1) + self.assertEqual(mock_create_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0) + self.assertEqual(mock_update_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + self.assertEqual(mock_delete_clientscope.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_clientscope_with_protocolmappers(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'present', + 'name': 
'my-new-kc-clientscope', + 'protocolMappers': [ + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'true', + 'id.token.claim': 'true', + 'access.token.claim': 'true', + 'userinfo.token.claim': 'true', + 'claim.name': 'protocol1', + }, + 'name': 'protocol1', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'false', + 'id.token.claim': 'false', + 'access.token.claim': 'false', + 'userinfo.token.claim': 'false', + 'claim.name': 'protocol2', + }, + 'name': 'protocol2', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'true', + 'id.token.claim': 'false', + 'access.token.claim': 'true', + 'userinfo.token.claim': 'false', + 'claim.name': 'protocol3', + }, + 'name': 'protocol3', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + ] + } + return_value_get_clientscope_by_name = [ + None, + { + "attributes": {}, + "id": "890ec72e-fe1d-4308-9f27-485ef7eaa182", + "name": "my-new-kc-clientscope", + "protocolMappers": [ + { + "config": { + "access.token.claim": "false", + "claim.name": "protocol2", + "full.path": "false", + "id.token.claim": "false", + "userinfo.token.claim": "false" + }, + "consentRequired": "false", + "id": "a7f19adb-cc58-41b1-94ce-782dc255139b", + "name": "protocol2", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "protocol3", + "full.path": "true", + "id.token.claim": "false", + "userinfo.token.claim": "false" + }, + "consentRequired": "false", + "id": "2103a559-185a-40f4-84ae-9ab311d5b812", + "name": "protocol3", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "protocol1", + "full.path": "true", + "id.token.claim": "true", + "userinfo.token.claim": "true" + }, + "consentRequired": 
"false", + "id": "bbf6390f-e95f-4c20-882b-9dad328363b9", + "name": "protocol1", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }] + }] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 2) + self.assertEqual(mock_create_clientscope.call_count, 1) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0) + self.assertEqual(mock_update_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + self.assertEqual(mock_delete_clientscope.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_update_clientscope_with_protocolmappers(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'present', + 'name': 'my-new-kc-clientscope', + 'protocolMappers': [ + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'false', + 'id.token.claim': 'false', + 'access.token.claim': 'false', + 'userinfo.token.claim': 'false', + 
'claim.name': 'protocol1_updated', + }, + 'name': 'protocol1', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'true', + 'id.token.claim': 'false', + 'access.token.claim': 'false', + 'userinfo.token.claim': 'false', + 'claim.name': 'protocol2_updated', + }, + 'name': 'protocol2', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'true', + 'id.token.claim': 'true', + 'access.token.claim': 'true', + 'userinfo.token.claim': 'true', + 'claim.name': 'protocol3_updated', + }, + 'name': 'protocol3', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + ] + } + return_value_get_clientscope_by_name = [{ + "attributes": {}, + "id": "890ec72e-fe1d-4308-9f27-485ef7eaa182", + "name": "my-new-kc-clientscope", + "protocolMappers": [ + { + "config": { + "access.token.claim": "true", + "claim.name": "groups", + "full.path": "true", + "id.token.claim": "true", + "userinfo.token.claim": "true" + }, + "consentRequired": "false", + "id": "e077007a-367a-444f-91ef-70277a1d868d", + "name": "groups", + "protocol": "saml", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "groups", + "full.path": "true", + "id.token.claim": "true", + "userinfo.token.claim": "true" + }, + "consentRequired": "false", + "id": "06c518aa-c627-43cc-9a82-d8467b508d34", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "groups", + "full.path": "true", + "id.token.claim": "true", + "userinfo.token.claim": "true" + }, + "consentRequired": "false", + "id": "1d03c557-d97e-40f4-ac35-6cecd74ea70d", + "name": "groups", + "protocol": "wsfed", + "protocolMapper": "oidc-group-membership-mapper" + } + ] + }] + return_value_get_clientscope_by_clientscopeid = [{ + "attributes": {}, + 
"id": "2286032f-451e-44d5-8be6-e45aac7983a1", + "name": "my-new-kc-clientscope", + "protocolMappers": [ + { + "config": { + "access.token.claim": "true", + "claim.name": "protocol1_updated", + "full.path": "true", + "id.token.claim": "false", + "userinfo.token.claim": "false" + }, + "consentRequired": "false", + "id": "a7f19adb-cc58-41b1-94ce-782dc255139b", + "name": "protocol2", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "protocol1_updated", + "full.path": "true", + "id.token.claim": "false", + "userinfo.token.claim": "false" + }, + "consentRequired": "false", + "id": "2103a559-185a-40f4-84ae-9ab311d5b812", + "name": "protocol3", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "false", + "claim.name": "protocol1_updated", + "full.path": "false", + "id.token.claim": "false", + "userinfo.token.claim": "false" + }, + "consentRequired": "false", + "id": "bbf6390f-e95f-4c20-882b-9dad328363b9", + "name": "protocol1", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + } + ] + }] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name, + get_clientscope_by_clientscopeid=return_value_get_clientscope_by_clientscopeid) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 1) + 
self.assertEqual(mock_create_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 1) + self.assertEqual(mock_update_clientscope.call_count, 1) + self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 3) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 3) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + self.assertEqual(mock_delete_clientscope.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 11cdb1b661b302aca06edb62b17f0f42a4daa609 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 19 Jul 2021 23:39:57 +0200 Subject: [PATCH 0211/2828] Next expected release is 3.5.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 640f4151d3..0f19d8d443 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.4.0 +version: 3.5.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 38e70ae0e4c6086c5470583ee4fc5c7e7684284d Mon Sep 17 00:00:00 2001 From: Max Bidlingmaier Date: Thu, 22 Jul 2021 16:55:09 +0200 Subject: [PATCH 0212/2828] github_repo: support GitHub on premise installations (#3039) * added possibility to manage on prem github * added changelog * fixed module tests * Update changelogs/fragments/3038-enhance_github_repo_api_url.yml Co-authored-by: Felix Fontein * Update plugins/modules/source_control/github/github_repo.py Co-authored-by: Felix Fontein Co-authored-by: Max Bidlingmaier Co-authored-by: Felix Fontein --- .../3038-enhance_github_repo_api_url.yml | 2 ++ .../source_control/github/github_repo.py | 19 +++++++++++++++---- .../source_control/github/test_github_repo.py | 18 ++++++++++++------ 3 files changed, 29 insertions(+), 10 deletions(-) create mode 100644 
changelogs/fragments/3038-enhance_github_repo_api_url.yml diff --git a/changelogs/fragments/3038-enhance_github_repo_api_url.yml b/changelogs/fragments/3038-enhance_github_repo_api_url.yml new file mode 100644 index 0000000000..19eda0f66d --- /dev/null +++ b/changelogs/fragments/3038-enhance_github_repo_api_url.yml @@ -0,0 +1,2 @@ +minor_changes: + - github_repo - add new option ``api_url`` to allow working with on premises installations (https://github.com/ansible-collections/community.general/pull/3038). diff --git a/plugins/modules/source_control/github/github_repo.py b/plugins/modules/source_control/github/github_repo.py index 587111fe5a..b5403c6a8d 100644 --- a/plugins/modules/source_control/github/github_repo.py +++ b/plugins/modules/source_control/github/github_repo.py @@ -66,6 +66,12 @@ options: - When I(state) is C(present), the repository will be created in the current user profile. type: str required: false + api_url: + description: + - URL to the GitHub API if not using github.com but you own instance. 
+ type: str + default: 'https://api.github.com' + version_added: "3.5.0" requirements: - PyGithub>=1.54 notes: @@ -119,11 +125,14 @@ except Exception: HAS_GITHUB_PACKAGE = False -def authenticate(username=None, password=None, access_token=None): +def authenticate(username=None, password=None, access_token=None, api_url=None): + if not api_url: + return None + if access_token: - return Github(base_url="https://api.github.com", login_or_token=access_token) + return Github(base_url=api_url, login_or_token=access_token) else: - return Github(base_url="https://api.github.com", login_or_token=username, password=password) + return Github(base_url=api_url, login_or_token=username, password=password) def create_repo(gh, name, organization=None, private=False, description='', check_mode=False): @@ -185,7 +194,8 @@ def delete_repo(gh, name, organization=None, check_mode=False): def run_module(params, check_mode=False): gh = authenticate( - username=params['username'], password=params['password'], access_token=params['access_token']) + username=params['username'], password=params['password'], access_token=params['access_token'], + api_url=params['api_url']) if params['state'] == "absent": return delete_repo( gh=gh, @@ -216,6 +226,7 @@ def main(): organization=dict(type='str', required=False, default=None), private=dict(type='bool', required=False, default=False), description=dict(type='str', required=False, default=''), + api_url=dict(type='str', required=False, default='https://api.github.com'), ) module = AnsibleModule( argument_spec=module_args, diff --git a/tests/unit/plugins/modules/source_control/github/test_github_repo.py b/tests/unit/plugins/modules/source_control/github/test_github_repo.py index 56ec9b7ec7..b3e4f9027f 100644 --- a/tests/unit/plugins/modules/source_control/github/test_github_repo.py +++ b/tests/unit/plugins/modules/source_control/github/test_github_repo.py @@ -159,7 +159,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": 
"Just for fun", "private": False, - "state": "present" + "state": "present", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], True) @@ -177,7 +178,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": "Just for fun", "private": True, - "state": "present" + "state": "present", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], True) self.assertEqual(result['repo']['private'], True) @@ -194,7 +196,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": "Just for fun", "private": True, - "state": "present" + "state": "present", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], True) self.assertEqual(result['repo']['private'], True) @@ -211,7 +214,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": "Just for fun", "private": False, - "state": "absent" + "state": "absent", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], True) @@ -227,7 +231,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": "Just for fun", "private": False, - "state": "absent" + "state": "absent", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], True) @@ -243,7 +248,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": "Just for fun", "private": True, - "state": "absent" + "state": "absent", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], False) From 32e9a0c25066099887dbd21679d1c83ac85c9f45 Mon Sep 17 00:00:00 2001 From: Jeffrey van Pelt Date: Thu, 22 Jul 2021 22:55:07 +0200 Subject: [PATCH 0213/2828] Proxmox inventory: Added snapshots fact (#3044) * Added snapshots fact * Added changelog * Made linter happy again * Processed feedback * Fix changelog type * Punctuation ;-) * Punctuation ;-), take 2 --- .../3044-proxmox-inventory-snapshots.yml | 2 ++ plugins/inventory/proxmox.py | 15 +++++++++++++-- 
tests/unit/plugins/inventory/test_proxmox.py | 16 ++++++++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3044-proxmox-inventory-snapshots.yml diff --git a/changelogs/fragments/3044-proxmox-inventory-snapshots.yml b/changelogs/fragments/3044-proxmox-inventory-snapshots.yml new file mode 100644 index 0000000000..d6a324ea30 --- /dev/null +++ b/changelogs/fragments/3044-proxmox-inventory-snapshots.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox inventory plugin - added snapshots to host facts (https://github.com/ansible-collections/community.general/pull/3044). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index c99962bcdd..f0f5a4e418 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -325,6 +325,15 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): status_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), status_key.lower())) self.inventory.set_variable(name, status_key, status) + def _get_vm_snapshots(self, node, vmid, vmtype, name): + ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid)) + + snapshots_key = 'snapshots' + snapshots_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), snapshots_key.lower())) + + snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current'] + self.inventory.set_variable(name, snapshots_key, snapshots) + def to_safe(self, word): '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups #> ProxmoxInventory.to_safe("foo-bar baz") @@ -393,9 +402,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): elif lxc['status'] == 'running': self.inventory.add_child(running_group, lxc['name']) - # get LXC config for facts + # get LXC config and snapshots for facts if self.get_option('want_facts'): self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name']) + 
self._get_vm_snapshots(node['node'], lxc['vmid'], 'lxc', lxc['name']) self._apply_constructable(lxc["name"], self.inventory.get_host(lxc['name']).get_vars()) @@ -417,9 +427,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): elif qemu['status'] == 'running': self.inventory.add_child(running_group, qemu['name']) - # get QEMU config for facts + # get QEMU config and snapshots for facts if self.get_option('want_facts'): self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name']) + self._get_vm_snapshots(node['node'], qemu['vmid'], 'qemu', qemu['name']) self._apply_constructable(qemu["name"], self.inventory.get_host(qemu['name']).get_vars()) diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index 87d47a3cff..12927551f8 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ b/tests/unit/plugins/inventory/test_proxmox.py @@ -522,6 +522,21 @@ def get_json(url): } +def get_vm_snapshots(node, vmtype, vmid, name): + return [ + {"description": "", + "name": "clean", + "snaptime": 1000, + "vmstate": 0 + }, + {"name": "current", + "digest": "1234689abcdf", + "running": 0, + "description": "You are here!", + "parent": "clean" + }] + + def get_vm_status(node, vmtype, vmid, name): return True @@ -549,6 +564,7 @@ def test_populate(inventory, mocker): inventory._get_auth = mocker.MagicMock(side_effect=get_auth) inventory._get_json = mocker.MagicMock(side_effect=get_json) inventory._get_vm_status = mocker.MagicMock(side_effect=get_vm_status) + inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots) inventory.get_option = mocker.MagicMock(side_effect=get_option) inventory._populate() From 35e0a612179b3e96be456bb2b200de8c449c30f5 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 23 Jul 2021 18:16:14 +1200 Subject: [PATCH 0214/2828] missed composer when created commments in ignore files (#3051) --- 
tests/sanity/ignore-2.10.txt | 2 +- tests/sanity/ignore-2.11.txt | 2 +- tests/sanity/ignore-2.12.txt | 2 +- tests/sanity/ignore-2.9.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index d01c3762dc..6060d0f2d7 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -18,7 +18,7 @@ plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 -plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid +plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 2106c993d3..7313abf061 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -17,7 +17,7 @@ plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 -plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid 
+plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index a30ff2e4ed..2ef7ced11e 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -17,7 +17,7 @@ plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 -plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid +plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 65611001b1..b2846cc863 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -11,7 +11,7 @@ plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:para plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul.py 
validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter -plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid +plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 From 9631de49396ab0706df4a38c1cbab6a779a125dc Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 23 Jul 2021 18:17:14 +1200 Subject: [PATCH 0215/2828] fixed doc in xfconf (#3050) --- plugins/modules/system/xfconf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index dc560e7775..e8aed0a759 100644 --- a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -14,7 +14,7 @@ author: - "Alexei Znamensky (@russoz)" short_description: Edit XFCE4 Configurations description: - - This module allows for the manipulation of Xfce 4 Configuration via + - This module allows for the manipulation of Xfce 4 Configuration with the help of xfconf-query. Please see the xfconf-query(1) man pages for more details. options: channel: From 99c28313e4fdfb4665cbaf2c6d35396c7fa1394d Mon Sep 17 00:00:00 2001 From: The Right Honourable Reverend Date: Sat, 24 Jul 2021 13:40:08 -0500 Subject: [PATCH 0216/2828] proxmox inventory plugin: Easy fix (#3052) * Don't know why this works but it does. Plugin was crashing on this line on Python 3.9.2 deployed on qemu image with debian bullseye. It doesn't crash anymore. 
* Create 3052_proxmox_inventory_plugin.yml * Update changelogs/fragments/3052_proxmox_inventory_plugin.yml Co-authored-by: Ajpantuso Co-authored-by: Ajpantuso --- changelogs/fragments/3052_proxmox_inventory_plugin.yml | 2 ++ plugins/inventory/proxmox.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3052_proxmox_inventory_plugin.yml diff --git a/changelogs/fragments/3052_proxmox_inventory_plugin.yml b/changelogs/fragments/3052_proxmox_inventory_plugin.yml new file mode 100644 index 0000000000..dfd4dddea9 --- /dev/null +++ b/changelogs/fragments/3052_proxmox_inventory_plugin.yml @@ -0,0 +1,2 @@ +bugfixes: + - proxmox inventory plugin - fixed plugin failure when a ``qemu`` guest has no ``template`` key (https://github.com/ansible-collections/community.general/pull/3052). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index f0f5a4e418..33a564f333 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -413,7 +413,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower())) self.inventory.add_group(node_qemu_group) for qemu in self._get_qemu_per_node(node['node']): - if qemu['template']: + if qemu.get('template'): continue self.inventory.add_host(qemu['name']) From d057b2e3b262af437b0115622069a60580919173 Mon Sep 17 00:00:00 2001 From: Max Bidlingmaier Date: Sat, 24 Jul 2021 21:13:09 +0200 Subject: [PATCH 0217/2828] gitlab_group_members/gitlab_project_members - fix pagination issue (#3054) * Fix * fixed linter stuff * typo in section name of changlog fragment Co-authored-by: Max Bidlingmaier --- .../3041-fix_gitlab_group_members_gitlab_project_mambers.yml | 3 +++ plugins/modules/source_control/gitlab/gitlab_group_members.py | 2 +- .../modules/source_control/gitlab/gitlab_project_members.py | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) 
create mode 100644 changelogs/fragments/3041-fix_gitlab_group_members_gitlab_project_mambers.yml diff --git a/changelogs/fragments/3041-fix_gitlab_group_members_gitlab_project_mambers.yml b/changelogs/fragments/3041-fix_gitlab_group_members_gitlab_project_mambers.yml new file mode 100644 index 0000000000..d1be8b78d3 --- /dev/null +++ b/changelogs/fragments/3041-fix_gitlab_group_members_gitlab_project_mambers.yml @@ -0,0 +1,3 @@ +bugfixes: + - gitlab_group_members - fixes issue when gitlab group has more then 20 members, pagination problem (https://github.com/ansible-collections/community.general/issues/3041). + - gitlab_project_members - fixes issue when gitlab group has more then 20 members, pagination problem (https://github.com/ansible-collections/community.general/issues/3041). diff --git a/plugins/modules/source_control/gitlab/gitlab_group_members.py b/plugins/modules/source_control/gitlab/gitlab_group_members.py index 8a3da2a41b..50779e6445 100644 --- a/plugins/modules/source_control/gitlab/gitlab_group_members.py +++ b/plugins/modules/source_control/gitlab/gitlab_group_members.py @@ -109,7 +109,7 @@ class GitLabGroup(object): # get all members in a group def get_members_in_a_group(self, gitlab_group_id): group = self._gitlab.groups.get(gitlab_group_id) - return group.members.list() + return group.members.list(all=True) # check if the user is a member of the group def is_user_a_member(self, members, gitlab_user_id): diff --git a/plugins/modules/source_control/gitlab/gitlab_project_members.py b/plugins/modules/source_control/gitlab/gitlab_project_members.py index 8e23dca426..0ae8f4b25c 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project_members.py +++ b/plugins/modules/source_control/gitlab/gitlab_project_members.py @@ -130,7 +130,7 @@ class GitLabProjectMembers(object): # get all members in a project def get_members_in_a_project(self, gitlab_project_id): project = self._gitlab.projects.get(gitlab_project_id) - return project.members.list() + 
return project.members.list(all=True) # check if the user is a member of the project def is_user_a_member(self, members, gitlab_user_id): From 31189e96458f199d1cd0a1c384057a2a85a5ff8d Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sat, 24 Jul 2021 16:10:56 -0400 Subject: [PATCH 0218/2828] archive - fixing determination of archive root when root is '/' (#3036) * Initial commit * Fixing units and path joins * Ensuring paths are consistently ordered * Adding changelog fragment * Using os.path.join to ensure trailing slashes are present * optimizing use of root in add_targets * Applying initial review suggestions --- .../fragments/3036-archive-root-path-fix.yml | 4 + plugins/modules/files/archive.py | 95 +++++++++---------- .../plugins/modules/files/test_archive.py | 73 ++++++++++++++ 3 files changed, 122 insertions(+), 50 deletions(-) create mode 100644 changelogs/fragments/3036-archive-root-path-fix.yml create mode 100644 tests/unit/plugins/modules/files/test_archive.py diff --git a/changelogs/fragments/3036-archive-root-path-fix.yml b/changelogs/fragments/3036-archive-root-path-fix.yml new file mode 100644 index 0000000000..fa460f82b9 --- /dev/null +++ b/changelogs/fragments/3036-archive-root-path-fix.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - archive - fixing archive root determination when longest common root is ``/`` + (https://github.com/ansible-collections/community.general/pull/3036). 
diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 91a8f688f5..30c4de5aa8 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -204,7 +204,6 @@ else: LZMA_IMP_ERR = format_exc() HAS_LZMA = False -PATH_SEP = to_bytes(os.sep) PY27 = version_info[0:2] >= (2, 7) STATE_ABSENT = 'absent' @@ -213,16 +212,12 @@ STATE_COMPRESSED = 'compress' STATE_INCOMPLETE = 'incomplete' -def _to_bytes(s): - return to_bytes(s, errors='surrogate_or_strict') +def common_path(paths): + empty = b'' if paths and isinstance(paths[0], six.binary_type) else '' - -def _to_native(s): - return to_native(s, errors='surrogate_or_strict') - - -def _to_native_ascii(s): - return to_native(s, errors='surrogate_or_strict', encoding='ascii') + return os.path.join( + os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty + ) def expand_paths(paths): @@ -239,10 +234,6 @@ def expand_paths(paths): return expanded_path, is_globby -def is_archive(path): - return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE) - - def legacy_filter(path, exclusion_patterns): return matches_exclusion_patterns(path, exclusion_patterns) @@ -251,6 +242,26 @@ def matches_exclusion_patterns(path, exclusion_patterns): return any(fnmatch(path, p) for p in exclusion_patterns) +def is_archive(path): + return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE) + + +def strip_prefix(prefix, string): + return string[len(prefix):] if string.startswith(prefix) else string + + +def _to_bytes(s): + return to_bytes(s, errors='surrogate_or_strict') + + +def _to_native(s): + return to_native(s, errors='surrogate_or_strict') + + +def _to_native_ascii(s): + return to_native(s, errors='surrogate_or_strict', encoding='ascii') + + @six.add_metaclass(abc.ABCMeta) class Archive(object): def __init__(self, module): @@ -266,7 +277,6 @@ class Archive(object): 
self.destination_state = STATE_ABSENT self.errors = [] self.file = None - self.root = b'' self.successes = [] self.targets = [] self.not_found = [] @@ -275,7 +285,7 @@ class Archive(object): self.expanded_paths, has_globs = expand_paths(paths) self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0] - self.paths = list(set(self.expanded_paths) - set(self.expanded_exclude_paths)) + self.paths = sorted(set(self.expanded_paths) - set(self.expanded_exclude_paths)) if not self.paths: module.fail_json( @@ -285,6 +295,8 @@ class Archive(object): msg='Error, no source paths were found' ) + self.root = common_path(self.paths) + if not self.must_archive: self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1]) @@ -298,6 +310,9 @@ class Archive(object): msg='Error, must specify "dest" when archiving multiple files or trees' ) + if self.remove: + self._check_removal_safety() + self.original_size = self.destination_size() def add(self, path, archive_name): @@ -310,9 +325,8 @@ class Archive(object): def add_single_target(self, path): if self.format in ('zip', 'tar'): - archive_name = re.sub(br'^%s' % re.escape(self.root), b'', path) self.open() - self.add(path, archive_name) + self.add(path, strip_prefix(self.root, path)) self.close() self.destination_state = STATE_ARCHIVED else: @@ -333,25 +347,18 @@ class Archive(object): def add_targets(self): self.open() try: - match_root = re.compile(br'^%s' % re.escape(self.root)) for target in self.targets: if os.path.isdir(target): for directory_path, directory_names, file_names in os.walk(target, topdown=True): - if not directory_path.endswith(PATH_SEP): - directory_path += PATH_SEP - for directory_name in directory_names: - full_path = directory_path + directory_name - archive_name = match_root.sub(b'', full_path) - self.add(full_path, archive_name) + full_path = os.path.join(directory_path, directory_name) + self.add(full_path, strip_prefix(self.root, full_path)) for file_name in 
file_names: - full_path = directory_path + file_name - archive_name = match_root.sub(b'', full_path) - self.add(full_path, archive_name) + full_path = os.path.join(directory_path, file_name) + self.add(full_path, strip_prefix(self.root, full_path)) else: - archive_name = match_root.sub(b'', target) - self.add(target, archive_name) + self.add(target, strip_prefix(self.root, target)) except Exception as e: if self.format in ('zip', 'tar'): archive_format = self.format @@ -384,26 +391,6 @@ class Archive(object): def find_targets(self): for path in self.paths: - # Use the longest common directory name among all the files as the archive root path - if self.root == b'': - self.root = os.path.dirname(path) + PATH_SEP - else: - for i in range(len(self.root)): - if path[i] != self.root[i]: - break - - if i < len(self.root): - self.root = os.path.dirname(self.root[0:i + 1]) - - self.root += PATH_SEP - # Don't allow archives to be created anywhere within paths to be removed - if self.remove and os.path.isdir(path): - prefix = path if path.endswith(PATH_SEP) else path + PATH_SEP - if self.destination.startswith(prefix): - self.module.fail_json( - path=', '.join(self.paths), - msg='Error, created archive can not be contained in source paths when remove=true' - ) if not os.path.lexists(path): self.not_found.append(path) else: @@ -470,6 +457,14 @@ class Archive(object): 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths], } + def _check_removal_safety(self): + for path in self.paths: + if os.path.isdir(path) and self.destination.startswith(os.path.join(path, b'')): + self.module.fail_json( + path=b', '.join(self.paths), + msg='Error, created archive can not be contained in source paths when remove=true' + ) + def _open_compressed_file(self, path, mode): f = None if self.format == 'gz': diff --git a/tests/unit/plugins/modules/files/test_archive.py b/tests/unit/plugins/modules/files/test_archive.py new file mode 100644 index 0000000000..9fae51e7b7 --- 
/dev/null +++ b/tests/unit/plugins/modules/files/test_archive.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.tests.unit.compat.mock import Mock, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args +from ansible_collections.community.general.plugins.modules.files.archive import get_archive, common_path + + +class TestArchive(ModuleTestCase): + def setUp(self): + super(TestArchive, self).setUp() + + self.mock_os_path_isdir = patch('os.path.isdir') + self.os_path_isdir = self.mock_os_path_isdir.start() + + def tearDown(self): + self.os_path_isdir = self.mock_os_path_isdir.stop() + + def test_archive_removal_safety(self): + set_module_args( + dict( + path=['/foo', '/bar', '/baz'], + dest='/foo/destination.tgz', + remove=True + ) + ) + + module = AnsibleModule( + argument_spec=dict( + path=dict(type='list', elements='path', required=True), + format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), + dest=dict(type='path'), + exclude_path=dict(type='list', elements='path', default=[]), + exclusion_patterns=dict(type='list', elements='path'), + force_archive=dict(type='bool', default=False), + remove=dict(type='bool', default=False), + ), + add_file_common_args=True, + supports_check_mode=True, + ) + + self.os_path_isdir.side_effect = [True, False, False, True] + + module.fail_json = Mock() + + archive = get_archive(module) + + module.fail_json.assert_called_once_with( + path=b', '.join(archive.paths), + msg='Error, created archive can not be contained in source paths when remove=true' + ) + + +PATHS = ( + ([], ''), + (['/'], '/'), + ([b'/'], b'/'), + (['/foo', '/bar', '/baz', 
'/foobar', '/barbaz', '/foo/bar'], '/'), + ([b'/foo', b'/bar', b'/baz', b'/foobar', b'/barbaz', b'/foo/bar'], b'/'), + (['/foo/bar/baz', '/foo/bar'], '/foo/'), + (['/foo/bar/baz', '/foo/bar/'], '/foo/bar/'), +) + + +@pytest.mark.parametrize("paths,root", PATHS) +def test_common_path(paths, root): + assert common_path(paths) == root From 20f46f76697d96fa2752d46e240f477069abd35c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 25 Jul 2021 08:30:46 +1200 Subject: [PATCH 0219/2828] xfconf_info - new module (#3045) * xfconf_info initial commit * Update plugins/modules/system/xfconf_info.py Co-authored-by: Felix Fontein * Update plugins/modules/system/xfconf_info.py Co-authored-by: Felix Fontein * Update plugins/modules/system/xfconf_info.py Co-authored-by: Felix Fontein * added register to all examples * Update plugins/modules/system/xfconf_info.py Co-authored-by: Felix Fontein --- plugins/modules/system/xfconf_info.py | 190 ++++++++++++++++++ plugins/modules/xfconf_info.py | 1 + .../modules/system/test_xfconf_info.py | 171 ++++++++++++++++ 3 files changed, 362 insertions(+) create mode 100644 plugins/modules/system/xfconf_info.py create mode 120000 plugins/modules/xfconf_info.py create mode 100644 tests/unit/plugins/modules/system/test_xfconf_info.py diff --git a/plugins/modules/system/xfconf_info.py b/plugins/modules/system/xfconf_info.py new file mode 100644 index 0000000000..9cef821071 --- /dev/null +++ b/plugins/modules/system/xfconf_info.py @@ -0,0 +1,190 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: xfconf_info +author: + - "Alexei Znamensky (@russoz)" +short_description: Retrieve XFCE4 configurations +version_added: 3.5.0 +description: + - This module allows retrieving 
Xfce 4 configurations with the help of C(xfconf-query). +options: + channel: + description: + - > + A Xfconf preference channel is a top-level tree key, inside of the + Xfconf repository that corresponds to the location for which all + application properties/keys are stored. + - If not provided, the module will list all available channels. + type: str + property: + description: + - > + A Xfce preference key is an element in the Xfconf repository + that corresponds to an application preference. + - If provided, then I(channel) is required. + - If not provided and a I(channel) is provided, then the module will list all available properties in that I(channel). + type: str +notes: + - See man xfconf-query(1) for more details. +''' + +EXAMPLES = """ +- name: Get list of all available channels + community.general.xfconf_info: {} + register: result + +- name: Get list of all properties in a specific channel + community.general.xfconf_info: + channel: xsettings + register: result + +- name: Retrieve the DPI value + community.general.xfconf_info: + channel: xsettings + property: /Xft/DPI + register: result + +- name: Get workspace names (4) + community.general.xfconf_info: + channel: xfwm4 + property: /general/workspace_names + register: result +""" + +RETURN = ''' + channels: + description: + - List of available channels. + - Returned when the module receives no parameter at all. + returned: success + type: list + elements: str + sample: + - xfce4-desktop + - displays + - xsettings + - xfwm4 + properties: + description: + - List of available properties for a specific channel. + - Returned by passed only the I(channel) parameter to the module. 
+ returned: success + type: list + elements: str + sample: + - /Gdk/WindowScalingFactor + - /Gtk/ButtonImages + - /Gtk/CursorThemeSize + - /Gtk/DecorationLayout + - /Gtk/FontName + - /Gtk/MenuImages + - /Gtk/MonospaceFontName + - /Net/DoubleClickTime + - /Net/IconThemeName + - /Net/ThemeName + - /Xft/Antialias + - /Xft/Hinting + - /Xft/HintStyle + - /Xft/RGBA + is_array: + description: + - Flag indicating whether the property is an array or not. + returned: success + type: bool + value: + description: + - The value of the property. Empty if the property is of array type. + returned: success + type: str + sample: Monospace 10 + value_array: + description: + - The array value of the property. Empty if the property is not of array type. + returned: success + type: list + elements: str + sample: + - Main + - Work + - Tmp +''' + +from ansible_collections.community.general.plugins.module_utils.module_helper import CmdModuleHelper, ArgFormat + + +class XFConfException(Exception): + pass + + +class XFConfInfo(CmdModuleHelper): + module = dict( + argument_spec=dict( + channel=dict(type='str'), + property=dict(type='str'), + ), + required_by=dict( + property=['channel'] + ), + ) + + command = 'xfconf-query' + command_args_formats = dict( + channel=dict(fmt=['--channel', '{0}']), + property=dict(fmt=['--property', '{0}']), + _list_arg=dict(fmt="--list", style=ArgFormat.BOOLEAN), + ) + check_rc = True + + def __init_module__(self): + self.vars.set("_list_arg", False, output=False) + self.vars.set("is_array", False) + + def process_command_output(self, rc, out, err): + result = out.rstrip() + if "Value is an array with" in result: + result = result.split("\n") + result.pop(0) + result.pop(0) + self.vars.is_array = True + + return result + + def _process_list_properties(self, rc, out, err): + return out.splitlines() + + def _process_list_channels(self, rc, out, err): + lines = out.splitlines() + lines.pop(0) + lines = [s.lstrip() for s in lines] + return lines + + def 
__run__(self): + self.vars._list_arg = not (bool(self.vars.channel) and bool(self.vars.property)) + output = 'value' + proc = self.process_command_output + if self.vars.channel is None: + output = 'channels' + proc = self._process_list_channels + elif self.vars.property is None: + output = 'properties' + proc = self._process_list_properties + result = self.run_command(params=('_list_arg', 'channel', 'property'), process_output=proc) + if not self.vars._list_arg and self.vars.is_array: + output = "value_array" + self.vars.set(output, result) + + +def main(): + xfconf = XFConfInfo() + xfconf.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/xfconf_info.py b/plugins/modules/xfconf_info.py new file mode 120000 index 0000000000..5bf95b50b5 --- /dev/null +++ b/plugins/modules/xfconf_info.py @@ -0,0 +1 @@ +system/xfconf_info.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/system/test_xfconf_info.py b/tests/unit/plugins/modules/system/test_xfconf_info.py new file mode 100644 index 0000000000..528622d0ee --- /dev/null +++ b/tests/unit/plugins/modules/system/test_xfconf_info.py @@ -0,0 +1,171 @@ +# Author: Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from ansible_collections.community.general.plugins.modules.system import xfconf_info + +import pytest + +TESTED_MODULE = xfconf_info.__name__ + + +@pytest.fixture +def patch_xfconf_info(mocker): + """ + Function used for mocking some parts of redhat_subscribtion module + """ + mocker.patch('ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.get_bin_path', + return_value='/testbin/xfconf-query') + + +TEST_CASES = [ + [ + {'channel': 'xfwm4', 'property': '/general/inactive_opacity'}, + { + 'id': 'test_simple_property_get', + 'run_command.calls': [ + ( + 
# Calling of following command will be asserted + ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'], + # Was return code checked? + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, + # Mock of returned code, stdout and stderr + (0, '100\n', '',), + ), + ], + 'is_array': False, + 'value': '100', + } + ], + [ + {'channel': 'xfwm4', 'property': '/general/i_dont_exist'}, + { + 'id': 'test_simple_property_get_nonexistent', + 'run_command.calls': [ + ( + # Calling of following command will be asserted + ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/i_dont_exist'], + # Was return code checked? + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, + # Mock of returned code, stdout and stderr + (1, '', 'Property "/general/i_dont_exist" does not exist on channel "xfwm4".\n',), + ), + ], + 'is_array': False, + } + ], + [ + {'property': '/general/i_dont_exist'}, + { + 'id': 'test_property_no_channel', + 'run_command.calls': [], + } + ], + [ + {'channel': 'xfwm4', 'property': '/general/workspace_names'}, + { + 'id': 'test_property_get_array', + 'run_command.calls': [ + ( + # Calling of following command will be asserted + ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'], + # Was return code checked? + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, + # Mock of returned code, stdout and stderr + (0, 'Value is an array with 3 items:\n\nMain\nWork\nTmp\n', '',), + ), + ], + 'is_array': True, + 'value_array': ['Main', 'Work', 'Tmp'], + }, + ], + [ + {}, + { + 'id': 'get_channels', + 'run_command.calls': [ + ( + # Calling of following command will be asserted + ['/testbin/xfconf-query', '--list'], + # Was return code checked? 
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, + # Mock of returned code, stdout and stderr + (0, 'Channels:\n a\n b\n c\n', '',), + ), + ], + 'is_array': False, + 'channels': ['a', 'b', 'c'], + }, + ], + [ + {'channel': 'xfwm4'}, + { + 'id': 'get_properties', + 'run_command.calls': [ + ( + # Calling of following command will be asserted + ['/testbin/xfconf-query', '--list', '--channel', 'xfwm4'], + # Was return code checked? + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, + # Mock of returned code, stdout and stderr + (0, '/general/wrap_cycle\n/general/wrap_layout\n/general/wrap_resistance\n/general/wrap_windows\n' + '/general/wrap_workspaces\n/general/zoom_desktop\n', '',), + ), + ], + 'is_array': False, + 'properties': [ + '/general/wrap_cycle', + '/general/wrap_layout', + '/general/wrap_resistance', + '/general/wrap_windows', + '/general/wrap_workspaces', + '/general/zoom_desktop', + ], + }, + ], +] +TEST_CASES_IDS = [item[1]['id'] for item in TEST_CASES] + + +@pytest.mark.parametrize('patch_ansible_module, testcase', + TEST_CASES, + ids=TEST_CASES_IDS, + indirect=['patch_ansible_module']) +@pytest.mark.usefixtures('patch_ansible_module') +def test_xfconf_info(mocker, capfd, patch_xfconf_info, testcase): + """ + Run unit tests for test cases listen in TEST_CASES + """ + + # Mock function used for running commands first + call_results = [item[2] for item in testcase['run_command.calls']] + mock_run_command = mocker.patch( + 'ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.run_command', + side_effect=call_results) + + # Try to run test case + with pytest.raises(SystemExit): + xfconf_info.main() + + out, err = capfd.readouterr() + results = json.loads(out) + print("testcase =\n%s" % testcase) + print("results =\n%s" % results) + + for conditional_test_result in ('value_array', 'value', 'is_array', 'properties', 'channels'): + if conditional_test_result in testcase: + 
assert conditional_test_result in results, "'{0}' not found in {1}".format(conditional_test_result, results) + assert results[conditional_test_result] == testcase[conditional_test_result], \ + "'{0}': '{1}' != '{2}'".format(conditional_test_result, results[conditional_test_result], testcase[conditional_test_result]) + + assert mock_run_command.call_count == len(testcase['run_command.calls']) + if mock_run_command.call_count: + call_args_list = [(item[0][0], item[1]) for item in mock_run_command.call_args_list] + expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']] + print("call args list =\n%s" % call_args_list) + print("expected args list =\n%s" % expected_call_args_list) + assert call_args_list == expected_call_args_list From dc3e16113d09e32fb750632b79ef224fccc8feaf Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 25 Jul 2021 10:00:10 +0200 Subject: [PATCH 0220/2828] Add BOTMETA extra sanity test (#3064) * Add BOTMETA sanity test. * Make compile with Python 2.6. 
--- .github/BOTMETA.yml | 7 +- tests/sanity/extra/botmeta.json | 11 ++ tests/sanity/extra/botmeta.py | 184 ++++++++++++++++++++++++++++++++ 3 files changed, 199 insertions(+), 3 deletions(-) create mode 100644 tests/sanity/extra/botmeta.json create mode 100755 tests/sanity/extra/botmeta.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 8df7297720..55f34d3041 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -159,11 +159,13 @@ files: $module_utils/redfish_utils.py: maintainers: $team_redfish labels: redfish_utils - $module_utils/remote_management/lxca/common.py: navalkp prabhosa + $module_utils/remote_management/lxca/common.py: + maintainers: navalkp prabhosa $module_utils/scaleway.py: maintainers: $team_scaleway labels: cloud scaleway - $module_utils/storage/hpe3par/hpe3par.py: farhan7500 gautamphegde + $module_utils/storage/hpe3par/hpe3par.py: + maintainers: farhan7500 gautamphegde $module_utils/utm_utils.py: maintainers: $team_e_spirit labels: utm_utils @@ -508,7 +510,6 @@ files: $modules/notification/osx_say.py: maintainers: ansible mpdehaan labels: _osx_say - deprecated: true $modules/notification/bearychat.py: maintainers: tonyseek $modules/notification/campfire.py: diff --git a/tests/sanity/extra/botmeta.json b/tests/sanity/extra/botmeta.json new file mode 100644 index 0000000000..cba49c90cd --- /dev/null +++ b/tests/sanity/extra/botmeta.json @@ -0,0 +1,11 @@ +{ + "include_symlinks": false, + "prefixes": [ + ".github/BOTMETA.yml" + ], + "output": "path-line-column-message", + "requirements": [ + "PyYAML", + "voluptuous==0.12.1" + ] +} diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py new file mode 100755 index 0000000000..e8ea819394 --- /dev/null +++ b/tests/sanity/extra/botmeta.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Check BOTMETA file.""" +from __future__ import 
(absolute_import, division, print_function) +__metaclass__ = type + +import ast +import os +import re +import sys + +import yaml + +from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA +from voluptuous import Required, Schema, Invalid +from voluptuous.humanize import humanize_error + + +REPORT_MISSING_MAINTAINERS = False + +FILENAME = '.github/BOTMETA.yml' + +LIST_ENTRIES = frozenset(('supershipit', 'maintainers', 'labels', 'keywords', 'notify', 'ignore')) + +AUTHOR_REGEX = re.compile(r'^\w.*\(@([\w-]+)\)(?![\w.])$') + + +def read_authors(filename): + data = {} + try: + with open(filename, 'rb') as b_module_data: + M = ast.parse(b_module_data.read()) + + for child in M.body: + if isinstance(child, ast.Assign): + for t in child.targets: + try: + theid = t.id + except AttributeError: + # skip errors can happen when trying to use the normal code + continue + + if theid == 'DOCUMENTATION': + if isinstance(child.value, ast.Dict): + data = ast.literal_eval(child.value) + else: + data = yaml.safe_load(child.value.s) + + except Exception as e: + print('%s:%d:%d: Cannot load DOCUMENTATION: %s' % (filename, 0, 0, e)) + return [] + + author = data.get('author') or [] + if isinstance(author, str): + author = [author] + return author + + +def validate(filename, filedata): + if filename.startswith('plugins/doc_fragments/'): + return + # Compile lis tof all active and inactive maintainers + all_maintainers = filedata['maintainers'] + filedata['ignore'] + if not all_maintainers: + if REPORT_MISSING_MAINTAINERS: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename)) + return + if filename.startswith('plugins/filter/'): + return + maintainers = read_authors(filename) + for maintainer in maintainers: + m = AUTHOR_REGEX.match(maintainer) + if m: + maintainer = m.group(1) + if maintainer not in all_maintainers: + msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( + maintainer, 
filename, ', '.join(all_maintainers)) + if REPORT_MISSING_MAINTAINERS: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) + + +def main(): + """Main entry point.""" + paths = sys.argv[1:] or sys.stdin.read().splitlines() + paths = [path for path in paths if path.endswith('/aliases')] + + try: + with open(FILENAME, 'rb') as f: + botmeta = yaml.safe_load(f) + except yaml.error.MarkedYAMLError as ex: + print('%s:%d:%d: YAML load failed: %s' % (FILENAME, ex.context_mark.line + + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex)))) + return + except Exception as ex: # pylint: disable=broad-except + print('%s:%d:%d: YAML load failed: %s' % + (FILENAME, 0, 0, re.sub(r'\s+', ' ', str(ex)))) + return + + # Validate schema + + MacroSchema = Schema({ + (str): str, + }, extra=PREVENT_EXTRA) + + FilesSchema = Schema({ + (str): { + ('supershipit'): str, + ('support'): Any('community'), + ('maintainers'): str, + ('labels'): str, + ('keywords'): str, + ('notify'): str, + ('ignore'): str, + }, + }, extra=PREVENT_EXTRA) + + schema = Schema({ + ('automerge'): bool, + ('macros'): MacroSchema, + ('files'): FilesSchema, + }, extra=PREVENT_EXTRA) + + try: + schema(botmeta) + except MultipleInvalid as ex: + for error in ex.errors: + # No way to get line/column numbers + print('%s:%d:%d: %s' % (FILENAME, 0, 0, humanize_error(botmeta, error))) + return + + # Preprocess (substitute macros, convert to lists) + macros = botmeta.get('macros') or {} + macro_re = re.compile(r'\$([a-zA-Z_]+)') + + def convert_macros(text, macros): + def f(m): + return macros[m.group(1)] + + return macro_re.sub(f, text) + + files = {} + try: + for file, filedata in (botmeta.get('files') or {}).items(): + file = convert_macros(file, macros) + filedata = dict((k, convert_macros(v, macros)) for k, v in filedata.items()) + files[file] = filedata + for k, v in filedata.items(): + if k in LIST_ENTRIES: + filedata[k] = v.split() + except KeyError as e: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Found unknown macro 
%s' % e)) + return + + # Scan all files + for dirpath, dirnames, filenames in os.walk('plugins/'): + for file in filenames: + if file.endswith('.pyc'): + continue + filename = os.path.join(dirpath, file) + if os.path.islink(filename): + continue + if os.path.isfile(filename): + matching_files = [] + for file, filedata in files.items(): + if filename.startswith(file): + matching_files.append((file, filedata)) + if not matching_files: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Did not find any entry for %s' % filename)) + + matching_files.sort(key=lambda kv: kv[0]) + filedata = dict() + for k in LIST_ENTRIES: + filedata[k] = [] + for dummy, data in matching_files: + for k, v in data.items(): + if k in LIST_ENTRIES: + v = filedata[k] + v + filedata[k] = v + validate(filename, filedata) + + +if __name__ == '__main__': + main() From d54d2fa4a6ae305b452246b6255dd0d1aaf6bb12 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 25 Jul 2021 21:14:30 +1200 Subject: [PATCH 0221/2828] xfconf - deprecate get state in favour of the xfconf_info module (#3049) * Deprecate get state in favour of the xfconf_info module * added changelog fragment * added comments in ignore files * Update changelogs/fragments/3049-xfconf-deprecate-get.yaml bummer, forgot that Co-authored-by: Felix Fontein * Update plugins/modules/system/xfconf.py Co-authored-by: Felix Fontein * Update plugins/modules/system/xfconf.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/3049-xfconf-deprecate-get.yaml | 2 ++ plugins/modules/system/xfconf.py | 5 +++++ tests/sanity/ignore-2.10.txt | 2 +- tests/sanity/ignore-2.11.txt | 2 +- tests/sanity/ignore-2.12.txt | 2 +- 5 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/3049-xfconf-deprecate-get.yaml diff --git a/changelogs/fragments/3049-xfconf-deprecate-get.yaml b/changelogs/fragments/3049-xfconf-deprecate-get.yaml new file mode 100644 index 
0000000000..359b39301e --- /dev/null +++ b/changelogs/fragments/3049-xfconf-deprecate-get.yaml @@ -0,0 +1,2 @@ +deprecated_features: + - xfconf - deprecate the ``get`` state. The new module ``xfconf_info`` should be used instead (https://github.com/ansible-collections/community.general/pull/3049). diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index e8aed0a759..001613fc23 100644 --- a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -48,6 +48,7 @@ options: type: str description: - The action to take upon the property/value. + - State C(get) is deprecated and will be removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead. choices: [ get, present, absent ] default: "present" force_array: @@ -225,6 +226,10 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): def state_get(self): self.vars.value = self.vars.previous_value self.vars.previous_value = None + self.module.deprecate( + msg="State 'get' is deprecated. 
Please use the module community.general.xfconf_info instead", + version="5.0.0", collection_name="community.general" + ) def state_absent(self): if not self.module.check_mode: diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 6060d0f2d7..9e6c483071 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -47,7 +47,7 @@ plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spe plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path -plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice +plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice # state get removed in 5.0.0 plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 7313abf061..79a90853f2 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -46,7 +46,7 @@ plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spe plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path -plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice +plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice # state get removed in 5.0.0 
plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 2ef7ced11e..4d1d5a783c 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -46,6 +46,6 @@ plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spe plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path -plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice +plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice # state get removed in 5.0.0 plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path From 20db4fc5604472f9014d670974e33e7e9d704276 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 25 Jul 2021 23:53:38 +1200 Subject: [PATCH 0222/2828] replace NBSP (non-blocking space) character with a regular SPACE char (#3071) --- plugins/modules/monitoring/stackdriver.py | 6 +++--- .../targets/alternatives/tasks/main.yml | 4 ++-- .../targets/alternatives/tasks/setup_test.yml | 4 ++-- .../targets/alternatives/tasks/test.yml | 8 ++++---- .../alternatives/tasks/tests_set_priority.yml | 6 +++--- .../integration/targets/filesystem/tasks/main.yml | 4 ++-- .../integration/targets/filesystem/tasks/setup.yml | 2 +- .../integration/targets/npm/tasks/no_bin_links.yml | 10 +++++----- tests/integration/targets/npm/tasks/test.yml | 10 +++++----- 
.../targets/setup_openldap/tasks/main.yml | 4 ++-- .../targets/setup_postgresql_db/tasks/main.yml | 2 +- .../targets/supervisorctl/tasks/install_Linux.yml | 2 +- .../targets/supervisorctl/tasks/main.yml | 4 ++-- .../supervisorctl/tasks/start_supervisord.yml | 2 +- .../supervisorctl/tasks/stop_supervisord.yml | 2 +- .../targets/supervisorctl/tasks/test.yml | 2 +- .../targets/supervisorctl/tasks/test_start.yml | 14 +++++++------- .../targets/supervisorctl/tasks/test_stop.yml | 2 +- .../supervisorctl/templates/supervisord.conf | 2 +- 19 files changed, 45 insertions(+), 45 deletions(-) diff --git a/plugins/modules/monitoring/stackdriver.py b/plugins/modules/monitoring/stackdriver.py index 8eacdbfe49..fa6bacb951 100644 --- a/plugins/modules/monitoring/stackdriver.py +++ b/plugins/modules/monitoring/stackdriver.py @@ -46,16 +46,16 @@ options: msg: type: str description: - - The contents of the annotation message, in plain text.  Limited to 256 characters. Required for annotation. + - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation. annotated_by: type: str description: - - The person or robot who the annotation should be attributed to. + - The person or robot who the annotation should be attributed to. default: "Ansible" level: type: str description: - - one of INFO/WARN/ERROR, defaults to INFO if not supplied.  May affect display. + - one of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display. 
choices: ['INFO', 'WARN', 'ERROR'] default: 'INFO' instance_id: diff --git a/tests/integration/targets/alternatives/tasks/main.yml b/tests/integration/targets/alternatives/tasks/main.yml index 3dc799df3e..3503afe1b3 100644 --- a/tests/integration/targets/alternatives/tasks/main.yml +++ b/tests/integration/targets/alternatives/tasks/main.yml @@ -12,7 +12,7 @@ ############## # Test parameters: - # link parameter present / absent ('with_link' variable) + # link parameter present / absent ('with_link' variable) # with / without alternatives defined in alternatives file ('with_alternatives' variable) # auto / manual ('mode' variable) @@ -56,7 +56,7 @@ path: '{{ item }}' state: absent with_items: - - '{{ alternatives_dir }}/dummy' + - '{{ alternatives_dir }}/dummy' - file: path: '/usr/bin/dummy{{ item }}' diff --git a/tests/integration/targets/alternatives/tasks/setup_test.yml b/tests/integration/targets/alternatives/tasks/setup_test.yml index 6a55c6ba7e..4475514745 100644 --- a/tests/integration/targets/alternatives/tasks/setup_test.yml +++ b/tests/integration/targets/alternatives/tasks/setup_test.yml @@ -1,11 +1,11 @@ - template: src: dummy_alternative - dest: '{{ alternatives_dir }}/dummy' + dest: '{{ alternatives_dir }}/dummy' owner: root group: root mode: '0644' when: with_alternatives or ansible_os_family != 'RedHat' - file: - path: '{{ alternatives_dir }}/dummy' + path: '{{ alternatives_dir }}/dummy' state: absent when: not with_alternatives and ansible_os_family == 'RedHat' diff --git a/tests/integration/targets/alternatives/tasks/test.yml b/tests/integration/targets/alternatives/tasks/test.yml index e5cf2d99cc..92721a995d 100644 --- a/tests/integration/targets/alternatives/tasks/test.yml +++ b/tests/integration/targets/alternatives/tasks/test.yml @@ -5,7 +5,7 @@ - name: set alternative (using link parameter) alternatives: name: dummy - path: '/usr/bin/dummy{{ item }}' + path: '/usr/bin/dummy{{ item }}' link: '/usr/bin/dummy' register: alternative @@ -20,7 +20,7 
@@ - name: set alternative (without link parameter) alternatives: name: dummy - path: '/usr/bin/dummy{{ item }}' + path: '/usr/bin/dummy{{ item }}' register: alternative - name: check expected command was executed @@ -40,11 +40,11 @@ - 'cmd.stdout == "dummy" ~ item' - name: 'check mode (manual: alternatives file existed, it has been updated)' - shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"' + shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"' when: ansible_os_family != 'RedHat' or with_alternatives or item != 1 - name: 'check mode (auto: alternatives file didn''t exist, it has been created)' - shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^auto$"' + shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^auto$"' when: ansible_os_family == 'RedHat' and not with_alternatives and item == 1 - name: check that alternative has been updated diff --git a/tests/integration/targets/alternatives/tasks/tests_set_priority.yml b/tests/integration/targets/alternatives/tasks/tests_set_priority.yml index 7e27817583..ab79f62a3c 100644 --- a/tests/integration/targets/alternatives/tasks/tests_set_priority.yml +++ b/tests/integration/targets/alternatives/tasks/tests_set_priority.yml @@ -3,7 +3,7 @@ name: dummy path: '/usr/bin/dummy{{ item }}' link: /usr/bin/dummy - priority: '{{ 60 + item|int }}' + priority: '{{ 60 + item|int }}' register: alternative - name: execute dummy command @@ -11,13 +11,13 @@ register: cmd - name: check if link group is in manual mode - shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"' + shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"' - name: check expected command was executed assert: that: - 'alternative is changed' - - 'cmd.stdout == "dummy{{ item }}"' + - 'cmd.stdout == "dummy{{ item }}"' - name: check that alternative has been updated command: "grep -Pzq '/bin/dummy{{ item }}\\n{{ 60 + item|int }}' '{{ alternatives_dir }}/dummy'" diff --git 
a/tests/integration/targets/filesystem/tasks/main.yml b/tests/integration/targets/filesystem/tasks/main.yml index 4b2c5bdc2a..24259107fd 100644 --- a/tests/integration/targets/filesystem/tasks/main.yml +++ b/tests/integration/targets/filesystem/tasks/main.yml @@ -5,9 +5,9 @@ #################################################################### - ansible.builtin.debug: - msg: '{{ role_name }}' + msg: '{{ role_name }}' - ansible.builtin.debug: - msg: '{{ role_path|basename }}' + msg: '{{ role_path|basename }}' - import_tasks: setup.yml - include_vars: "{{ lookup('first_found', search) }}" diff --git a/tests/integration/targets/filesystem/tasks/setup.yml b/tests/integration/targets/filesystem/tasks/setup.yml index 9ca4b983d0..597692e25a 100644 --- a/tests/integration/targets/filesystem/tasks/setup.yml +++ b/tests/integration/targets/filesystem/tasks/setup.yml @@ -100,7 +100,7 @@ - name: "Install dosfstools and lvm2 (Linux)" ansible.builtin.package: - name: '{{ item }}' + name: '{{ item }}' with_items: - dosfstools - lvm2 diff --git a/tests/integration/targets/npm/tasks/no_bin_links.yml b/tests/integration/targets/npm/tasks/no_bin_links.yml index fdbc88c4eb..5c89f70517 100644 --- a/tests/integration/targets/npm/tasks/no_bin_links.yml +++ b/tests/integration/targets/npm/tasks/no_bin_links.yml @@ -6,7 +6,7 @@ - vars: # sample: node-v8.2.0-linux-x64.tar.xz - node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin' + node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin' package: 'ncp' block: - shell: npm --version @@ -20,12 +20,12 @@ - name: 'Install simple package with no_bin_links disabled' npm: path: '{{ remote_dir }}' - executable: '{{ node_path }}/npm' + executable: '{{ node_path }}/npm' state: present name: '{{ package }}' no_bin_links: false environment: - PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' register: npm_install_no_bin_links_disabled - name: 'Make sure .bin folder has been created' @@ -41,12 +41,12 @@ - name: 
'Install simple package with no_bin_links enabled' npm: path: '{{ remote_dir }}' - executable: '{{ node_path }}/npm' + executable: '{{ node_path }}/npm' state: present name: '{{ package }}' no_bin_links: true environment: - PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' register: npm_install_no_bin_links_enabled - name: 'Make sure .bin folder has not been created' diff --git a/tests/integration/targets/npm/tasks/test.yml b/tests/integration/targets/npm/tasks/test.yml index ea2dd5b9f9..d254710f0b 100644 --- a/tests/integration/targets/npm/tasks/test.yml +++ b/tests/integration/targets/npm/tasks/test.yml @@ -5,7 +5,7 @@ - vars: # sample: node-v8.2.0-linux-x64.tar.xz - node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin' + node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin' package: 'iconv-lite' block: - shell: npm --version @@ -19,11 +19,11 @@ - name: 'Install simple package without dependency' npm: path: '{{ remote_dir }}' - executable: '{{ node_path }}/npm' + executable: '{{ node_path }}/npm' state: present name: '{{ package }}' environment: - PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' register: npm_install - assert: @@ -38,7 +38,7 @@ state: present name: '{{ package }}' environment: - PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' register: npm_reinstall - name: Check there is no change @@ -59,7 +59,7 @@ state: present name: '{{ package }}' environment: - PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' register: npm_fix_install - name: Check result is changed and successful diff --git a/tests/integration/targets/setup_openldap/tasks/main.yml b/tests/integration/targets/setup_openldap/tasks/main.yml index 4fd27058a6..dcf2cc7834 100644 --- a/tests/integration/targets/setup_openldap/tasks/main.yml +++ b/tests/integration/targets/setup_openldap/tasks/main.yml @@ -50,8 +50,8 
@@ - name: Copy initial config ldif file become: True copy: - src: 'files/{{ item }}' - dest: '/tmp/{{ item }}' + src: 'files/{{ item }}' + dest: '/tmp/{{ item }}' owner: root group: root mode: '0644' diff --git a/tests/integration/targets/setup_postgresql_db/tasks/main.yml b/tests/integration/targets/setup_postgresql_db/tasks/main.yml index f535ecdcf9..33e9024ba1 100644 --- a/tests/integration/targets/setup_postgresql_db/tasks/main.yml +++ b/tests/integration/targets/setup_postgresql_db/tasks/main.yml @@ -100,7 +100,7 @@ when: ansible_os_family == "RedHat" and ansible_service_mgr != "systemd" - name: Initialize postgres (Debian) - shell: . /usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main + shell: . /usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main args: creates: /etc/postgresql/{{ pg_ver }}/ when: ansible_os_family == 'Debian' diff --git a/tests/integration/targets/supervisorctl/tasks/install_Linux.yml b/tests/integration/targets/supervisorctl/tasks/install_Linux.yml index af1790ccad..ef2dab5eae 100644 --- a/tests/integration/targets/supervisorctl/tasks/install_Linux.yml +++ b/tests/integration/targets/supervisorctl/tasks/install_Linux.yml @@ -5,6 +5,6 @@ - name: disable supervisord system service service: - name: '{{ supervisor_service_name }}' + name: '{{ supervisor_service_name }}' state: stopped enabled: no diff --git a/tests/integration/targets/supervisorctl/tasks/main.yml b/tests/integration/targets/supervisorctl/tasks/main.yml index a6ad10bdad..2a7ecdcfc0 100644 --- a/tests/integration/targets/supervisorctl/tasks/main.yml +++ b/tests/integration/targets/supervisorctl/tasks/main.yml @@ -21,7 +21,7 @@ - '{{ ansible_os_family }}.yml' - 'defaults.yml' - - include_tasks: '{{ item }}' + - include_tasks: '{{ item }}' with_first_found: - files: - 'install_{{ ansible_distribution }}.yml' # CentOS @@ 
-39,7 +39,7 @@ when: ansible_os_family != 'RedHat' or ansible_distribution_major_version|int > 6 always: - - include_tasks: '{{ item }}' + - include_tasks: '{{ item }}' when: ansible_os_family != 'RedHat' or ansible_distribution_major_version|int > 6 with_first_found: - files: diff --git a/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml b/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml index 9067a27322..1354bc8632 100644 --- a/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml +++ b/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml @@ -1,5 +1,5 @@ - name: start supervisord - command: 'supervisord -c {{ remote_dir }}/supervisord.conf' + command: 'supervisord -c {{ remote_dir }}/supervisord.conf' - name: wait_for supervisord ansible.builtin.wait_for: diff --git a/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml b/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml index 1bf48f2139..4da09da222 100644 --- a/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml +++ b/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml @@ -1,2 +1,2 @@ - name: stop supervisord - command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} shutdown" + command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} shutdown" diff --git a/tests/integration/targets/supervisorctl/tasks/test.yml b/tests/integration/targets/supervisorctl/tasks/test.yml index bfd2a06e17..9b43c21dec 100644 --- a/tests/integration/targets/supervisorctl/tasks/test.yml +++ b/tests/integration/targets/supervisorctl/tasks/test.yml @@ -1,7 +1,7 @@ - name: generate supervisor configuration template: src: supervisord.conf - dest: '{{ remote_dir }}/supervisord.conf' + dest: '{{ remote_dir 
}}/supervisord.conf' - block: - import_tasks: start_supervisord.yml diff --git a/tests/integration/targets/supervisorctl/tasks/test_start.yml b/tests/integration/targets/supervisorctl/tasks/test_start.yml index cc56ac5a99..c05a7dd400 100644 --- a/tests/integration/targets/supervisorctl/tasks/test_start.yml +++ b/tests/integration/targets/supervisorctl/tasks/test_start.yml @@ -2,7 +2,7 @@ supervisorctl: name: 'pys:py1' state: started - config: '{{ remote_dir }}/supervisord.conf' + config: '{{ remote_dir }}/supervisord.conf' register: result when: credentials.username == '' @@ -16,7 +16,7 @@ register: result_with_auth when: credentials.username != '' -- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status" +- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status" - name: check that service is started assert: @@ -32,7 +32,7 @@ supervisorctl: name: pys:py1 state: started - config: '{{ remote_dir }}/supervisord.conf' + config: '{{ remote_dir }}/supervisord.conf' register: result when: credentials.username == '' @@ -65,7 +65,7 @@ supervisorctl: name: 'pys:py1' state: started - config: '{{ remote_dir }}/supervisord.conf' + config: '{{ remote_dir }}/supervisord.conf' register: result when: credentials.username == '' @@ -110,7 +110,7 @@ supervisorctl: name: 'pys:py1' state: started - config: '{{ remote_dir }}/supervisord_not_here.conf' + config: '{{ remote_dir }}/supervisord_not_here.conf' register: result failed_when: result is success or result is not failed @@ -118,7 +118,7 @@ supervisorctl: name: 'invalid' state: started - config: '{{ remote_dir }}/supervisord.conf' + config: '{{ remote_dir }}/supervisord.conf' register: result failed_when: result is skip or (result is success or result is not failed) when: credentials.username == '' @@ -127,7 
+127,7 @@ supervisorctl: name: 'invalid' state: started - config: '{{ remote_dir }}/supervisord.conf' + config: '{{ remote_dir }}/supervisord.conf' username: '{{ credentials.username }}wrong_creds' password: '{{ credentials.password }}same_here' register: result diff --git a/tests/integration/targets/supervisorctl/tasks/test_stop.yml b/tests/integration/targets/supervisorctl/tasks/test_stop.yml index 5c76a6813c..729f0ebd42 100644 --- a/tests/integration/targets/supervisorctl/tasks/test_stop.yml +++ b/tests/integration/targets/supervisorctl/tasks/test_stop.yml @@ -18,7 +18,7 @@ register: result_with_auth when: credentials.username != '' -- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status" +- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status" - name: check that service is stopped assert: diff --git a/tests/integration/targets/supervisorctl/templates/supervisord.conf b/tests/integration/targets/supervisorctl/templates/supervisord.conf index 2f80e02b72..28b6ac09f9 100644 --- a/tests/integration/targets/supervisorctl/templates/supervisord.conf +++ b/tests/integration/targets/supervisorctl/templates/supervisord.conf @@ -36,7 +36,7 @@ password = {{ credentials.password }} {% endif %} [supervisorctl] -serverurl=unix://{{ supervisord_sock_path.path }}/supervisord.sock +serverurl=unix://{{ supervisord_sock_path.path }}/supervisord.sock [rpcinterface:supervisor] supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface From 95ceb53676b5359d03a1173fe5b92759c54cc9c3 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 26 Jul 2021 08:03:45 +1200 Subject: [PATCH 0223/2828] taiga_issue - bugfix + pythonification (#3067) * taiga_issue - bugfix + pythonification * added changelog 
fragment --- changelogs/fragments/3067-taiga-bugfix.yaml | 2 + .../modules/web_infrastructure/taiga_issue.py | 50 +++++++++---------- 2 files changed, 26 insertions(+), 26 deletions(-) create mode 100644 changelogs/fragments/3067-taiga-bugfix.yaml diff --git a/changelogs/fragments/3067-taiga-bugfix.yaml b/changelogs/fragments/3067-taiga-bugfix.yaml new file mode 100644 index 0000000000..dfd3b531b0 --- /dev/null +++ b/changelogs/fragments/3067-taiga-bugfix.yaml @@ -0,0 +1,2 @@ +bugfixes: + - taiga - some constructs in the module fixed to work also in Python 3 (https://github.com/ansible-collections/community.general/pull/3067). diff --git a/plugins/modules/web_infrastructure/taiga_issue.py b/plugins/modules/web_infrastructure/taiga_issue.py index f05550276e..729757590d 100644 --- a/plugins/modules/web_infrastructure/taiga_issue.py +++ b/plugins/modules/web_infrastructure/taiga_issue.py @@ -129,7 +129,7 @@ except ImportError: TAIGA_MODULE_IMPORTED = False -def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority, +def manage_issue(taiga_host, project_name, issue_subject, issue_priority, issue_status, issue_type, issue_severity, issue_description, issue_attachment, issue_attachment_description, issue_tags, state, check_mode=False): @@ -157,34 +157,34 @@ def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority username = getenv('TAIGA_USERNAME') password = getenv('TAIGA_PASSWORD') if not any([username, password]): - return (False, changed, "Missing credentials", {}) + return False, changed, "Missing credentials", {} api.auth(username=username, password=password) user_id = api.me().id - project_list = filter(lambda x: x.name == project_name, api.projects.list(member=user_id)) + project_list = list(filter(lambda x: x.name == project_name, api.projects.list(member=user_id))) if len(project_list) != 1: - return (False, changed, "Unable to find project %s" % project_name, {}) + return False, changed, "Unable to find project 
%s" % project_name, {} project = project_list[0] project_id = project.id - priority_list = filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id)) + priority_list = list(filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id))) if len(priority_list) != 1: - return (False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {}) + return False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {} priority_id = priority_list[0].id - status_list = filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id)) + status_list = list(filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id))) if len(status_list) != 1: - return (False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {}) + return False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {} status_id = status_list[0].id - type_list = filter(lambda x: x.name == issue_type, project.list_issue_types()) + type_list = list(filter(lambda x: x.name == issue_type, project.list_issue_types())) if len(type_list) != 1: - return (False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {}) + return False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {} type_id = type_list[0].id - severity_list = filter(lambda x: x.name == issue_severity, project.list_severities()) + severity_list = list(filter(lambda x: x.name == issue_severity, project.list_severities())) if len(severity_list) != 1: - return (False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {}) + return False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {} severity_id = severity_list[0].id issue = { @@ -199,7 +199,7 @@ def manage_issue(module, 
taiga_host, project_name, issue_subject, issue_priority } # An issue is identified by the project_name, the issue_subject and the issue_type - matching_issue_list = filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues()) + matching_issue_list = list(filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues())) matching_issue_list_len = len(matching_issue_list) if matching_issue_list_len == 0: @@ -209,16 +209,17 @@ def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority changed = True if not check_mode: # Create the issue - new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, description=issue_description) + new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, + description=issue_description) if issue_attachment: new_issue.attach(issue_attachment, description=issue_attachment_description) issue["attachment"] = issue_attachment issue["attachment_description"] = issue_attachment_description - return (True, changed, "Issue created", issue) + return True, changed, "Issue created", issue else: # If does not exist, do nothing - return (True, changed, "Issue does not exist", {}) + return True, changed, "Issue does not exist", {} elif matching_issue_list_len == 1: # The issue exists in the project @@ -228,19 +229,19 @@ def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority if not check_mode: # Delete the issue matching_issue_list[0].delete() - return (True, changed, "Issue deleted", {}) + return True, changed, "Issue deleted", {} else: # Do nothing - return (True, changed, "Issue already exists", {}) + return True, changed, "Issue already exists", {} else: # More than 1 matching issue - return (False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {}) + return False, changed, "More than one issue with 
subject %s in project %s" % (issue_subject, project_name), {} except TaigaException as exc: msg = "An exception happened: %s" % to_native(exc) - return (False, changed, msg, {}) + return False, changed, msg, {} def main(): @@ -257,15 +258,13 @@ def main(): attachment=dict(type='path', required=False, default=None), attachment_description=dict(type='str', required=False, default=""), tags=dict(required=False, default=[], type='list', elements='str'), - state=dict(type='str', required=False, choices=['present', 'absent'], - default='present'), + state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), ), supports_check_mode=True ) if not TAIGA_MODULE_IMPORTED: - module.fail_json(msg=missing_required_lib("python-taiga"), - exception=TAIGA_IMP_ERR) + module.fail_json(msg=missing_required_lib("python-taiga"), exception=TAIGA_IMP_ERR) taiga_host = module.params['taiga_host'] project_name = module.params['project'] @@ -285,7 +284,6 @@ def main(): state = module.params['state'] return_status, changed, msg, issue_attr_dict = manage_issue( - module, taiga_host, project_name, issue_subject, @@ -301,7 +299,7 @@ def main(): check_mode=module.check_mode ) if return_status: - if len(issue_attr_dict) > 0: + if issue_attr_dict: module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict) else: module.exit_json(changed=changed, msg=msg) From c8b2d7c1e5682dc64f538953cbafa70d72b7e5c2 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 26 Jul 2021 08:04:23 +1200 Subject: [PATCH 0224/2828] supervisorctl - bugfix + using ansible validation + pythonification (#3068) * supervisorctl - bugfix + pythonification * added changelog fragment * rollback check on the binpath --- .../fragments/3068-supervisorctl-bugfix.yaml | 4 +++ .../web_infrastructure/supervisorctl.py | 35 +++++++++---------- 2 files changed, 21 insertions(+), 18 deletions(-) create mode 100644 
changelogs/fragments/3068-supervisorctl-bugfix.yaml diff --git a/changelogs/fragments/3068-supervisorctl-bugfix.yaml b/changelogs/fragments/3068-supervisorctl-bugfix.yaml new file mode 100644 index 0000000000..6571e211b6 --- /dev/null +++ b/changelogs/fragments/3068-supervisorctl-bugfix.yaml @@ -0,0 +1,4 @@ +bugfixes: + - supervisorctl - state ``signalled`` was not working (https://github.com/ansible-collections/community.general/pull/3068). +minor_changes: + - supervisorctl - using standard Ansible mechanism to validate ``signalled`` state required parameter (https://github.com/ansible-collections/community.general/pull/3068). diff --git a/plugins/modules/web_infrastructure/supervisorctl.py b/plugins/modules/web_infrastructure/supervisorctl.py index 5524beea98..f44af0befe 100644 --- a/plugins/modules/web_infrastructure/supervisorctl.py +++ b/plugins/modules/web_infrastructure/supervisorctl.py @@ -101,16 +101,20 @@ from ansible.module_utils.basic import AnsibleModule, is_executable def main(): arg_spec = dict( name=dict(type='str', required=True), - config=dict(required=False, type='path'), - server_url=dict(type='str', required=False), - username=dict(type='str', required=False), - password=dict(type='str', required=False, no_log=True), - supervisorctl_path=dict(required=False, type='path'), + config=dict(type='path'), + server_url=dict(type='str'), + username=dict(type='str'), + password=dict(type='str', no_log=True), + supervisorctl_path=dict(type='path'), state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']), - signal=dict(type='str', required=False) + signal=dict(type='str'), ) - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_if=[('state', 'signalled', ['signal'])], + ) name = module.params['name'] is_group = False @@ -146,9 +150,6 @@ def main(): if password: 
supervisorctl_args.extend(['-p', password]) - if state == 'signalled' and not signal: - module.fail_json(msg="State 'signalled' requires a 'signal' value") - def run_supervisorctl(cmd, name=None, **kwargs): args = list(supervisorctl_args) # copy the master args args.append(cmd) @@ -231,26 +232,24 @@ def main(): if module.check_mode: module.exit_json(changed=True) run_supervisorctl('reread', check_rc=True) - rc, out, err = run_supervisorctl('add', name) + dummy, out, dummy = run_supervisorctl('add', name) if '%s: added process group' % name in out: module.exit_json(changed=True, name=name, state=state) else: module.fail_json(msg=out, name=name, state=state) + # from this point onwards, if there are no matching processes, module cannot go on. + if len(processes) == 0: + module.fail_json(name=name, msg="ERROR (no such process)") + if state == 'started': - if len(processes) == 0: - module.fail_json(name=name, msg="ERROR (no such process)") take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started') if state == 'stopped': - if len(processes) == 0: - module.fail_json(name=name, msg="ERROR (no such process)") take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped') if state == 'signalled': - if len(processes) == 0: - module.fail_json(name=name, msg="ERROR (no such process)") - take_action_on_processes(processes, lambda s: s in ('RUNNING'), "signal %s" % signal, 'signalled') + take_action_on_processes(processes, lambda s: s in ('RUNNING',), "signal %s" % signal, 'signalled') if __name__ == '__main__': From ac0388100259e6cfb054454658e78351bad6fa65 Mon Sep 17 00:00:00 2001 From: Yvan Watchman Date: Mon, 26 Jul 2021 06:33:01 +0200 Subject: [PATCH 0225/2828] Succesful clone from proxmox_kvm should return new vm id, not id from cloned vm. (#3034) * Clone sucess should return new vm id, not id from cloned vm. 
* add changelog fragment * Update changelogs/fragments/3034-promox-kvm-return-new-id.yaml Co-authored-by: Felix Fontein Co-authored-by: Yvan E. Watchman Co-authored-by: Felix Fontein --- changelogs/fragments/3034-promox-kvm-return-new-id.yaml | 3 +++ plugins/modules/cloud/misc/proxmox_kvm.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3034-promox-kvm-return-new-id.yaml diff --git a/changelogs/fragments/3034-promox-kvm-return-new-id.yaml b/changelogs/fragments/3034-promox-kvm-return-new-id.yaml new file mode 100644 index 0000000000..8cbd769a04 --- /dev/null +++ b/changelogs/fragments/3034-promox-kvm-return-new-id.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - proxmox_kvm - fix result of clone, now returns ``newid`` instead of ``vmid`` (https://github.com/ansible-collections/community.general/pull/3034). diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 939c72a126..159968ce6e 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -1303,7 +1303,7 @@ def main(): if update: module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid)) elif clone is not None: - module.exit_json(changed=True, vmid=vmid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) + module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) else: module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) except Exception as e: From 21d5668c97306225e046e9da5ce8a0e623161eec Mon Sep 17 00:00:00 2001 From: quidame Date: Mon, 26 Jul 2021 11:42:13 +0200 Subject: [PATCH 0226/2828] java_cert: import certificate+key bundle from pkcs12 (#3080) * import certificate+key bundle from pkcs12 * fix typo/syntax * fix variable name * fix passwords order and improve error handling * add changelog fragment * enter keystore 
pass only once if keystore already exists, and twice at creation * nomalize tests - Replace `command` tasks by dedicated (community.crypto) modules. - Add spaces around jinja2 variable names. - Call modules by their FQCNs. * Add tests to check keystore has a private key fix tests for RedHat/CentOS < 8 (run openssl command as an alternative to `openssl_pkcs12` module) --- ...3080-java_cert-2460-import_private_key.yml | 4 + plugins/modules/system/java_cert.py | 61 ++++++- .../targets/java_cert/defaults/main.yml | 4 +- .../targets/java_cert/tasks/main.yml | 48 +++--- .../targets/java_cert/tasks/state_change.yml | 161 +++++++++++++----- 5 files changed, 203 insertions(+), 75 deletions(-) create mode 100644 changelogs/fragments/3080-java_cert-2460-import_private_key.yml diff --git a/changelogs/fragments/3080-java_cert-2460-import_private_key.yml b/changelogs/fragments/3080-java_cert-2460-import_private_key.yml new file mode 100644 index 0000000000..465c484673 --- /dev/null +++ b/changelogs/fragments/3080-java_cert-2460-import_private_key.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - java_cert - import private key as well as public certificate from PKCS#12 + (https://github.com/ansible-collections/community.general/issues/2460). diff --git a/plugins/modules/system/java_cert.py b/plugins/modules/system/java_cert.py index 1c507f9277..515d5269c9 100644 --- a/plugins/modules/system/java_cert.py +++ b/plugins/modules/system/java_cert.py @@ -11,15 +11,15 @@ DOCUMENTATION = r''' --- module: java_cert -short_description: Uses keytool to import/remove key from java keystore (cacerts) +short_description: Uses keytool to import/remove certificate to/from java keystore (cacerts) description: - - This is a wrapper module around keytool, which can be used to import/remove - certificates from a given java keystore. + - This is a wrapper module around keytool, which can be used to import certificates + and optionally private keys to a given java keystore, or remove them from it. 
options: cert_url: description: - Basic URL to fetch SSL certificate from. - - One of C(cert_url) or C(cert_path) is required to load certificate. + - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. type: str cert_port: description: @@ -30,7 +30,7 @@ options: cert_path: description: - Local path to load certificate from. - - One of C(cert_url) or C(cert_path) is required to load certificate. + - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. type: path cert_alias: description: @@ -46,6 +46,10 @@ options: pkcs12_path: description: - Local path to load PKCS12 keystore from. + - Unlike C(cert_url) and C(cert_path), the PKCS12 keystore embeds the private key matching + the certificate, and is used to import both the certificate and its private key into the + java keystore. + - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. type: path pkcs12_password: description: @@ -267,6 +271,7 @@ def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, passwo export_cmd = [ executable, "-list", + "-noprompt", "-keystore", pkcs_file, "-alias", @@ -336,6 +341,44 @@ def _download_cert_url(module, executable, url, port): return fetch_out +def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, + keystore_path, keystore_pass, keystore_alias, keystore_type): + ''' Import pkcs12 from path into keystore located on + keystore_path as alias ''' + import_cmd = [ + executable, + "-importkeystore", + "-noprompt", + "-srcstoretype", + "pkcs12", + "-srckeystore", + pkcs12_path, + "-srcalias", + pkcs12_alias, + "-destkeystore", + keystore_path, + "-destalias", + keystore_alias + ] + import_cmd += _get_keystore_type_keytool_parameters(keystore_type) + + secret_data = "%s\n%s" % (keystore_pass, pkcs12_pass) + # Password of a new keystore must be entered twice, for confirmation + if not os.path.exists(keystore_path): + 
secret_data = "%s\n%s" % (keystore_pass, secret_data) + + # Use local certificate from local path and import it to a java keystore + (import_rc, import_out, import_err) = module.run_command(import_cmd, data=secret_data, check_rc=False) + + diff = {'before': '\n', 'after': '%s\n' % keystore_alias} + if import_rc == 0 and os.path.exists(keystore_path): + module.exit_json(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) + else: + module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err) + + def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert): ''' Import certificate from path into keystore located on keystore_path as alias ''' @@ -522,8 +565,12 @@ def main(): # The existing certificate must first be deleted before we insert the correct one delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type, exit_after=False) - import_cert_path(module, executable, new_certificate, keystore_path, - keystore_pass, cert_alias, keystore_type, trust_cacert) + if pkcs12_path: + import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, + keystore_path, keystore_pass, cert_alias, keystore_type) + else: + import_cert_path(module, executable, new_certificate, keystore_path, + keystore_pass, cert_alias, keystore_type, trust_cacert) module.exit_json(changed=False) diff --git a/tests/integration/targets/java_cert/defaults/main.yml b/tests/integration/targets/java_cert/defaults/main.yml index 6416f306af..8e63493600 100644 --- a/tests/integration/targets/java_cert/defaults/main.yml +++ b/tests/integration/targets/java_cert/defaults/main.yml @@ -5,9 +5,11 @@ test_keystore2_path: "{{ output_dir }}/keystore2.jks" test_keystore2_password: changeit test_cert_path: "{{ output_dir }}/cert.pem" test_key_path: "{{ output_dir }}/key.pem" +test_csr_path: "{{ output_dir }}/req.csr" test_cert2_path: "{{ 
output_dir }}/cert2.pem" test_key2_path: "{{ output_dir }}/key2.pem" +test_csr2_path: "{{ output_dir }}/req2.csr" test_pkcs_path: "{{ output_dir }}/cert.p12" test_pkcs2_path: "{{ output_dir }}/cert2.p12" test_ssl: setupSSLServer.py -test_ssl_port: 21500 \ No newline at end of file +test_ssl_port: 21500 diff --git a/tests/integration/targets/java_cert/tasks/main.yml b/tests/integration/targets/java_cert/tasks/main.yml index 8172db5c15..20550740da 100644 --- a/tests/integration/targets/java_cert/tasks/main.yml +++ b/tests/integration/targets/java_cert/tasks/main.yml @@ -7,32 +7,34 @@ block: - name: prep pkcs12 file - copy: src="{{ test_pkcs12_path }}" dest="{{output_dir}}/{{ test_pkcs12_path }}" + ansible.builtin.copy: + src: "{{ test_pkcs12_path }}" + dest: "{{ output_dir }}/{{ test_pkcs12_path }}" - name: import pkcs12 - java_cert: - pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" + community.general.java_cert: + pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" pkcs12_password: changeit pkcs12_alias: default cert_alias: default - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present register: result_success - name: verify success - assert: + ansible.builtin.assert: that: - result_success is successful - name: import pkcs12 with wrong password - java_cert: - pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" + community.general.java_cert: + pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" pkcs12_password: wrong_pass pkcs12_alias: default cert_alias: default_new - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present @@ -40,16 +42,16 @@ register: result_wrong_pass - name: verify fail with wrong import password - assert: + ansible.builtin.assert: that: - result_wrong_pass is failed - 
name: test fail on mutually exclusive params - java_cert: + community.general.java_cert: cert_path: ca.crt - pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" + pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" cert_alias: default - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present @@ -57,26 +59,26 @@ register: result_excl_params - name: verify failed exclusive params - assert: + ansible.builtin.assert: that: - result_excl_params is failed - name: test fail on missing required params - java_cert: - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + community.general.java_cert: + keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore state: absent ignore_errors: true register: result_missing_required_param - name: verify failed missing required params - assert: + ansible.builtin.assert: that: - result_missing_required_param is failed - name: delete object based on cert_alias parameter - java_cert: - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + community.general.java_cert: + keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore cert_alias: default state: absent @@ -84,15 +86,15 @@ register: result_alias_deleted - name: verify object successfully deleted - assert: + ansible.builtin.assert: that: - result_alias_deleted is successful - - name: include extended test suite + - name: include extended test suite import_tasks: state_change.yml - name: cleanup environment - file: + ansible.builtin.file: path: "{{ item }}" state: absent loop: @@ -101,7 +103,9 @@ - "{{ test_keystore2_path }}" - "{{ test_cert_path }}" - "{{ test_key_path }}" + - "{{ test_csr_path }}" - "{{ test_cert2_path }}" - "{{ test_key2_path }}" + - "{{ test_csr2_path }}" - "{{ test_pkcs_path }}" - - "{{ test_pkcs2_path }}" \ No newline at end of file + - "{{ 
test_pkcs2_path }}" diff --git a/tests/integration/targets/java_cert/tasks/state_change.yml b/tests/integration/targets/java_cert/tasks/state_change.yml index 8cee41106f..38ef62cd0f 100644 --- a/tests/integration/targets/java_cert/tasks/state_change.yml +++ b/tests/integration/targets/java_cert/tasks/state_change.yml @@ -1,36 +1,96 @@ --- -- name: Generate the self signed cert used as a place holder to create the java keystore - command: openssl req -x509 -newkey rsa:4096 -keyout {{ test_key_path }} -out {{ test_cert_path }} -days 365 -nodes -subj '/CN=localhost' - args: - creates: "{{ test_key_path }}" +# +# Prepare X509 and PKCS#12 materials +# + +- name: Create private keys + community.crypto.openssl_privatekey: + path: "{{ item }}" + mode: "u=rw,go=" + loop: + - "{{ test_key_path }}" + - "{{ test_key2_path }}" + +- name: Generate CSR for self-signed certificate used as a placeholder to create the java keystore + community.crypto.openssl_csr: + path: "{{ test_csr_path }}" + privatekey_path: "{{ test_key_path }}" + commonName: "localhost" + +- name: Generate CSR for self-signed certificate used for testing + community.crypto.openssl_csr: + path: "{{ test_csr2_path }}" + privatekey_path: "{{ test_key2_path }}" + commonName: "localhost" + +- name: Generate the self-signed cert used as a placeholder to create the java keystore + community.crypto.x509_certificate: + path: "{{ test_cert_path }}" + csr_path: "{{ test_csr_path }}" + privatekey_path: "{{ test_key_path }}" + provider: selfsigned - name: Generate the self signed cert we will use for testing - command: openssl req -x509 -newkey rsa:4096 -keyout '{{ test_key2_path }}' -out '{{ test_cert2_path }}' -days 365 -nodes -subj '/CN=localhost' - args: - creates: "{{ test_key2_path }}" + community.crypto.x509_certificate: + path: "{{ test_cert2_path }}" + csr_path: "{{ test_csr2_path }}" + privatekey_path: "{{ test_key2_path }}" + provider: selfsigned - name: Create the pkcs12 archive from the test x509 cert - 
command: > - openssl pkcs12 - -in {{ test_cert_path }} - -inkey {{ test_key_path }} - -export - -name test_pkcs12_cert - -out {{ test_pkcs_path }} - -passout pass:"{{ test_keystore2_password }}" + community.crypto.openssl_pkcs12: + name: "test_pkcs12_cert" + path: "{{ test_pkcs_path }}" + passphrase: "{{ test_keystore2_password }}" + certificate_path: "{{ test_cert_path }}" + privatekey_path: "{{ test_key_path }}" + when: + - "not (ansible_os_family == 'RedHat' and ansible_distribution_version is version('8.0', '<'))" + +- name: Create the pkcs12 archive from the test x509 cert (command) + ansible.builtin.command: + cmd: > + openssl pkcs12 -export + -in {{ test_cert_path }} + -inkey {{ test_key_path }} + -name test_pkcs12_cert + -out {{ test_pkcs_path }} + -passout stdin + stdin: "{{ test_keystore2_password }}" + when: + - "ansible_os_family == 'RedHat'" + - "ansible_distribution_version is version('8.0', '<')" - name: Create the pkcs12 archive from the certificate we will be trying to add to the keystore - command: > - openssl pkcs12 - -in {{ test_cert2_path }} - -inkey {{ test_key2_path }} - -export - -name test_pkcs12_cert - -out {{ test_pkcs2_path }} - -passout pass:"{{ test_keystore2_password }}" + community.crypto.openssl_pkcs12: + name: "test_pkcs12_cert" + path: "{{ test_pkcs2_path }}" + passphrase: "{{ test_keystore2_password }}" + certificate_path: "{{ test_cert2_path }}" + privatekey_path: "{{ test_key2_path }}" + when: + - "not (ansible_os_family == 'RedHat' and ansible_distribution_version is version('8.0', '<'))" + +- name: Create the pkcs12 archive from the certificate we will be trying to add to the keystore (command) + ansible.builtin.command: + cmd: > + openssl pkcs12 -export + -in {{ test_cert2_path }} + -inkey {{ test_key2_path }} + -name test_pkcs12_cert + -out {{ test_pkcs2_path }} + -passout stdin + stdin: "{{ test_keystore2_password }}" + when: + - "ansible_os_family == 'RedHat'" + - "ansible_distribution_version is version('8.0', '<')" + +# 
+# Run tests +# - name: try to create the test keystore based on the just created pkcs12, keystore_create flag not enabled - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert pkcs12_path: "{{ test_pkcs_path }}" @@ -41,12 +101,12 @@ register: result_x509_changed - name: Verify the x509 status is failed - assert: + ansible.builtin.assert: that: - result_x509_changed is failed - name: Create the test keystore based on the just created pkcs12 - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert pkcs12_path: "{{ test_pkcs_path }}" @@ -55,8 +115,19 @@ keystore_pass: "{{ test_keystore2_password }}" keystore_create: yes +- name: List newly created keystore content + ansible.builtin.command: + cmd: "keytool -list -keystore {{ test_keystore2_path }}" + stdin: "{{ test_keystore2_password }}" + register: keytool_list_keystore + +- name: Assert that the keystore has a private key entry + ansible.builtin.assert: + that: + - "keytool_list_keystore.stdout_lines[5] is match('test_pkcs12_cert,.*, PrivateKeyEntry, $')" + - name: try to import from pkcs12 a non existing alias - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: non_existing_alias pkcs12_path: "{{ test_pkcs_path }}" @@ -68,12 +139,12 @@ register: result_x509_changed - name: Verify the x509 status is failed - assert: + ansible.builtin.assert: that: - result_x509_changed is failed - name: import initial test certificate from file path - java_cert: + community.general.java_cert: cert_alias: test_cert cert_path: "{{ test_cert_path }}" keystore_path: "{{ test_keystore2_path }}" @@ -83,7 +154,7 @@ register: result_x509_changed - name: Verify the x509 status is changed - assert: + ansible.builtin.assert: that: - result_x509_changed is changed @@ -92,7 +163,7 @@ If the java_cert has been updated properly, then this task will report changed each time since the module will be comparing the hash 
of the certificate instead of validating that the alias simply exists - java_cert: + community.general.java_cert: cert_alias: test_cert cert_path: "{{ test_cert2_path }}" keystore_path: "{{ test_keystore2_path }}" @@ -101,13 +172,13 @@ register: result_x509_changed - name: Verify the x509 status is changed - assert: + ansible.builtin.assert: that: - result_x509_changed is changed - name: | We also want to make sure that the status doesnt change if we import the same cert - java_cert: + community.general.java_cert: cert_alias: test_cert cert_path: "{{ test_cert2_path }}" keystore_path: "{{ test_keystore2_path }}" @@ -116,13 +187,13 @@ register: result_x509_succeeded - name: Verify the x509 status is ok - assert: + ansible.builtin.assert: that: - result_x509_succeeded is succeeded - name: > Ensure the original pkcs12 cert is in the keystore - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert pkcs12_path: "{{ test_pkcs_path }}" @@ -134,7 +205,7 @@ - name: | Perform the same test, but we will now be testing the pkcs12 functionality If we add a different pkcs12 cert with the same alias, we should have a changed result, NOT the same - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert pkcs12_path: "{{ test_pkcs2_path }}" @@ -145,13 +216,13 @@ register: result_pkcs12_changed - name: Verify the pkcs12 status is changed - assert: + ansible.builtin.assert: that: - result_pkcs12_changed is changed - name: | We are requesting the same cert now, so the status should show OK - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert pkcs12_path: "{{ test_pkcs2_path }}" @@ -161,7 +232,7 @@ register: result_pkcs12_succeeded - name: Verify the pkcs12 status is ok - assert: + ansible.builtin.assert: that: - result_pkcs12_succeeded is succeeded @@ -178,7 +249,7 @@ - name: | Download the original cert.pem from our temporary server. 
The current cert should contain cert2.pem. Importing this cert should return a status of changed - java_cert: + community.general.java_cert: cert_alias: test_cert_localhost cert_url: localhost cert_port: "{{ test_ssl_port }}" @@ -188,12 +259,12 @@ register: result_url_changed - name: Verify that the url status is changed - assert: + ansible.builtin.assert: that: - result_url_changed is changed - name: Ensure we can remove the x509 cert - java_cert: + community.general.java_cert: cert_alias: test_cert keystore_path: "{{ test_keystore2_path }}" keystore_pass: "{{ test_keystore2_password }}" @@ -201,12 +272,12 @@ register: result_x509_absent - name: Verify the x509 cert is absent - assert: + ansible.builtin.assert: that: - result_x509_absent is changed - name: Ensure we can remove the certificate imported from pkcs12 archive - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert keystore_path: "{{ test_keystore2_path }}" keystore_pass: "{{ test_keystore2_password }}" @@ -214,6 +285,6 @@ register: result_pkcs12_absent - name: Verify the pkcs12 archive is absent - assert: + ansible.builtin.assert: that: - result_pkcs12_absent is changed From 4982eaf935f9a41e054dbe8e8f2f1b1af8334d6b Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 26 Jul 2021 11:44:41 +0200 Subject: [PATCH 0227/2828] Update BOTMETA, fix some plugin authors, improve BOTMETA extra sanity test (#3069) * Update BOTMETA, fix some plugin authors, improve BOTMETA extra sanity test. * Linting. 
--- .github/BOTMETA.yml | 269 +++++++++++++++++++++++++------- plugins/become/doas.py | 2 +- plugins/become/dzdo.py | 2 +- plugins/become/ksu.py | 2 +- plugins/become/machinectl.py | 2 +- plugins/become/pbrun.py | 2 +- plugins/become/pfexec.py | 2 +- plugins/become/pmrun.py | 2 +- plugins/connection/funcd.py | 2 +- plugins/lookup/dependent.py | 1 + tests/sanity/extra/botmeta.json | 3 - tests/sanity/extra/botmeta.py | 93 ++++++----- 12 files changed, 276 insertions(+), 106 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 55f34d3041..b91d01d44e 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -2,6 +2,7 @@ automerge: true files: plugins/: supershipit: quidame Ajpantuso + changelogs/: {} changelogs/fragments/: support: community $actions: @@ -12,17 +13,63 @@ files: maintainers: nitzmahone samdoran aminvakil $becomes/: labels: become + $becomes/doas.py: + maintainers: $team_ansible_core + $becomes/dzdo.py: + maintainers: $team_ansible_core + $becomes/ksu.py: + maintainers: $team_ansible_core + $becomes/machinectl.py: + maintainers: $team_ansible_core + $becomes/pbrun.py: + maintainers: $team_ansible_core + $becomes/pfexec.py: + maintainers: $team_ansible_core + $becomes/pmrun.py: + maintainers: $team_ansible_core + $becomes/sesu.py: + maintainers: nekonyuu + $becomes/sudosu.py: + maintainers: dagwieers + $caches/: + labels: cache + $caches/memcached.py: {} + $caches/pickle.py: + maintainers: bcoca + $caches/redis.py: {} + $caches/yaml.py: + maintainers: bcoca $callbacks/: labels: callbacks + $callbacks/cgroup_memory_recap.py: {} + $callbacks/context_demo.py: {} + $callbacks/counter_enabled.py: {} + $callbacks/dense.py: + maintainers: dagwieers + $callbacks/diy.py: + maintainers: theque5t + $callbacks/hipchat.py: {} + $callbacks/jabber.py: {} $callbacks/loganalytics.py: maintainers: zhcli + $callbacks/logdna.py: {} + $callbacks/logentries.py: {} + $callbacks/log_plays.py: {} $callbacks/logstash.py: maintainers: ujenmr + 
$callbacks/mail.py: + maintainers: dagwieers + $callbacks/nrdp.py: + maintainers: rverchere + $callbacks/null.py: {} $callbacks/say.py: notify: chris-short maintainers: $team_macos labels: macos say keywords: brew cask darwin homebrew macosx macports osx + $callbacks/selective.py: {} + $callbacks/slack.py: {} + $callbacks/splunk.py: {} $callbacks/sumologic.py: maintainers: ryancurrah labels: sumologic @@ -31,16 +78,26 @@ files: $callbacks/unixy.py: maintainers: akatch labels: unixy + $callbacks/yaml.py: {} $connections/: labels: connections - $connections/kubectl.py: - maintainers: chouseknecht fabianvf flaper87 maxamillion - labels: k8s kubectl + $connections/chroot.py: {} + $connections/funcd.py: + maintainers: mscherer + $connections/iocage.py: {} + $connections/jail.py: + maintainers: $team_ansible_core + $connections/lxc.py: {} $connections/lxd.py: maintainers: mattclay labels: lxd + $connections/qubes.py: + maintainers: kushaldas $connections/saltstack.py: + maintainers: mscherer labels: saltstack + $connections/zone.py: + maintainers: $team_ansible_core $doc_fragments/: labels: docs_fragments $doc_fragments/hpe3par.py: @@ -60,6 +117,8 @@ files: maintainers: giner $filters/from_csv.py: maintainers: Ajpantuso + $filters/groupby: + maintainers: felixfontein $filters/hashids: maintainers: Ajpantuso $filters/jc.py: @@ -72,53 +131,83 @@ files: maintainers: resmo $filters/version_sort.py: maintainers: ericzolf - $httpapis/: - maintainers: $team_networking - labels: networking - $httpapis/ftd.py: - maintainers: $team_networking annikulin - labels: cisco ftd networking - keywords: firepower ftd $inventories/: labels: inventories + $inventories/cobbler.py: + maintainers: opoplawski + $inventories/gitlab_runners.py: + maintainers: morph027 $inventories/linode.py: maintainers: $team_linode labels: cloud linode keywords: linode dynamic inventory script $inventories/lxd.py: maintainers: conloos + $inventories/nmap.py: {} + $inventories/online.py: + maintainers: sieben 
$inventories/proxmox.py: maintainers: $team_virt ilijamt $inventories/scaleway.py: maintainers: $team_scaleway labels: cloud scaleway + $inventories/stackpath_compute.py: + maintainers: shayrybak + $inventories/virtualbox.py: {} $lookups/: labels: lookups - $lookups/onepass: - maintainers: samdoran - labels: onepassword - $lookups/conjur_variable.py: - notify: cyberark-bizdev - maintainers: $team_cyberark_conjur - labels: conjur_variable + $lookups/cartesian.py: {} + $lookups/chef_databag.py: {} + $lookups/consul_kv.py: {} + $lookups/credstash.py: {} $lookups/cyberarkpassword.py: notify: cyberark-bizdev labels: cyberarkpassword + $lookups/dependent.py: + maintainers: felixfontein $lookups/dig.py: maintainers: jpmens labels: dig - $lookups/tss.py: - maintainers: amigus + $lookups/dnstxt.py: + maintainers: jpmens $lookups/dsv.py: maintainers: amigus + $lookups/etcd3.py: + maintainers: eric-belhomme + $lookups/etcd.py: + maintainers: jpmens + $lookups/filetree.py: + maintainers: dagwieers + $lookups/flattened.py: {} + $lookups/hiera.py: + maintainers: jparrill + $lookups/keyring.py: {} + $lookups/lastpass.py: {} + $lookups/lmdb_kv.py: + maintainers: jpmens $lookups/manifold.py: maintainers: galanoff labels: manifold $lookups/nios: maintainers: $team_networking sganesh-infoblox labels: infoblox networking + $lookups/onepass: + maintainers: samdoran + labels: onepassword + $lookups/onepassword.py: + maintainers: azenk scottsb + $lookups/onepassword_raw.py: + maintainers: azenk scottsb + $lookups/passwordstore.py: {} + $lookups/random_pet.py: + maintainers: Akasurde $lookups/random_string.py: maintainers: Akasurde + $lookups/redis.py: + maintainers: jpmens + $lookups/shelvefile.py: {} + $lookups/tss.py: + maintainers: amigus $module_utils/: labels: module_utils $module_utils/gitlab.py: @@ -196,33 +285,27 @@ files: maintainers: zbal $modules/cloud/lxc/lxc_container.py: maintainers: cloudnull - $modules/cloud/lxc/lxc_profile.py: - maintainers: conloos $modules/cloud/lxd/: 
ignore: hnakamur + $modules/cloud/lxd/lxd_profile.py: + maintainers: conloos $modules/cloud/memset/: maintainers: glitchcrab $modules/cloud/misc/cloud_init_data_facts.py: maintainers: resmo - $modules/cloud/misc/proxmox.py: - maintainers: $team_virt UnderGreen - labels: proxmox virt - ignore: skvidal - keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/proxmox_kvm.py: - maintainers: $team_virt helldorado - labels: proxmox_kvm virt - ignore: skvidal - keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/proxmox_snap.py: + $modules/cloud/misc/proxmox: maintainers: $team_virt labels: proxmox virt keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/proxmox_template.py: - maintainers: $team_virt UnderGreen - labels: proxmox_template virt + $modules/cloud/misc/proxmox.py: + maintainers: UnderGreen + ignore: skvidal + $modules/cloud/misc/proxmox_kvm.py: + maintainers: helldorado + ignore: skvidal + $modules/cloud/misc/proxmox_template.py: + maintainers: UnderGreen ignore: skvidal - keywords: kvm libvirt proxmox qemu $modules/cloud/misc/rhevm.py: maintainers: $team_virt TimothyVandenbrande labels: rhevm virt @@ -264,16 +347,40 @@ files: maintainers: omgjlk sivel $modules/cloud/rackspace/: ignore: ryansb sivel + $modules/cloud/rackspace/rax_cbs.py: + maintainers: claco + $modules/cloud/rackspace/rax_cbs_attachments.py: + maintainers: claco + $modules/cloud/rackspace/rax_cdb.py: + maintainers: jails + $modules/cloud/rackspace/rax_cdb_user.py: + maintainers: jails + $modules/cloud/rackspace/rax_cdb_database.py: + maintainers: jails $modules/cloud/rackspace/rax_clb.py: maintainers: claco $modules/cloud/rackspace/rax_clb_nodes.py: maintainers: neuroid $modules/cloud/rackspace/rax_clb_ssl.py: maintainers: smashwilson + $modules/cloud/rackspace/rax_files.py: + maintainers: angstwad + $modules/cloud/rackspace/rax_files_objects.py: + maintainers: angstwad $modules/cloud/rackspace/rax_identity.py: maintainers: claco $modules/cloud/rackspace/rax_network.py: 
maintainers: claco omgjlk + $modules/cloud/rackspace/rax_mon_alarm.py: + maintainers: smashwilson + $modules/cloud/rackspace/rax_mon_check.py: + maintainers: smashwilson + $modules/cloud/rackspace/rax_mon_entity.py: + maintainers: smashwilson + $modules/cloud/rackspace/rax_mon_notification.py: + maintainers: smashwilson + $modules/cloud/rackspace/rax_mon_notification_plan.py: + maintainers: smashwilson $modules/cloud/rackspace/rax_queue.py: maintainers: claco $modules/cloud/scaleway/: @@ -285,13 +392,17 @@ files: $modules/cloud/scaleway/scaleway_ip_info.py: maintainers: Spredzy $modules/cloud/scaleway/scaleway_organization_info.py: - maintainers: sieben + maintainers: sieben Spredzy $modules/cloud/scaleway/scaleway_security_group.py: maintainers: DenBeke $modules/cloud/scaleway/scaleway_security_group_info.py: - maintainers: sieben + maintainers: sieben Spredzy $modules/cloud/scaleway/scaleway_security_group_rule.py: maintainers: DenBeke + $modules/cloud/scaleway/scaleway_server_info.py: + maintainers: Spredzy + $modules/cloud/scaleway/scaleway_snapshot_info.py: + maintainers: Spredzy $modules/cloud/scaleway/scaleway_volume.py: labels: scaleway_volume ignore: hekonsek @@ -343,6 +454,8 @@ files: maintainers: john-westcott-iv $modules/database/misc/redis.py: maintainers: slok + $modules/database/misc/redis_info.py: + maintainers: levonet $modules/database/misc/riak.py: maintainers: drewkerrigan jsmartin $modules/database/mssql/mssql_db.py: @@ -358,10 +471,14 @@ files: maintainers: quidame $modules/files/ini_file.py: maintainers: jpmens noseka1 + $modules/files/iso_create.py: + maintainers: Tomorrow9 $modules/files/iso_extract.py: maintainers: dagwieers jhoekx ribbons $modules/files/read_csv.py: maintainers: dagwieers + $modules/files/sapcar_extract.py: + maintainers: RainerLeber $modules/files/xattr.py: maintainers: bcoca labels: xattr @@ -379,15 +496,22 @@ files: maintainers: jparrill $modules/identity/keycloak/: maintainers: $team_keycloak + 
$modules/identity/keycloak/keycloak_authentication.py: + maintainers: elfelip Gaetan2907 + $modules/identity/keycloak/keycloak_clientscope.py: + maintainers: Gaetan2907 $modules/identity/keycloak/keycloak_group.py: maintainers: adamgoossens $modules/identity/keycloak/keycloak_realm.py: maintainers: kris2kris + $modules/identity/keycloak/keycloak_role.py: + maintainers: laurpaum $modules/identity/onepassword_info.py: maintainers: Rylon $modules/identity/opendj/opendj_backendprop.py: maintainers: dj-wasabi $modules/monitoring/airbrake_deployment.py: + maintainers: phumpal labels: airbrake_deployment ignore: bpennypacker $modules/monitoring/bigpanda.py: @@ -398,6 +522,8 @@ files: maintainers: n0ts labels: datadog_event ignore: arturaz + $modules/monitoring/datadog/datadog_downtime.py: + maintainers: Datadog $modules/monitoring/datadog/datadog_monitor.py: maintainers: skornehl $modules/monitoring/honeybadger_deployment.py: @@ -461,6 +587,8 @@ files: maintainers: drcapulet $modules/net_tools/dnsmadeeasy.py: maintainers: briceburg + $modules/net_tools/gandi_livedns.py: + maintainers: gthiemonge $modules/net_tools/haproxy.py: maintainers: ravibhure Normo $modules/net_tools/: @@ -490,11 +618,25 @@ files: maintainers: nbuchwitz $modules/net_tools/omapi_host.py: maintainers: amasolov + $modules/net_tools/pritunl/: + maintainers: Lowess $modules/net_tools/nios/: maintainers: $team_networking labels: infoblox networking + $modules/net_tools/nios/nios_a_record.py: + maintainers: brampling + $modules/net_tools/nios/nios_aaaa_record.py: + maintainers: brampling + $modules/net_tools/nios/nios_cname_record.py: + maintainers: brampling $modules/net_tools/nios/nios_fixed_address.py: maintainers: sjaiswal + $modules/net_tools/nios/nios_member.py: + maintainers: krisvasudevan + $modules/net_tools/nios/nios_mx_record.py: + maintainers: brampling + $modules/net_tools/nios/nios_naptr_record.py: + maintainers: brampling $modules/net_tools/nios/nios_nsgroup.py: maintainers: ebirn sjaiswal 
$modules/net_tools/nios/nios_ptr_record.py: @@ -507,17 +649,16 @@ files: maintainers: alcamie101 $modules/net_tools/snmp_facts.py: maintainers: ogenstad ujwalkomarla - $modules/notification/osx_say.py: - maintainers: ansible mpdehaan - labels: _osx_say $modules/notification/bearychat.py: maintainers: tonyseek $modules/notification/campfire.py: maintainers: fabulops $modules/notification/catapult.py: maintainers: Jmainguy - $modules/notification/cisco_spark.py: + $modules/notification/cisco_webex.py: maintainers: drew-russell + $modules/notification/discord.py: + maintainers: cwollinger $modules/notification/flowdock.py: maintainers: mcodd $modules/notification/grove.py: @@ -545,7 +686,7 @@ files: $modules/notification/pushbullet.py: maintainers: willybarro $modules/notification/pushover.py: - maintainers: weaselkeeper + maintainers: weaselkeeper wopfel $modules/notification/rocketchat.py: maintainers: Deepakkothandan labels: rocketchat @@ -559,7 +700,7 @@ files: $modules/notification/syslogger.py: maintainers: garbled1 $modules/notification/telegram.py: - maintainers: tyouxa loms + maintainers: tyouxa loms lomserman $modules/notification/twilio.py: maintainers: makaimc $modules/notification/typetalk.py: @@ -597,6 +738,8 @@ files: maintainers: tdtrask labels: apk ignore: kbrebanov + $modules/packaging/os/apt_repo.py: + maintainers: obirvalger $modules/packaging/os/apt_rpm.py: maintainers: evgkrsk $modules/packaging/os/copr.py: @@ -788,6 +931,8 @@ files: maintainers: markuman $modules/source_control/gitlab/gitlab_runner.py: maintainers: SamyCoenen + $modules/source_control/gitlab/gitlab_user.py: + maintainers: LennertMertens stgrace $modules/source_control/hg.py: maintainers: yeukhon $modules/storage/emc/emc_vnx_sg_member.py: @@ -796,13 +941,6 @@ files: maintainers: farhan7500 gautamphegde $modules/storage/ibm/: maintainers: tzure - $modules/storage/infinidat/: - maintainers: vmalloc GR360RY - $modules/storage/netapp/: - maintainers: $team_netapp - 
$modules/storage/purestorage/: - maintainers: $team_purestorage - labels: pure_storage $modules/storage/vexata/: maintainers: vexata $modules/storage/zfs/: @@ -821,6 +959,8 @@ files: maintainers: mulby labels: alternatives ignore: DavidWittman + $modules/system/aix_lvol.py: + maintainers: adejoux $modules/system/awall.py: maintainers: tdtrask $modules/system/beadm.py: @@ -856,7 +996,7 @@ files: $modules/system/java_cert.py: maintainers: haad absynth76 $modules/system/java_keystore.py: - maintainers: Mogztter + maintainers: Mogztter quidame $modules/system/kernel_blacklist.py: maintainers: matze $modules/system/launchd.py: @@ -870,7 +1010,7 @@ files: $modules/system/lvg.py: maintainers: abulimov $modules/system/lvol.py: - maintainers: abulimov jhoekx + maintainers: abulimov jhoekx zigaSRC unkaputtbar112 $modules/system/make.py: maintainers: LinusU $modules/system/mksysb.py: @@ -924,6 +1064,8 @@ files: maintainers: $team_solaris pmarkham labels: solaris keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + $modules/system/ssh_config.py: + maintainers: gaqzi Akasurde $modules/system/svc.py: maintainers: bcoca $modules/system/syspatch.py: @@ -939,10 +1081,13 @@ files: maintainers: ahtik ovcharenko pyykkis labels: ufw $modules/system/vdo.py: - maintainers: rhawalsh + maintainers: rhawalsh bgurney-rh $modules/system/xfconf.py: maintainers: russoz jbenden labels: xfconf + $modules/system/xfconf_info.py: + maintainers: russoz + labels: xfconf $modules/system/xfs_quota.py: maintainers: bushvin $modules/web_infrastructure/apache2_mod_proxy.py: @@ -964,6 +1109,8 @@ files: $modules/web_infrastructure/jboss.py: maintainers: $team_jboss jhoekx labels: jboss + $modules/web_infrastructure/jenkins_build.py: + maintainers: brettmilford unnecessary-username $modules/web_infrastructure/jenkins_job.py: maintainers: sermilrod $modules/web_infrastructure/jenkins_job_info.py: @@ -973,7 +1120,7 @@ files: 
$modules/web_infrastructure/jenkins_script.py: maintainers: hogarthj $modules/web_infrastructure/jira.py: - maintainers: Slezhuk tarka + maintainers: Slezhuk tarka pertoft DWSR labels: jira $modules/web_infrastructure/nginx_status_info.py: maintainers: resmo @@ -988,6 +1135,14 @@ files: $modules/web_infrastructure/sophos_utm/utm_proxy_exception.py: maintainers: $team_e_spirit RickS-C137 keywords: sophos utm + $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py: + maintainers: stearz + $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py: + maintainers: stearz + $modules/web_infrastructure/sophos_utm/utm_network_interface_address.py: + maintainers: steamx + $modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py: + maintainers: steamx $modules/web_infrastructure/supervisorctl.py: maintainers: inetfuture mattupstate $modules/web_infrastructure/taiga_issue.py: @@ -1007,17 +1162,18 @@ files: macros: actions: plugins/action becomes: plugins/become + caches: plugins/cache callbacks: plugins/callback cliconfs: plugins/cliconf connections: plugins/connection doc_fragments: plugins/doc_fragments filters: plugins/filter - httpapis: plugins/httpapi inventories: plugins/inventory lookups: plugins/lookup module_utils: plugins/module_utils modules: plugins/modules terminals: plugins/terminal + team_ansible_core: team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo team_consul: sgargan @@ -1033,7 +1189,6 @@ macros: team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder - team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1 team_networking: NilashishC Qalthos danielmellado ganeshrn justjais 
trishnaguha sganesh-infoblox privateip team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding team_oracle: manojmeda mross22 nalsaber diff --git a/plugins/become/doas.py b/plugins/become/doas.py index 431e33cd6d..7cf4a79c7b 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: Do As user description: - This become plugins allows your remote/login user to execute commands as another user via the doas utility. - author: ansible (@core) + author: Ansible Core Team options: become_user: description: User you 'become' to execute the task diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py index 05fcb6192d..1aef8edb69 100644 --- a/plugins/become/dzdo.py +++ b/plugins/become/dzdo.py @@ -8,7 +8,7 @@ DOCUMENTATION = ''' short_description: Centrify's Direct Authorize description: - This become plugins allows your remote/login user to execute commands as another user via the dzdo utility. - author: ansible (@core) + author: Ansible Core Team options: become_user: description: User you 'become' to execute the task diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py index f5600c1d70..1ee47b0fa3 100644 --- a/plugins/become/ksu.py +++ b/plugins/become/ksu.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: Kerberos substitute user description: - This become plugins allows your remote/login user to execute commands as another user via the ksu utility. - author: ansible (@core) + author: Ansible Core Team options: become_user: description: User you 'become' to execute the task diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py index f9a2873f63..aebb0891b0 100644 --- a/plugins/become/machinectl.py +++ b/plugins/become/machinectl.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: Systemd's machinectl privilege escalation description: - This become plugins allows your remote/login user to execute commands as another user via the machinectl utility. 
- author: ansible (@core) + author: Ansible Core Team options: become_user: description: User you 'become' to execute the task diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py index a464309c0d..fe28e61c2b 100644 --- a/plugins/become/pbrun.py +++ b/plugins/become/pbrun.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: PowerBroker run description: - This become plugins allows your remote/login user to execute commands as another user via the pbrun utility. - author: ansible (@core) + author: Ansible Core Team options: become_user: description: User you 'become' to execute the task diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py index 256275dca2..2b37044c93 100644 --- a/plugins/become/pfexec.py +++ b/plugins/become/pfexec.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: profile based execution description: - This become plugins allows your remote/login user to execute commands as another user via the pfexec utility. - author: ansible (@core) + author: Ansible Core Team options: become_user: description: diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py index 597ea69d2f..8cb24fa937 100644 --- a/plugins/become/pmrun.py +++ b/plugins/become/pmrun.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: Privilege Manager run description: - This become plugins allows your remote/login user to execute commands as another user via the pmrun utility. 
- author: ansible (@core) + author: Ansible Core Team options: become_exe: description: Sudo executable diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 109e251146..afea840ee8 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -8,7 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' - author: Michael Scherer (@msherer) + author: Michael Scherer (@mscherer) name: funcd short_description: Use funcd to connect to target description: diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index a22a98476c..c9ce58567d 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -7,6 +7,7 @@ __metaclass__ = type DOCUMENTATION = """ name: dependent short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables +author: Felix Fontein (@felixfontein) version_added: 3.1.0 description: - "Takes the input lists and returns a list with elements that are lists, dictionaries, diff --git a/tests/sanity/extra/botmeta.json b/tests/sanity/extra/botmeta.json index cba49c90cd..c546ab5fd7 100644 --- a/tests/sanity/extra/botmeta.json +++ b/tests/sanity/extra/botmeta.json @@ -1,8 +1,5 @@ { "include_symlinks": false, - "prefixes": [ - ".github/BOTMETA.yml" - ], "output": "path-line-column-message", "requirements": [ "PyYAML", diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py index e8ea819394..43bd087aa5 100755 --- a/tests/sanity/extra/botmeta.py +++ b/tests/sanity/extra/botmeta.py @@ -57,8 +57,19 @@ def read_authors(filename): return author +def extract_author_name(author): + m = AUTHOR_REGEX.match(author) + if m: + return m.group(1) + if author == 'Ansible Core Team': + return '$team_ansible_core' + return None + + def validate(filename, filedata): - if filename.startswith('plugins/doc_fragments/'): + if not filename.startswith('plugins/'): + return + if 
filename.startswith(('plugins/doc_fragments/', 'plugins/module_utils/')): return # Compile lis tof all active and inactive maintainers all_maintainers = filedata['maintainers'] + filedata['ignore'] @@ -70,21 +81,16 @@ def validate(filename, filedata): return maintainers = read_authors(filename) for maintainer in maintainers: - m = AUTHOR_REGEX.match(maintainer) - if m: - maintainer = m.group(1) - if maintainer not in all_maintainers: - msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( - maintainer, filename, ', '.join(all_maintainers)) - if REPORT_MISSING_MAINTAINERS: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) + maintainer = extract_author_name(maintainer) + if maintainer is not None and maintainer not in all_maintainers: + msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( + maintainer, filename, ', '.join(all_maintainers)) + if REPORT_MISSING_MAINTAINERS: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) def main(): """Main entry point.""" - paths = sys.argv[1:] or sys.stdin.read().splitlines() - paths = [path for path in paths if path.endswith('/aliases')] - try: with open(FILENAME, 'rb') as f: botmeta = yaml.safe_load(f) @@ -100,7 +106,7 @@ def main(): # Validate schema MacroSchema = Schema({ - (str): str, + (str): Any(str, None), }, extra=PREVENT_EXTRA) FilesSchema = Schema({ @@ -135,7 +141,11 @@ def main(): def convert_macros(text, macros): def f(m): - return macros[m.group(1)] + macro = m.group(1) + replacement = (macros[macro] or '') + if macro == 'team_ansible_core': + return '$team_ansible_core %s' % replacement + return replacement return macro_re.sub(f, text) @@ -153,31 +163,38 @@ def main(): return # Scan all files - for dirpath, dirnames, filenames in os.walk('plugins/'): - for file in filenames: - if file.endswith('.pyc'): - continue - filename = os.path.join(dirpath, file) - if os.path.islink(filename): - continue - if os.path.isfile(filename): - 
matching_files = [] - for file, filedata in files.items(): - if filename.startswith(file): - matching_files.append((file, filedata)) - if not matching_files: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Did not find any entry for %s' % filename)) + unmatched = set(files) + for dirs in ('plugins', 'tests', 'changelogs'): + for dirpath, dirnames, filenames in os.walk(dirs): + for file in sorted(filenames): + if file.endswith('.pyc'): + continue + filename = os.path.join(dirpath, file) + if os.path.islink(filename): + continue + if os.path.isfile(filename): + matching_files = [] + for file, filedata in files.items(): + if filename.startswith(file): + matching_files.append((file, filedata)) + if file in unmatched: + unmatched.remove(file) + if not matching_files: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Did not find any entry for %s' % filename)) - matching_files.sort(key=lambda kv: kv[0]) - filedata = dict() - for k in LIST_ENTRIES: - filedata[k] = [] - for dummy, data in matching_files: - for k, v in data.items(): - if k in LIST_ENTRIES: - v = filedata[k] + v - filedata[k] = v - validate(filename, filedata) + matching_files.sort(key=lambda kv: kv[0]) + filedata = dict() + for k in LIST_ENTRIES: + filedata[k] = [] + for dummy, data in matching_files: + for k, v in data.items(): + if k in LIST_ENTRIES: + v = filedata[k] + v + filedata[k] = v + validate(filename, filedata) + + for file in unmatched: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Entry %s was not used' % file)) if __name__ == '__main__': From 024e7419da8ec50a94799d5fa22004c2298b7935 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 26 Jul 2021 16:54:00 +0200 Subject: [PATCH 0228/2828] BOTMETA: enforce entries for new plugins/modules, add documentation for creating new plugins/modules (#3088) * More BOTMETA improvements. * Improve BOTMETA test, start reporting missing entries for new plugins/modules. * Add instructions for creating new plugins and modules. 
--- .github/BOTMETA.yml | 24 ++++++++++++++---------- CONTRIBUTING.md | 31 +++++++++++++++++++++++++++++++ tests/sanity/extra/botmeta.py | 24 ++++++++++-------------- 3 files changed, 55 insertions(+), 24 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index b91d01d44e..fb08599a13 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -123,10 +123,12 @@ files: maintainers: Ajpantuso $filters/jc.py: maintainers: kellyjonbrazil + $filters/json_query.py: {} $filters/list.py: maintainers: vbotka $filters/path_join_shim.py: maintainers: felixfontein + $filters/random_mac.py: {} $filters/time.py: maintainers: resmo $filters/version_sort.py: @@ -204,7 +206,7 @@ files: $lookups/random_string.py: maintainers: Akasurde $lookups/redis.py: - maintainers: jpmens + maintainers: $team_ansible_core jpmens $lookups/shelvefile.py: {} $lookups/tss.py: maintainers: amigus @@ -591,8 +593,6 @@ files: maintainers: gthiemonge $modules/net_tools/haproxy.py: maintainers: ravibhure Normo - $modules/net_tools/: - maintainers: nerzhul $modules/net_tools/infinity/infinity.py: maintainers: MeganLiu $modules/net_tools/ip_netns.py: @@ -616,8 +616,10 @@ files: ignore: andyhky $modules/net_tools/netcup_dns.py: maintainers: nbuchwitz + $modules/net_tools/nsupdate.py: + maintainers: nerzhul $modules/net_tools/omapi_host.py: - maintainers: amasolov + maintainers: amasolov nerzhul $modules/net_tools/pritunl/: maintainers: Lowess $modules/net_tools/nios/: @@ -692,7 +694,7 @@ files: labels: rocketchat ignore: ramondelafuente $modules/notification/say.py: - maintainers: ansible mpdehaan + maintainers: $team_ansible_core mpdehaan $modules/notification/sendgrid.py: maintainers: makaimc $modules/notification/slack.py: @@ -717,7 +719,7 @@ files: $modules/packaging/language/easy_install.py: maintainers: mattupstate $modules/packaging/language/gem.py: - maintainers: ansible johanwiren + maintainers: $team_ansible_core johanwiren labels: gem 
$modules/packaging/language/maven_artifact.py: maintainers: tumbl3w33d turb @@ -978,7 +980,7 @@ files: $modules/system/dpkg_divert.py: maintainers: quidame $modules/system/facter.py: - maintainers: ansible gamethis + maintainers: $team_ansible_core gamethis labels: facter $modules/system/filesystem.py: maintainers: pilou- abulimov quidame @@ -1023,7 +1025,7 @@ files: $modules/system/nosh.py: maintainers: tacatac $modules/system/ohai.py: - maintainers: ansible mpdehaan + maintainers: $team_ansible_core mpdehaan labels: ohai $modules/system/open_iscsi.py: maintainers: srvg @@ -1104,7 +1106,7 @@ files: $modules/web_infrastructure/gunicorn.py: maintainers: agmezr $modules/web_infrastructure/htpasswd.py: - maintainers: ansible + maintainers: $team_ansible_core labels: htpasswd $modules/web_infrastructure/jboss.py: maintainers: $team_jboss jhoekx @@ -1124,7 +1126,9 @@ files: labels: jira $modules/web_infrastructure/nginx_status_info.py: maintainers: resmo - $modules/web_infrastructure/: + $modules/web_infrastructure/rundeck_acl_policy.py: + maintainers: nerzhul + $modules/web_infrastructure/rundeck_project.py: maintainers: nerzhul $modules/web_infrastructure/sophos_utm/: maintainers: $team_e_spirit diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4dfde91fca..ba30ed1e02 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -34,3 +34,34 @@ You can also read [our Quick-start development guide](https://github.com/ansible If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how do it quickly. If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it. + +## Creating new modules or plugins + +Creating new modules and plugins requires a bit more work than other Pull Requests. + +1. Please make sure that your new module or plugin is of interest to a larger audience. 
Very specialized modules or plugins that + can only be used by very few people should better be added to more specialized collections. + +2. When creating a new module or plugin, please make sure that you follow various guidelines: + + - Follow [development conventions](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html); + - Follow [documentation standards](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html) and + the [Ansible style guide](https://docs.ansible.com/ansible/devel/dev_guide/style_guide/index.html#style-guide); + - Make sure your modules and plugins are [GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0-standalone.html) licensed + (new module_utils can also be [BSD-2-clause](https://opensource.org/licenses/BSD-2-Clause) licensed); + - Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests + which run in CI. + +3. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link + from `plugins/modules/` respectively `plugins/action/` to the actual module/plugin code. (Other plugin types should not use + subdirectories.) + + - Action plugins need to be accompanied by a module, even if the module file only contains documentation + (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/` + than the action plugin has in `plugins/action/`. + +4. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the + same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People + listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests. 
+ + When you add a new plugin/module, we expect that you perform maintainer duty for at least some time after contributing it. diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py index 43bd087aa5..b5c49b5a4b 100755 --- a/tests/sanity/extra/botmeta.py +++ b/tests/sanity/extra/botmeta.py @@ -17,7 +17,7 @@ from voluptuous import Required, Schema, Invalid from voluptuous.humanize import humanize_error -REPORT_MISSING_MAINTAINERS = False +REPORT_NO_MAINTAINERS = False FILENAME = '.github/BOTMETA.yml' @@ -73,20 +73,16 @@ def validate(filename, filedata): return # Compile lis tof all active and inactive maintainers all_maintainers = filedata['maintainers'] + filedata['ignore'] - if not all_maintainers: - if REPORT_MISSING_MAINTAINERS: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename)) - return - if filename.startswith('plugins/filter/'): - return - maintainers = read_authors(filename) - for maintainer in maintainers: - maintainer = extract_author_name(maintainer) - if maintainer is not None and maintainer not in all_maintainers: - msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( - maintainer, filename, ', '.join(all_maintainers)) - if REPORT_MISSING_MAINTAINERS: + if not filename.startswith('plugins/filter/'): + maintainers = read_authors(filename) + for maintainer in maintainers: + maintainer = extract_author_name(maintainer) + if maintainer is not None and maintainer not in all_maintainers: + msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( + maintainer, filename, ', '.join(all_maintainers)) print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) + if not all_maintainers and REPORT_NO_MAINTAINERS: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename)) def main(): From 7da2c16b4a48d420e8522bc0a5d292a0a9a9ca65 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky 
<103110+russoz@users.noreply.github.com> Date: Wed, 28 Jul 2021 04:24:29 +1200 Subject: [PATCH 0229/2828] added supports_check_mode=True to info/facts modules (#3084) * added supports_check_mode=True to info/facts modules * added changelog fragment * rolled back vertica_info * rolled back utm_proxy_*_info * updated changelog fragment with latest adjustments * Update changelogs/fragments/3084-info-checkmode.yaml Co-authored-by: Felix Fontein * added check mode to xenserver_facts + oneview_*_info * added check mode to utm_proxy_*_info * updated changelog Co-authored-by: Felix Fontein --- changelogs/fragments/3084-info-checkmode.yaml | 24 +++++++++++++++++++ plugins/module_utils/oneview.py | 4 ++-- .../cloud/alicloud/ali_instance_info.py | 5 +++- .../cloud/memset/memset_memstore_info.py | 2 +- .../cloud/memset/memset_server_info.py | 2 +- plugins/modules/cloud/misc/xenserver_facts.py | 4 +++- plugins/modules/cloud/rackspace/rax_facts.py | 1 + .../cloud/smartos/smartos_image_info.py | 2 +- plugins/modules/net_tools/snmp_facts.py | 2 +- .../oneview/oneview_datacenter_info.py | 5 +++- .../oneview/oneview_enclosure_info.py | 5 +++- .../oneview/oneview_ethernet_network_info.py | 5 +++- .../oneview/oneview_fc_network_info.py | 5 +++- .../oneview/oneview_fcoe_network_info.py | 5 +++- ...oneview_logical_interconnect_group_info.py | 5 +++- .../oneview/oneview_network_set_info.py | 5 +++- .../oneview/oneview_san_manager_info.py | 5 +++- .../redfish/idrac_redfish_info.py | 2 +- .../remote_management/redfish/redfish_info.py | 2 +- plugins/modules/system/xfconf_info.py | 1 + .../sophos_utm/utm_aaa_group_info.py | 3 ++- .../sophos_utm/utm_ca_host_key_cert_info.py | 3 ++- .../utm_network_interface_address_info.py | 3 ++- .../sophos_utm/utm_proxy_frontend_info.py | 5 ++-- .../sophos_utm/utm_proxy_location_info.py | 5 ++-- 25 files changed, 85 insertions(+), 25 deletions(-) create mode 100644 changelogs/fragments/3084-info-checkmode.yaml diff --git 
a/changelogs/fragments/3084-info-checkmode.yaml b/changelogs/fragments/3084-info-checkmode.yaml new file mode 100644 index 0000000000..4e9fa85075 --- /dev/null +++ b/changelogs/fragments/3084-info-checkmode.yaml @@ -0,0 +1,24 @@ +bugfixes: + - ali_instance_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - memset_memstore_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - memset_server_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - xenserver_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - rax_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - smartos_image_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - snmp_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_datacenter_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_enclosure_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_ethernet_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_fc_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_fcoe_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_logical_interconnect_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). 
+ - oneview_network_set_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_san_manager_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - idrac_redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - xfconf_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - utm_aaa_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - utm_ca_host_key_cert_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - utm_network_interface_address_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - utm_proxy_frontend_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - utm_proxy_location_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). diff --git a/plugins/module_utils/oneview.py b/plugins/module_utils/oneview.py index 3ebb057ca7..66e1d6d4c7 100644 --- a/plugins/module_utils/oneview.py +++ b/plugins/module_utils/oneview.py @@ -201,7 +201,7 @@ class OneViewModuleBase(object): resource_client = None - def __init__(self, additional_arg_spec=None, validate_etag_support=False): + def __init__(self, additional_arg_spec=None, validate_etag_support=False, supports_check_mode=False): """ OneViewModuleBase constructor. 
@@ -210,7 +210,7 @@ class OneViewModuleBase(object): """ argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support) - self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode) self._check_hpe_oneview_sdk() self._create_oneview_client() diff --git a/plugins/modules/cloud/alicloud/ali_instance_info.py b/plugins/modules/cloud/alicloud/ali_instance_info.py index 23665bbcad..06df6cb4f1 100644 --- a/plugins/modules/cloud/alicloud/ali_instance_info.py +++ b/plugins/modules/cloud/alicloud/ali_instance_info.py @@ -386,7 +386,10 @@ def main(): filters=dict(type='dict') ) ) - module = AnsibleModule(argument_spec=argument_spec) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) if HAS_FOOTMARK is False: module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) diff --git a/plugins/modules/cloud/memset/memset_memstore_info.py b/plugins/modules/cloud/memset/memset_memstore_info.py index df5ede1a62..e880b46009 100644 --- a/plugins/modules/cloud/memset/memset_memstore_info.py +++ b/plugins/modules/cloud/memset/memset_memstore_info.py @@ -149,7 +149,7 @@ def main(): api_key=dict(required=True, type='str', no_log=True), name=dict(required=True, type='str') ), - supports_check_mode=False + supports_check_mode=True, ) # populate the dict with the user-provided vars. diff --git a/plugins/modules/cloud/memset/memset_server_info.py b/plugins/modules/cloud/memset/memset_server_info.py index 50fe39fd99..853e2c884d 100644 --- a/plugins/modules/cloud/memset/memset_server_info.py +++ b/plugins/modules/cloud/memset/memset_server_info.py @@ -274,7 +274,7 @@ def main(): api_key=dict(required=True, type='str', no_log=True), name=dict(required=True, type='str') ), - supports_check_mode=False + supports_check_mode=True, ) # populate the dict with the user-provided vars. 
diff --git a/plugins/modules/cloud/misc/xenserver_facts.py b/plugins/modules/cloud/misc/xenserver_facts.py index 25923cb288..bc01c56ecb 100644 --- a/plugins/modules/cloud/misc/xenserver_facts.py +++ b/plugins/modules/cloud/misc/xenserver_facts.py @@ -160,7 +160,9 @@ def get_srs(session): def main(): - module = AnsibleModule({}) + module = AnsibleModule( + supports_check_mode=True, + ) if not HAVE_XENAPI: module.fail_json(changed=False, msg="python xen api required for this module") diff --git a/plugins/modules/cloud/rackspace/rax_facts.py b/plugins/modules/cloud/rackspace/rax_facts.py index 386ca7cfa9..f9fd89556f 100644 --- a/plugins/modules/cloud/rackspace/rax_facts.py +++ b/plugins/modules/cloud/rackspace/rax_facts.py @@ -124,6 +124,7 @@ def main(): required_together=rax_required_together(), mutually_exclusive=[['address', 'id', 'name']], required_one_of=[['address', 'id', 'name']], + supports_check_mode=True, ) if not HAS_PYRAX: diff --git a/plugins/modules/cloud/smartos/smartos_image_info.py b/plugins/modules/cloud/smartos/smartos_image_info.py index f1c75bc26c..369559f52a 100644 --- a/plugins/modules/cloud/smartos/smartos_image_info.py +++ b/plugins/modules/cloud/smartos/smartos_image_info.py @@ -97,7 +97,7 @@ def main(): argument_spec=dict( filters=dict(default=None), ), - supports_check_mode=False, + supports_check_mode=True, ) image_facts = ImageFacts(module) diff --git a/plugins/modules/net_tools/snmp_facts.py b/plugins/modules/net_tools/snmp_facts.py index 221eda30f9..e9d0ebc94c 100644 --- a/plugins/modules/net_tools/snmp_facts.py +++ b/plugins/modules/net_tools/snmp_facts.py @@ -288,7 +288,7 @@ def main(): ['username', 'level', 'integrity', 'authkey'], ['privacy', 'privkey'], ), - supports_check_mode=False, + supports_check_mode=True, ) m_args = module.params diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py index 13ab883330..04d4fc0c7e 100644 --- 
a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py +++ b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py @@ -116,7 +116,10 @@ class DatacenterInfoModule(OneViewModuleBase): ) def __init__(self): - super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + super(DatacenterInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) def execute_module(self): diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py index 1889dc1a4f..a9bbb8e799 100644 --- a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py +++ b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py @@ -163,7 +163,10 @@ class EnclosureInfoModule(OneViewModuleBase): ) def __init__(self): - super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + super(EnclosureInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) def execute_module(self): diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py index 4021b768f9..63a9e1efae 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py @@ -114,7 +114,10 @@ class EthernetNetworkInfoModule(OneViewModuleBase): ) def __init__(self): - super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + super(EthernetNetworkInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) self.resource_client = self.oneview_client.ethernet_networks diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py 
b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py index 21d9673b51..86430402fe 100644 --- a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py @@ -83,7 +83,10 @@ class FcNetworkInfoModule(OneViewModuleBase): params=dict(required=False, type='dict') ) - super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec) + super(FcNetworkInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) def execute_module(self): diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py index e207670a9a..b0ede13820 100644 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py @@ -82,7 +82,10 @@ class FcoeNetworkInfoModule(OneViewModuleBase): params=dict(type='dict'), ) - super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec) + super(FcoeNetworkInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) def execute_module(self): diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py index 1f7f3c9613..e8670a33a8 100644 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py @@ -96,7 +96,10 @@ class LogicalInterconnectGroupInfoModule(OneViewModuleBase): params=dict(type='dict'), ) - super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec) + super(LogicalInterconnectGroupInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) def 
execute_module(self): if self.module.params.get('name'): diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/plugins/modules/remote_management/oneview/oneview_network_set_info.py index bc76cb36b1..5cb7463b4c 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set_info.py +++ b/plugins/modules/remote_management/oneview/oneview_network_set_info.py @@ -135,7 +135,10 @@ class NetworkSetInfoModule(OneViewModuleBase): ) def __init__(self): - super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + super(NetworkSetInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) def execute_module(self): diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py index 5dbc28afc2..c80ef474cc 100644 --- a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py +++ b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py @@ -90,7 +90,10 @@ class SanManagerInfoModule(OneViewModuleBase): ) def __init__(self): - super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + super(SanManagerInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) self.resource_client = self.oneview_client.san_managers def execute_module(self): diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/plugins/modules/remote_management/redfish/idrac_redfish_info.py index cb1aa8f34f..fb137acca3 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_info.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_info.py @@ -191,7 +191,7 @@ def main(): mutually_exclusive=[ ('username', 'auth_token'), ], - supports_check_mode=False + supports_check_mode=True, ) category = module.params['category'] diff --git 
a/plugins/modules/remote_management/redfish/redfish_info.py b/plugins/modules/remote_management/redfish/redfish_info.py index 41d5bfb04a..49bd7c6ee3 100644 --- a/plugins/modules/remote_management/redfish/redfish_info.py +++ b/plugins/modules/remote_management/redfish/redfish_info.py @@ -318,7 +318,7 @@ def main(): mutually_exclusive=[ ('username', 'auth_token'), ], - supports_check_mode=False + supports_check_mode=True, ) # admin credentials used for authentication diff --git a/plugins/modules/system/xfconf_info.py b/plugins/modules/system/xfconf_info.py index 9cef821071..766267dd3d 100644 --- a/plugins/modules/system/xfconf_info.py +++ b/plugins/modules/system/xfconf_info.py @@ -132,6 +132,7 @@ class XFConfInfo(CmdModuleHelper): required_by=dict( property=['channel'] ), + supports_check_mode=True, ) command = 'xfconf-query' diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py index 88356a2e54..d5660ab73c 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py @@ -110,7 +110,8 @@ def main(): module = UTMModule( argument_spec=dict( name=dict(type='str', required=True) - ) + ), + supports_check_mode=True, ) try: UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py index 02542532f7..9aa16d4aca 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py @@ -88,7 +88,8 @@ def main(): module = UTMModule( argument_spec=dict( name=dict(type='str', required=True) - ) + ), + supports_check_mode=True, ) try: # This is needed because the bool value only accepts int values in the backend diff 
--git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py index 3f623d5a86..700799ab59 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py @@ -84,7 +84,8 @@ def main(): module = UTMModule( argument_spec=dict( name=dict(type='str', required=True) - ) + ), + supports_check_mode=True, ) try: UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py index 263b976045..62a832d7c6 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py @@ -128,8 +128,9 @@ def main(): key_to_check_for_changes = [] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True) - ) + name=dict(type='str', required=True), + ), + supports_check_mode=True, ) try: UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py index afc0f5efcd..99174a89b1 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py @@ -109,8 +109,9 @@ def main(): key_to_check_for_changes = [] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True) - ) + name=dict(type='str', required=True), + ), + supports_check_mode=True, ) try: UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() From 87baa5860ad98811b03466ca78238afe85d9264b Mon Sep 17 00:00:00 2001 
From: Felix Fontein Date: Tue, 27 Jul 2021 20:26:26 +0200 Subject: [PATCH 0230/2828] Actually enable BOTMETA sanity test to force new plugins/modules to have BOTMETA entry. (#3096) --- tests/sanity/extra/botmeta.py | 40 +++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py index b5c49b5a4b..f84c7535f2 100755 --- a/tests/sanity/extra/botmeta.py +++ b/tests/sanity/extra/botmeta.py @@ -17,7 +17,40 @@ from voluptuous import Required, Schema, Invalid from voluptuous.humanize import humanize_error -REPORT_NO_MAINTAINERS = False +IGNORE_NO_MAINTAINERS = [ + 'plugins/cache/memcached.py', + 'plugins/cache/redis.py', + 'plugins/callback/cgroup_memory_recap.py', + 'plugins/callback/context_demo.py', + 'plugins/callback/counter_enabled.py', + 'plugins/callback/hipchat.py', + 'plugins/callback/jabber.py', + 'plugins/callback/log_plays.py', + 'plugins/callback/logdna.py', + 'plugins/callback/logentries.py', + 'plugins/callback/null.py', + 'plugins/callback/selective.py', + 'plugins/callback/slack.py', + 'plugins/callback/splunk.py', + 'plugins/callback/yaml.py', + 'plugins/inventory/nmap.py', + 'plugins/inventory/virtualbox.py', + 'plugins/connection/chroot.py', + 'plugins/connection/iocage.py', + 'plugins/connection/lxc.py', + 'plugins/lookup/cartesian.py', + 'plugins/lookup/chef_databag.py', + 'plugins/lookup/consul_kv.py', + 'plugins/lookup/credstash.py', + 'plugins/lookup/cyberarkpassword.py', + 'plugins/lookup/flattened.py', + 'plugins/lookup/keyring.py', + 'plugins/lookup/lastpass.py', + 'plugins/lookup/passwordstore.py', + 'plugins/lookup/shelvefile.py', + 'plugins/filter/json_query.py', + 'plugins/filter/random_mac.py', +] FILENAME = '.github/BOTMETA.yml' @@ -81,8 +114,11 @@ def validate(filename, filedata): msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( maintainer, filename, ', '.join(all_maintainers)) 
print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) - if not all_maintainers and REPORT_NO_MAINTAINERS: + should_have_no_maintainer = filename in IGNORE_NO_MAINTAINERS + if not all_maintainers and not should_have_no_maintainer: print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename)) + if all_maintainers and should_have_no_maintainer: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Please remove %s from the ignore list of %s' % (filename, sys.argv[0]))) def main(): From 5be4adc434643d024186f964d437208e29677546 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 28 Jul 2021 17:49:37 +1200 Subject: [PATCH 0231/2828] ejabberd_user - refactoring and simplification (#3093) * ejabberd_user - refactoring and simplification * added changelog fragment * Update changelogs/fragments/3093-ejabberd_user-refactor.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../3093-ejabberd_user-refactor.yaml | 2 + .../web_infrastructure/ejabberd_user.py | 59 +++++-------------- 2 files changed, 16 insertions(+), 45 deletions(-) create mode 100644 changelogs/fragments/3093-ejabberd_user-refactor.yaml diff --git a/changelogs/fragments/3093-ejabberd_user-refactor.yaml b/changelogs/fragments/3093-ejabberd_user-refactor.yaml new file mode 100644 index 0000000000..875ef6da71 --- /dev/null +++ b/changelogs/fragments/3093-ejabberd_user-refactor.yaml @@ -0,0 +1,2 @@ +bugfixes: + - ejabberd_user - replaced in-code check with ``required_if``, using ``get_bin_path()`` for the command, passing args to ``run_command()`` as list instead of string (https://github.com/ansible-collections/community.general/pull/3093). 
diff --git a/plugins/modules/web_infrastructure/ejabberd_user.py b/plugins/modules/web_infrastructure/ejabberd_user.py index be63c92080..e6cdd72b5e 100644 --- a/plugins/modules/web_infrastructure/ejabberd_user.py +++ b/plugins/modules/web_infrastructure/ejabberd_user.py @@ -72,11 +72,6 @@ import syslog from ansible.module_utils.basic import AnsibleModule -class EjabberdUserException(Exception): - """ Base exception for EjabberdUser class object """ - pass - - class EjabberdUser(object): """ This object represents a user resource for an ejabberd server. The object manages user creation and deletion using ejabberdctl. The following @@ -99,12 +94,7 @@ class EjabberdUser(object): changed. It will return True if the user does not match the supplied credentials and False if it does not """ - try: - options = [self.user, self.host, self.pwd] - (rc, out, err) = self.run_command('check_password', options) - except EjabberdUserException: - (rc, out, err) = (1, None, "required attribute(s) missing") - return rc + return self.run_command('check_password', [self.user, self.host, self.pwd]) @property def exists(self): @@ -112,12 +102,7 @@ class EjabberdUser(object): host specified. 
If the user exists True is returned, otherwise False is returned """ - try: - options = [self.user, self.host] - (rc, out, err) = self.run_command('check_account', options) - except EjabberdUserException: - (rc, out, err) = (1, None, "required attribute(s) missing") - return not bool(int(rc)) + return self.run_command('check_account', [self.user, self.host]) def log(self, entry): """ This method will log information to the local syslog facility """ @@ -129,44 +114,25 @@ class EjabberdUser(object): """ This method will run the any command specified and return the returns using the Ansible common module """ - if not all(options): - raise EjabberdUserException - - cmd = 'ejabberdctl %s ' % cmd - cmd += " ".join(options) - self.log('command: %s' % cmd) - return self.module.run_command(cmd.split()) + cmd = [self.module.get_bin_path('ejabberdctl'), cmd] + options + self.log('command: %s' % " ".join(cmd)) + return self.module.run_command(cmd) def update(self): """ The update method will update the credentials for the user provided """ - try: - options = [self.user, self.host, self.pwd] - (rc, out, err) = self.run_command('change_password', options) - except EjabberdUserException: - (rc, out, err) = (1, None, "required attribute(s) missing") - return (rc, out, err) + return self.run_command('change_password', [self.user, self.host, self.pwd]) def create(self): """ The create method will create a new user on the host with the password provided """ - try: - options = [self.user, self.host, self.pwd] - (rc, out, err) = self.run_command('register', options) - except EjabberdUserException: - (rc, out, err) = (1, None, "required attribute(s) missing") - return (rc, out, err) + return self.run_command('register', [self.user, self.host, self.pwd]) def delete(self): """ The delete method will delete the user from the host """ - try: - options = [self.user, self.host] - (rc, out, err) = self.run_command('unregister', options) - except EjabberdUserException: - (rc, out, err) = (1, 
None, "required attribute(s) missing") - return (rc, out, err) + return self.run_command('unregister', [self.user, self.host]) def main(): @@ -174,11 +140,14 @@ def main(): argument_spec=dict( host=dict(required=True, type='str'), username=dict(required=True, type='str'), - password=dict(default=None, type='str', no_log=True), + password=dict(type='str', no_log=True), state=dict(default='present', choices=['present', 'absent']), - logging=dict(default=False, type='bool') + logging=dict(default=False, type='bool') # deprecate in favour of c.g.syslogger? ), - supports_check_mode=True + required_if=[ + ('state', 'present', ['password']), + ], + supports_check_mode=True, ) obj = EjabberdUser(module) From 0b70b3baff58f26e6a3dac12ed125d3fe4e9195a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 28 Jul 2021 18:22:18 +1200 Subject: [PATCH 0232/2828] django_manage - using list instead of string in run_command() (#3098) * django_manage - using list instead of string in run_command() * added changelog fragment --- .../3098-django_manage-cmd-list.yaml | 2 + .../web_infrastructure/django_manage.py | 40 +++++++++---------- 2 files changed, 21 insertions(+), 21 deletions(-) create mode 100644 changelogs/fragments/3098-django_manage-cmd-list.yaml diff --git a/changelogs/fragments/3098-django_manage-cmd-list.yaml b/changelogs/fragments/3098-django_manage-cmd-list.yaml new file mode 100644 index 0000000000..8522059ff6 --- /dev/null +++ b/changelogs/fragments/3098-django_manage-cmd-list.yaml @@ -0,0 +1,2 @@ +bugfixes: + - django_manage - refactor to call ``run_command()`` passing command as a list instead of string (https://github.com/ansible-collections/community.general/pull/3098). 
diff --git a/plugins/modules/web_infrastructure/django_manage.py b/plugins/modules/web_infrastructure/django_manage.py index ba38abd90e..98ffdc446b 100644 --- a/plugins/modules/web_infrastructure/django_manage.py +++ b/plugins/modules/web_infrastructure/django_manage.py @@ -256,20 +256,20 @@ def main(): argument_spec=dict( command=dict(required=True, type='str'), project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']), - settings=dict(default=None, required=False, type='path'), - pythonpath=dict(default=None, required=False, type='path', aliases=['python_path']), - virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']), + settings=dict(type='path'), + pythonpath=dict(type='path', aliases=['python_path']), + virtualenv=dict(type='path', aliases=['virtual_env']), - apps=dict(default=None, required=False), - cache_table=dict(default=None, required=False, type='str'), - clear=dict(default=False, required=False, type='bool'), - database=dict(default=None, required=False, type='str'), - failfast=dict(default=False, required=False, type='bool', aliases=['fail_fast']), - fixtures=dict(default=None, required=False, type='str'), - testrunner=dict(default=None, required=False, type='str', aliases=['test_runner']), - skip=dict(default=None, required=False, type='bool'), - merge=dict(default=None, required=False, type='bool'), - link=dict(default=None, required=False, type='bool'), + apps=dict(), + cache_table=dict(type='str'), + clear=dict(default=False, type='bool'), + database=dict(type='str'), + failfast=dict(default=False, type='bool', aliases=['fail_fast']), + fixtures=dict(type='str'), + testrunner=dict(type='str', aliases=['test_runner']), + skip=dict(type='bool'), + merge=dict(type='bool'), + link=dict(type='bool'), ), ) @@ -279,8 +279,6 @@ def main(): for param in specific_params: value = module.params[param] - if param in specific_boolean_params: - value = module.boolean(value) if value and param not in 
command_allowed_param_map[command]: module.fail_json(msg='%s param is incompatible with command=%s' % (param, command)) @@ -290,23 +288,23 @@ def main(): _ensure_virtualenv(module) - cmd = "./manage.py %s" % (command, ) + cmd = ["./manage.py", command] if command in noinput_commands: - cmd = '%s --noinput' % cmd + cmd.append("--noinput") for param in general_params: if module.params[param]: - cmd = '%s --%s=%s' % (cmd, param, module.params[param]) + cmd.append('--%s=%s' % (param, module.params[param])) for param in specific_boolean_params: - if module.boolean(module.params[param]): - cmd = '%s --%s' % (cmd, param) + if module.params[param]: + cmd.append('--%s' % param) # these params always get tacked on the end of the command for param in end_of_command_params: if module.params[param]: - cmd = '%s %s' % (cmd, module.params[param]) + cmd.append(module.params[param]) rc, out, err = module.run_command(cmd, cwd=project_path) if rc != 0: From 549dfaae6415999755f2ca722b39e8c314c67aa1 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 28 Jul 2021 18:43:09 +1200 Subject: [PATCH 0233/2828] gunicorn - minor refactoring (#3092) * minor refactoring in gunicorn module * added changelog fragment * reworked the gunicorn bin path part of the code, per PR * Update changelogs/fragments/3092-gunicorn-refactor.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/3092-gunicorn-refactor.yaml | 2 ++ .../modules/web_infrastructure/gunicorn.py | 19 +++++++------------ 2 files changed, 9 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/3092-gunicorn-refactor.yaml diff --git a/changelogs/fragments/3092-gunicorn-refactor.yaml b/changelogs/fragments/3092-gunicorn-refactor.yaml new file mode 100644 index 0000000000..114e865add --- /dev/null +++ b/changelogs/fragments/3092-gunicorn-refactor.yaml @@ -0,0 +1,2 @@ +minor_changes: + - gunicorn - search for ``gunicorn`` binary in more paths 
(https://github.com/ansible-collections/community.general/pull/3092). diff --git a/plugins/modules/web_infrastructure/gunicorn.py b/plugins/modules/web_infrastructure/gunicorn.py index 5703055623..4c9e5da45b 100644 --- a/plugins/modules/web_infrastructure/gunicorn.py +++ b/plugins/modules/web_infrastructure/gunicorn.py @@ -101,14 +101,12 @@ gunicorn: import os import time -# import ansible utils from ansible.module_utils.basic import AnsibleModule def search_existing_config(config, option): ''' search in config file for specified option ''' if config and os.path.isfile(config): - data_config = None with open(config, 'r') as f: for line in f: if option in line: @@ -135,15 +133,12 @@ def main(): module = AnsibleModule( argument_spec=dict( app=dict(required=True, type='str', aliases=['name']), - venv=dict(required=False, type='path', default=None, aliases=['virtualenv']), - config=dict(required=False, default=None, type='path', aliases=['conf']), - chdir=dict(required=False, type='path', default=None), - pid=dict(required=False, type='path', default=None), - user=dict(required=False, type='str'), - worker=dict(required=False, - type='str', - choices=['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp'] - ), + venv=dict(type='path', aliases=['virtualenv']), + config=dict(type='path', aliases=['conf']), + chdir=dict(type='path'), + pid=dict(type='path'), + user=dict(type='str'), + worker=dict(type='str', choices=['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp']), ) ) @@ -165,7 +160,7 @@ def main(): if venv: gunicorn_command = "/".join((venv, 'bin', 'gunicorn')) else: - gunicorn_command = 'gunicorn' + gunicorn_command = module.get_bin_path('gunicorn') # to daemonize the process options = ["-D"] From cde95641635feeedeafce484bfef62cd668ad5bd Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 29 Jul 2021 17:49:52 +1200 Subject: [PATCH 0234/2828] deploy_helper - changed in-code condition to 
required_if (#3104) * changed in-code condition to required_if * added changelog fragment --- .../3104-deploy_helper-required_if.yaml | 2 ++ .../web_infrastructure/deploy_helper.py | 23 +++++++++---------- 2 files changed, 13 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/3104-deploy_helper-required_if.yaml diff --git a/changelogs/fragments/3104-deploy_helper-required_if.yaml b/changelogs/fragments/3104-deploy_helper-required_if.yaml new file mode 100644 index 0000000000..ee48461003 --- /dev/null +++ b/changelogs/fragments/3104-deploy_helper-required_if.yaml @@ -0,0 +1,2 @@ +bugfixes: + - deploy_helper - improved parameter checking by using standard Ansible construct (https://github.com/ansible-collections/community.general/pull/3104). diff --git a/plugins/modules/web_infrastructure/deploy_helper.py b/plugins/modules/web_infrastructure/deploy_helper.py index f879594bc3..f73c9c1f18 100644 --- a/plugins/modules/web_infrastructure/deploy_helper.py +++ b/plugins/modules/web_infrastructure/deploy_helper.py @@ -359,8 +359,6 @@ class DeployHelper(object): self.module.fail_json(msg="%s exists but is not a symbolic link" % path) def create_link(self, source, link_name): - changed = False - if os.path.islink(link_name): norm_link = os.path.normpath(os.path.realpath(link_name)) norm_source = os.path.normpath(os.path.realpath(source)) @@ -458,15 +456,18 @@ def main(): module = AnsibleModule( argument_spec=dict( path=dict(aliases=['dest'], required=True, type='path'), - release=dict(required=False, type='str', default=None), - releases_path=dict(required=False, type='str', default='releases'), - shared_path=dict(required=False, type='path', default='shared'), - current_path=dict(required=False, type='path', default='current'), - keep_releases=dict(required=False, type='int', default=5), - clean=dict(required=False, type='bool', default=True), - unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'), - state=dict(required=False, 
choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present') + release=dict(type='str'), + releases_path=dict(type='str', default='releases'), + shared_path=dict(type='path', default='shared'), + current_path=dict(type='path', default='current'), + keep_releases=dict(type='int', default=5), + clean=dict(type='bool', default=True), + unfinished_filename=dict(type='str', default='DEPLOY_UNFINISHED'), + state=dict(choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present') ), + required_if=[ + ('state', 'finalize', ['release']), + ], add_file_common_args=True, supports_check_mode=True ) @@ -493,8 +494,6 @@ def main(): result['ansible_facts'] = {'deploy_helper': facts} elif deploy_helper.state == 'finalize': - if not deploy_helper.release: - module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)") if deploy_helper.keep_releases <= 0: module.fail_json(msg="'keep_releases' should be at least 1") From 2935b011edae0184e219dc157259bf986e9fb251 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 30 Jul 2021 08:30:20 +1200 Subject: [PATCH 0235/2828] ansible_galaxy_install - new module (#2933) * initial commit * multiple changes: - added a proper process_command_output() - adjusted the output_params fields (and removed other *_params fields) * added RETURN documentation, plus few adjustments * fixed sanity tests * updated BOTMETA.yml * further adjustments * integration tests - first commit * removed unused files from integration test * added role installation tests * removed extraneous cmd line option * added requirement-file installation tests * adjusted documentation and output variable names * fixed integration test * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update 
plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Felix Fontein * Update tests/integration/targets/ansible_galaxy_install/aliases Co-authored-by: Felix Fontein * Per comments in the PR: - fixed missing paths case - fixed install parsing (regexp) for ansible-galaxy collection install in v2.10 * changed the collection installed in test to something unlikely to come embedded in Ansible itself * fixed logic for Ansible 2.9 * kill trailing whitespace * changed default language from C.UTF-8 to en_US.UTF-8 * updated c.g version * skipping test in python 2.6, as ansible-galaxy no longer supports it in devel * Multiple changes: - improved docs on ansible 2.9 and python 2.6 - removed method __changed__() - unnecessary since tracking changes in the ansible29_change var - renamed methods __run29__() and __run210plus__() to __setup29__() and __setup210plus__(), respectively - ansible 2.9 warning for requirements_file only when type is "both" * sanity fix * further adjustments * removed extraneous doc * changed method to determine remote ansible version * do not allow type=both in Ansible 2.9 * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Felix Fontein * changed method names per PR Co-authored-by: Ajpantuso Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/modules/ansible_galaxy_install.py | 1 + 
.../language/ansible_galaxy_install.py | 318 ++++++++++++++++++ .../targets/ansible_galaxy_install/aliases | 3 + .../ansible_galaxy_install/files/test.yml | 11 + .../ansible_galaxy_install/tasks/main.yml | 95 ++++++ 6 files changed, 430 insertions(+) create mode 120000 plugins/modules/ansible_galaxy_install.py create mode 100644 plugins/modules/packaging/language/ansible_galaxy_install.py create mode 100644 tests/integration/targets/ansible_galaxy_install/aliases create mode 100644 tests/integration/targets/ansible_galaxy_install/files/test.yml create mode 100644 tests/integration/targets/ansible_galaxy_install/tasks/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index fb08599a13..859d88bb84 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -707,6 +707,8 @@ files: maintainers: makaimc $modules/notification/typetalk.py: maintainers: tksmd + $modules/packaging/language/ansible_galaxy_install.py: + maintainers: russoz $modules/packaging/language/bower.py: maintainers: mwarkentin $modules/packaging/language/bundler.py: diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py new file mode 120000 index 0000000000..369d39dbe1 --- /dev/null +++ b/plugins/modules/ansible_galaxy_install.py @@ -0,0 +1 @@ +packaging/language/ansible_galaxy_install.py \ No newline at end of file diff --git a/plugins/modules/packaging/language/ansible_galaxy_install.py b/plugins/modules/packaging/language/ansible_galaxy_install.py new file mode 100644 index 0000000000..9e9b5cc4f6 --- /dev/null +++ b/plugins/modules/packaging/language/ansible_galaxy_install.py @@ -0,0 +1,318 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2021, Alexei Znamensky +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: ansible_galaxy_install +author: + - "Alexei Znamensky 
(@russoz)" +short_description: Install Ansible roles or collections using ansible-galaxy +version_added: 3.5.0 +description: + - This module allows the installation of Ansible collections or roles using C(ansible-galaxy). +notes: + - > + B(Ansible 2.9/2.10): The C(ansible-galaxy) command changed significantly between Ansible 2.9 and + ansible-base 2.10 (later ansible-core 2.11). See comments in the parameters. +requirements: + - Ansible 2.9, ansible-base 2.10, or ansible-core 2.11 or newer +options: + type: + description: + - The type of installation performed by C(ansible-galaxy). + - If I(type) is C(both), then I(requirements_file) must be passed and it may contain both roles and collections. + - "Note however that the opposite is not true: if using a I(requirements_file), then I(type) can be any of the three choices." + - "B(Ansible 2.9): The option C(both) will have the same effect as C(role)." + type: str + choices: [collection, role, both] + required: true + name: + description: + - Name of the collection or role being installed. + - Versions can be specified with C(ansible-galaxy) usual formats. For example, C(community.docker:1.6.1) or C(ansistrano.deploy,3.8.0). + - I(name) and I(requirements_file) are mutually exclusive. + type: str + requirements_file: + description: + - Path to a file containing a list of requirements to be installed. + - It works for I(type) equals to C(collection) and C(role). + - I(name) and I(requirements_file) are mutually exclusive. + - "B(Ansible 2.9): It can only be used to install either I(type=role) or I(type=collection), but not both at the same run." + type: path + dest: + description: + - The path to the directory containing your collections or roles, according to the value of I(type). + - > + Please notice that C(ansible-galaxy) will not install collections with I(type=both), when I(requirements_file) + contains both roles and collections and I(dest) is specified. 
+ type: path + force: + description: + - Force overwriting an existing role or collection. + - Using I(force=true) is mandatory when downgrading. + - "B(Ansible 2.9 and 2.10): Must be C(true) to upgrade roles and collections." + type: bool + default: false + ack_ansible29: + description: + - Acknowledge using Ansible 2.9 with its limitations, and prevents the module from generating warnings about them. + - This option is completely ignored if using a version Ansible greater than C(2.9.x). + type: bool + default: false +""" + +EXAMPLES = """ +- name: Install collection community.network + community.general.ansible_galaxy_install: + type: collection + name: community.network + +- name: Install role at specific path + community.general.ansible_galaxy_install: + type: role + name: ansistrano.deploy + dest: /ansible/roles + +- name: Install collections and roles together + community.general.ansible_galaxy_install: + type: both + requirements_file: requirements.yml + +- name: Force-install collection community.network at specific version + community.general.ansible_galaxy_install: + type: collection + name: community.network:3.0.2 + force: true + +""" + +RETURN = """ + type: + description: The value of the I(type) parameter. + type: str + returned: always + name: + description: The value of the I(name) parameter. + type: str + returned: always + dest: + description: The value of the I(dest) parameter. + type: str + returned: always + requirements_file: + description: The value of the I(requirements_file) parameter. + type: str + returned: always + force: + description: The value of the I(force) parameter. + type: bool + returned: always + installed_roles: + description: + - If I(requirements_file) is specified instead, returns dictionary with all the roles installed per path. + - If I(name) is specified, returns that role name and the version installed per path. + - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand." 
+ type: dict + returned: always when installing roles + contains: + "": + description: Roles and versions for that path. + type: dict + sample: + /home/user42/.ansible/roles: + ansistrano.deploy: 3.9.0 + baztian.xfce: v0.0.3 + /custom/ansible/roles: + ansistrano.deploy: 3.8.0 + installed_collections: + description: + - If I(requirements_file) is specified instead, returns dictionary with all the collections installed per path. + - If I(name) is specified, returns that collection name and the version installed per path. + - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand." + type: dict + returned: always when installing collections + contains: + "": + description: Collections and versions for that path + type: dict + sample: + /home/az/.ansible/collections/ansible_collections: + community.docker: 1.6.0 + community.general: 3.0.2 + /custom/ansible/ansible_collections: + community.general: 3.1.0 + new_collections: + description: New collections installed by this module. + returned: success + type: dict + sample: + community.general: 3.1.0 + community.docker: 1.6.1 + new_roles: + description: New roles installed by this module. + returned: success + type: dict + sample: + ansistrano.deploy: 3.8.0 + baztian.xfce: v0.0.3 +""" + +import re + +from ansible_collections.community.general.plugins.module_utils.module_helper import CmdModuleHelper, ArgFormat + + +class AnsibleGalaxyInstall(CmdModuleHelper): + _RE_GALAXY_VERSION = re.compile(r'^ansible-galaxy(?: \[core)? 
(?P\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?') + _RE_LIST_PATH = re.compile(r'^# (?P.*)$') + _RE_LIST_COLL = re.compile(r'^(?P\w+\.\w+)\s+(?P[\d\.]+)\s*$') + _RE_LIST_ROLE = re.compile(r'^- (?P\w+\.\w+),\s+(?P[\d\.]+)\s*$') + _RE_INSTALL_OUTPUT = None # Set after determining ansible version, see __init_module__() + ansible_version = None + is_ansible29 = None + + output_params = ('type', 'name', 'dest', 'requirements_file', 'force') + module = dict( + argument_spec=dict( + type=dict(type='str', choices=('collection', 'role', 'both'), required=True), + name=dict(type='str'), + requirements_file=dict(type='path'), + dest=dict(type='path'), + force=dict(type='bool', default=False), + ack_ansible29=dict(type='bool', default=False), + ), + mutually_exclusive=[('name', 'requirements_file')], + required_one_of=[('name', 'requirements_file')], + required_if=[('type', 'both', ['requirements_file'])], + supports_check_mode=False, + ) + + command = 'ansible-galaxy' + command_args_formats = dict( + type=dict(fmt=lambda v: [] if v == 'both' else [v]), + galaxy_cmd=dict(), + requirements_file=dict(fmt=('-r', '{0}'),), + dest=dict(fmt=('-p', '{0}'),), + force=dict(fmt="--force", style=ArgFormat.BOOLEAN), + ) + force_lang = "en_US.UTF-8" + check_rc = True + + def _get_ansible_galaxy_version(self): + ansible_galaxy = self.module.get_bin_path("ansible-galaxy", required=True) + dummy, out, dummy = self.module.run_command([ansible_galaxy, "--version"], check_rc=True) + line = out.splitlines()[0] + match = self._RE_GALAXY_VERSION.match(line) + if not match: + raise RuntimeError("Unable to determine ansible-galaxy version from: {0}".format(line)) + version = match.group("version") + version = tuple(int(x) for x in version.split('.')[:3]) + return version + + def __init_module__(self): + self.ansible_version = self._get_ansible_galaxy_version() + self.is_ansible29 = self.ansible_version < (2, 10) + if self.is_ansible29: + self._RE_INSTALL_OUTPUT = re.compile(r"^(?:.*Installing 
'(?P\w+\.\w+):(?P[\d\.]+)'.*" + r'|- (?P\w+\.\w+) \((?P[\d\.]+)\)' + r' was installed successfully)$') + else: + # Collection install output changed: + # ansible-base 2.10: "coll.name (x.y.z)" + # ansible-core 2.11+: "coll.name:x.y.z" + self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P\w+\.\w+)(?: \(|:)(?P[\d\.]+)\)?' + r'|- (?P\w+\.\w+) \((?P[\d\.]+)\))' + r' was installed successfully$') + + @staticmethod + def _process_output_list(*args): + if "None of the provided paths were usable" in args[1]: + return [] + return args[1].splitlines() + + def _list_element(self, _type, path_re, elem_re): + params = ({'type': _type}, {'galaxy_cmd': 'list'}, 'dest') + elems = self.run_command(params=params, + publish_rc=False, publish_out=False, publish_err=False, + process_output=self._process_output_list, + check_rc=False) + elems_dict = {} + current_path = None + for line in elems: + if line.startswith("#"): + match = path_re.match(line) + if not match: + continue + if self.vars.dest is not None and match.group('path') != self.vars.dest: + current_path = None + continue + current_path = match.group('path') if match else None + elems_dict[current_path] = {} + + elif current_path is not None: + match = elem_re.match(line) + if not match or (self.vars.name is not None and match.group('elem') != self.vars.name): + continue + elems_dict[current_path][match.group('elem')] = match.group('version') + return elems_dict + + def _list_collections(self): + return self._list_element('collection', self._RE_LIST_PATH, self._RE_LIST_COLL) + + def _list_roles(self): + return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE) + + def _setup29(self): + self.vars.set("new_collections", {}) + self.vars.set("new_roles", {}) + self.vars.set("ansible29_change", False, change=True, output=False) + if not self.vars.ack_ansible29: + self.module.warn("Ansible 2.9 or older: unable to retrieve lists of roles and collections already installed") + if self.vars.requirements_file is not None and 
self.vars.type == 'both': + self.module.warn("Ansible 2.9 or older: will install only roles from requirement files") + + def _setup210plus(self): + self.vars.set("new_collections", {}, change=True) + self.vars.set("new_roles", {}, change=True) + if self.vars.type != "collection": + self.vars.installed_roles = self._list_roles() + if self.vars.type != "roles": + self.vars.installed_collections = self._list_collections() + + def __run__(self): + if self.is_ansible29: + if self.vars.type == 'both': + raise ValueError("Type 'both' not supported in Ansible 2.9") + self._setup29() + else: + self._setup210plus() + params = ('type', {'galaxy_cmd': 'install'}, 'force', 'dest', 'requirements_file', 'name') + self.run_command(params=params) + + def process_command_output(self, rc, out, err): + for line in out.splitlines(): + match = self._RE_INSTALL_OUTPUT.match(line) + if not match: + continue + if match.group("collection"): + self.vars.new_collections[match.group("collection")] = match.group("cversion") + if self.is_ansible29: + self.vars.ansible29_change = True + elif match.group("role"): + self.vars.new_roles[match.group("role")] = match.group("rversion") + if self.is_ansible29: + self.vars.ansible29_change = True + + +def main(): + galaxy = AnsibleGalaxyInstall() + galaxy.run() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/ansible_galaxy_install/aliases b/tests/integration/targets/ansible_galaxy_install/aliases new file mode 100644 index 0000000000..ca7873ddab --- /dev/null +++ b/tests/integration/targets/ansible_galaxy_install/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group3 +skip/python2.6 diff --git a/tests/integration/targets/ansible_galaxy_install/files/test.yml b/tests/integration/targets/ansible_galaxy_install/files/test.yml new file mode 100644 index 0000000000..9d2848e087 --- /dev/null +++ b/tests/integration/targets/ansible_galaxy_install/files/test.yml @@ -0,0 +1,11 @@ +--- +roles: + # Install a role from Ansible 
Galaxy. + - name: geerlingguy.java + version: 1.9.6 + +collections: + # Install a collection from Ansible Galaxy. + - name: geerlingguy.php_roles + version: 0.9.3 + source: https://galaxy.ansible.com diff --git a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml new file mode 100644 index 0000000000..232c96aff5 --- /dev/null +++ b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml @@ -0,0 +1,95 @@ +--- +################################################### +- name: Install collection netbox.netbox + community.general.ansible_galaxy_install: + type: collection + name: netbox.netbox + register: install_c0 + +- name: Assert collection was installed + assert: + that: + - install_c0 is changed + - '"netbox.netbox" in install_c0.new_collections' + +- name: Install collection netbox.netbox (again) + community.general.ansible_galaxy_install: + type: collection + name: netbox.netbox + register: install_c1 + +- name: Assert collection was not installed + assert: + that: + - install_c1 is not changed + +################################################### +- name: Install role ansistrano.deploy + community.general.ansible_galaxy_install: + type: role + name: ansistrano.deploy + register: install_r0 + +- name: Assert collection was installed + assert: + that: + - install_r0 is changed + - '"ansistrano.deploy" in install_r0.new_roles' + +- name: Install role ansistrano.deploy (again) + community.general.ansible_galaxy_install: + type: role + name: ansistrano.deploy + register: install_r1 + +- name: Assert role was not installed + assert: + that: + - install_r1 is not changed + +################################################### +- name: + set_fact: + reqs_file: '{{ output_dir }}/reqs.yaml' + +- name: Copy requirements file + copy: + src: 'files/test.yml' + dest: '{{ reqs_file }}' + +- name: Install from requirements file + community.general.ansible_galaxy_install: + type: both + 
requirements_file: "{{ reqs_file }}" + register: install_rq0 + ignore_errors: true + +- name: Assert requirements file was installed (Ansible >2.9) + assert: + that: + - install_rq0 is changed + - '"geerlingguy.java" in install_rq0.new_roles' + - '"geerlingguy.php_roles" in install_rq0.new_collections' + when: + - (ansible_version.major != 2 or ansible_version.minor != 9) + +- name: Assert requirements file was installed (Ansible 2.9) + assert: + that: + - install_rq0 is failed + - install_rq0 is not changed + when: + - ansible_version.major == 2 + - ansible_version.minor == 9 + +- name: Install from requirements file (again) + community.general.ansible_galaxy_install: + type: both + requirements_file: "{{ reqs_file }}" + register: install_rq1 + ignore_errors: true + +- name: Assert requirements file was not installed + assert: + that: + - install_rq1 is not changed From d974ca32ae1b2cd17066cd7e8dbb60f7c923ed67 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 31 Jul 2021 04:00:14 +1200 Subject: [PATCH 0236/2828] removed extraneous dependency in integration test (#3114) --- tests/integration/targets/apache2_module/meta/main.yml | 2 -- tests/integration/targets/archive/meta/main.yml | 1 - tests/integration/targets/deploy_helper/meta/main.yml | 2 -- tests/integration/targets/flatpak/meta/main.yml | 1 - tests/integration/targets/flatpak_remote/meta/main.yml | 1 - tests/integration/targets/gem/meta/main.yml | 1 - tests/integration/targets/hg/meta/main.yml | 1 - tests/integration/targets/iso_create/meta/main.yml | 1 - tests/integration/targets/iso_extract/meta/main.yml | 1 - tests/integration/targets/launchd/meta/main.yml | 4 ---- tests/integration/targets/locale_gen/meta/main.yml | 2 -- tests/integration/targets/zypper/meta/main.yml | 2 -- tests/integration/targets/zypper_repository/meta/main.yml | 2 -- 13 files changed, 21 deletions(-) delete mode 100644 tests/integration/targets/apache2_module/meta/main.yml delete 
mode 100644 tests/integration/targets/deploy_helper/meta/main.yml delete mode 100644 tests/integration/targets/launchd/meta/main.yml delete mode 100644 tests/integration/targets/locale_gen/meta/main.yml delete mode 100644 tests/integration/targets/zypper/meta/main.yml delete mode 100644 tests/integration/targets/zypper_repository/meta/main.yml diff --git a/tests/integration/targets/apache2_module/meta/main.yml b/tests/integration/targets/apache2_module/meta/main.yml deleted file mode 100644 index 07faa21776..0000000000 --- a/tests/integration/targets/apache2_module/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_tests diff --git a/tests/integration/targets/archive/meta/main.yml b/tests/integration/targets/archive/meta/main.yml index ca521ab1ef..5438ced5c3 100644 --- a/tests/integration/targets/archive/meta/main.yml +++ b/tests/integration/targets/archive/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - setup_pkg_mgr - - prepare_tests diff --git a/tests/integration/targets/deploy_helper/meta/main.yml b/tests/integration/targets/deploy_helper/meta/main.yml deleted file mode 100644 index 07faa21776..0000000000 --- a/tests/integration/targets/deploy_helper/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_tests diff --git a/tests/integration/targets/flatpak/meta/main.yml b/tests/integration/targets/flatpak/meta/main.yml index 314f77eba9..258cd4345c 100644 --- a/tests/integration/targets/flatpak/meta/main.yml +++ b/tests/integration/targets/flatpak/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - - prepare_tests - setup_flatpak_remote diff --git a/tests/integration/targets/flatpak_remote/meta/main.yml b/tests/integration/targets/flatpak_remote/meta/main.yml index 314f77eba9..258cd4345c 100644 --- a/tests/integration/targets/flatpak_remote/meta/main.yml +++ b/tests/integration/targets/flatpak_remote/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - - prepare_tests - setup_flatpak_remote diff --git 
a/tests/integration/targets/gem/meta/main.yml b/tests/integration/targets/gem/meta/main.yml index ca521ab1ef..5438ced5c3 100644 --- a/tests/integration/targets/gem/meta/main.yml +++ b/tests/integration/targets/gem/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - setup_pkg_mgr - - prepare_tests diff --git a/tests/integration/targets/hg/meta/main.yml b/tests/integration/targets/hg/meta/main.yml index ca521ab1ef..5438ced5c3 100644 --- a/tests/integration/targets/hg/meta/main.yml +++ b/tests/integration/targets/hg/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - setup_pkg_mgr - - prepare_tests diff --git a/tests/integration/targets/iso_create/meta/main.yml b/tests/integration/targets/iso_create/meta/main.yml index ca521ab1ef..5438ced5c3 100644 --- a/tests/integration/targets/iso_create/meta/main.yml +++ b/tests/integration/targets/iso_create/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - setup_pkg_mgr - - prepare_tests diff --git a/tests/integration/targets/iso_extract/meta/main.yml b/tests/integration/targets/iso_extract/meta/main.yml index bdc4dfe016..0e51c36ebd 100644 --- a/tests/integration/targets/iso_extract/meta/main.yml +++ b/tests/integration/targets/iso_extract/meta/main.yml @@ -1,4 +1,3 @@ dependencies: - setup_pkg_mgr - - prepare_tests - setup_epel diff --git a/tests/integration/targets/launchd/meta/main.yml b/tests/integration/targets/launchd/meta/main.yml deleted file mode 100644 index 039249398e..0000000000 --- a/tests/integration/targets/launchd/meta/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -dependencies: - - prepare_tests diff --git a/tests/integration/targets/locale_gen/meta/main.yml b/tests/integration/targets/locale_gen/meta/main.yml deleted file mode 100644 index 07faa21776..0000000000 --- a/tests/integration/targets/locale_gen/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_tests diff --git a/tests/integration/targets/zypper/meta/main.yml b/tests/integration/targets/zypper/meta/main.yml deleted file mode 100644 index 
07faa21776..0000000000 --- a/tests/integration/targets/zypper/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_tests diff --git a/tests/integration/targets/zypper_repository/meta/main.yml b/tests/integration/targets/zypper_repository/meta/main.yml deleted file mode 100644 index 07faa21776..0000000000 --- a/tests/integration/targets/zypper_repository/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_tests From d9533c44aa4895ba9a3303926153e375d571b80b Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 31 Jul 2021 04:07:38 +1200 Subject: [PATCH 0237/2828] apache2_module - multiple improvements (#3106) * multiple improvements * added changelog fragment * comment and name in int test files * added notes to the documentation * removed the extraneous changelog frag * Update plugins/modules/web_infrastructure/apache2_module.py * adjusted doc text for sanity check * Update plugins/modules/web_infrastructure/apache2_module.py Co-authored-by: Felix Fontein * removed extraneous dependency in integration test Co-authored-by: Felix Fontein --- .../fragments/3106-apache2_module-review.yaml | 2 + .../web_infrastructure/apache2_module.py | 35 ++++----- .../apache2_module/tasks/actualtest.yml | 74 ++++++++----------- .../targets/apache2_module/tasks/main.yml | 24 +++++- 4 files changed, 71 insertions(+), 64 deletions(-) create mode 100644 changelogs/fragments/3106-apache2_module-review.yaml diff --git a/changelogs/fragments/3106-apache2_module-review.yaml b/changelogs/fragments/3106-apache2_module-review.yaml new file mode 100644 index 0000000000..d7840b2511 --- /dev/null +++ b/changelogs/fragments/3106-apache2_module-review.yaml @@ -0,0 +1,2 @@ +minor_changes: + - apache2_module - minor refactoring improving code quality, readability and speed (https://github.com/ansible-collections/community.general/pull/3106). 
diff --git a/plugins/modules/web_infrastructure/apache2_module.py b/plugins/modules/web_infrastructure/apache2_module.py index 4cc0ef8b37..d85ed0158f 100644 --- a/plugins/modules/web_infrastructure/apache2_module.py +++ b/plugins/modules/web_infrastructure/apache2_module.py @@ -49,6 +49,9 @@ options: type: bool default: False requirements: ["a2enmod","a2dismod"] +notes: + - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions. + Whether it works on others depend on whether the C(a2enmod) and C(a2dismod) tools are available or not. ''' EXAMPLES = ''' @@ -109,13 +112,14 @@ import re # import module snippets from ansible.module_utils.basic import AnsibleModule +_re_threaded = re.compile(r'threaded: *yes') + def _run_threaded(module): control_binary = _get_ctl_binary(module) + result, stdout, stderr = module.run_command([control_binary, "-V"]) - result, stdout, stderr = module.run_command("%s -V" % control_binary) - - return bool(re.search(r'threaded:[ ]*yes', stdout)) + return bool(_re_threaded.search(stdout)) def _get_ctl_binary(module): @@ -124,15 +128,12 @@ def _get_ctl_binary(module): if ctl_binary is not None: return ctl_binary - module.fail_json( - msg="Neither of apache2ctl nor apachctl found." - " At least one apache control binary is necessary." - ) + module.fail_json(msg="Neither of apache2ctl nor apachctl found. 
At least one apache control binary is necessary.") def _module_is_enabled(module): control_binary = _get_ctl_binary(module) - result, stdout, stderr = module.run_command("%s -M" % control_binary) + result, stdout, stderr = module.run_command([control_binary, "-M"]) if result != 0: error_msg = "Error executing %s: %s" % (control_binary, stderr) @@ -168,7 +169,7 @@ def create_apache_identifier(name): # re expressions to extract subparts of names re_workarounds = [ - ('php', r'^(php\d)\.'), + ('php', re.compile(r'^(php\d)\.')), ] for a2enmod_spelling, module_name in text_workarounds: @@ -178,7 +179,7 @@ def create_apache_identifier(name): for search, reexpr in re_workarounds: if search in name: try: - rematch = re.search(reexpr, name) + rematch = reexpr.search(name) return rematch.group(1) + '_module' except AttributeError: pass @@ -201,15 +202,15 @@ def _set_state(module, state): result=success_msg, warnings=module.warnings) - a2mod_binary = module.get_bin_path(a2mod_binary) + a2mod_binary = [module.get_bin_path(a2mod_binary)] if a2mod_binary is None: module.fail_json(msg="%s not found. 
Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary)) if not want_enabled and force: # force exists only for a2dismod on debian - a2mod_binary += ' -f' + a2mod_binary.append('-f') - result, stdout, stderr = module.run_command("%s %s" % (a2mod_binary, name)) + result, stdout, stderr = module.run_command(a2mod_binary + [name]) if _module_is_enabled(module) == want_enabled: module.exit_json(changed=True, @@ -241,10 +242,10 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - identifier=dict(required=False, type='str'), - force=dict(required=False, type='bool', default=False), + identifier=dict(type='str'), + force=dict(type='bool', default=False), state=dict(default='present', choices=['absent', 'present']), - ignore_configcheck=dict(required=False, type='bool', default=False), + ignore_configcheck=dict(type='bool', default=False), ), supports_check_mode=True, ) @@ -253,7 +254,7 @@ def main(): name = module.params['name'] if name == 'cgi' and _run_threaded(module): - module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module %s possible." % name) + module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module cgi possible.") if not module.params['identifier']: module.params['identifier'] = create_apache_identifier(module.params['name']) diff --git a/tests/integration/targets/apache2_module/tasks/actualtest.yml b/tests/integration/targets/apache2_module/tasks/actualtest.yml index 24ba4f27cd..886e746f07 100644 --- a/tests/integration/targets/apache2_module/tasks/actualtest.yml +++ b/tests/integration/targets/apache2_module/tasks/actualtest.yml @@ -13,40 +13,25 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-- name: install apache via apt - apt: - name: "{{item}}" - state: present - when: "ansible_os_family == 'Debian'" - with_items: - - apache2 - - libapache2-mod-evasive - -- name: install apache via zypper - community.general.zypper: - name: apache2 - state: present - when: "ansible_os_family == 'Suse'" - - name: disable userdir module - apache2_module: + community.general.apache2_module: name: userdir state: absent register: userdir_first_disable - name: disable userdir module, second run - apache2_module: + community.general.apache2_module: name: userdir state: absent register: disable -- name: ensure apache2_module is idempotent +- name: ensure community.general.apache2_module is idempotent assert: that: - disable is not changed - name: enable userdir module - apache2_module: + community.general.apache2_module: name: userdir state: present register: enable @@ -57,18 +42,18 @@ - enable is changed - name: enable userdir module, second run - apache2_module: + community.general.apache2_module: name: userdir state: present register: enabletwo -- name: ensure apache2_module is idempotent +- name: ensure community.general.apache2_module is idempotent assert: that: - 'not enabletwo.changed' - name: disable userdir module, final run - apache2_module: + community.general.apache2_module: name: userdir state: absent register: disablefinal @@ -79,13 +64,13 @@ - 'disablefinal.changed' - name: set userdir to original state - apache2_module: + community.general.apache2_module: name: userdir state: present when: userdir_first_disable is changed - name: ensure autoindex enabled - apache2_module: + community.general.apache2_module: name: autoindex state: present @@ -93,55 +78,56 @@ when: "ansible_os_family == 'Debian'" block: - name: force disable of autoindex # bug #2499 - apache2_module: + community.general.apache2_module: name: autoindex state: absent force: True - name: reenable autoindex - apache2_module: + community.general.apache2_module: name: autoindex state: present - - 
name: enable evasive module, test https://github.com/ansible/ansible/issues/22635 - apache2_module: - name: evasive - state: present - + # mod_evasive is enabled by default upon the installation, so disable first and enable second, to preserve the config - name: disable evasive module - apache2_module: + community.general.apache2_module: name: evasive state: absent + - name: enable evasive module, test https://github.com/ansible/ansible/issues/22635 + community.general.apache2_module: + name: evasive + state: present + - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669 - apache2_module: + community.general.apache2_module: name: dump_io state: present ignore_errors: True register: enable_dumpio_wrong - name: disable dump_io - apache2_module: + community.general.apache2_module: name: dump_io identifier: dumpio_module state: absent - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669 - apache2_module: + community.general.apache2_module: name: dump_io identifier: dumpio_module state: present register: enable_dumpio_correct_1 - name: ensure idempotency with identifier - apache2_module: + community.general.apache2_module: name: dump_io identifier: dumpio_module state: present register: enable_dumpio_correct_2 - name: disable dump_io - apache2_module: + community.general.apache2_module: name: dump_io identifier: dumpio_module state: absent @@ -153,7 +139,7 @@ - enable_dumpio_correct_2 is not changed - name: disable mpm modules - apache2_module: + community.general.apache2_module: name: "{{ item }}" state: absent ignore_configcheck: True @@ -163,7 +149,7 @@ - mpm_prefork - name: enabled mpm_event - apache2_module: + community.general.apache2_module: name: mpm_event state: present ignore_configcheck: True @@ -175,7 +161,7 @@ - 'enabledmpmevent.changed' - name: switch between mpm_event and mpm_worker - apache2_module: + community.general.apache2_module: name: "{{ item.name }}" state: 
"{{ item.state }}" ignore_configcheck: True @@ -186,7 +172,7 @@ state: present - name: ensure mpm_worker is already enabled - apache2_module: + community.general.apache2_module: name: mpm_worker state: present register: enabledmpmworker @@ -197,7 +183,7 @@ - 'not enabledmpmworker.changed' - name: try to disable all mpm modules with configcheck - apache2_module: + community.general.apache2_module: name: "{{item}}" state: absent with_items: @@ -214,7 +200,7 @@ with_items: "{{ remove_with_configcheck.results }}" - name: try to disable all mpm modules without configcheck - apache2_module: + community.general.apache2_module: name: "{{item}}" state: absent ignore_configcheck: True @@ -224,7 +210,7 @@ - mpm_prefork - name: enabled mpm_event to restore previous state - apache2_module: + community.general.apache2_module: name: mpm_event state: present ignore_configcheck: True diff --git a/tests/integration/targets/apache2_module/tasks/main.yml b/tests/integration/targets/apache2_module/tasks/main.yml index 2ec308857a..d840ff60e8 100644 --- a/tests/integration/targets/apache2_module/tasks/main.yml +++ b/tests/integration/targets/apache2_module/tasks/main.yml @@ -5,8 +5,22 @@ #################################################################### +- name: install apache via apt + apt: + name: "{{item}}" + state: present + when: "ansible_os_family == 'Debian'" + with_items: + - apache2 + - libapache2-mod-evasive -- name: +- name: install apache via zypper + community.general.zypper: + name: apache2 + state: present + when: "ansible_os_family == 'Suse'" + +- name: test apache2_module block: - name: get list of enabled modules shell: apache2ctl -M | sort @@ -17,8 +31,12 @@ - name: get list of enabled modules shell: apache2ctl -M | sort register: modules_after - - debug: var=modules_before - - debug: var=modules_after + - name: modules_before + debug: + var: modules_before + - name: modules_after + debug: + var: modules_after - name: ensure that all test modules are disabled again 
assert: that: modules_before.stdout == modules_after.stdout From 43fe26d83cd405786ed6d000ecf278f3bb6a76c4 Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Fri, 30 Jul 2021 19:03:57 +0100 Subject: [PATCH 0238/2828] Keycloak: add client_rolemapping management (#2941) * Add Keycloak kc_client_rolemapping module * Fix documentation * Add unit tests for keycloak_client_rolemapping Keycloak module * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Fix documentation * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Minor fix * Add check mode * Refactoring: rename function from get_client_roles to get_client_roles_by_id * BOTMETA.yml: keycloak_client_rolemapping - add myself as maintainer * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + .../identity/keycloak/keycloak.py | 123 +++- .../keycloak/keycloak_client_rolemapping.py | 347 +++++++++++ .../modules/keycloak_client_rolemapping.py | 1 + .../test_keycloak_client_rolemapping.py | 572 ++++++++++++++++++ 5 files changed, 1043 insertions(+), 2 deletions(-) create mode 100644 plugins/modules/identity/keycloak/keycloak_client_rolemapping.py create mode 120000 plugins/modules/keycloak_client_rolemapping.py create mode 100644 
tests/unit/plugins/modules/identity/keycloak/test_keycloak_client_rolemapping.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 859d88bb84..4912a03ba4 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -502,6 +502,8 @@ files: maintainers: elfelip Gaetan2907 $modules/identity/keycloak/keycloak_clientscope.py: maintainers: Gaetan2907 + $modules/identity/keycloak/keycloak_client_rolemapping.py: + maintainers: Gaetan2907 $modules/identity/keycloak/keycloak_group.py: maintainers: adamgoossens $modules/identity/keycloak/keycloak_realm.py: diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 75ef2bba02..c782e3690c 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -62,6 +62,10 @@ URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}" URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models" URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}" +URL_CLIENT_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}" +URL_CLIENT_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available" +URL_CLIENT_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite" + URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows" URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}" URL_AUTHENTICATION_FLOW_COPY = "{url}/admin/realms/{realm}/authentication/flows/{copyfrom}/copy" @@ -376,8 +380,8 @@ class KeycloakAPI(object): def create_client(self, clientrep, realm="master"): """ Create a client in keycloak - :param clientrep: Client representation of client to be created. 
Must at least contain field clientId - :param realm: realm for client to be created + :param clientrep: Client representation of client to be created. Must at least contain field clientId. + :param realm: realm for client to be created. :return: HTTPResponse object on success """ client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm) @@ -405,6 +409,121 @@ class KeycloakAPI(object): self.module.fail_json(msg='Could not delete client %s in realm %s: %s' % (id, realm, str(e))) + def get_client_roles_by_id(self, cid, realm="master"): + """ Fetch the roles of the a client on the Keycloak server. + + :param cid: ID of the client from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. + :return: The rollemappings of specified group and client of the realm (default "master"). + """ + client_roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) + try: + return json.loads(to_native(open_url(client_roles_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.module.fail_json(msg="Could not fetch rolemappings for client %s in realm %s: %s" + % (cid, realm, str(e))) + + def get_client_role_by_name(self, gid, cid, name, realm="master"): + """ Get the role ID of a client. + + :param gid: ID of the group from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param name: Name of the role. + :param realm: Realm from which to obtain the rolemappings. + :return: The ID of the role, None if not found. + """ + rolemappings = self.get_client_roles_by_id(cid, realm=realm) + for role in rolemappings: + if name == role['name']: + return role['id'] + return None + + def get_client_rolemapping_by_id(self, gid, cid, rid, realm='master'): + """ Obtain client representation by id + + :param gid: ID of the group from which to obtain the rolemappings. 
+ :param cid: ID of the client from which to obtain the rolemappings. + :param rid: ID of the role. + :param realm: client from this realm + :return: dict of rolemapping representation or None if none matching exist + """ + rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + try: + rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + for role in rolemappings: + if rid == role['id']: + return role + except Exception as e: + self.module.fail_json(msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) + return None + + def get_client_available_rolemappings(self, gid, cid, realm="master"): + """ Fetch the available role of a client in a specified goup on the Keycloak server. + + :param gid: ID of the group from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. + :return: The rollemappings of specified group and client of the realm (default "master"). + """ + available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid) + try: + return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) + + def get_client_composite_rolemappings(self, gid, cid, realm="master"): + """ Fetch the composite role of a client in a specified group on the Keycloak server. + + :param gid: ID of the group from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. 
+ :return: The rollemappings of specified group and client of the realm (default "master"). + """ + available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid) + try: + return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) + + def add_group_rolemapping(self, gid, cid, role_rep, realm="master"): + """ Fetch the composite role of a client in a specified goup on the Keycloak server. + + :param gid: ID of the group from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param role_rep: Representation of the role to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. + """ + available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + try: + open_url(available_rolemappings_url, method="POST", headers=self.restheaders, data=json.dumps(role_rep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) + + def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"): + """ Delete the rolemapping of a client in a specified group on the Keycloak server. + + :param gid: ID of the group from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param role_rep: Representation of the role to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. 
+ """ + available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + try: + open_url(available_rolemappings_url, method="DELETE", headers=self.restheaders, validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) + def get_client_templates(self, realm='master'): """ Obtains client template representations for client templates in a realm diff --git a/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py b/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py new file mode 100644 index 0000000000..e3d43d7919 --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_client_rolemapping + +short_description: Allows administration of Keycloak client_rolemapping with the Keycloak API +version_added: 3.5.0 + +description: + - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + + - Attributes are multi-valued in the Keycloak API. 
All attributes are lists of individual values and will + be returned that way by this module. You may pass single values for attributes when calling the module, + and this will be translated into a list suitable for the API. + + - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup + to the API to translate the name into the role ID. + + +options: + state: + description: + - State of the client_rolemapping. + - On C(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you provide. + - On C(absent), the client_rolemapping will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + type: str + description: + - They Keycloak realm under which this role_representation resides. + default: 'master' + + group_name: + type: str + description: + - Name of the group to be mapped. + - This parameter is required (can be replaced by gid for less API call). + + gid: + type: str + description: + - Id of the group to be mapped. + - This parameter is not required for updating or deleting the rolemapping but + providing it will reduce the number of API calls required. + + client_id: + type: str + description: + - Name of the client to be mapped (different than I(cid)). + - This parameter is required (can be replaced by cid for less API call). + + cid: + type: str + description: + - Id of the client to be mapped. + - This parameter is not required for updating or deleting the rolemapping but + providing it will reduce the number of API calls required. + + roles: + description: + - Roles to be mapped to the group. + type: list + elements: dict + suboptions: + name: + type: str + description: + - Name of the role_representation. + - This parameter is required only when creating or updating the role_representation. + id: + type: str + description: + - The unique identifier for this role_representation. 
+ - This parameter is not required for updating or deleting a role_representation but + providing it will reduce the number of API calls required. + +extends_documentation_fragment: +- community.general.keycloak + + +author: + - Gaëtan Daubresse (@Gaetan2907) +''' + +EXAMPLES = ''' +- name: Map a client role to a group, authentication with credentials + community.general.keycloak_client_rolemappings: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a client role to a group, authentication with token + community.general.keycloak_client_rolemappings: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Unmap client role from a group + community.general.keycloak_client_rolemappings: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: absent + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +''' + +RETURN = ''' +msg: + description: Message as to what action was taken + returned: always + type: str + sample: "Role role1 assigned to group group1." + +proposed: + description: role_representation representation of proposed changes to client_rolemapping. 
+ returned: always + type: dict + sample: { + clientId: "test" + } +existing: + description: + - role_representation representation of existing role_representation. + - The sample is truncated. + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +end_state: + description: + - role_representation representation of role_representation after module execution. + - The sample is truncated. + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + roles_spec = dict( + name=dict(type='str'), + id=dict(type='str'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + gid=dict(type='str'), + group_name=dict(type='str'), + cid=dict(type='str'), + client_id=dict(type='str'), + roles=dict(type='list', elements='dict', options=roles_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + 
state = module.params.get('state') + cid = module.params.get('cid') + client_id = module.params.get('client_id') + gid = module.params.get('gid') + group_name = module.params.get('group_name') + roles = module.params.get('roles') + + # Check the parameters + if cid is None and client_id is None: + module.fail_json(msg='Either the `client_id` or `cid` has to be specified.') + if gid is None and group_name is None: + module.fail_json(msg='Either the `group_name` or `gid` has to be specified.') + + # Get the potential missing parameters + if gid is None: + group_rep = kc.get_group_by_name(group_name, realm=realm) + if group_rep is not None: + gid = group_rep['id'] + else: + module.fail_json(msg='Could not fetch group %s:' % group_name) + if cid is None: + cid = kc.get_client_id(client_id, realm=realm) + if cid is None: + module.fail_json(msg='Could not fetch client %s:' % client_id) + if roles is None: + module.exit_json(msg="Nothing to do (no roles specified).") + else: + for role_index, role in enumerate(roles, start=0): + if role['name'] is None and role['id'] is None: + module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') + # Fetch missing role_id + if role['id'] is None: + role_id = kc.get_client_role_by_name(gid, cid, role['name'], realm=realm) + if role_id is not None: + role['id'] = role_id + else: + module.fail_json(msg='Could not fetch role %s:' % (role['name'])) + # Fetch missing role_name + else: + role['name'] = kc.get_client_rolemapping_by_id(gid, cid, role['id'], realm=realm)['name'] + if role['name'] is None: + module.fail_json(msg='Could not fetch role %s' % (role['id'])) + + # Get effective client-level role mappings + available_roles_before = kc.get_client_available_rolemappings(gid, cid, realm=realm) + assigned_roles_before = kc.get_client_composite_rolemappings(gid, cid, realm=realm) + + result['existing'] = assigned_roles_before + result['proposed'] = roles + + update_roles = [] + for role_index, role in 
enumerate(roles, start=0): + # Fetch roles to assign if state present + if state == 'present': + for available_role in available_roles_before: + if role['name'] == available_role['name']: + update_roles.append({ + 'id': role['id'], + 'name': role['name'], + }) + # Fetch roles to remove if state absent + else: + for assigned_role in assigned_roles_before: + if role['name'] == assigned_role['name']: + update_roles.append({ + 'id': role['id'], + 'name': role['name'], + }) + + if len(update_roles): + if state == 'present': + # Assign roles + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=update_roles) + if module.check_mode: + module.exit_json(**result) + kc.add_group_rolemapping(gid, cid, update_roles, realm=realm) + result['msg'] = 'Roles %s assigned to group %s.' % (update_roles, group_name) + assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + else: + # Remove mapping of role + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=update_roles) + if module.check_mode: + module.exit_json(**result) + kc.delete_group_rolemapping(gid, cid, update_roles, realm=realm) + result['msg'] = 'Roles %s removed from group %s.' % (update_roles, group_name) + assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + # Do nothing + else: + result['changed'] = False + result['msg'] = 'Nothing to do, roles %s are correctly mapped with group %s.' 
% (roles, group_name) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_client_rolemapping.py b/plugins/modules/keycloak_client_rolemapping.py new file mode 120000 index 0000000000..02243ca68d --- /dev/null +++ b/plugins/modules/keycloak_client_rolemapping.py @@ -0,0 +1 @@ +identity/keycloak/keycloak_client_rolemapping.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client_rolemapping.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client_rolemapping.py new file mode 100644 index 0000000000..8e753bc6d0 --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client_rolemapping.py @@ -0,0 +1,572 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_client_rolemapping + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_group_by_name=None, get_client_id=None, get_client_role_by_name=None, + get_client_rolemapping_by_id=None, get_client_available_rolemappings=None, + get_client_composite_rolemappings=None, add_group_rolemapping=None, + delete_group_rolemapping=None): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` 
methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + obj = keycloak_client_rolemapping.KeycloakAPI + with patch.object(obj, 'get_group_by_name', + side_effect=get_group_by_name) as mock_get_group_by_name: + with patch.object(obj, 'get_client_id', + side_effect=get_client_id) as mock_get_client_id: + with patch.object(obj, 'get_client_role_by_name', + side_effect=get_client_role_by_name) as mock_get_client_role_by_name: + with patch.object(obj, 'get_client_rolemapping_by_id', + side_effect=get_client_rolemapping_by_id) as mock_get_client_rolemapping_by_id: + with patch.object(obj, 'get_client_available_rolemappings', + side_effect=get_client_available_rolemappings) as mock_get_client_available_rolemappings: + with patch.object(obj, 'get_client_composite_rolemappings', + side_effect=get_client_composite_rolemappings) as mock_get_client_composite_rolemappings: + with patch.object(obj, 'add_group_rolemapping', + side_effect=add_group_rolemapping) as mock_add_group_rolemapping: + with patch.object(obj, 'delete_group_rolemapping', + side_effect=delete_group_rolemapping) as mock_delete_group_rolemapping: + yield mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, \ + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, \ + mock_delete_group_rolemapping + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + 
return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakRealm(ModuleTestCase): + def setUp(self): + super(TestKeycloakRealm, self).setUp() + self.module = keycloak_client_rolemapping + + def test_map_clientrole_to_group_with_name(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'realm': 'realm-name', + 'state': 'present', + 'client_id': 'test_client', + 'group_name': 'test_group', + 'roles': [ + { + 'name': 'test_role1', + }, + { + 'name': 'test_role1', + }, + ], + } + return_value_get_group_by_name = [{ + "access": { + "manage": "true", + "manageMembership": "true", + "view": "true" + }, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]" + }] + return_value_get_client_id 
= "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_available_rolemappings = [[ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ]] + return_value_get_client_composite_rolemappings = [ + None, + [ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ] + ] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_by_name=return_value_get_client_role_by_name, + get_client_available_rolemappings=return_value_get_client_available_rolemappings, + get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_group_by_name.call_count, 1) + self.assertEqual(mock_get_client_id.call_count, 1) + self.assertEqual(mock_add_group_rolemapping.call_count, 1) + 
self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0) + self.assertEqual(mock_get_client_available_rolemappings.call_count, 1) + self.assertEqual(mock_get_client_composite_rolemappings.call_count, 2) + self.assertEqual(mock_delete_group_rolemapping.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_map_clientrole_to_group_with_name_idempotency(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'realm': 'realm-name', + 'state': 'present', + 'client_id': 'test_client', + 'group_name': 'test_group', + 'roles': [ + { + 'name': 'test_role1', + }, + { + 'name': 'test_role1', + }, + ], + } + return_value_get_group_by_name = [{ + "access": { + "manage": "true", + "manageMembership": "true", + "view": "true" + }, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]" + }] + return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_available_rolemappings = [[]] + return_value_get_client_composite_rolemappings = [[ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ]] + + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with 
patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_by_name=return_value_get_client_role_by_name, + get_client_available_rolemappings=return_value_get_client_available_rolemappings, + get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_group_by_name.call_count, 1) + self.assertEqual(mock_get_client_id.call_count, 1) + self.assertEqual(mock_add_group_rolemapping.call_count, 0) + self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0) + self.assertEqual(mock_get_client_available_rolemappings.call_count, 1) + self.assertEqual(mock_get_client_composite_rolemappings.call_count, 1) + self.assertEqual(mock_delete_group_rolemapping.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_map_clientrole_to_group_with_id(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'realm': 'realm-name', + 'state': 'present', + 'cid': 'c0f8490c-b224-4737-a567-20223e4c1727', + 'gid': '92f2400e-0ecb-4185-8950-12dcef616c2b', + 'roles': [ + { + 'name': 'test_role1', + }, + { + 'name': 'test_role1', + }, + ], + } + return_value_get_group_by_name = [{ + "access": { + "manage": "true", + "manageMembership": "true", + "view": "true" + }, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": 
"test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]" + }] + return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_available_rolemappings = [[ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ]] + return_value_get_client_composite_rolemappings = [ + None, + [ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ] + ] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_by_name=return_value_get_client_role_by_name, + get_client_available_rolemappings=return_value_get_client_available_rolemappings, + get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_group_by_name.call_count, 0) + 
self.assertEqual(mock_get_client_id.call_count, 0) + self.assertEqual(mock_add_group_rolemapping.call_count, 1) + self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0) + self.assertEqual(mock_get_client_available_rolemappings.call_count, 1) + self.assertEqual(mock_get_client_composite_rolemappings.call_count, 2) + self.assertEqual(mock_delete_group_rolemapping.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_remove_clientrole_from_group(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'realm': 'realm-name', + 'state': 'absent', + 'client_id': 'test_client', + 'group_name': 'test_group', + 'roles': [ + { + 'name': 'test_role1', + }, + { + 'name': 'test_role1', + }, + ], + } + return_value_get_group_by_name = [{ + "access": { + "manage": "true", + "manageMembership": "true", + "view": "true" + }, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]" + }] + return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_available_rolemappings = [[]] + return_value_get_client_composite_rolemappings = [ + [ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ], + [] + ] + + changed = True + + set_module_args(module_args) + + # Run 
the module + + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_by_name=return_value_get_client_role_by_name, + get_client_available_rolemappings=return_value_get_client_available_rolemappings, + get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_group_by_name.call_count, 1) + self.assertEqual(mock_get_client_id.call_count, 1) + self.assertEqual(mock_add_group_rolemapping.call_count, 0) + self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0) + self.assertEqual(mock_get_client_available_rolemappings.call_count, 1) + self.assertEqual(mock_get_client_composite_rolemappings.call_count, 2) + self.assertEqual(mock_delete_group_rolemapping.call_count, 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_remove_clientrole_from_group_idempotency(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'realm': 'realm-name', + 'state': 'absent', + 'client_id': 'test_client', + 'group_name': 'test_group', + 'roles': [ + { + 'name': 'test_role1', + }, + { + 'name': 'test_role1', + }, + ], + } + return_value_get_group_by_name = [{ + "access": { + "manage": "true", + "manageMembership": "true", + "view": "true" + }, + "attributes": "{}", + "clientRoles": "{}", + "id": 
"92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]" + }] + return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_available_rolemappings = [ + [ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ] + ] + return_value_get_client_composite_rolemappings = [[]] + + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_by_name=return_value_get_client_role_by_name, + get_client_available_rolemappings=return_value_get_client_available_rolemappings, + get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_group_by_name.call_count, 1) + self.assertEqual(mock_get_client_id.call_count, 1) + self.assertEqual(mock_add_group_rolemapping.call_count, 0) + self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0) + self.assertEqual(mock_get_client_available_rolemappings.call_count, 1) + self.assertEqual(mock_get_client_composite_rolemappings.call_count, 1) + 
self.assertEqual(mock_delete_group_rolemapping.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 9ccce821136789d4786038a57c565d97a7e21e22 Mon Sep 17 00:00:00 2001 From: Yvan Watchman Date: Sat, 31 Jul 2021 07:43:45 +0200 Subject: [PATCH 0239/2828] Feature: implement hpilo_info system power info (#3079) * report power state of host * Modify sample information * add changelog fragment * apply feedback from github community * apply feedback Co-authored-by: Yvan E. Watchman --- .../fragments/3079-report-power-state-hpilo.yaml | 3 +++ .../modules/remote_management/hpilo/hpilo_info.py | 13 +++++++++++++ 2 files changed, 16 insertions(+) create mode 100644 changelogs/fragments/3079-report-power-state-hpilo.yaml diff --git a/changelogs/fragments/3079-report-power-state-hpilo.yaml b/changelogs/fragments/3079-report-power-state-hpilo.yaml new file mode 100644 index 0000000000..e057e3395f --- /dev/null +++ b/changelogs/fragments/3079-report-power-state-hpilo.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - hpilo_info - added ``host_power_status`` return value to report power state of machine with ``OFF``, ``ON`` or ``UNKNOWN`` (https://github.com/ansible-collections/community.general/pull/3079). diff --git a/plugins/modules/remote_management/hpilo/hpilo_info.py b/plugins/modules/remote_management/hpilo/hpilo_info.py index f373b58639..2b6c30abd6 100644 --- a/plugins/modules/remote_management/hpilo/hpilo_info.py +++ b/plugins/modules/remote_management/hpilo/hpilo_info.py @@ -113,6 +113,15 @@ hw_uuid: returned: always type: str sample: 123456ABC78901D2 + +host_power_status: + description: + - Power status of host. + - Will be one of C(ON), C(OFF) and C(UNKNOWN). 
+ returned: always + type: str + sample: ON + version_added: 3.5.0 ''' import re @@ -177,6 +186,7 @@ def main(): # TODO: Count number of CPUs, DIMMs and total memory try: data = ilo.get_host_data() + power_state = ilo.get_host_power_status() except hpilo.IloCommunicationError as e: module.fail_json(msg=to_native(e)) @@ -243,6 +253,9 @@ def main(): # reformat into a text friendly format info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total']) + # Report host state + info['host_power_status'] = power_state or 'UNKNOWN' + module.exit_json(**info) From 5f8d6a73d3123e37eacb77712f5b083a670230a9 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 31 Jul 2021 19:09:38 +1200 Subject: [PATCH 0240/2828] fixed RETURN doc (#3120) --- plugins/modules/source_control/github/github_issue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/source_control/github/github_issue.py b/plugins/modules/source_control/github/github_issue.py index 66d26c8301..88fe8f7b51 100644 --- a/plugins/modules/source_control/github/github_issue.py +++ b/plugins/modules/source_control/github/github_issue.py @@ -41,7 +41,7 @@ author: ''' RETURN = ''' -get_status: +issue_status: description: State of the GitHub issue type: str returned: success From 789f06dffec644dd0605138936fe16ab44e8ac05 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 31 Jul 2021 19:10:54 +1200 Subject: [PATCH 0241/2828] removed extraneous dependency in integration test (#3119) --- tests/integration/targets/prepare_tests/tasks/main.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 tests/integration/targets/prepare_tests/tasks/main.yml diff --git a/tests/integration/targets/prepare_tests/tasks/main.yml b/tests/integration/targets/prepare_tests/tasks/main.yml deleted file mode 100644 index e69de29bb2..0000000000 From 73c27d6a0e739bf94c10f347aa195048cba185eb Mon Sep 17 
00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 1 Aug 2021 22:35:08 +1200 Subject: [PATCH 0242/2828] utf8 marker batch1 (#3127) * added utf-8 markers to all .py files in plugins/{action,cache,callback} * added utf-8 markers to all .py files in plugins/connection * added utf-8 markers to all .py files in plugins/doc_fragments --- plugins/action/system/iptables_state.py | 1 + plugins/action/system/shutdown.py | 1 + plugins/cache/memcached.py | 1 + plugins/cache/pickle.py | 1 + plugins/cache/redis.py | 1 + plugins/cache/yaml.py | 1 + plugins/callback/context_demo.py | 1 + plugins/callback/counter_enabled.py | 1 + plugins/callback/dense.py | 1 + plugins/callback/hipchat.py | 1 + plugins/callback/jabber.py | 1 + plugins/callback/log_plays.py | 1 + plugins/callback/loganalytics.py | 1 + plugins/callback/logdna.py | 1 + plugins/callback/logentries.py | 1 + plugins/callback/logstash.py | 1 + plugins/callback/null.py | 1 + plugins/callback/say.py | 1 + plugins/callback/selective.py | 1 + plugins/callback/slack.py | 1 + plugins/callback/syslog_json.py | 1 + plugins/callback/unixy.py | 1 + plugins/callback/yaml.py | 1 + plugins/connection/chroot.py | 1 + plugins/connection/funcd.py | 1 + plugins/connection/iocage.py | 1 + plugins/connection/jail.py | 1 + plugins/connection/lxc.py | 1 + plugins/connection/lxd.py | 1 + plugins/connection/qubes.py | 1 + plugins/connection/saltstack.py | 1 + plugins/connection/zone.py | 1 + plugins/doc_fragments/hpe3par.py | 1 + plugins/doc_fragments/hwc.py | 1 + plugins/doc_fragments/oracle.py | 1 + plugins/doc_fragments/oracle_creatable_resource.py | 1 + plugins/doc_fragments/oracle_display_name_option.py | 1 + plugins/doc_fragments/oracle_name_option.py | 1 + plugins/doc_fragments/oracle_tags.py | 1 + plugins/doc_fragments/oracle_wait_options.py | 1 + plugins/doc_fragments/vexata.py | 1 + 41 files changed, 41 insertions(+) diff --git a/plugins/action/system/iptables_state.py 
b/plugins/action/system/iptables_state.py index 6884e77713..93e4bc2ed4 100644 --- a/plugins/action/system/iptables_state.py +++ b/plugins/action/system/iptables_state.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2020, quidame # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/action/system/shutdown.py b/plugins/action/system/shutdown.py index 953b73778b..4995ef8d8b 100644 --- a/plugins/action/system/shutdown.py +++ b/plugins/action/system/shutdown.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2020, Amin Vakil # Copyright: (c) 2016-2018, Matt Davis # Copyright: (c) 2018, Sam Doran diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index 5c9e54aaa0..fb2a778fc3 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2014, Brian Coca, Josh Drake, et al # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py index 38a93e2e28..b790e73a4c 100644 --- a/plugins/cache/pickle.py +++ b/plugins/cache/pickle.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017, Brian Coca # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 20616096ae..6b5f2c4ad0 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2014, Brian Coca, Josh Drake, et al # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py index b47d74038c..b676dd0dbb 100644 --- a/plugins/cache/yaml.py +++ b/plugins/cache/yaml.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017, Brian Coca # (c) 2017 Ansible Project # GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py index 2441f4063f..39c912acae 100644 --- a/plugins/callback/context_demo.py +++ b/plugins/callback/context_demo.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (C) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py index 2b8c270024..352c773b9b 100644 --- a/plugins/callback/counter_enabled.py +++ b/plugins/callback/counter_enabled.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2018, Ivan Aragones Muniesa # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ''' diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py index abbf05ef48..38d3e1bee7 100644 --- a/plugins/callback/dense.py +++ b/plugins/callback/dense.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2016, Dag Wieers # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py index e097ac8eb6..771c425df8 100644 --- a/plugins/callback/hipchat.py +++ b/plugins/callback/hipchat.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (C) 2014, Matt Martz # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index 83476a85c5..c57e08804a 100644 --- a/plugins/callback/jabber.py +++ b/plugins/callback/jabber.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (C) 2016 maxn nikolaev.makc@gmail.com # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index 
df3482f483..24acf3fc95 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (C) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index ef1ea02f87..ccc7649218 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index 165005d0bd..ddb4c477da 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2018, Samir Musali # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index d78bff331c..344bd219cd 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Logentries.com, Jimmy Tang # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index ef862fdb42..95da7fa95a 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (C) 2020, Yevhen Khmelenko # (C) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/null.py b/plugins/callback/null.py index cda8603167..9eb5198d0c 100644 --- a/plugins/callback/null.py +++ b/plugins/callback/null.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General 
Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/say.py b/plugins/callback/say.py index e3efd3e63b..309777e241 100644 --- a/plugins/callback/say.py +++ b/plugins/callback/say.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index 8d882d89bd..b1e09c8236 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) Fastly, inc 2016 # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index 74d338dbcc..c791bf6a36 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (C) 2014-2015, Matt Martz # (C) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py index a9547526ee..73543614a8 100644 --- a/plugins/callback/syslog_json.py +++ b/plugins/callback/syslog_json.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index aaca1bd8cc..dec2ab0c8c 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2017, Allyson Bowles <@akatch> # Copyright: (c) 2012-2014, Michael DeHaan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py index da931d6b73..d4036c808e 100644 --- 
a/plugins/callback/yaml.py +++ b/plugins/callback/yaml.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index c4c427aa0a..3e15947031 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # # (c) 2013, Maykel Moya diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index afea840ee8..caf9d06c60 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Copyright (c) 2013, Michael Scherer diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index e97867e58f..94761d5c17 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on jail.py # (c) 2013, Michael Scherer # (c) 2015, Toshio Kuratomi diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index cee08ed8fd..c3de25c753 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on local.py by Michael DeHaan # and chroot.py by Maykel Moya # Copyright (c) 2013, Michael Scherer diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index b18919efd3..d5c7a7ebbe 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Joerg Thalheim # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index d523234449..31ff13c776 100644 --- a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -1,3 +1,4 @@ +# 
coding: utf-8 -*- # (c) 2016 Matt Clay # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index ca221a7fac..fd72f38e2f 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on the buildah connection plugin # Copyright (c) 2017 Ansible Project # 2018 Kushal Das diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index f8e3680aea..3d56083bb6 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Based on func.py diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index b12cffe28d..a859b5e32f 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # and chroot.py (c) 2013, Maykel Moya # and jail.py (c) 2013, Michael Scherer diff --git a/plugins/doc_fragments/hpe3par.py b/plugins/doc_fragments/hpe3par.py index fa51ccdb91..e16ead4207 100644 --- a/plugins/doc_fragments/hpe3par.py +++ b/plugins/doc_fragments/hpe3par.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2018, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py index 80cd0465d7..c6c5dd23bd 100644 --- a/plugins/doc_fragments/hwc.py +++ b/plugins/doc_fragments/hwc.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2018, Huawei Inc. 
# GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py index 5ad04a2220..94ed18107d 100644 --- a/plugins/doc_fragments/oracle.py +++ b/plugins/doc_fragments/oracle.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py index 468eaabe3f..f76e7146b3 100644 --- a/plugins/doc_fragments/oracle_creatable_resource.py +++ b/plugins/doc_fragments/oracle_creatable_resource.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_display_name_option.py b/plugins/doc_fragments/oracle_display_name_option.py index 01f92f183b..b9ce0d92fe 100644 --- a/plugins/doc_fragments/oracle_display_name_option.py +++ b/plugins/doc_fragments/oracle_display_name_option.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_name_option.py b/plugins/doc_fragments/oracle_name_option.py index 9a7b0226f7..dd9b98816e 100644 --- a/plugins/doc_fragments/oracle_name_option.py +++ b/plugins/doc_fragments/oracle_name_option.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_tags.py b/plugins/doc_fragments/oracle_tags.py index 1d9cae0e8f..e92598c549 100644 --- a/plugins/doc_fragments/oracle_tags.py +++ b/plugins/doc_fragments/oracle_tags.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_wait_options.py b/plugins/doc_fragments/oracle_wait_options.py index 248319c2e8..d94f079a86 100644 --- a/plugins/doc_fragments/oracle_wait_options.py +++ b/plugins/doc_fragments/oracle_wait_options.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py index 9f756cc877..920457fa04 100644 --- a/plugins/doc_fragments/vexata.py +++ b/plugins/doc_fragments/vexata.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # # Copyright: (c) 2019, Sandeep Kasargod # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) From 047b7ada3ca0e2a0ab5b6e83de22174839c1741e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 1 Aug 2021 22:36:53 +1200 Subject: [PATCH 0243/2828] uf8 marker batch2 (#3128) * added utf-8 markers to all .py files in plugins/filter * added utf-8 markers to all .py files in plugins/inventory * added utf-8 markers to all .py files in plugins/lookup --- plugins/filter/dict_kv.py | 1 + plugins/filter/jc.py | 1 + plugins/filter/json_query.py | 1 + plugins/filter/random_mac.py | 1 + plugins/filter/version_sort.py | 1 + plugins/inventory/linode.py | 1 + plugins/inventory/nmap.py | 1 + plugins/inventory/online.py | 1 + plugins/inventory/scaleway.py | 1 + 
plugins/inventory/stackpath_compute.py | 1 + plugins/inventory/virtualbox.py | 1 + plugins/lookup/cartesian.py | 1 + plugins/lookup/chef_databag.py | 1 + plugins/lookup/consul_kv.py | 1 + plugins/lookup/credstash.py | 1 + plugins/lookup/cyberarkpassword.py | 1 + plugins/lookup/dependent.py | 1 + plugins/lookup/dig.py | 1 + plugins/lookup/dnstxt.py | 1 + plugins/lookup/etcd.py | 1 + plugins/lookup/filetree.py | 1 + plugins/lookup/flattened.py | 1 + plugins/lookup/hiera.py | 1 + plugins/lookup/keyring.py | 1 + plugins/lookup/lastpass.py | 1 + plugins/lookup/lmdb_kv.py | 1 + plugins/lookup/manifold.py | 1 + plugins/lookup/nios.py | 1 + plugins/lookup/passwordstore.py | 1 + plugins/lookup/redis.py | 1 + plugins/lookup/shelvefile.py | 1 + 31 files changed, 31 insertions(+) diff --git a/plugins/filter/dict_kv.py b/plugins/filter/dict_kv.py index b2124ed767..fc1978b977 100644 --- a/plugins/filter/dict_kv.py +++ b/plugins/filter/dict_kv.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (C) 2020 Stanislav German-Evtushenko (@giner) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py index e854128f67..42dcf98234 100644 --- a/plugins/filter/jc.py +++ b/plugins/filter/jc.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Filipe Niero Felisbino # # This file is part of Ansible diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py index 673cafa587..9b9ecb93f2 100644 --- a/plugins/filter/json_query.py +++ b/plugins/filter/json_query.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Filipe Niero Felisbino # # This file is part of Ansible diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py index aa9f59be08..dc04e99a96 100644 --- a/plugins/filter/random_mac.py +++ b/plugins/filter/random_mac.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2020 Ansible Project # # This file is part of Ansible diff --git a/plugins/filter/version_sort.py 
b/plugins/filter/version_sort.py index 598b8f2088..d228ea62d0 100644 --- a/plugins/filter/version_sort.py +++ b/plugins/filter/version_sort.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (C) 2021 Eric Lavarde # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 049d67c973..566073a4a8 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 05a83367af..ade3adc3d4 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index 2d305bb8d6..a74c6026ea 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 2e863a2531..b327824f33 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index 8e6b5bf953..e8477b95f3 100644 --- a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2020 Shay Rybak # Copyright (c) 2020 Ansible 
Project # GNU General Public License v3.0+ diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 827618131a..672312cd8e 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py index 45eb16d8b0..841f4f8c4d 100644 --- a/plugins/lookup/cartesian.py +++ b/plugins/lookup/cartesian.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2013, Bradley Young # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index 0a1c6de3ed..d594c7681e 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2016, Josh Bradley # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index 8b9e4e9102..58f450eb65 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Steve Gargan # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index 04935ee635..1a87deed41 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Ensighten # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index ec6e6fcb56..112e7c1cd8 100644 --- 
a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017, Edward Nunez # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index c9ce58567d..3f73f88bfa 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015-2021, Felix Fontein # (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index b6c71954f0..6520b0d3ec 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py index d52301e7fb..84bff41795 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2012, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index a3a7c42a3d..ca13442e43 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2013, Jan-Piet Mens # (m) 2016, Mihai Moldovanu # (m) 2017, Juan Manuel Parrilla diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index 06b89bf396..e663fc9515 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2016 Dag Wieers # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/flattened.py 
b/plugins/lookup/flattened.py index 515817ed09..d1ddd14f56 100644 --- a/plugins/lookup/flattened.py +++ b/plugins/lookup/flattened.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2013, Serge van Ginderachter # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index a4358f7b1e..658f377d59 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017, Juan Manuel Parrilla # (c) 2012-17 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py index d5b7d1a154..a98ae7aee9 100644 --- a/plugins/lookup/keyring.py +++ b/plugins/lookup/keyring.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2016, Samuel Boucher # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index 5e9f9907bd..3ae51b4c64 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2016, Andrew Zenk # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index a417874898..61dc410cc4 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017-2018, Jan-Piet Mens # (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py index 8b270ba0a2..076a475091 100644 --- a/plugins/lookup/manifold.py +++ b/plugins/lookup/manifold.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2018, Arigato Machine Inc. 
# (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/nios.py b/plugins/lookup/nios.py index 819d8077e6..008e8feffe 100644 --- a/plugins/lookup/nios.py +++ b/plugins/lookup/nios.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # # Copyright 2018 Red Hat | Ansible # diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 9c545a1cb0..3e936d8b18 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017, Patrick Deelman # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index a1d5a381b2..fdf3a6e17b 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2012, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 0067472513..175ed49891 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Alejandro Guirao # (c) 2012-17 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) From afe2842b1b4bf9661cd2f3f5c660c0833612bcc6 Mon Sep 17 00:00:00 2001 From: quidame Date: Mon, 2 Aug 2021 08:24:31 +0200 Subject: [PATCH 0244/2828] filesize: overwrite default `unsafe_writes` documentation (#3126) * overwrite default `unsafe_writes` documentation * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- plugins/modules/files/filesize.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/modules/files/filesize.py b/plugins/modules/files/filesize.py index f073ff4119..81701438ca 100644 
--- a/plugins/modules/files/filesize.py +++ b/plugins/modules/files/filesize.py @@ -87,6 +87,10 @@ options: - I(force=true) and I(sparse=true) are mutually exclusive. type: bool default: false + unsafe_writes: + description: + - This option is silently ignored. This module always modifies file + size in-place. notes: - This module supports C(check_mode) and C(diff). From 857d2eee50685de55ebd64f290c62cd83487d595 Mon Sep 17 00:00:00 2001 From: David Hummel <6109326+hummeltech@users.noreply.github.com> Date: Tue, 3 Aug 2021 23:16:11 -0700 Subject: [PATCH 0245/2828] nmcli: Add support for additional Wi-Fi network options (#3081) * nmcli: Add support for additional Wi-Fi network options * Added `changelog fragment` * Update changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml Co-authored-by: Ajpantuso Co-authored-by: Ajpantuso --- .../3081-add-wifi-option-to-nmcli-module.yml | 3 ++ plugins/modules/net_tools/nmcli.py | 40 ++++++++++++++- .../plugins/modules/net_tools/test_nmcli.py | 51 +++++++++++++++++++ 3 files changed, 93 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml diff --git a/changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml b/changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml new file mode 100644 index 0000000000..4425d955fc --- /dev/null +++ b/changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml @@ -0,0 +1,3 @@ +minor_changes: + - nmcli - add ``wifi`` option to support managing Wi-Fi settings such as ``hidden`` or ``mode`` + (https://github.com/ansible-collections/community.general/pull/3081). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 1750f9f99f..90fd5bbd0c 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -342,6 +342,14 @@ options: - Name of the Wireless router or the access point. 
type: str version_added: 3.0.0 + wifi: + description: + - 'The configuration of the Wifi connection. The valid attributes are listed on: + U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' + - 'For instance to create a hidden AP mode Wifi connection: + C({hidden: true, mode: ap}).' + type: dict + version_added: 3.5.0 ''' EXAMPLES = r''' @@ -658,6 +666,18 @@ EXAMPLES = r''' autoconnect: true state: present +- name: Create a hidden AP mode wifi connection + community.general.nmcli: + type: wifi + conn_name: ChocoMaster + ifname: wlo1 + ssid: ChocoMaster + wifi: + hidden: true + mode: ap + autoconnect: true + state: present + ''' RETURN = r"""# @@ -750,6 +770,7 @@ class Nmcli(object): self.dhcp_client_id = module.params['dhcp_client_id'] self.zone = module.params['zone'] self.ssid = module.params['ssid'] + self.wifi = module.params['wifi'] self.wifi_sec = module.params['wifi_sec'] if self.method4: @@ -878,8 +899,17 @@ class Nmcli(object): }) elif self.type == 'wifi': options.update({ + '802-11-wireless.ssid': self.ssid, 'connection.slave-type': 'bond' if self.master else None, }) + if self.wifi: + for name, value in self.wifi.items(): + # Disregard 'ssid' via 'wifi.ssid' + if name == 'ssid': + continue + options.update({ + '802-11-wireless.%s' % name: value + }) # Convert settings values based on the situation. 
for setting, value in options.items(): setting_type = self.settings_type(setting) @@ -978,7 +1008,8 @@ class Nmcli(object): 'ipv4.ignore-auto-routes', 'ipv4.may-fail', 'ipv6.ignore-auto-dns', - 'ipv6.ignore-auto-routes'): + 'ipv6.ignore-auto-routes', + '802-11-wireless.hidden'): return bool elif setting in ('ipv4.dns', 'ipv4.dns-search', @@ -1030,6 +1061,12 @@ class Nmcli(object): if self.type == "wifi": cmd.append('ssid') cmd.append(self.ssid) + if self.wifi: + for name, value in self.wifi.items(): + # Disallow setting 'ssid' via 'wifi.ssid' + if name == 'ssid': + continue + cmd += ['802-11-wireless.%s' % name, value] if self.wifi_sec: for name, value in self.wifi_sec.items(): cmd += ['wifi-sec.%s' % name, value] @@ -1255,6 +1292,7 @@ def main(): ip_tunnel_local=dict(type='str'), ip_tunnel_remote=dict(type='str'), ssid=dict(type='str'), + wifi=dict(type='dict'), wifi_sec=dict(type='dict', no_log=True), ), mutually_exclusive=[['never_default4', 'gw4']], diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 63ec60537c..6df320a0c7 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -469,6 +469,22 @@ ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no """ +TESTCASE_WIRELESS = [ + { + 'type': 'wifi', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'wireless_non_existant', + 'ip4': '10.10.10.10/24', + 'ssid': 'Brittany', + 'wifi': { + 'hidden': True, + 'mode': 'ap', + }, + 'state': 'present', + '_ansible_check_mode': False, + } +] + def mocker_set(mocker, connection_exists=False, @@ -1530,3 +1546,38 @@ def test_ethernet_connection_static_unchanged(mocked_ethernet_connection_static_ results = json.loads(out) assert not results.get('failed') assert not results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIRELESS, indirect=['patch_ansible_module']) +def 
test_create_wireless(mocked_generic_connection_create, capfd): + """ + Test : Create wireless connection + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[0] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'add' + assert add_args[0][3] == 'type' + assert add_args[0][4] == 'wifi' + assert add_args[0][5] == 'con-name' + assert add_args[0][6] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in ['connection.interface-name', 'wireless_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + '802-11-wireless.ssid', 'Brittany', + '802-11-wireless.mode', 'ap', + '802-11-wireless.hidden', 'yes']: + assert param in add_args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] From f2df1a7581e7bd8a386ac2a96b04c9925493ca24 Mon Sep 17 00:00:00 2001 From: Reto Kupferschmid Date: Wed, 4 Aug 2021 08:36:45 +0200 Subject: [PATCH 0246/2828] dnsimple update for python-dnsimple >=2.0.0 (#2946) * update dnsimple module * dnsimple: fixes for python-dnsimple >= 2.0.0 * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Abhijeet Kasurde * rewrite module to support dnsimple-python v1 and v2 * add changelog fragment * fix sanity checks * python 2 fixes * fix dnsimple requirement * add sandbox module parameter * Update changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * return only the first traceback * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * use separate classes for python-dnsimple 1 and 2 * add basic tests * fix checks * skip tests for 
unsupported python versions * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * fix conditions Co-authored-by: Abhijeet Kasurde Co-authored-by: Felix Fontein --- .../2946-python-dnsimple-v2-rewrite.yml | 2 + plugins/modules/net_tools/dnsimple.py | 316 ++++++++++++++---- .../modules/net_tools/test_dnsimple.py | 62 ++++ tests/unit/requirements.txt | 6 +- 4 files changed, 316 insertions(+), 70 deletions(-) create mode 100644 changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml create mode 100644 tests/unit/plugins/modules/net_tools/test_dnsimple.py diff --git a/changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml b/changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml new file mode 100644 index 0000000000..32a6341086 --- /dev/null +++ b/changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml @@ -0,0 +1,2 @@ +minor_changes: + - dnsimple - module rewrite to include support for python-dnsimple>=2.0.0; also add ``sandbox`` parameter (https://github.com/ansible-collections/community.general/pull/2946). diff --git a/plugins/modules/net_tools/dnsimple.py b/plugins/modules/net_tools/dnsimple.py index c4314b6539..a575d944cb 100644 --- a/plugins/modules/net_tools/dnsimple.py +++ b/plugins/modules/net_tools/dnsimple.py @@ -14,13 +14,12 @@ module: dnsimple short_description: Interface with dnsimple.com (a DNS hosting service) description: - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)." -notes: - - DNSimple API v1 is deprecated. Please install dnsimple-python>=1.0.0 which uses v2 API. options: account_email: description: - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for. - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)." 
+ - "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0" type: str account_api_token: description: @@ -72,6 +71,14 @@ options: - Only use with C(state) is set to C(present) on a record. type: 'bool' default: no + sandbox: + description: + - Use the DNSimple sandbox environment. + - Requires a dedicated account in the dnsimple sandbox environment. + - Check U(https://developer.dnsimple.com/sandbox/) for more information. + type: 'bool' + default: no + version_added: 3.5.0 requirements: - "dnsimple >= 1.0.0" author: "Alex Coomans (@drcapulet)" @@ -144,38 +151,227 @@ EXAMPLES = ''' RETURN = r"""# """ -import os import traceback from distutils.version import LooseVersion +import re -DNSIMPLE_IMP_ERR = None + +class DNSimpleV1(): + """class which uses dnsimple-python < 2""" + + def __init__(self, account_email, account_api_token, sandbox, module): + """init""" + self.module = module + self.account_email = account_email + self.account_api_token = account_api_token + self.sandbox = sandbox + self.dnsimple_client() + + def dnsimple_client(self): + """creates a dnsimple client object""" + if self.account_email and self.account_api_token: + self.client = DNSimple(sandbox=self.sandbox, email=self.account_email, api_token=self.account_api_token) + else: + self.client = DNSimple(sandbox=self.sandbox) + + def get_all_domains(self): + """returns a list of all domains""" + domain_list = self.client.domains() + return [d['domain'] for d in domain_list] + + def get_domain(self, domain): + """returns a single domain by name or id""" + try: + dr = self.client.domain(domain)['domain'] + except DNSimpleException as e: + exception_string = str(e.args[0]['message']) + if re.match(r"^Domain .+ not found$", exception_string): + dr = None + else: + raise + return dr + + def create_domain(self, domain): + """create a single domain""" + return self.client.add_domain(domain)['domain'] + + def delete_domain(self, domain): + """delete a single domain""" + 
self.client.delete(domain) + + def get_records(self, domain, dnsimple_filter=None): + """return dns ressource records which match a specified filter""" + return [r['record'] for r in self.client.records(str(domain), params=dnsimple_filter)] + + def delete_record(self, domain, rid): + """delete a single dns ressource record""" + self.client.delete_record(str(domain), rid) + + def update_record(self, domain, rid, ttl=None, priority=None): + """update a single dns ressource record""" + data = {} + if ttl: + data['ttl'] = ttl + if priority: + data['priority'] = priority + return self.client.update_record(str(domain), str(rid), data)['record'] + + def create_record(self, domain, name, record_type, content, ttl=None, priority=None): + """create a single dns ressource record""" + data = { + 'name': name, + 'type': record_type, + 'content': content, + } + if ttl: + data['ttl'] = ttl + if priority: + data['priority'] = priority + return self.client.add_record(str(domain), data)['record'] + + +class DNSimpleV2(): + """class which uses dnsimple-python >= 2""" + + def __init__(self, account_email, account_api_token, sandbox, module): + """init""" + self.module = module + self.account_email = account_email + self.account_api_token = account_api_token + self.sandbox = sandbox + self.pagination_per_page = 30 + self.dnsimple_client() + self.dnsimple_account() + + def dnsimple_client(self): + """creates a dnsimple client object""" + if self.account_email and self.account_api_token: + client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token) + else: + msg = "Option account_email or account_api_token not provided. " \ + "Dnsimple authentiction with a .dnsimple config file is not " \ + "supported with dnsimple-python>=2.0.0" + raise DNSimpleException(msg) + client.identity.whoami() + self.client = client + + def dnsimple_account(self): + """select a dnsimple account. 
If a user token is used for authentication, + this user must only have access to a single account""" + account = self.client.identity.whoami().data.account + # user supplied a user token instead of account api token + if not account: + accounts = Accounts(self.client).list_accounts().data + if len(accounts) != 1: + msg = "The provided dnsimple token is a user token with multiple accounts." \ + "Use an account token or a user token with access to a single account." \ + "See https://support.dnsimple.com/articles/api-access-token/" + raise DNSimpleException(msg) + account = accounts[0] + self.account = account + + def get_all_domains(self): + """returns a list of all domains""" + domain_list = self._get_paginated_result(self.client.domains.list_domains, account_id=self.account.id) + return [d.__dict__ for d in domain_list] + + def get_domain(self, domain): + """returns a single domain by name or id""" + try: + dr = self.client.domains.get_domain(self.account.id, domain).data.__dict__ + except DNSimpleException as e: + exception_string = str(e.message) + if re.match(r"^Domain .+ not found$", exception_string): + dr = None + else: + raise + return dr + + def create_domain(self, domain): + """create a single domain""" + return self.client.domains.create_domain(self.account.id, domain).data.__dict__ + + def delete_domain(self, domain): + """delete a single domain""" + self.client.domains.delete_domain(self.account.id, domain) + + def get_records(self, zone, dnsimple_filter=None): + """return dns ressource records which match a specified filter""" + records_list = self._get_paginated_result(self.client.zones.list_records, + account_id=self.account.id, + zone=zone, filter=dnsimple_filter) + return [d.__dict__ for d in records_list] + + def delete_record(self, domain, rid): + """delete a single dns ressource record""" + self.client.zones.delete_record(self.account.id, domain, rid) + + def update_record(self, domain, rid, ttl=None, priority=None): + """update a single dns 
ressource record""" + zr = ZoneRecordUpdateInput(ttl=ttl, priority=priority) + result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__ + return result + + def create_record(self, domain, name, record_type, content, ttl=None, priority=None): + """create a single dns ressource record""" + zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority) + return self.client.zones.create_record(self.account.id, str(domain), zr).data.__dict__ + + def _get_paginated_result(self, operation, **options): + """return all results of a paginated api response""" + records_pagination = operation(per_page=self.pagination_per_page, **options).pagination + result_list = [] + for page in range(1, records_pagination.total_pages + 1): + page_data = operation(per_page=self.pagination_per_page, page=page, **options).data + result_list.extend(page_data) + return result_list + + +DNSIMPLE_IMP_ERR = [] +HAS_DNSIMPLE = False try: - from dnsimple import DNSimple - from dnsimple.dnsimple import __version__ as dnsimple_version - from dnsimple.dnsimple import DNSimpleException + # try to import dnsimple >= 2.0.0 + from dnsimple import Client, DNSimpleException + from dnsimple.service import Accounts + from dnsimple.version import version as dnsimple_version + from dnsimple.struct.zone_record import ZoneRecordUpdateInput, ZoneRecordInput HAS_DNSIMPLE = True except ImportError: - DNSIMPLE_IMP_ERR = traceback.format_exc() - HAS_DNSIMPLE = False + DNSIMPLE_IMP_ERR.append(traceback.format_exc()) -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +if not HAS_DNSIMPLE: + # try to import dnsimple < 2.0.0 + try: + from dnsimple.dnsimple import __version__ as dnsimple_version + from dnsimple import DNSimple + from dnsimple.dnsimple import DNSimpleException + HAS_DNSIMPLE = True + except ImportError: + DNSIMPLE_IMP_ERR.append(traceback.format_exc()) + +from ansible.module_utils.basic import AnsibleModule, 
missing_required_lib, env_fallback def main(): module = AnsibleModule( argument_spec=dict( - account_email=dict(type='str'), - account_api_token=dict(type='str', no_log=True), + account_email=dict(type='str', fallback=(env_fallback, ['DNSIMPLE_EMAIL'])), + account_api_token=dict(type='str', + no_log=True, + fallback=(env_fallback, ['DNSIMPLE_API_TOKEN'])), domain=dict(type='str'), record=dict(type='str'), record_ids=dict(type='list', elements='str'), - type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', + type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', + 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', + 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA']), ttl=dict(type='int', default=3600), value=dict(type='str'), priority=dict(type='int'), state=dict(type='str', choices=['present', 'absent'], default='present'), solo=dict(type='bool', default=False), + sandbox=dict(type='bool', default=False), ), required_together=[ ['record', 'value'] @@ -184,11 +380,7 @@ def main(): ) if not HAS_DNSIMPLE: - module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR) - - if LooseVersion(dnsimple_version) < LooseVersion('1.0.0'): - module.fail_json(msg="Current version of dnsimple Python module [%s] uses 'v1' API which is deprecated." - " Please upgrade to version 1.0.0 and above to use dnsimple 'v2' API." 
% dnsimple_version) + module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR[0]) account_email = module.params.get('account_email') account_api_token = module.params.get('account_api_token') @@ -201,29 +393,29 @@ def main(): priority = module.params.get('priority') state = module.params.get('state') is_solo = module.params.get('solo') + sandbox = module.params.get('sandbox') - if account_email and account_api_token: - client = DNSimple(email=account_email, api_token=account_api_token) - elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'): - client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN')) - else: - client = DNSimple() + DNSIMPLE_MAJOR_VERSION = LooseVersion(dnsimple_version).version[0] try: + if DNSIMPLE_MAJOR_VERSION > 1: + ds = DNSimpleV2(account_email, account_api_token, sandbox, module) + else: + ds = DNSimpleV1(account_email, account_api_token, sandbox, module) # Let's figure out what operation we want to do - # No domain, return a list if not domain: - domains = client.domains() - module.exit_json(changed=False, result=[d['domain'] for d in domains]) + all_domains = ds.get_all_domains() + module.exit_json(changed=False, result=all_domains) # Domain & No record - if domain and record is None and not record_ids: - domains = [d['domain'] for d in client.domains()] + if record is None and not record_ids: if domain.isdigit(): - dr = next((d for d in domains if d['id'] == int(domain)), None) + typed_domain = int(domain) else: - dr = next((d for d in domains if d['name'] == domain), None) + typed_domain = str(domain) + dr = ds.get_domain(typed_domain) + # domain does not exist if state == 'present': if dr: module.exit_json(changed=False, result=dr) @@ -231,105 +423,91 @@ def main(): if module.check_mode: module.exit_json(changed=True) else: - module.exit_json(changed=True, result=client.add_domain(domain)['domain']) - + response = ds.create_domain(domain) + 
module.exit_json(changed=True, result=response) # state is absent else: if dr: if not module.check_mode: - client.delete(domain) + ds.delete_domain(domain) module.exit_json(changed=True) else: module.exit_json(changed=False) # need the not none check since record could be an empty string - if domain and record is not None: - records = [r['record'] for r in client.records(str(domain), params={'name': record})] - + if record is not None: if not record_type: module.fail_json(msg="Missing the record type") - if not value: module.fail_json(msg="Missing the record value") - rr = next((r for r in records if r['name'] == record and r['type'] == record_type and r['content'] == value), None) - + records_list = ds.get_records(domain, dnsimple_filter={'name': record}) + rr = next((r for r in records_list if r['name'] == record and r['type'] == record_type and r['content'] == value), None) if state == 'present': changed = False if is_solo: # delete any records that have the same name and record type - same_type = [r['id'] for r in records if r['name'] == record and r['type'] == record_type] + same_type = [r['id'] for r in records_list if r['name'] == record and r['type'] == record_type] if rr: same_type = [rid for rid in same_type if rid != rr['id']] if same_type: if not module.check_mode: for rid in same_type: - client.delete_record(str(domain), rid) + ds.delete_record(domain, rid) changed = True if rr: # check if we need to update if rr['ttl'] != ttl or rr['priority'] != priority: - data = {} - if ttl: - data['ttl'] = ttl - if priority: - data['priority'] = priority if module.check_mode: module.exit_json(changed=True) else: - module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record']) + response = ds.update_record(domain, rr['id'], ttl, priority) + module.exit_json(changed=True, result=response) else: module.exit_json(changed=changed, result=rr) else: # create it - data = { - 'name': record, - 'type': record_type, - 'content': 
value, - } - if ttl: - data['ttl'] = ttl - if priority: - data['priority'] = priority if module.check_mode: module.exit_json(changed=True) else: - module.exit_json(changed=True, result=client.add_record(str(domain), data)['record']) - + response = ds.create_record(domain, record, record_type, value, ttl, priority) + module.exit_json(changed=True, result=response) # state is absent else: if rr: if not module.check_mode: - client.delete_record(str(domain), rr['id']) + ds.delete_record(domain, rr['id']) module.exit_json(changed=True) else: module.exit_json(changed=False) # Make sure these record_ids either all exist or none - if domain and record_ids: - current_records = [str(r['record']['id']) for r in client.records(str(domain))] - wanted_records = [str(r) for r in record_ids] + if record_ids: + current_records = ds.get_records(domain, dnsimple_filter=None) + current_record_ids = [str(d['id']) for d in current_records] + wanted_record_ids = [str(r) for r in record_ids] if state == 'present': - difference = list(set(wanted_records) - set(current_records)) + difference = list(set(wanted_record_ids) - set(current_record_ids)) if difference: module.fail_json(msg="Missing the following records: %s" % difference) else: module.exit_json(changed=False) - # state is absent else: - difference = list(set(wanted_records) & set(current_records)) + difference = list(set(wanted_record_ids) & set(current_record_ids)) if difference: if not module.check_mode: for rid in difference: - client.delete_record(str(domain), rid) + ds.delete_record(domain, rid) module.exit_json(changed=True) else: module.exit_json(changed=False) except DNSimpleException as e: - module.fail_json(msg="Unable to contact DNSimple: %s" % e.message) - + if DNSIMPLE_MAJOR_VERSION > 1: + module.fail_json(msg="DNSimple exception: %s" % e.message) + else: + module.fail_json(msg="DNSimple exception: %s" % str(e.args[0]['message'])) module.fail_json(msg="Unknown what you wanted me to do") diff --git 
a/tests/unit/plugins/modules/net_tools/test_dnsimple.py b/tests/unit/plugins/modules/net_tools/test_dnsimple.py new file mode 100644 index 0000000000..b9dce3c215 --- /dev/null +++ b/tests/unit/plugins/modules/net_tools/test_dnsimple.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible_collections.community.general.plugins.modules.net_tools import dnsimple as dnsimple_module +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.general.tests.unit.compat.mock import patch +import pytest +import sys + +dnsimple = pytest.importorskip('dnsimple') +mandatory_py_version = pytest.mark.skipif( + sys.version_info < (3, 6), + reason='The dnsimple dependency requires python3.6 or higher' +) + +from dnsimple import DNSimpleException + + +class TestDNSimple(ModuleTestCase): + """Main class for testing dnsimple module.""" + + def setUp(self): + """Setup.""" + super(TestDNSimple, self).setUp() + self.module = dnsimple_module + + def tearDown(self): + """Teardown.""" + super(TestDNSimple, self).tearDown() + + def test_without_required_parameters(self): + """Failure must occurs when all parameters are missing""" + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + self.module.main() + + @patch('dnsimple.service.Identity.whoami') + def test_account_token(self, mock_whoami): + mock_whoami.return_value.data.account = 42 + ds = self.module.DNSimpleV2('fake', 'fake', True, self.module) + self.assertEquals(ds.account, 42) + + @patch('dnsimple.service.Accounts.list_accounts') + @patch('dnsimple.service.Identity.whoami') + def test_user_token_multiple_accounts(self, mock_whoami, mock_accounts): + mock_accounts.return_value.data = [1, 2, 3] + 
mock_whoami.return_value.data.account = None + with self.assertRaises(DNSimpleException): + self.module.DNSimpleV2('fake', 'fake', True, self.module) + + @patch('dnsimple.service.Accounts.list_accounts') + @patch('dnsimple.service.Identity.whoami') + def test_user_token_single_account(self, mock_whoami, mock_accounts): + mock_accounts.return_value.data = [42] + mock_whoami.return_value.data.account = None + ds = self.module.DNSimpleV2('fake', 'fake', True, self.module) + self.assertEquals(ds.account, 42) diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt index 1d082cffb8..c8294bd71a 100644 --- a/tests/unit/requirements.txt +++ b/tests/unit/requirements.txt @@ -21,4 +21,8 @@ lxml semantic_version # requirement for datadog_downtime module -datadog-api-client >= 1.0.0b3 ; python_version >= '3.6' \ No newline at end of file +datadog-api-client >= 1.0.0b3 ; python_version >= '3.6' + +# requirement for dnsimple module +dnsimple >= 2 ; python_version >= '3.6' +dataclasses ; python_version == '3.6' From 75688cb632197a934a93e4e3fa31d3d9a3755751 Mon Sep 17 00:00:00 2001 From: Scott Seekamp Date: Wed, 4 Aug 2021 11:53:43 -0600 Subject: [PATCH 0247/2828] redfish_command: allow setting the BootSourceOverrideMode property (#3135) * For #3134 Expose BootOverrideMode parameter to redfish_command to allow setting by user during run. * Fix trailing whitespace * Add changelog fragment to contribution. 
* Update changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein * Update changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...-add-redfish_command-bootoverridemode.yaml | 2 ++ plugins/module_utils/redfish_utils.py | 8 +++++++- .../redfish/redfish_command.py | 20 ++++++++++++++++++- 3 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml diff --git a/changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml b/changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml new file mode 100644 index 0000000000..d1f24d4c24 --- /dev/null +++ b/changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_command - add ``boot_override_mode`` argument to BootSourceOverride commands (https://github.com/ansible-collections/community.general/issues/3134). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 8d293f0056..94e2c4b7d8 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1565,6 +1565,7 @@ class RedfishUtils(object): uefi_target = boot_opts.get('uefi_target') boot_next = boot_opts.get('boot_next') override_enabled = boot_opts.get('override_enabled') + boot_override_mode = boot_opts.get('boot_override_mode') if not bootdevice and override_enabled != 'Disabled': return {'ret': False, @@ -1596,6 +1597,10 @@ class RedfishUtils(object): target = boot.get('BootSourceOverrideTarget') cur_uefi_target = boot.get('UefiTargetBootSourceOverride') cur_boot_next = boot.get('BootNext') + cur_override_mode = boot.get('BootSourceOverrideMode') + + if not boot_override_mode: + boot_override_mode = cur_override_mode if override_enabled == 'Disabled': payload = { @@ -1632,12 +1637,13 @@ class RedfishUtils(object): } } else: - if cur_enabled == override_enabled and target == bootdevice: + if cur_enabled == override_enabled and target == bootdevice and cur_override_mode == boot_override_mode: # If properties are already set, no changes needed return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': override_enabled, + 'BootSourceOverrideMode': boot_override_mode, 'BootSourceOverrideTarget': bootdevice } } diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index 01f1fd771d..72392ec9f3 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -86,6 +86,12 @@ options: - Timeout in seconds for URL requests to OOB controller default: 10 type: int + boot_override_mode: + description: + - Boot mode when using an override. 
+ type: str + choices: [ Legacy, UEFI ] + version_added: 3.5.0 uefi_target: required: false description: @@ -287,6 +293,16 @@ EXAMPLES = ''' username: "{{ username }}" password: "{{ password }}" + - name: Set one-time boot to BiosSetup + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + bootnext: BiosSetup + boot_override_mode: Legacy + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + - name: Disable persistent boot device override community.general.redfish_command: category: Systems @@ -591,6 +607,7 @@ def main(): timeout=dict(type='int', default=10), uefi_target=dict(), boot_next=dict(), + boot_override_mode=dict(choices=['Legacy', 'UEFI']), resource_id=dict(), update_image_uri=dict(), update_protocol=dict(), @@ -662,7 +679,8 @@ def main(): boot_opts = { 'bootdevice': module.params['bootdevice'], 'uefi_target': module.params['uefi_target'], - 'boot_next': module.params['boot_next'] + 'boot_next': module.params['boot_next'], + 'boot_override_mode': module.params['boot_override_mode'], } # VirtualMedia options From 3dba697e3353a9e9dac9bbfd4e0216d7b62b6c5f Mon Sep 17 00:00:00 2001 From: Reto Kupferschmid Date: Thu, 5 Aug 2021 14:25:42 +0200 Subject: [PATCH 0248/2828] nmcli: manage dummy connections (#3132) * manage dummy connections * add issue reference in changelog fragment * Update changelogs/fragments/3132-nmcli-dummy.yaml Co-authored-by: Ajpantuso * resolve test conflicts Co-authored-by: Ajpantuso --- changelogs/fragments/3132-nmcli-dummy.yaml | 2 + plugins/modules/net_tools/nmcli.py | 12 ++- .../plugins/modules/net_tools/test_nmcli.py | 102 ++++++++++++++++++ 3 files changed, 114 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3132-nmcli-dummy.yaml diff --git a/changelogs/fragments/3132-nmcli-dummy.yaml b/changelogs/fragments/3132-nmcli-dummy.yaml new file mode 100644 index 0000000000..970bda34e8 --- /dev/null +++ b/changelogs/fragments/3132-nmcli-dummy.yaml @@ -0,0 +1,2 
@@ +minor_changes: + - nmcli - add ``dummy`` interface support (https://github.com/ansible-collections/community.general/issues/724). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 90fd5bbd0c..bbc1b4770f 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -51,10 +51,11 @@ options: type: description: - This is the type of device or network connection that you wish to create or modify. + - Type C(dummy) is added in community.general 3.5.0. - Type C(generic) is added in Ansible 2.5. - Type C(infiniband) is added in community.general 2.0.0. type: str - choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] + choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] mode: description: - This is the type of device or network connection that you wish to create for a bond or bridge. 
@@ -775,6 +776,8 @@ class Nmcli(object): if self.method4: self.ipv4_method = self.method4 + elif self.type == 'dummy' and not self.ip4: + self.ipv4_method = 'disabled' elif self.ip4: self.ipv4_method = 'manual' else: @@ -782,6 +785,8 @@ class Nmcli(object): if self.method6: self.ipv6_method = self.method6 + elif self.type == 'dummy' and not self.ip6: + self.ipv6_method = 'disabled' elif self.ip6: self.ipv6_method = 'manual' else: @@ -938,6 +943,7 @@ class Nmcli(object): return self.type in ( 'bond', 'bridge', + 'dummy', 'ethernet', 'generic', 'infiniband', @@ -956,6 +962,7 @@ class Nmcli(object): @property def mtu_conn_type(self): return self.type in ( + 'dummy', 'ethernet', 'team-slave', ) @@ -1092,7 +1099,7 @@ class Nmcli(object): @property def create_connection_up(self): - if self.type in ('bond', 'ethernet', 'infiniband', 'wifi'): + if self.type in ('bond', 'dummy', 'ethernet', 'infiniband', 'wifi'): if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): return True elif self.type == 'team': @@ -1218,6 +1225,7 @@ def main(): 'bond-slave', 'bridge', 'bridge-slave', + 'dummy', 'ethernet', 'generic', 'infiniband', diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 6df320a0c7..b2307f245a 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -74,6 +74,12 @@ TESTCASE_CONNECTION = [ 'state': 'absent', '_ansible_check_mode': True, }, + { + 'type': 'dummy', + 'conn_name': 'non_existent_nw_device', + 'state': 'absent', + '_ansible_check_mode': True, + }, ] TESTCASE_GENERIC = [ @@ -485,6 +491,40 @@ TESTCASE_WIRELESS = [ } ] +TESTCASE_DUMMY_STATIC = [ + { + 'type': 'dummy', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'dummy_non_existant', + 'ip4': '10.10.10.10/24', + 'gw4': '10.10.10.1', + 'dns4': ['1.1.1.1', '8.8.8.8'], + 'ip6': '2001:db8::1/128', + 'state': 'present', + '_ansible_check_mode': 
False, + } +] + +TESTCASE_DUMMY_STATIC_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_device +connection.interface-name: dummy_non_existant +connection.autoconnect: yes +802-3-ethernet.mtu: auto +ipv4.method: manual +ipv4.addresses: 10.10.10.10/24 +ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv4.dns: 1.1.1.1,8.8.8.8 +ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no +ipv6.method: manual +ipv6.addresses: 2001:db8::1/128 +""" + def mocker_set(mocker, connection_exists=False, @@ -641,6 +681,13 @@ def mocked_ethernet_connection_dhcp_to_static(mocker): )) +@pytest.fixture +def mocked_dummy_connection_static_unchanged(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=(0, TESTCASE_DUMMY_STATIC_SHOW_OUTPUT, "")) + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_BOND, indirect=['patch_ansible_module']) def test_bond_connection_create(mocked_generic_connection_create, capfd): """ @@ -1581,3 +1628,58 @@ def test_create_wireless(mocked_generic_connection_create, capfd): results = json.loads(out) assert not results.get('failed') assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) +def test_create_dummy_static(mocked_generic_connection_create, capfd): + """ + Test : Create dummy connection with static IP configuration + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 2 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[0] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'add' + assert add_args[0][3] == 'type' + assert add_args[0][4] == 'dummy' + assert add_args[0][5] == 'con-name' + assert add_args[0][6] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in 
['connection.interface-name', 'dummy_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + 'ipv4.gateway', '10.10.10.1', + 'ipv4.dns', '1.1.1.1,8.8.8.8', + 'ipv6.addresses', '2001:db8::1/128']: + assert param in add_args_text + + up_args, up_kw = arg_list[1] + assert up_args[0][0] == '/usr/bin/nmcli' + assert up_args[0][1] == 'con' + assert up_args[0][2] == 'up' + assert up_args[0][3] == 'non_existent_nw_device' + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) +def test_dummy_connection_static_unchanged(mocked_dummy_connection_static_unchanged, capfd): + """ + Test : Dummy connection with static IP configuration unchanged + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert not results['changed'] From a73720c103da165823c7eeee4812210f7f3bc774 Mon Sep 17 00:00:00 2001 From: Ricky White Date: Thu, 5 Aug 2021 13:28:32 -0400 Subject: [PATCH 0249/2828] Updated the tss lookup plugin to reflect breaking changes introduced in the underpinning SDK (#3139) * Updated the plugin to reflect breaking changes introduced in the underlying SDK v1.0.0 update. 
* Added Changelog fragment * Updates based on feedback/review * Added newline to pass CI * Added whitepace for linter * Update changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml Co-authored-by: Ajpantuso Co-authored-by: Ajpantuso --- ...-update-to-make-compatible-with-sdk-v1.yml | 3 +++ plugins/lookup/tss.py | 20 ++++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml diff --git a/changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml b/changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml new file mode 100644 index 0000000000..f06fa68ce0 --- /dev/null +++ b/changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml @@ -0,0 +1,3 @@ +bugfixes: + - tss lookup plugin - fixed incompatibility with ``python-tss-sdk`` version 1.0.0 + (https://github.com/ansible-collections/community.general/issues/3057, https://github.com/ansible-collections/community.general/pull/3139). 
diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index b7b7cd85e0..d5e6ea6dcd 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -112,16 +112,17 @@ EXAMPLES = r""" - ansible.builtin.debug: msg: the password is {{ secret_password }} """ - +from distutils.version import LooseVersion from ansible.errors import AnsibleError, AnsibleOptionsError sdk_is_missing = False try: + from thycotic import __version__ as sdk_version from thycotic.secrets.server import ( SecretServer, - SecretServerAccessError, SecretServerError, + PasswordGrantAuthorizer, ) except ImportError: sdk_is_missing = True @@ -136,7 +137,20 @@ display = Display() class LookupModule(LookupBase): @staticmethod def Client(server_parameters): - return SecretServer(**server_parameters) + + if LooseVersion(sdk_version) < LooseVersion('1.0.0'): + return SecretServer(**server_parameters) + else: + authorizer = PasswordGrantAuthorizer( + server_parameters["base_url"], + server_parameters["username"], + server_parameters["password"], + server_parameters["token_path_uri"], + ) + + return SecretServer( + server_parameters["base_url"], authorizer, server_parameters["api_path_uri"] + ) def run(self, terms, variables, **kwargs): if sdk_is_missing: From e9494c12f2b21f8f51be92c8e48074f6a697a712 Mon Sep 17 00:00:00 2001 From: rainerleber <39616583+rainerleber@users.noreply.github.com> Date: Thu, 5 Aug 2021 22:42:43 +0200 Subject: [PATCH 0250/2828] Hana query userstore (#3125) * add hdbuserstore ability * add description * fix * add default * add description * add sample * Apply suggestions from code review Co-authored-by: quidame * add fragment, fix required if * remove whitespace * add coding fragment * Apply suggestions from code review Co-authored-by: Felix Fontein * added test for userstore * Update plugins/modules/database/saphana/hana_query.py Co-authored-by: Felix Fontein Co-authored-by: Rainer Leber Co-authored-by: quidame Co-authored-by: Felix Fontein --- 
.../fragments/3125-hana-query-userstore.yaml | 2 + .../modules/database/saphana/hana_query.py | 42 +++++++++++++++---- .../database/saphana/test_hana_query.py | 36 ++++++++++++++++ 3 files changed, 72 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/3125-hana-query-userstore.yaml diff --git a/changelogs/fragments/3125-hana-query-userstore.yaml b/changelogs/fragments/3125-hana-query-userstore.yaml new file mode 100644 index 0000000000..0a626fe7f5 --- /dev/null +++ b/changelogs/fragments/3125-hana-query-userstore.yaml @@ -0,0 +1,2 @@ +minor_changes: + - hana_query - added the abillity to use hdbuserstore (https://github.com/ansible-collections/community.general/pull/3125). diff --git a/plugins/modules/database/saphana/hana_query.py b/plugins/modules/database/saphana/hana_query.py index 9b26134022..ac026d5adc 100644 --- a/plugins/modules/database/saphana/hana_query.py +++ b/plugins/modules/database/saphana/hana_query.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2021, Rainer Leber # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -21,13 +22,21 @@ options: type: str required: true user: - description: A dedicated username. Defaults to C(SYSTEM). + description: A dedicated username. The user could be also in hdbuserstore. Defaults to C(SYSTEM). type: str default: SYSTEM + userstore: + description: If C(true) the user must be in hdbuserstore. + type: bool + default: false + version_added: 3.5.0 password: - description: The password to connect to the database. + description: + - The password to connect to the database. + - "B(Note:) Since the passwords have to be passed as command line arguments, I(userstore=true) should + be used whenever possible, as command line arguments can be seen by other users + on the same machine." type: str - required: true autocommit: description: Autocommit the statement. 
type: bool @@ -89,6 +98,17 @@ EXAMPLES = r''' - /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt - /tmp/HANA.txt host: "localhost" + +- name: Run several queries from user store + community.general.hana_query: + sid: "hdb" + instance: "01" + user: hdbstoreuser + userstore: true + query: + - "select user_name from users;" + - select * from users; + autocommit: False ''' RETURN = r''' @@ -117,16 +137,18 @@ def main(): argument_spec=dict( sid=dict(type='str', required=True), instance=dict(type='str', required=True), - encrypted=dict(type='bool', required=False, default=False), + encrypted=dict(type='bool', default=False), host=dict(type='str', required=False), - user=dict(type='str', required=False, default="SYSTEM"), - password=dict(type='str', required=True, no_log=True), + user=dict(type='str', default="SYSTEM"), + userstore=dict(type='bool', default=False), + password=dict(type='str', no_log=True), database=dict(type='str', required=False), query=dict(type='list', elements='str', required=False), filepath=dict(type='list', elements='path', required=False), - autocommit=dict(type='bool', required=False, default=True), + autocommit=dict(type='bool', default=True), ), required_one_of=[('query', 'filepath')], + required_if=[('userstore', False, ['password'])], supports_check_mode=False, ) rc, out, err, out_raw = [0, [], "", ""] @@ -136,6 +158,7 @@ def main(): sid = (params['sid']).upper() instance = params['instance'] user = params['user'] + userstore = params['userstore'] password = params['password'] autocommit = params['autocommit'] host = params['host'] @@ -161,7 +184,10 @@ def main(): if database is not None: command.extend(['-d', database]) # -x Suppresses additional output, such as the number of selected rows in a result set. 
- command.extend(['-x', '-i', instance, '-u', user, '-p', password]) + if userstore: + command.extend(['-x', '-U', user]) + else: + command.extend(['-x', '-i', instance, '-u', user, '-p', password]) if filepath is not None: command.extend(['-I']) diff --git a/tests/unit/plugins/modules/database/saphana/test_hana_query.py b/tests/unit/plugins/modules/database/saphana/test_hana_query.py index 4d158c028e..b0fd939604 100644 --- a/tests/unit/plugins/modules/database/saphana/test_hana_query.py +++ b/tests/unit/plugins/modules/database/saphana/test_hana_query.py @@ -64,3 +64,39 @@ class Testhana_query(ModuleTestCase): {'username': 'myuser', 'name': 'my user'}, ]]) self.assertEqual(run_command.call_count, 1) + + def test_hana_userstore_query(self): + """Check that result is processed with userstore.""" + set_module_args({ + 'sid': "HDB", + 'instance': "01", + 'encrypted': False, + 'host': "localhost", + 'user': "SYSTEM", + 'userstore': True, + 'database': "HDB", + 'query': "SELECT * FROM users;" + }) + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.return_value = 0, 'username,name\n testuser,test user \n myuser, my user \n', '' + with self.assertRaises(AnsibleExitJson) as result: + hana_query.main() + self.assertEqual(result.exception.args[0]['query_result'], [[ + {'username': 'testuser', 'name': 'test user'}, + {'username': 'myuser', 'name': 'my user'}, + ]]) + self.assertEqual(run_command.call_count, 1) + + def test_hana_failed_no_passwd(self): + """Check that result is failed with no password.""" + with self.assertRaises(AnsibleFailJson): + set_module_args({ + 'sid': "HDB", + 'instance': "01", + 'encrypted': False, + 'host': "localhost", + 'user': "SYSTEM", + 'database': "HDB", + 'query': "SELECT * FROM users;" + }) + self.module.main() From 16476f5cb932a61c3db890402cc4bc39664af281 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 5 Aug 2021 23:46:39 +0200 Subject: [PATCH 0251/2828] Update AZP config (#3111) * Update AZP 
config. * Skip 8.4 as well for django_manage (next to 8.3 and 8.2). * Temporarily skip 8.4 for yum_versionlock. --- .azure-pipelines/azure-pipelines.yml | 4 ++-- tests/integration/targets/django_manage/aliases | 1 + tests/integration/targets/yum_versionlock/aliases | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 8dc49e5c03..78df8b366f 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -186,8 +186,8 @@ stages: test: macos/11.1 - name: RHEL 7.9 test: rhel/7.9 - - name: RHEL 8.3 - test: rhel/8.3 + - name: RHEL 8.4 + test: rhel/8.4 - name: FreeBSD 12.2 test: freebsd/12.2 - name: FreeBSD 13.0 diff --git a/tests/integration/targets/django_manage/aliases b/tests/integration/targets/django_manage/aliases index e9c002109c..7488aa82d7 100644 --- a/tests/integration/targets/django_manage/aliases +++ b/tests/integration/targets/django_manage/aliases @@ -5,3 +5,4 @@ skip/macos skip/osx skip/rhel8.2 skip/rhel8.3 +skip/rhel8.4 diff --git a/tests/integration/targets/yum_versionlock/aliases b/tests/integration/targets/yum_versionlock/aliases index abe0a21e22..92b8e448f1 100644 --- a/tests/integration/targets/yum_versionlock/aliases +++ b/tests/integration/targets/yum_versionlock/aliases @@ -3,3 +3,4 @@ skip/aix skip/freebsd skip/osx skip/macos +skip/rhel8.4 # TODO make sure that tests work on 8.4 as well! 
From ff586f1105d35c0efe7c9e858ae6c943f9e4ec58 Mon Sep 17 00:00:00 2001 From: Bartosz <8199062+bartoszkosiorek@users.noreply.github.com> Date: Fri, 6 Aug 2021 10:01:05 +0200 Subject: [PATCH 0252/2828] pkgin: display stdout and stderr in case the error occurs (#3148) * pkgin: display stdout and stderr in case the error occurs * Update changelogs/fragments/pkgin-output-after-error.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/pkgin-output-after-error.yml | 2 ++ plugins/modules/packaging/os/pkgin.py | 12 ++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/pkgin-output-after-error.yml diff --git a/changelogs/fragments/pkgin-output-after-error.yml b/changelogs/fragments/pkgin-output-after-error.yml new file mode 100644 index 0000000000..a2dd2d6a1e --- /dev/null +++ b/changelogs/fragments/pkgin-output-after-error.yml @@ -0,0 +1,2 @@ +minor_changes: + - pkgin - in case of ``pkgin`` tool failure, display returned standard output ``stdout`` and standard error ``stderr`` to ease debugging (https://github.com/ansible-collections/community.general/issues/3146). 
diff --git a/plugins/modules/packaging/os/pkgin.py b/plugins/modules/packaging/os/pkgin.py index 2937314fa1..90a711fb6a 100644 --- a/plugins/modules/packaging/os/pkgin.py +++ b/plugins/modules/packaging/os/pkgin.py @@ -251,7 +251,7 @@ def remove_packages(module, packages): format_pkgin_command(module, "remove", package)) if not module.check_mode and query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: - module.fail_json(msg="failed to remove %s: %s" % (package, out)) + module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=out, stderr=err) remove_c += 1 @@ -276,14 +276,14 @@ def install_packages(module, packages): format_pkgin_command(module, "install", package)) if not module.check_mode and not query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: - module.fail_json(msg="failed to install %s: %s" % (package, out)) + module.fail_json(msg="failed to install %s: %s" % (package, out), stdout=out, stderr=err) install_c += 1 if install_c > 0: module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c)) - module.exit_json(changed=False, msg="package(s) already present") + module.exit_json(changed=False, msg="package(s) already present", stdout=out, stderr=err) def update_package_db(module): @@ -296,7 +296,7 @@ def update_package_db(module): else: return True, "updated repository database" else: - module.fail_json(msg="could not update package db") + module.fail_json(msg="could not update package db", stdout=out, stderr=err) def do_upgrade_packages(module, full=False): @@ -312,7 +312,7 @@ def do_upgrade_packages(module, full=False): if re.search('^nothing to do.\n$', out): module.exit_json(changed=False, msg="nothing left to upgrade") else: - module.fail_json(msg="could not %s packages" % cmd) + module.fail_json(msg="could not %s packages" % cmd, stdout=out, stderr=err) def upgrade_packages(module): @@ -332,7 +332,7 @@ def clean_cache(module): # so assume it did. 
module.exit_json(changed=True, msg="cleaned caches") else: - module.fail_json(msg="could not clean package cache") + module.fail_json(msg="could not clean package cache", stdout=out, stderr=err) def main(): From da11a98cb734e99cc57f4ae6ec09d9199875c39b Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 8 Aug 2021 01:02:21 +1200 Subject: [PATCH 0253/2828] fixed the utf-8 marker (#3162) --- plugins/action/system/iptables_state.py | 2 +- plugins/action/system/shutdown.py | 2 +- plugins/cache/memcached.py | 2 +- plugins/cache/pickle.py | 2 +- plugins/cache/redis.py | 2 +- plugins/cache/yaml.py | 2 +- plugins/callback/context_demo.py | 2 +- plugins/callback/counter_enabled.py | 2 +- plugins/callback/dense.py | 2 +- plugins/callback/hipchat.py | 2 +- plugins/callback/jabber.py | 2 +- plugins/callback/log_plays.py | 2 +- plugins/callback/loganalytics.py | 2 +- plugins/callback/logdna.py | 2 +- plugins/callback/logentries.py | 2 +- plugins/callback/logstash.py | 2 +- plugins/callback/null.py | 2 +- plugins/callback/say.py | 2 +- plugins/callback/selective.py | 2 +- plugins/callback/slack.py | 2 +- plugins/callback/syslog_json.py | 2 +- plugins/callback/unixy.py | 2 +- plugins/callback/yaml.py | 2 +- plugins/connection/chroot.py | 2 +- plugins/connection/funcd.py | 2 +- plugins/connection/iocage.py | 2 +- plugins/connection/jail.py | 2 +- plugins/connection/lxc.py | 2 +- plugins/connection/lxd.py | 2 +- plugins/connection/qubes.py | 2 +- plugins/connection/saltstack.py | 2 +- plugins/connection/zone.py | 2 +- plugins/doc_fragments/hpe3par.py | 2 +- plugins/doc_fragments/hwc.py | 2 +- plugins/doc_fragments/oracle.py | 2 +- plugins/doc_fragments/oracle_creatable_resource.py | 2 +- plugins/doc_fragments/oracle_display_name_option.py | 2 +- plugins/doc_fragments/oracle_name_option.py | 2 +- plugins/doc_fragments/oracle_tags.py | 2 +- plugins/doc_fragments/oracle_wait_options.py | 2 +- plugins/doc_fragments/vexata.py | 2 +- 
plugins/filter/dict_kv.py | 2 +- plugins/filter/jc.py | 2 +- plugins/filter/json_query.py | 2 +- plugins/filter/random_mac.py | 2 +- plugins/filter/version_sort.py | 2 +- plugins/inventory/linode.py | 2 +- plugins/inventory/nmap.py | 2 +- plugins/inventory/online.py | 2 +- plugins/inventory/scaleway.py | 2 +- plugins/inventory/stackpath_compute.py | 2 +- plugins/inventory/virtualbox.py | 2 +- plugins/lookup/cartesian.py | 2 +- plugins/lookup/chef_databag.py | 2 +- plugins/lookup/consul_kv.py | 2 +- plugins/lookup/credstash.py | 2 +- plugins/lookup/cyberarkpassword.py | 2 +- plugins/lookup/dependent.py | 2 +- plugins/lookup/dig.py | 2 +- plugins/lookup/dnstxt.py | 2 +- plugins/lookup/etcd.py | 2 +- plugins/lookup/filetree.py | 2 +- plugins/lookup/flattened.py | 2 +- plugins/lookup/hiera.py | 2 +- plugins/lookup/keyring.py | 2 +- plugins/lookup/lastpass.py | 2 +- plugins/lookup/lmdb_kv.py | 2 +- plugins/lookup/manifold.py | 2 +- plugins/lookup/nios.py | 2 +- plugins/lookup/passwordstore.py | 2 +- plugins/lookup/redis.py | 2 +- plugins/lookup/shelvefile.py | 2 +- plugins/modules/clustering/nomad/nomad_job.py | 2 +- plugins/modules/clustering/nomad/nomad_job_info.py | 2 +- plugins/modules/web_infrastructure/apache2_module.py | 2 +- 75 files changed, 75 insertions(+), 75 deletions(-) diff --git a/plugins/action/system/iptables_state.py b/plugins/action/system/iptables_state.py index 93e4bc2ed4..b8ae1a5dea 100644 --- a/plugins/action/system/iptables_state.py +++ b/plugins/action/system/iptables_state.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2020, quidame # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/action/system/shutdown.py b/plugins/action/system/shutdown.py index 4995ef8d8b..19813b0847 100644 --- a/plugins/action/system/shutdown.py +++ b/plugins/action/system/shutdown.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2020, Amin 
Vakil # Copyright: (c) 2016-2018, Matt Davis # Copyright: (c) 2018, Sam Doran diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index fb2a778fc3..ee36628f40 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2014, Brian Coca, Josh Drake, et al # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py index b790e73a4c..1e549d4d66 100644 --- a/plugins/cache/pickle.py +++ b/plugins/cache/pickle.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017, Brian Coca # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 6b5f2c4ad0..3c73d8b5be 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2014, Brian Coca, Josh Drake, et al # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py index b676dd0dbb..e5062b16d1 100644 --- a/plugins/cache/yaml.py +++ b/plugins/cache/yaml.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017, Brian Coca # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py index 39c912acae..c85cc60cda 100644 --- a/plugins/callback/context_demo.py +++ b/plugins/callback/context_demo.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (C) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git 
a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py index 352c773b9b..3b6e5e7ad4 100644 --- a/plugins/callback/counter_enabled.py +++ b/plugins/callback/counter_enabled.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2018, Ivan Aragones Muniesa # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ''' diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py index 38d3e1bee7..af8464631c 100644 --- a/plugins/callback/dense.py +++ b/plugins/callback/dense.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2016, Dag Wieers # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py index 771c425df8..c64b892d9b 100644 --- a/plugins/callback/hipchat.py +++ b/plugins/callback/hipchat.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (C) 2014, Matt Martz # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index c57e08804a..b535fa9540 100644 --- a/plugins/callback/jabber.py +++ b/plugins/callback/jabber.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (C) 2016 maxn nikolaev.makc@gmail.com # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index 24acf3fc95..2539bd9ade 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (C) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/loganalytics.py 
b/plugins/callback/loganalytics.py index ccc7649218..04fc646dc4 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index ddb4c477da..0c459bfac2 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2018, Samir Musali # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index 344bd219cd..ad71a6d448 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Logentries.com, Jimmy Tang # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index 95da7fa95a..4c4fad8450 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (C) 2020, Yevhen Khmelenko # (C) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/null.py b/plugins/callback/null.py index 9eb5198d0c..13ea65b438 100644 --- a/plugins/callback/null.py +++ b/plugins/callback/null.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/say.py b/plugins/callback/say.py index 309777e241..8e8bd507a2 100644 --- a/plugins/callback/say.py +++ 
b/plugins/callback/say.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index b1e09c8236..403eb84b33 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) Fastly, inc 2016 # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index c791bf6a36..5cb402b109 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (C) 2014-2015, Matt Martz # (C) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py index 73543614a8..f4865f2a26 100644 --- a/plugins/callback/syslog_json.py +++ b/plugins/callback/syslog_json.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index dec2ab0c8c..fd00fae71b 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Allyson Bowles <@akatch> # Copyright: (c) 2012-2014, Michael DeHaan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py index d4036c808e..24140116ed 100644 --- a/plugins/callback/yaml.py +++ b/plugins/callback/yaml.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- 
coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index 3e15947031..295bd4046b 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # # (c) 2013, Maykel Moya diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index caf9d06c60..94d1a3bd9c 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Copyright (c) 2013, Michael Scherer diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index 94761d5c17..2fd74313bc 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on jail.py # (c) 2013, Michael Scherer # (c) 2015, Toshio Kuratomi diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index c3de25c753..02f5aeeddd 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on local.py by Michael DeHaan # and chroot.py by Maykel Moya # Copyright (c) 2013, Michael Scherer diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index d5c7a7ebbe..2aaf1619dc 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Joerg Thalheim # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index 31ff13c776..63eaf6ca51 100644 --- a/plugins/connection/lxd.py +++ 
b/plugins/connection/lxd.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2016 Matt Clay # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index fd72f38e2f..1de9e10011 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on the buildah connection plugin # Copyright (c) 2017 Ansible Project # 2018 Kushal Das diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index 3d56083bb6..cbd85eaf3e 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Based on func.py diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index a859b5e32f..8fbcd8a038 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # and chroot.py (c) 2013, Maykel Moya # and jail.py (c) 2013, Michael Scherer diff --git a/plugins/doc_fragments/hpe3par.py b/plugins/doc_fragments/hpe3par.py index e16ead4207..ad445205d8 100644 --- a/plugins/doc_fragments/hpe3par.py +++ b/plugins/doc_fragments/hpe3par.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py index c6c5dd23bd..ecba2adde8 100644 --- a/plugins/doc_fragments/hwc.py +++ b/plugins/doc_fragments/hwc.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Huawei Inc. 
# GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py index 94ed18107d..94999c04ec 100644 --- a/plugins/doc_fragments/oracle.py +++ b/plugins/doc_fragments/oracle.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py index f76e7146b3..211ca6f9c1 100644 --- a/plugins/doc_fragments/oracle_creatable_resource.py +++ b/plugins/doc_fragments/oracle_creatable_resource.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_display_name_option.py b/plugins/doc_fragments/oracle_display_name_option.py index b9ce0d92fe..ff70d45dd9 100644 --- a/plugins/doc_fragments/oracle_display_name_option.py +++ b/plugins/doc_fragments/oracle_display_name_option.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_name_option.py b/plugins/doc_fragments/oracle_name_option.py index dd9b98816e..8c4f9c1e39 100644 --- a/plugins/doc_fragments/oracle_name_option.py +++ b/plugins/doc_fragments/oracle_name_option.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_tags.py b/plugins/doc_fragments/oracle_tags.py index e92598c549..f95b22c8ed 100644 --- a/plugins/doc_fragments/oracle_tags.py +++ b/plugins/doc_fragments/oracle_tags.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_wait_options.py b/plugins/doc_fragments/oracle_wait_options.py index d94f079a86..0312755ffa 100644 --- a/plugins/doc_fragments/oracle_wait_options.py +++ b/plugins/doc_fragments/oracle_wait_options.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py index 920457fa04..d541d5ad85 100644 --- a/plugins/doc_fragments/vexata.py +++ b/plugins/doc_fragments/vexata.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # # Copyright: (c) 2019, Sandeep Kasargod # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/filter/dict_kv.py b/plugins/filter/dict_kv.py index fc1978b977..7ce6c3e44a 100644 --- a/plugins/filter/dict_kv.py +++ b/plugins/filter/dict_kv.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (C) 2020 Stanislav German-Evtushenko (@giner) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py index 42dcf98234..f8fc4ac5bd 100644 --- a/plugins/filter/jc.py +++ b/plugins/filter/jc.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Filipe Niero Felisbino # # This file is part of 
Ansible diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py index 9b9ecb93f2..9c835e8c71 100644 --- a/plugins/filter/json_query.py +++ b/plugins/filter/json_query.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Filipe Niero Felisbino # # This file is part of Ansible diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py index dc04e99a96..7d25555aa9 100644 --- a/plugins/filter/random_mac.py +++ b/plugins/filter/random_mac.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2020 Ansible Project # # This file is part of Ansible diff --git a/plugins/filter/version_sort.py b/plugins/filter/version_sort.py index d228ea62d0..c59e87c9c6 100644 --- a/plugins/filter/version_sort.py +++ b/plugins/filter/version_sort.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (C) 2021 Eric Lavarde # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 566073a4a8..c2dcac5392 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index ade3adc3d4..e411006ff0 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index a74c6026ea..085c258d45 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project # GNU 
General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index b327824f33..86140124c5 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index e8477b95f3..d777875578 100644 --- a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2020 Shay Rybak # Copyright (c) 2020 Ansible Project # GNU General Public License v3.0+ diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 672312cd8e..89a77c88bb 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py index 841f4f8c4d..98043eba34 100644 --- a/plugins/lookup/cartesian.py +++ b/plugins/lookup/cartesian.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2013, Bradley Young # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index d594c7681e..f5ccc766c2 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2016, Josh Bradley # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index 58f450eb65..8b779e6aca 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Steve Gargan # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index 1a87deed41..9be3527b19 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Ensighten # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index 112e7c1cd8..80323c10fd 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017, Edward Nunez # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index 3f73f88bfa..1fb75ece66 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015-2021, Felix Fontein # (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index 6520b0d3ec..f5156b4d1e 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dnstxt.py 
b/plugins/lookup/dnstxt.py index 84bff41795..868d3dd3a3 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2012, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index ca13442e43..0c81d0215b 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2013, Jan-Piet Mens # (m) 2016, Mihai Moldovanu # (m) 2017, Juan Manuel Parrilla diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index e663fc9515..1c83486b05 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2016 Dag Wieers # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/flattened.py b/plugins/lookup/flattened.py index d1ddd14f56..c2e4494fd4 100644 --- a/plugins/lookup/flattened.py +++ b/plugins/lookup/flattened.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2013, Serge van Ginderachter # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index 658f377d59..5b440469eb 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017, Juan Manuel Parrilla # (c) 2012-17 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py index a98ae7aee9..73f9c5f4a9 100644 --- a/plugins/lookup/keyring.py +++ b/plugins/lookup/keyring.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: 
utf-8 -*- # (c) 2016, Samuel Boucher # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index 3ae51b4c64..920d33176f 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2016, Andrew Zenk # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index 61dc410cc4..9dd46e338a 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017-2018, Jan-Piet Mens # (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py index 076a475091..01bb13cf0b 100644 --- a/plugins/lookup/manifold.py +++ b/plugins/lookup/manifold.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2018, Arigato Machine Inc. 
# (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/nios.py b/plugins/lookup/nios.py index 008e8feffe..089805c97a 100644 --- a/plugins/lookup/nios.py +++ b/plugins/lookup/nios.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # # Copyright 2018 Red Hat | Ansible # diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 3e936d8b18..7c00f432b1 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017, Patrick Deelman # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index fdf3a6e17b..8de7e04cce 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2012, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 175ed49891..56cfdf1143 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Alejandro Guirao # (c) 2012-17 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/clustering/nomad/nomad_job.py b/plugins/modules/clustering/nomad/nomad_job.py index a5e1cd3755..341592be50 100644 --- a/plugins/modules/clustering/nomad/nomad_job.py +++ b/plugins/modules/clustering/nomad/nomad_job.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2020, FERREIRA Christophe # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff 
--git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py index d913ebeb61..d49111bb4f 100644 --- a/plugins/modules/clustering/nomad/nomad_job_info.py +++ b/plugins/modules/clustering/nomad/nomad_job_info.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2020, FERREIRA Christophe # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/apache2_module.py b/plugins/modules/web_infrastructure/apache2_module.py index d85ed0158f..c75dc1c30c 100644 --- a/plugins/modules/web_infrastructure/apache2_module.py +++ b/plugins/modules/web_infrastructure/apache2_module.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2013-2014, Christian Berendt # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) From 6bfa6e40f4c421e3781ccaf80743424c5b60ec0a Mon Sep 17 00:00:00 2001 From: David Hummel <6109326+hummeltech@users.noreply.github.com> Date: Sat, 7 Aug 2021 06:20:44 -0700 Subject: [PATCH 0254/2828] nmcli: Fix change detection for Wi-Fi security options (#3136) * Fixed `wifi_sec` option changes are not detected Also updated `docs` URL and formatting to match that of the `wifi` option * Removed extraneous `appends` to `cmd` in `connection_update` These really should have only been added to `connection_options` whose return values get `extended` onto `cmd` --- ...i-sec-change-detection-to-nmcli-module.yml | 3 ++ plugins/modules/net_tools/nmcli.py | 32 +++++------- .../plugins/modules/net_tools/test_nmcli.py | 51 +++++++++++++++++++ 3 files changed, 66 insertions(+), 20 deletions(-) create mode 100644 changelogs/fragments/3136-add-wifi-sec-change-detection-to-nmcli-module.yml diff --git a/changelogs/fragments/3136-add-wifi-sec-change-detection-to-nmcli-module.yml 
b/changelogs/fragments/3136-add-wifi-sec-change-detection-to-nmcli-module.yml new file mode 100644 index 0000000000..6cc5e7630d --- /dev/null +++ b/changelogs/fragments/3136-add-wifi-sec-change-detection-to-nmcli-module.yml @@ -0,0 +1,3 @@ +minor_changes: + - nmcli - add ``wifi-sec`` option change detection to support managing secure Wi-Fi connections + (https://github.com/ansible-collections/community.general/pull/3136). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index bbc1b4770f..92d1e65ef7 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -332,10 +332,10 @@ options: version_added: 2.0.0 wifi_sec: description: - - 'The security configuration of the Wifi connection. The valid attributes are listed on:' - - 'U(https://developer.gnome.org/NetworkManager/stable/settings-802-11-wireless-security.html)' - - 'For instance to use common WPA-PSK auth with a password:' - - '- C({key-mgmt: wpa-psk, psk: my_password})' + - 'The security configuration of the WiFi connection. The valid attributes are listed on: + U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' + - 'For instance to use common WPA-PSK auth with a password: + C({key-mgmt: wpa-psk, psk: my_password}).' type: dict version_added: 3.0.0 ssid: @@ -345,9 +345,9 @@ options: version_added: 3.0.0 wifi: description: - - 'The configuration of the Wifi connection. The valid attributes are listed on: + - 'The configuration of the WiFi connection. The valid attributes are listed on: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' - - 'For instance to create a hidden AP mode Wifi connection: + - 'For instance to create a hidden AP mode WiFi connection: C({hidden: true, mode: ap}).' 
type: dict version_added: 3.5.0 @@ -915,6 +915,11 @@ class Nmcli(object): options.update({ '802-11-wireless.%s' % name: value }) + if self.wifi_sec: + for name, value in self.wifi_sec.items(): + options.update({ + '802-11-wireless-security.%s' % name: value + }) # Convert settings values based on the situation. for setting, value in options.items(): setting_type = self.settings_type(setting) @@ -1065,19 +1070,6 @@ class Nmcli(object): else: ifname = self.ifname - if self.type == "wifi": - cmd.append('ssid') - cmd.append(self.ssid) - if self.wifi: - for name, value in self.wifi.items(): - # Disallow setting 'ssid' via 'wifi.ssid' - if name == 'ssid': - continue - cmd += ['802-11-wireless.%s' % name, value] - if self.wifi_sec: - for name, value in self.wifi_sec.items(): - cmd += ['wifi-sec.%s' % name, value] - options = { 'connection.interface-name': ifname, } @@ -1116,7 +1108,7 @@ class Nmcli(object): return self.connection_update('modify') def show_connection(self): - cmd = [self.nmcli_bin, 'con', 'show', self.conn_name] + cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] (rc, out, err) = self.execute_command(cmd) diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index b2307f245a..c1b3e93ed4 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -491,6 +491,22 @@ TESTCASE_WIRELESS = [ } ] +TESTCASE_SECURE_WIRELESS = [ + { + 'type': 'wifi', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'wireless_non_existant', + 'ip4': '10.10.10.10/24', + 'ssid': 'Brittany', + 'wifi_sec': { + 'key-mgmt': 'wpa-psk', + 'psk': 'VERY_SECURE_PASSWORD', + }, + 'state': 'present', + '_ansible_check_mode': False, + } +] + TESTCASE_DUMMY_STATIC = [ { 'type': 'dummy', @@ -1630,6 +1646,41 @@ def test_create_wireless(mocked_generic_connection_create, capfd): assert results['changed'] +@pytest.mark.parametrize('patch_ansible_module', 
TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +def test_create_secure_wireless(mocked_generic_connection_create, capfd): + """ + Test : Create secure wireless connection + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[0] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'add' + assert add_args[0][3] == 'type' + assert add_args[0][4] == 'wifi' + assert add_args[0][5] == 'con-name' + assert add_args[0][6] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in ['connection.interface-name', 'wireless_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + '802-11-wireless.ssid', 'Brittany', + '802-11-wireless-security.key-mgmt', 'wpa-psk', + '802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD']: + assert param in add_args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) def test_create_dummy_static(mocked_generic_connection_create, capfd): """ From 771e9de010b2c4cb256f5fe2a9375d63e6eac511 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 8 Aug 2021 20:40:22 +1200 Subject: [PATCH 0255/2828] mass-added the utf-8 marker (#3163) * added the utf-8 marker * fixed the utf-8 marker where it was missde before --- plugins/lookup/nios_next_ip.py | 1 + plugins/lookup/nios_next_network.py | 1 + plugins/module_utils/_mount.py | 1 + plugins/module_utils/_netapp.py | 1 + plugins/module_utils/alicloud_ecs.py | 1 + plugins/module_utils/cloud.py | 1 + plugins/module_utils/database.py | 1 + plugins/module_utils/heroku.py | 1 + plugins/module_utils/hwc_utils.py | 1 + 
plugins/module_utils/ibm_sa_utils.py | 1 + plugins/module_utils/identity/keycloak/keycloak.py | 1 + plugins/module_utils/known_hosts.py | 1 + plugins/module_utils/linode.py | 1 + plugins/module_utils/manageiq.py | 1 + plugins/module_utils/memset.py | 1 + plugins/module_utils/net_tools/nios/api.py | 1 + plugins/module_utils/oneandone.py | 1 + plugins/module_utils/oneview.py | 1 + plugins/module_utils/online.py | 1 + plugins/module_utils/opennebula.py | 1 + plugins/module_utils/oracle/oci_utils.py | 1 + plugins/module_utils/rax.py | 1 + plugins/module_utils/redfish_utils.py | 1 + plugins/module_utils/redhat.py | 1 + plugins/module_utils/remote_management/lxca/common.py | 1 + plugins/module_utils/scaleway.py | 1 + plugins/module_utils/storage/emc/emc_vnx.py | 1 + plugins/module_utils/storage/hpe3par/hpe3par.py | 1 + plugins/module_utils/univention_umc.py | 2 +- plugins/module_utils/utm_utils.py | 1 + plugins/modules/cloud/centurylink/clc_aa_policy.py | 1 + plugins/modules/cloud/centurylink/clc_alert_policy.py | 1 + plugins/modules/cloud/centurylink/clc_blueprint_package.py | 1 + plugins/modules/cloud/centurylink/clc_firewall_policy.py | 1 + plugins/modules/cloud/centurylink/clc_group.py | 1 + plugins/modules/cloud/centurylink/clc_loadbalancer.py | 1 + plugins/modules/cloud/centurylink/clc_modify_server.py | 1 + plugins/modules/cloud/centurylink/clc_publicip.py | 1 + plugins/modules/cloud/centurylink/clc_server.py | 1 + plugins/modules/cloud/centurylink/clc_server_snapshot.py | 1 + plugins/modules/cloud/heroku/heroku_collaborator.py | 1 + plugins/modules/cloud/linode/linode.py | 1 + plugins/modules/cloud/misc/proxmox.py | 1 + plugins/modules/cloud/misc/proxmox_template.py | 1 + plugins/modules/cloud/misc/xenserver_facts.py | 1 + plugins/modules/cloud/oneandone/oneandone_firewall_policy.py | 1 + plugins/modules/cloud/oneandone/oneandone_load_balancer.py | 1 + plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py | 1 + 
plugins/modules/cloud/oneandone/oneandone_private_network.py | 1 + plugins/modules/cloud/oneandone/oneandone_public_ip.py | 1 + plugins/modules/cloud/oneandone/oneandone_server.py | 1 + plugins/modules/cloud/opennebula/one_host.py | 1 + plugins/modules/cloud/opennebula/one_template.py | 1 + plugins/modules/cloud/oracle/oci_vcn.py | 1 + plugins/modules/cloud/ovh/ovh_ip_failover.py | 1 + plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py | 1 + plugins/modules/cloud/packet/packet_device.py | 1 + plugins/modules/cloud/packet/packet_sshkey.py | 1 + plugins/modules/cloud/profitbricks/profitbricks.py | 1 + plugins/modules/cloud/profitbricks/profitbricks_datacenter.py | 1 + plugins/modules/cloud/profitbricks/profitbricks_nic.py | 1 + plugins/modules/cloud/profitbricks/profitbricks_volume.py | 1 + .../cloud/profitbricks/profitbricks_volume_attachments.py | 1 + plugins/modules/cloud/pubnub/pubnub_blocks.py | 1 + plugins/modules/cloud/rackspace/rax.py | 1 + plugins/modules/cloud/rackspace/rax_cbs.py | 1 + plugins/modules/cloud/rackspace/rax_cbs_attachments.py | 1 + plugins/modules/cloud/rackspace/rax_cdb.py | 1 + plugins/modules/cloud/rackspace/rax_cdb_database.py | 1 + plugins/modules/cloud/rackspace/rax_cdb_user.py | 1 + plugins/modules/cloud/rackspace/rax_clb.py | 1 + plugins/modules/cloud/rackspace/rax_clb_nodes.py | 1 + plugins/modules/cloud/rackspace/rax_clb_ssl.py | 1 + plugins/modules/cloud/rackspace/rax_dns.py | 1 + plugins/modules/cloud/rackspace/rax_dns_record.py | 1 + plugins/modules/cloud/rackspace/rax_facts.py | 1 + plugins/modules/cloud/rackspace/rax_files.py | 1 + plugins/modules/cloud/rackspace/rax_files_objects.py | 1 + plugins/modules/cloud/rackspace/rax_identity.py | 1 + plugins/modules/cloud/rackspace/rax_keypair.py | 1 + plugins/modules/cloud/rackspace/rax_meta.py | 1 + plugins/modules/cloud/rackspace/rax_mon_alarm.py | 1 + plugins/modules/cloud/rackspace/rax_mon_check.py | 1 + plugins/modules/cloud/rackspace/rax_mon_entity.py | 1 + 
plugins/modules/cloud/rackspace/rax_mon_notification.py | 1 + plugins/modules/cloud/rackspace/rax_mon_notification_plan.py | 1 + plugins/modules/cloud/rackspace/rax_network.py | 1 + plugins/modules/cloud/rackspace/rax_queue.py | 1 + plugins/modules/cloud/rackspace/rax_scaling_group.py | 1 + plugins/modules/cloud/rackspace/rax_scaling_policy.py | 1 + plugins/modules/cloud/scaleway/scaleway_compute.py | 1 + plugins/modules/cloud/scaleway/scaleway_database_backup.py | 1 + plugins/modules/cloud/scaleway/scaleway_ip.py | 1 + plugins/modules/cloud/scaleway/scaleway_lb.py | 1 + plugins/modules/cloud/scaleway/scaleway_security_group.py | 1 + plugins/modules/cloud/scaleway/scaleway_security_group_rule.py | 1 + plugins/modules/cloud/scaleway/scaleway_sshkey.py | 1 + plugins/modules/cloud/scaleway/scaleway_user_data.py | 1 + plugins/modules/cloud/scaleway/scaleway_volume.py | 1 + plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py | 1 + plugins/modules/cloud/univention/udm_dns_record.py | 2 +- plugins/modules/cloud/univention/udm_dns_zone.py | 2 +- plugins/modules/cloud/univention/udm_group.py | 2 +- plugins/modules/cloud/univention/udm_share.py | 2 +- plugins/modules/cloud/univention/udm_user.py | 2 +- plugins/modules/clustering/consul/consul.py | 1 + plugins/modules/clustering/consul/consul_acl.py | 1 + plugins/modules/clustering/consul/consul_kv.py | 1 + plugins/modules/clustering/etcd3.py | 1 + plugins/modules/clustering/znode.py | 1 + plugins/modules/database/aerospike/aerospike_migrations.py | 1 + plugins/modules/database/influxdb/influxdb_database.py | 1 + plugins/modules/database/influxdb/influxdb_retention_policy.py | 1 + plugins/modules/database/influxdb/influxdb_user.py | 1 + plugins/modules/files/sapcar_extract.py | 1 + plugins/modules/files/xattr.py | 1 + plugins/modules/monitoring/sensu/sensu_client.py | 1 + plugins/modules/monitoring/sensu/sensu_handler.py | 1 + plugins/modules/net_tools/dnsimple.py | 1 + plugins/modules/net_tools/ip_netns.py | 1 + 
plugins/modules/net_tools/ipinfoio_facts.py | 2 +- plugins/modules/net_tools/ipwcli_dns.py | 1 + plugins/modules/net_tools/lldp.py | 1 + plugins/modules/net_tools/nios/nios_a_record.py | 1 + plugins/modules/net_tools/nios/nios_aaaa_record.py | 1 + plugins/modules/net_tools/nios/nios_cname_record.py | 1 + plugins/modules/net_tools/nios/nios_dns_view.py | 1 + plugins/modules/net_tools/nios/nios_fixed_address.py | 1 + plugins/modules/net_tools/nios/nios_host_record.py | 1 + plugins/modules/net_tools/nios/nios_member.py | 1 + plugins/modules/net_tools/nios/nios_mx_record.py | 1 + plugins/modules/net_tools/nios/nios_naptr_record.py | 1 + plugins/modules/net_tools/nios/nios_network.py | 1 + plugins/modules/net_tools/nios/nios_network_view.py | 1 + plugins/modules/net_tools/nios/nios_ptr_record.py | 1 + plugins/modules/net_tools/nios/nios_srv_record.py | 1 + plugins/modules/net_tools/nios/nios_txt_record.py | 1 + plugins/modules/net_tools/nios/nios_zone.py | 1 + plugins/modules/net_tools/nsupdate.py | 1 + plugins/modules/notification/syslogger.py | 1 + plugins/modules/packaging/language/pip_package_info.py | 1 + plugins/modules/packaging/os/redhat_subscription.py | 1 + plugins/modules/packaging/os/rhn_channel.py | 1 + plugins/modules/packaging/os/rhsm_release.py | 1 + plugins/modules/packaging/os/rhsm_repository.py | 1 + plugins/modules/packaging/os/rpm_ostree_pkg.py | 1 + plugins/modules/packaging/os/swupd.py | 1 + plugins/modules/packaging/os/zypper_repository.py | 2 +- plugins/modules/remote_management/lxca/lxca_cmms.py | 1 + plugins/modules/remote_management/lxca/lxca_nodes.py | 1 + plugins/modules/remote_management/manageiq/manageiq_group.py | 1 + plugins/modules/remote_management/manageiq/manageiq_tenant.py | 1 + plugins/modules/remote_management/manageiq/manageiq_user.py | 1 + .../remote_management/oneview/oneview_datacenter_info.py | 1 + .../remote_management/oneview/oneview_enclosure_info.py | 1 + .../remote_management/oneview/oneview_ethernet_network.py | 1 + 
.../remote_management/oneview/oneview_ethernet_network_info.py | 1 + .../modules/remote_management/oneview/oneview_fc_network.py | 1 + .../remote_management/oneview/oneview_fc_network_info.py | 1 + .../modules/remote_management/oneview/oneview_fcoe_network.py | 1 + .../remote_management/oneview/oneview_fcoe_network_info.py | 1 + .../oneview/oneview_logical_interconnect_group.py | 1 + .../oneview/oneview_logical_interconnect_group_info.py | 1 + .../modules/remote_management/oneview/oneview_network_set.py | 1 + .../remote_management/oneview/oneview_network_set_info.py | 1 + .../modules/remote_management/oneview/oneview_san_manager.py | 1 + .../remote_management/oneview/oneview_san_manager_info.py | 1 + plugins/modules/source_control/github/github_issue.py | 1 + plugins/modules/source_control/github/github_key.py | 1 + plugins/modules/source_control/github/github_webhook.py | 1 + plugins/modules/source_control/github/github_webhook_info.py | 1 + plugins/modules/storage/emc/emc_vnx_sg_member.py | 1 + plugins/modules/storage/hpe3par/ss_3par_cpg.py | 1 + plugins/modules/system/kernel_blacklist.py | 2 +- plugins/modules/system/lbu.py | 1 + plugins/modules/system/pids.py | 1 + plugins/modules/system/python_requirements_info.py | 1 + plugins/modules/system/selogin.py | 1 + plugins/modules/system/syspatch.py | 1 + plugins/modules/system/sysupgrade.py | 1 + plugins/modules/system/vdo.py | 1 + plugins/modules/web_infrastructure/jenkins_build.py | 1 + plugins/modules/web_infrastructure/jenkins_job.py | 1 + plugins/modules/web_infrastructure/jenkins_job_info.py | 1 + plugins/modules/web_infrastructure/jenkins_plugin.py | 2 +- plugins/modules/web_infrastructure/jenkins_script.py | 3 +-- plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py | 1 + .../web_infrastructure/sophos_utm/utm_aaa_group_info.py | 1 + .../web_infrastructure/sophos_utm/utm_ca_host_key_cert.py | 1 + .../web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py | 1 + 
plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py | 1 + .../sophos_utm/utm_network_interface_address.py | 1 + .../sophos_utm/utm_network_interface_address_info.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_auth_profile.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_exception.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_frontend.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_frontend_info.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_location.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_location_info.py | 1 + 199 files changed, 199 insertions(+), 12 deletions(-) diff --git a/plugins/lookup/nios_next_ip.py b/plugins/lookup/nios_next_ip.py index 58e95c7d13..8fdbbc6f99 100644 --- a/plugins/lookup/nios_next_ip.py +++ b/plugins/lookup/nios_next_ip.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright 2018 Red Hat | Ansible # diff --git a/plugins/lookup/nios_next_network.py b/plugins/lookup/nios_next_network.py index c18c6ae993..a1c913320a 100644 --- a/plugins/lookup/nios_next_network.py +++ b/plugins/lookup/nios_next_network.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright 2018 Red Hat | Ansible # diff --git a/plugins/module_utils/_mount.py b/plugins/module_utils/_mount.py index 58be0e8b68..391d468178 100644 --- a/plugins/module_utils/_mount.py +++ b/plugins/module_utils/_mount.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is based on # Lib/posixpath.py of cpython diff --git a/plugins/module_utils/_netapp.py b/plugins/module_utils/_netapp.py index 81a50a336d..126cc1bf16 100644 --- a/plugins/module_utils/_netapp.py +++ b/plugins/module_utils/_netapp.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/alicloud_ecs.py b/plugins/module_utils/alicloud_ecs.py index 3c87c1ad76..d4d3bf76c9 100644 --- a/plugins/module_utils/alicloud_ecs.py +++ b/plugins/module_utils/alicloud_ecs.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/cloud.py b/plugins/module_utils/cloud.py index d90d1f5234..7619023a3c 100644 --- a/plugins/module_utils/cloud.py +++ b/plugins/module_utils/cloud.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # (c) 2016 Allen Sanabria, # diff --git a/plugins/module_utils/database.py b/plugins/module_utils/database.py index 67850308e0..825d3a2be9 100644 --- a/plugins/module_utils/database.py +++ b/plugins/module_utils/database.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/heroku.py b/plugins/module_utils/heroku.py index b6e89614f1..70b144c077 100644 --- a/plugins/module_utils/heroku.py +++ b/plugins/module_utils/heroku.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) diff --git a/plugins/module_utils/hwc_utils.py b/plugins/module_utils/hwc_utils.py index c11cb7d4d2..489e90dd3c 100644 --- a/plugins/module_utils/hwc_utils.py +++ b/plugins/module_utils/hwc_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c), Google Inc, 2017 # Simplified BSD License (see licenses/simplified_bsd.txt or # https://opensource.org/licenses/BSD-2-Clause) diff --git a/plugins/module_utils/ibm_sa_utils.py b/plugins/module_utils/ibm_sa_utils.py index fdaa38a9fc..4f70f844cd 100644 --- a/plugins/module_utils/ibm_sa_utils.py +++ b/plugins/module_utils/ibm_sa_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu # diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index c782e3690c..d53a29ba10 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c) 2017, Eike Frost # # This code is part of Ansible, but is an independent component. diff --git a/plugins/module_utils/known_hosts.py b/plugins/module_utils/known_hosts.py index efd311eb51..ea6c95b6e2 100644 --- a/plugins/module_utils/known_hosts.py +++ b/plugins/module_utils/known_hosts.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/linode.py b/plugins/module_utils/linode.py index 53d546dbe8..9d7c37e68d 100644 --- a/plugins/module_utils/linode.py +++ b/plugins/module_utils/linode.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/manageiq.py b/plugins/module_utils/manageiq.py index 7038fac88a..98e5590cc6 100644 --- a/plugins/module_utils/manageiq.py +++ b/plugins/module_utils/manageiq.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2017, Daniel Korn # diff --git a/plugins/module_utils/memset.py b/plugins/module_utils/memset.py index 357fded58e..7813290a72 100644 --- a/plugins/module_utils/memset.py +++ b/plugins/module_utils/memset.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/net_tools/nios/api.py b/plugins/module_utils/net_tools/nios/api.py index cbb8b63f3b..babda7659a 100644 --- a/plugins/module_utils/net_tools/nios/api.py +++ b/plugins/module_utils/net_tools/nios/api.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/oneandone.py b/plugins/module_utils/oneandone.py index 466d2665fa..5f65b670f3 100644 --- a/plugins/module_utils/oneandone.py +++ b/plugins/module_utils/oneandone.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/oneview.py b/plugins/module_utils/oneview.py index 66e1d6d4c7..6d786b0b80 100644 --- a/plugins/module_utils/oneview.py +++ b/plugins/module_utils/oneview.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/online.py b/plugins/module_utils/online.py index 464e454288..c0294abb79 100644 --- a/plugins/module_utils/online.py +++ b/plugins/module_utils/online.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from __future__ import (absolute_import, division, print_function) diff --git a/plugins/module_utils/opennebula.py b/plugins/module_utils/opennebula.py index a0a8d1305b..c896a9c6fa 100644 --- a/plugins/module_utils/opennebula.py +++ b/plugins/module_utils/opennebula.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright 2018 www.privaz.io Valletech AB # diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py index 0b82dadf0e..88e577af5c 100644 --- a/plugins/module_utils/oracle/oci_utils.py +++ b/plugins/module_utils/oracle/oci_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c) 2017, 2018, 2019 
Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/rax.py b/plugins/module_utils/rax.py index e8c455e0e9..84effee97c 100644 --- a/plugins/module_utils/rax.py +++ b/plugins/module_utils/rax.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 94e2c4b7d8..c861820edf 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c) 2017-2018 Dell EMC Inc. # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/redhat.py b/plugins/module_utils/redhat.py index 0fb6e9b1f3..85f4a6aab2 100644 --- a/plugins/module_utils/redhat.py +++ b/plugins/module_utils/redhat.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/remote_management/lxca/common.py b/plugins/module_utils/remote_management/lxca/common.py index 297397e30d..07092b9642 100644 --- a/plugins/module_utils/remote_management/lxca/common.py +++ b/plugins/module_utils/remote_management/lxca/common.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index 3c73e92bb8..d714fd69e8 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function) __metaclass__ = type diff --git a/plugins/module_utils/storage/emc/emc_vnx.py b/plugins/module_utils/storage/emc/emc_vnx.py index afb1b6979c..5922512676 100644 --- a/plugins/module_utils/storage/emc/emc_vnx.py +++ b/plugins/module_utils/storage/emc/emc_vnx.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/storage/hpe3par/hpe3par.py b/plugins/module_utils/storage/hpe3par/hpe3par.py index 47868a4b8c..b7734444dd 100644 --- a/plugins/module_utils/storage/hpe3par/hpe3par.py +++ b/plugins/module_utils/storage/hpe3par/hpe3par.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Hewlett Packard Enterprise Development LP # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) diff --git a/plugins/module_utils/univention_umc.py b/plugins/module_utils/univention_umc.py index c1d8b77749..a44a0052a9 100644 --- a/plugins/module_utils/univention_umc.py +++ b/plugins/module_utils/univention_umc.py @@ -1,4 +1,4 @@ -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
diff --git a/plugins/module_utils/utm_utils.py b/plugins/module_utils/utm_utils.py index fd196dcbca..7e6ff3093e 100644 --- a/plugins/module_utils/utm_utils.py +++ b/plugins/module_utils/utm_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/modules/cloud/centurylink/clc_aa_policy.py b/plugins/modules/cloud/centurylink/clc_aa_policy.py index 88c27e20f5..1d52cca7c5 100644 --- a/plugins/modules/cloud/centurylink/clc_aa_policy.py +++ b/plugins/modules/cloud/centurylink/clc_aa_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/centurylink/clc_alert_policy.py b/plugins/modules/cloud/centurylink/clc_alert_policy.py index 374f1cada1..de9d146dc4 100644 --- a/plugins/modules/cloud/centurylink/clc_alert_policy.py +++ b/plugins/modules/cloud/centurylink/clc_alert_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink diff --git a/plugins/modules/cloud/centurylink/clc_blueprint_package.py b/plugins/modules/cloud/centurylink/clc_blueprint_package.py index 4071b67c7c..bd0e868fa3 100644 --- a/plugins/modules/cloud/centurylink/clc_blueprint_package.py +++ b/plugins/modules/cloud/centurylink/clc_blueprint_package.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/centurylink/clc_firewall_policy.py b/plugins/modules/cloud/centurylink/clc_firewall_policy.py index ad26dc92f7..a8f8a4e5f0 100644 --- 
a/plugins/modules/cloud/centurylink/clc_firewall_policy.py +++ b/plugins/modules/cloud/centurylink/clc_firewall_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/centurylink/clc_group.py b/plugins/modules/cloud/centurylink/clc_group.py index a80cc400e9..e1c05c6c0c 100644 --- a/plugins/modules/cloud/centurylink/clc_group.py +++ b/plugins/modules/cloud/centurylink/clc_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink diff --git a/plugins/modules/cloud/centurylink/clc_loadbalancer.py b/plugins/modules/cloud/centurylink/clc_loadbalancer.py index 400a8b9c3f..950e087976 100644 --- a/plugins/modules/cloud/centurylink/clc_loadbalancer.py +++ b/plugins/modules/cloud/centurylink/clc_loadbalancer.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # diff --git a/plugins/modules/cloud/centurylink/clc_modify_server.py b/plugins/modules/cloud/centurylink/clc_modify_server.py index c0730a9c2b..90a368867e 100644 --- a/plugins/modules/cloud/centurylink/clc_modify_server.py +++ b/plugins/modules/cloud/centurylink/clc_modify_server.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/centurylink/clc_publicip.py b/plugins/modules/cloud/centurylink/clc_publicip.py index 8b5ac4cb4e..1cdb4aa8db 100644 --- a/plugins/modules/cloud/centurylink/clc_publicip.py +++ b/plugins/modules/cloud/centurylink/clc_publicip.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git 
a/plugins/modules/cloud/centurylink/clc_server.py b/plugins/modules/cloud/centurylink/clc_server.py index 4e02421892..95481f1a52 100644 --- a/plugins/modules/cloud/centurylink/clc_server.py +++ b/plugins/modules/cloud/centurylink/clc_server.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/centurylink/clc_server_snapshot.py b/plugins/modules/cloud/centurylink/clc_server_snapshot.py index 1c706b07a4..1f92def088 100644 --- a/plugins/modules/cloud/centurylink/clc_server_snapshot.py +++ b/plugins/modules/cloud/centurylink/clc_server_snapshot.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/heroku/heroku_collaborator.py b/plugins/modules/cloud/heroku/heroku_collaborator.py index a326894dce..bbc34fdb30 100644 --- a/plugins/modules/cloud/heroku/heroku_collaborator.py +++ b/plugins/modules/cloud/heroku/heroku_collaborator.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/linode/linode.py b/plugins/modules/cloud/linode/linode.py index c9ee0e61ed..c627fb705a 100644 --- a/plugins/modules/cloud/linode/linode.py +++ b/plugins/modules/cloud/linode/linode.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py index 21817f10dc..c777564186 100644 --- a/plugins/modules/cloud/misc/proxmox.py +++ b/plugins/modules/cloud/misc/proxmox.py @@ -1,4 +1,5 
@@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/misc/proxmox_template.py b/plugins/modules/cloud/misc/proxmox_template.py index d7fb9341e6..bee2583908 100644 --- a/plugins/modules/cloud/misc/proxmox_template.py +++ b/plugins/modules/cloud/misc/proxmox_template.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: Ansible Project # diff --git a/plugins/modules/cloud/misc/xenserver_facts.py b/plugins/modules/cloud/misc/xenserver_facts.py index bc01c56ecb..f65e3c9a86 100644 --- a/plugins/modules/cloud/misc/xenserver_facts.py +++ b/plugins/modules/cloud/misc/xenserver_facts.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py b/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py index 32e42ea865..d46ce38897 100644 --- a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py +++ b/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py b/plugins/modules/cloud/oneandone/oneandone_load_balancer.py index 71f1d96b9c..5f541a878c 100644 --- a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py +++ b/plugins/modules/cloud/oneandone/oneandone_load_balancer.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py 
b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py index 67f2ce9cc0..28dd0d41c5 100644 --- a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py +++ b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/oneandone/oneandone_private_network.py b/plugins/modules/cloud/oneandone/oneandone_private_network.py index edbdc9f8ce..6a16cf683e 100644 --- a/plugins/modules/cloud/oneandone/oneandone_private_network.py +++ b/plugins/modules/cloud/oneandone/oneandone_private_network.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/oneandone/oneandone_public_ip.py b/plugins/modules/cloud/oneandone/oneandone_public_ip.py index edefbc938f..96b1c9f3a5 100644 --- a/plugins/modules/cloud/oneandone/oneandone_public_ip.py +++ b/plugins/modules/cloud/oneandone/oneandone_public_ip.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/oneandone/oneandone_server.py b/plugins/modules/cloud/oneandone/oneandone_server.py index 9eaf943be7..aa651bd75f 100644 --- a/plugins/modules/cloud/oneandone/oneandone_server.py +++ b/plugins/modules/cloud/oneandone/oneandone_server.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/opennebula/one_host.py b/plugins/modules/cloud/opennebula/one_host.py index 714d2d86a9..f205a40a2c 100644 --- a/plugins/modules/cloud/opennebula/one_host.py +++ b/plugins/modules/cloud/opennebula/one_host.py @@ -1,4 +1,5 @@ 
#!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright 2018 www.privaz.io Valletech AB # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/opennebula/one_template.py b/plugins/modules/cloud/opennebula/one_template.py index b4c8a2fa83..3b0b601193 100644 --- a/plugins/modules/cloud/opennebula/one_template.py +++ b/plugins/modules/cloud/opennebula/one_template.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) 2021, Georg Gadinger # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/oracle/oci_vcn.py b/plugins/modules/cloud/oracle/oci_vcn.py index e2906357ae..a82914bdea 100644 --- a/plugins/modules/cloud/oracle/oci_vcn.py +++ b/plugins/modules/cloud/oracle/oci_vcn.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2017, 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/ovh/ovh_ip_failover.py b/plugins/modules/cloud/ovh/ovh_ip_failover.py index 545c40fff7..26179eb8f7 100644 --- a/plugins/modules/cloud/ovh/ovh_ip_failover.py +++ b/plugins/modules/cloud/ovh/ovh_ip_failover.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py b/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py index 965a499c6e..28d6f3a129 100644 --- a/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py +++ b/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git 
a/plugins/modules/cloud/packet/packet_device.py b/plugins/modules/cloud/packet/packet_device.py index f939572656..5cc8d13e9a 100644 --- a/plugins/modules/cloud/packet/packet_device.py +++ b/plugins/modules/cloud/packet/packet_device.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2016, Tomas Karasek # (c) 2016, Matt Baldwin # (c) 2016, Thibaud Morel l'Horset diff --git a/plugins/modules/cloud/packet/packet_sshkey.py b/plugins/modules/cloud/packet/packet_sshkey.py index 97589cddb9..57e988630e 100644 --- a/plugins/modules/cloud/packet/packet_sshkey.py +++ b/plugins/modules/cloud/packet/packet_sshkey.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright 2016 Tomas Karasek # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/profitbricks/profitbricks.py b/plugins/modules/cloud/profitbricks/profitbricks.py index 4c24d6408f..3a75778a08 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks.py +++ b/plugins/modules/cloud/profitbricks/profitbricks.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py b/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py index e3ba1d4950..7897ffdeb9 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py +++ b/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/profitbricks/profitbricks_nic.py b/plugins/modules/cloud/profitbricks/profitbricks_nic.py index 49941241c6..5d98e05e4b 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_nic.py +++ 
b/plugins/modules/cloud/profitbricks/profitbricks_nic.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume.py b/plugins/modules/cloud/profitbricks/profitbricks_volume.py index 5fff01d3d7..be1c18b55a 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_volume.py +++ b/plugins/modules/cloud/profitbricks/profitbricks_volume.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py b/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py index 72f03e674a..1fb3f3c0e2 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py +++ b/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/pubnub/pubnub_blocks.py b/plugins/modules/cloud/pubnub/pubnub_blocks.py index c8de702597..d3b76337a3 100644 --- a/plugins/modules/cloud/pubnub/pubnub_blocks.py +++ b/plugins/modules/cloud/pubnub/pubnub_blocks.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # PubNub Real-time Cloud-Hosted Push API and Push Notification Client # Frameworks diff --git a/plugins/modules/cloud/rackspace/rax.py b/plugins/modules/cloud/rackspace/rax.py index cbaa0a57d2..8c452d9d72 100644 --- a/plugins/modules/cloud/rackspace/rax.py +++ b/plugins/modules/cloud/rackspace/rax.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_cbs.py b/plugins/modules/cloud/rackspace/rax_cbs.py index a681feff84..b543f5979a 100644 --- a/plugins/modules/cloud/rackspace/rax_cbs.py +++ b/plugins/modules/cloud/rackspace/rax_cbs.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_cbs_attachments.py b/plugins/modules/cloud/rackspace/rax_cbs_attachments.py index 71d01620d4..fd21081475 100644 --- a/plugins/modules/cloud/rackspace/rax_cbs_attachments.py +++ b/plugins/modules/cloud/rackspace/rax_cbs_attachments.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_cdb.py b/plugins/modules/cloud/rackspace/rax_cdb.py index 5b9996cd21..04bbe71cda 100644 --- a/plugins/modules/cloud/rackspace/rax_cdb.py +++ b/plugins/modules/cloud/rackspace/rax_cdb.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_cdb_database.py b/plugins/modules/cloud/rackspace/rax_cdb_database.py index 6d3435e806..86cd1aac40 100644 --- a/plugins/modules/cloud/rackspace/rax_cdb_database.py +++ b/plugins/modules/cloud/rackspace/rax_cdb_database.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_cdb_user.py b/plugins/modules/cloud/rackspace/rax_cdb_user.py index 01c10950c4..674f17c070 100644 --- a/plugins/modules/cloud/rackspace/rax_cdb_user.py +++ 
b/plugins/modules/cloud/rackspace/rax_cdb_user.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_clb.py b/plugins/modules/cloud/rackspace/rax_clb.py index 5ff1e3140f..9160133e21 100644 --- a/plugins/modules/cloud/rackspace/rax_clb.py +++ b/plugins/modules/cloud/rackspace/rax_clb.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_clb_nodes.py b/plugins/modules/cloud/rackspace/rax_clb_nodes.py index c066ab66db..4adcc66fb7 100644 --- a/plugins/modules/cloud/rackspace/rax_clb_nodes.py +++ b/plugins/modules/cloud/rackspace/rax_clb_nodes.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_clb_ssl.py b/plugins/modules/cloud/rackspace/rax_clb_ssl.py index 114128e8b1..adf375124d 100644 --- a/plugins/modules/cloud/rackspace/rax_clb_ssl.py +++ b/plugins/modules/cloud/rackspace/rax_clb_ssl.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_dns.py b/plugins/modules/cloud/rackspace/rax_dns.py index e9b7e2be95..915e13a9a6 100644 --- a/plugins/modules/cloud/rackspace/rax_dns.py +++ b/plugins/modules/cloud/rackspace/rax_dns.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_dns_record.py 
b/plugins/modules/cloud/rackspace/rax_dns_record.py index 0b60120a75..1a6986dea7 100644 --- a/plugins/modules/cloud/rackspace/rax_dns_record.py +++ b/plugins/modules/cloud/rackspace/rax_dns_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_facts.py b/plugins/modules/cloud/rackspace/rax_facts.py index f9fd89556f..0288a5e35b 100644 --- a/plugins/modules/cloud/rackspace/rax_facts.py +++ b/plugins/modules/cloud/rackspace/rax_facts.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_files.py b/plugins/modules/cloud/rackspace/rax_files.py index 7080cc2f84..1e1f82c85d 100644 --- a/plugins/modules/cloud/rackspace/rax_files.py +++ b/plugins/modules/cloud/rackspace/rax_files.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2013, Paul Durivage # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_files_objects.py b/plugins/modules/cloud/rackspace/rax_files_objects.py index dc44555417..3269fe0512 100644 --- a/plugins/modules/cloud/rackspace/rax_files_objects.py +++ b/plugins/modules/cloud/rackspace/rax_files_objects.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2013, Paul Durivage # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_identity.py b/plugins/modules/cloud/rackspace/rax_identity.py index 330c510d09..2021052faa 100644 --- a/plugins/modules/cloud/rackspace/rax_identity.py +++ b/plugins/modules/cloud/rackspace/rax_identity.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # 
Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_keypair.py b/plugins/modules/cloud/rackspace/rax_keypair.py index 0314883f60..90b0183e50 100644 --- a/plugins/modules/cloud/rackspace/rax_keypair.py +++ b/plugins/modules/cloud/rackspace/rax_keypair.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_meta.py b/plugins/modules/cloud/rackspace/rax_meta.py index b7d172d93f..3504181f19 100644 --- a/plugins/modules/cloud/rackspace/rax_meta.py +++ b/plugins/modules/cloud/rackspace/rax_meta.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_mon_alarm.py b/plugins/modules/cloud/rackspace/rax_mon_alarm.py index 8de26609db..7e99db3fa8 100644 --- a/plugins/modules/cloud/rackspace/rax_mon_alarm.py +++ b/plugins/modules/cloud/rackspace/rax_mon_alarm.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_mon_check.py b/plugins/modules/cloud/rackspace/rax_mon_check.py index e04dfc7444..17a3932f6e 100644 --- a/plugins/modules/cloud/rackspace/rax_mon_check.py +++ b/plugins/modules/cloud/rackspace/rax_mon_check.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_mon_entity.py b/plugins/modules/cloud/rackspace/rax_mon_entity.py index 69f49cd07b..2f8cdeefd8 
100644 --- a/plugins/modules/cloud/rackspace/rax_mon_entity.py +++ b/plugins/modules/cloud/rackspace/rax_mon_entity.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification.py b/plugins/modules/cloud/rackspace/rax_mon_notification.py index 416d03bae8..fb645c3036 100644 --- a/plugins/modules/cloud/rackspace/rax_mon_notification.py +++ b/plugins/modules/cloud/rackspace/rax_mon_notification.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py index d5294cd509..25e506829f 100644 --- a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py +++ b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_network.py b/plugins/modules/cloud/rackspace/rax_network.py index 27a793b5a1..146c08c8e1 100644 --- a/plugins/modules/cloud/rackspace/rax_network.py +++ b/plugins/modules/cloud/rackspace/rax_network.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_queue.py b/plugins/modules/cloud/rackspace/rax_queue.py index dca006da77..46c942c70d 100644 --- a/plugins/modules/cloud/rackspace/rax_queue.py +++ b/plugins/modules/cloud/rackspace/rax_queue.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # 
Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_scaling_group.py b/plugins/modules/cloud/rackspace/rax_scaling_group.py index 2f8fa0a2cc..4080e4c6a4 100644 --- a/plugins/modules/cloud/rackspace/rax_scaling_group.py +++ b/plugins/modules/cloud/rackspace/rax_scaling_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_scaling_policy.py b/plugins/modules/cloud/rackspace/rax_scaling_policy.py index 384825f0ee..be46bd62a6 100644 --- a/plugins/modules/cloud/rackspace/rax_scaling_policy.py +++ b/plugins/modules/cloud/rackspace/rax_scaling_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/scaleway/scaleway_compute.py b/plugins/modules/cloud/scaleway/scaleway_compute.py index 421157a425..c5d5af9177 100644 --- a/plugins/modules/cloud/scaleway/scaleway_compute.py +++ b/plugins/modules/cloud/scaleway/scaleway_compute.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway Compute management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_database_backup.py b/plugins/modules/cloud/scaleway/scaleway_database_backup.py index 578032458d..35f35f820a 100644 --- a/plugins/modules/cloud/scaleway/scaleway_database_backup.py +++ b/plugins/modules/cloud/scaleway/scaleway_database_backup.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway database backups management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_ip.py b/plugins/modules/cloud/scaleway/scaleway_ip.py index 26da122e31..135da120cf 100644 --- 
a/plugins/modules/cloud/scaleway/scaleway_ip.py +++ b/plugins/modules/cloud/scaleway/scaleway_ip.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway IP management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_lb.py b/plugins/modules/cloud/scaleway/scaleway_lb.py index f19c0a3c43..9761500ab9 100644 --- a/plugins/modules/cloud/scaleway/scaleway_lb.py +++ b/plugins/modules/cloud/scaleway/scaleway_lb.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway Load-balancer management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group.py b/plugins/modules/cloud/scaleway/scaleway_security_group.py index 9303e06e00..f9faee6104 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group.py +++ b/plugins/modules/cloud/scaleway/scaleway_security_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway Security Group management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py index 118883328a..9f95921202 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py +++ b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway Security Group Rule management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_sshkey.py b/plugins/modules/cloud/scaleway/scaleway_sshkey.py index 08555b2316..bc15cefb20 100644 --- a/plugins/modules/cloud/scaleway/scaleway_sshkey.py +++ b/plugins/modules/cloud/scaleway/scaleway_sshkey.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway SSH keys management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_user_data.py b/plugins/modules/cloud/scaleway/scaleway_user_data.py index 4a38e76d72..d51d3e174d 100644 --- a/plugins/modules/cloud/scaleway/scaleway_user_data.py +++ 
b/plugins/modules/cloud/scaleway/scaleway_user_data.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway user data management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_volume.py b/plugins/modules/cloud/scaleway/scaleway_volume.py index e879d3c95c..a49e23c17d 100644 --- a/plugins/modules/cloud/scaleway/scaleway_volume.py +++ b/plugins/modules/cloud/scaleway/scaleway_volume.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway volumes management module # diff --git a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py index 5ed8028e37..da8f010229 100644 --- a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py +++ b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) diff --git a/plugins/modules/cloud/univention/udm_dns_record.py b/plugins/modules/cloud/univention/udm_dns_record.py index 90654bee3c..0c56970dd3 100644 --- a/plugins/modules/cloud/univention/udm_dns_record.py +++ b/plugins/modules/cloud/univention/udm_dns_record.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi diff --git a/plugins/modules/cloud/univention/udm_dns_zone.py b/plugins/modules/cloud/univention/udm_dns_zone.py index 3e0cae523d..f1cea87e4f 100644 --- a/plugins/modules/cloud/univention/udm_dns_zone.py +++ b/plugins/modules/cloud/univention/udm_dns_zone.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi diff --git a/plugins/modules/cloud/univention/udm_group.py b/plugins/modules/cloud/univention/udm_group.py 
index d2cf2aea80..d20187c628 100644 --- a/plugins/modules/cloud/univention/udm_group.py +++ b/plugins/modules/cloud/univention/udm_group.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi diff --git a/plugins/modules/cloud/univention/udm_share.py b/plugins/modules/cloud/univention/udm_share.py index 3e8fb20792..fb86d83666 100644 --- a/plugins/modules/cloud/univention/udm_share.py +++ b/plugins/modules/cloud/univention/udm_share.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi diff --git a/plugins/modules/cloud/univention/udm_user.py b/plugins/modules/cloud/univention/udm_user.py index efbd95f426..b0d6138fda 100644 --- a/plugins/modules/cloud/univention/udm_user.py +++ b/plugins/modules/cloud/univention/udm_user.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi diff --git a/plugins/modules/clustering/consul/consul.py b/plugins/modules/clustering/consul/consul.py index cd695c4754..f85e1cc729 100644 --- a/plugins/modules/clustering/consul/consul.py +++ b/plugins/modules/clustering/consul/consul.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2015, Steve Gargan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/clustering/consul/consul_acl.py b/plugins/modules/clustering/consul/consul_acl.py index 5a37ca0eb9..1e01e58af5 100644 --- a/plugins/modules/clustering/consul/consul_acl.py +++ b/plugins/modules/clustering/consul/consul_acl.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2015, Steve Gargan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/clustering/consul/consul_kv.py 
b/plugins/modules/clustering/consul/consul_kv.py index d392228146..f7b33b856e 100644 --- a/plugins/modules/clustering/consul/consul_kv.py +++ b/plugins/modules/clustering/consul/consul_kv.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2015, Steve Gargan # (c) 2018 Genome Research Ltd. diff --git a/plugins/modules/clustering/etcd3.py b/plugins/modules/clustering/etcd3.py index 28c5915693..6a09513364 100644 --- a/plugins/modules/clustering/etcd3.py +++ b/plugins/modules/clustering/etcd3.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2018, Jean-Philippe Evrard # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/clustering/znode.py b/plugins/modules/clustering/znode.py index 8456a187ee..d55a502b15 100644 --- a/plugins/modules/clustering/znode.py +++ b/plugins/modules/clustering/znode.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright 2015 WP Engine, Inc. All rights reserved. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/database/aerospike/aerospike_migrations.py b/plugins/modules/database/aerospike/aerospike_migrations.py index 33f27cd381..27b979ad1f 100644 --- a/plugins/modules/database/aerospike/aerospike_migrations.py +++ b/plugins/modules/database/aerospike/aerospike_migrations.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- """short_description: Check or wait for migrations between nodes""" # Copyright: (c) 2018, Albert Autin diff --git a/plugins/modules/database/influxdb/influxdb_database.py b/plugins/modules/database/influxdb/influxdb_database.py index 7b798c3679..6601b30124 100644 --- a/plugins/modules/database/influxdb/influxdb_database.py +++ b/plugins/modules/database/influxdb/influxdb_database.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Kamil Szczygiel # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py b/plugins/modules/database/influxdb/influxdb_retention_policy.py index a145f9e32b..6cb45229cd 100644 --- a/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ b/plugins/modules/database/influxdb/influxdb_retention_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Kamil Szczygiel # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py index 8746445335..76524d8613 100644 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ b/plugins/modules/database/influxdb/influxdb_user.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Vitaliy Zhhuta # insipred by Kamil Szczygiel influxdb_database module diff --git 
a/plugins/modules/files/sapcar_extract.py b/plugins/modules/files/sapcar_extract.py index b6a76a1629..8463703c1e 100644 --- a/plugins/modules/files/sapcar_extract.py +++ b/plugins/modules/files/sapcar_extract.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2021, Rainer Leber # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/files/xattr.py b/plugins/modules/files/xattr.py index 8578ed4c4e..f862dd720b 100644 --- a/plugins/modules/files/xattr.py +++ b/plugins/modules/files/xattr.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/monitoring/sensu/sensu_client.py b/plugins/modules/monitoring/sensu/sensu_client.py index ee67a6e75b..886c398e09 100644 --- a/plugins/modules/monitoring/sensu/sensu_client.py +++ b/plugins/modules/monitoring/sensu/sensu_client.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2017, Red Hat Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/monitoring/sensu/sensu_handler.py b/plugins/modules/monitoring/sensu/sensu_handler.py index 0a56831ae0..6511479899 100644 --- a/plugins/modules/monitoring/sensu/sensu_handler.py +++ b/plugins/modules/monitoring/sensu/sensu_handler.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2017, Red Hat Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/dnsimple.py b/plugins/modules/net_tools/dnsimple.py index a575d944cb..188f9fd64a 100644 --- a/plugins/modules/net_tools/dnsimple.py +++ b/plugins/modules/net_tools/dnsimple.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: Ansible Project # diff --git a/plugins/modules/net_tools/ip_netns.py b/plugins/modules/net_tools/ip_netns.py index 9854709e82..700f0a17bd 100644 --- a/plugins/modules/net_tools/ip_netns.py +++ b/plugins/modules/net_tools/ip_netns.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2017, Arie Bregman # # This file is a module for Ansible that interacts with Network Manager diff --git a/plugins/modules/net_tools/ipinfoio_facts.py b/plugins/modules/net_tools/ipinfoio_facts.py index f4186cdc65..ee1d49f3ac 100644 --- a/plugins/modules/net_tools/ipinfoio_facts.py +++ b/plugins/modules/net_tools/ipinfoio_facts.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Aleksei Kostiuk # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/ipwcli_dns.py b/plugins/modules/net_tools/ipwcli_dns.py index 284f3ad810..8a6122edff 100644 --- a/plugins/modules/net_tools/ipwcli_dns.py +++ b/plugins/modules/net_tools/ipwcli_dns.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2020, Christian Wollinger # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/lldp.py b/plugins/modules/net_tools/lldp.py index ae86db4088..1b8fa9eb06 100644 --- a/plugins/modules/net_tools/lldp.py +++ b/plugins/modules/net_tools/lldp.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_a_record.py b/plugins/modules/net_tools/nios/nios_a_record.py index b4adfe0103..cc2e70b920 100644 --- a/plugins/modules/net_tools/nios/nios_a_record.py +++ b/plugins/modules/net_tools/nios/nios_a_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_aaaa_record.py b/plugins/modules/net_tools/nios/nios_aaaa_record.py index 9b22f86948..b6e5ff5fd6 100644 --- a/plugins/modules/net_tools/nios/nios_aaaa_record.py +++ b/plugins/modules/net_tools/nios/nios_aaaa_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_cname_record.py b/plugins/modules/net_tools/nios/nios_cname_record.py index 099cb02572..c752713663 100644 --- a/plugins/modules/net_tools/nios/nios_cname_record.py +++ b/plugins/modules/net_tools/nios/nios_cname_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_dns_view.py b/plugins/modules/net_tools/nios/nios_dns_view.py index 46c56fc7bb..a3bd9db938 100644 --- a/plugins/modules/net_tools/nios/nios_dns_view.py +++ b/plugins/modules/net_tools/nios/nios_dns_view.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_fixed_address.py b/plugins/modules/net_tools/nios/nios_fixed_address.py index bc2969bbe5..26e3ed7d68 100644 --- a/plugins/modules/net_tools/nios/nios_fixed_address.py +++ b/plugins/modules/net_tools/nios/nios_fixed_address.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_host_record.py b/plugins/modules/net_tools/nios/nios_host_record.py index 6fed663657..825ff31765 100644 --- a/plugins/modules/net_tools/nios/nios_host_record.py +++ b/plugins/modules/net_tools/nios/nios_host_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_member.py b/plugins/modules/net_tools/nios/nios_member.py index 186933864a..ff9bd5dfa5 100644 --- a/plugins/modules/net_tools/nios/nios_member.py +++ b/plugins/modules/net_tools/nios/nios_member.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_mx_record.py b/plugins/modules/net_tools/nios/nios_mx_record.py index 6e54ff2bda..a34a1fdc78 100644 --- a/plugins/modules/net_tools/nios/nios_mx_record.py +++ b/plugins/modules/net_tools/nios/nios_mx_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_naptr_record.py b/plugins/modules/net_tools/nios/nios_naptr_record.py index f943d3d6d9..e2e5e164d7 100644 --- a/plugins/modules/net_tools/nios/nios_naptr_record.py +++ b/plugins/modules/net_tools/nios/nios_naptr_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_network.py b/plugins/modules/net_tools/nios/nios_network.py index 6a7decb894..458e45dd8d 100644 --- a/plugins/modules/net_tools/nios/nios_network.py +++ b/plugins/modules/net_tools/nios/nios_network.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_network_view.py b/plugins/modules/net_tools/nios/nios_network_view.py index a27f8519a0..f4a18bcd26 100644 --- a/plugins/modules/net_tools/nios/nios_network_view.py +++ b/plugins/modules/net_tools/nios/nios_network_view.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_ptr_record.py b/plugins/modules/net_tools/nios/nios_ptr_record.py index 22550f129a..a0c3e63270 100644 --- a/plugins/modules/net_tools/nios/nios_ptr_record.py +++ b/plugins/modules/net_tools/nios/nios_ptr_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_srv_record.py b/plugins/modules/net_tools/nios/nios_srv_record.py index 574a5fcf8b..9c0247d49d 100644 --- a/plugins/modules/net_tools/nios/nios_srv_record.py +++ b/plugins/modules/net_tools/nios/nios_srv_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_txt_record.py b/plugins/modules/net_tools/nios/nios_txt_record.py index b3267af41f..6cb1d64d35 100644 --- a/plugins/modules/net_tools/nios/nios_txt_record.py +++ b/plugins/modules/net_tools/nios/nios_txt_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_zone.py b/plugins/modules/net_tools/nios/nios_zone.py index f97098351b..463c68c8ac 100644 --- a/plugins/modules/net_tools/nios/nios_zone.py +++ b/plugins/modules/net_tools/nios/nios_zone.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nsupdate.py b/plugins/modules/net_tools/nsupdate.py index 520d12e803..fc0d5e1c46 100644 --- a/plugins/modules/net_tools/nsupdate.py +++ b/plugins/modules/net_tools/nsupdate.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2016, Marcin Skarbek # (c) 2016, Andreas Olsson diff --git a/plugins/modules/notification/syslogger.py b/plugins/modules/notification/syslogger.py index 226126f5a9..7627f35985 100644 --- a/plugins/modules/notification/syslogger.py +++ b/plugins/modules/notification/syslogger.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Tim Rightnour # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/packaging/language/pip_package_info.py b/plugins/modules/packaging/language/pip_package_info.py index cdcc9f51cc..25825cefb1 100644 --- a/plugins/modules/packaging/language/pip_package_info.py +++ b/plugins/modules/packaging/language/pip_package_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/packaging/os/redhat_subscription.py b/plugins/modules/packaging/os/redhat_subscription.py index f3e5400900..7bb540b3f1 100644 --- a/plugins/modules/packaging/os/redhat_subscription.py +++ b/plugins/modules/packaging/os/redhat_subscription.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # James Laska (jlaska@redhat.com) # diff --git a/plugins/modules/packaging/os/rhn_channel.py b/plugins/modules/packaging/os/rhn_channel.py index f1954037fa..e3a1ae3098 100644 --- a/plugins/modules/packaging/os/rhn_channel.py +++ b/plugins/modules/packaging/os/rhn_channel.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) Vincent Van de 
Kussen # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/packaging/os/rhsm_release.py b/plugins/modules/packaging/os/rhsm_release.py index a4d8f71197..4b76cee274 100644 --- a/plugins/modules/packaging/os/rhsm_release.py +++ b/plugins/modules/packaging/os/rhsm_release.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2018, Sean Myers # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/packaging/os/rhsm_repository.py b/plugins/modules/packaging/os/rhsm_repository.py index 7317be6633..b103ea621a 100644 --- a/plugins/modules/packaging/os/rhsm_repository.py +++ b/plugins/modules/packaging/os/rhsm_repository.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/packaging/os/rpm_ostree_pkg.py b/plugins/modules/packaging/os/rpm_ostree_pkg.py index 7c430732e7..38e2486ddc 100644 --- a/plugins/modules/packaging/os/rpm_ostree_pkg.py +++ b/plugins/modules/packaging/os/rpm_ostree_pkg.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Dusty Mabe # Copyright: (c) 2018, Ansible Project # Copyright: (c) 2021, Abhijeet Kasurde diff --git a/plugins/modules/packaging/os/swupd.py b/plugins/modules/packaging/os/swupd.py index 4dac01be64..6ededcad02 100644 --- a/plugins/modules/packaging/os/swupd.py +++ b/plugins/modules/packaging/os/swupd.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2017, Alberto Murillo # diff --git a/plugins/modules/packaging/os/zypper_repository.py b/plugins/modules/packaging/os/zypper_repository.py index 608675528d..38aeab618e 100644 --- a/plugins/modules/packaging/os/zypper_repository.py +++ b/plugins/modules/packaging/os/zypper_repository.py @@ -1,5 +1,5 @@ 
#!/usr/bin/python -# encoding: utf-8 +# -*- coding: utf-8 -*- # (c) 2013, Matthias Vogelgesang # (c) 2014, Justin Lecher diff --git a/plugins/modules/remote_management/lxca/lxca_cmms.py b/plugins/modules/remote_management/lxca/lxca_cmms.py index 776ee49fd4..b3bb6c2a8c 100644 --- a/plugins/modules/remote_management/lxca/lxca_cmms.py +++ b/plugins/modules/remote_management/lxca/lxca_cmms.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or # https://www.gnu.org/licenses/gpl-3.0.txt) # diff --git a/plugins/modules/remote_management/lxca/lxca_nodes.py b/plugins/modules/remote_management/lxca/lxca_nodes.py index f788229d3d..62b8e334d8 100644 --- a/plugins/modules/remote_management/lxca/lxca_nodes.py +++ b/plugins/modules/remote_management/lxca/lxca_nodes.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or # https://www.gnu.org/licenses/gpl-3.0.txt) # diff --git a/plugins/modules/remote_management/manageiq/manageiq_group.py b/plugins/modules/remote_management/manageiq/manageiq_group.py index 2050eb63c8..2452e101d1 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_group.py +++ b/plugins/modules/remote_management/manageiq/manageiq_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn ) # diff --git a/plugins/modules/remote_management/manageiq/manageiq_tenant.py b/plugins/modules/remote_management/manageiq/manageiq_tenant.py index 3ec174cfa0..58c2e1ed71 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_tenant.py +++ b/plugins/modules/remote_management/manageiq/manageiq_tenant.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn ) # diff --git a/plugins/modules/remote_management/manageiq/manageiq_user.py b/plugins/modules/remote_management/manageiq/manageiq_user.py index 
8905dde2e6..f3dc8103f7 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_user.py +++ b/plugins/modules/remote_management/manageiq/manageiq_user.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2017, Daniel Korn # diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py index 04d4fc0c7e..3e5b96376e 100644 --- a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py +++ b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py index a9bbb8e799..249fea4874 100644 --- a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py +++ b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network.py index c09f09c8f6..99b5d0fed9 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network.py +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git 
a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py index 63a9e1efae..1f25364d3a 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network.py b/plugins/modules/remote_management/oneview/oneview_fc_network.py index 009a54a89b..59984ee8b6 100644 --- a/plugins/modules/remote_management/oneview/oneview_fc_network.py +++ b/plugins/modules/remote_management/oneview/oneview_fc_network.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py index 86430402fe..4707f39f2d 100644 --- a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network.py index 30e05677f8..ef24f8fc8e 100644 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network.py +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network.py @@ 
-1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py index b0ede13820..6cb3501ddf 100644 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py index 78735dc5e7..e833f9e092 100644 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py index e8670a33a8..7a0f0dc987 100644 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_network_set.py b/plugins/modules/remote_management/oneview/oneview_network_set.py index 14efdabe70..3a2632b765 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set.py +++ b/plugins/modules/remote_management/oneview/oneview_network_set.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/plugins/modules/remote_management/oneview/oneview_network_set_info.py index 5cb7463b4c..595d003c56 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set_info.py +++ b/plugins/modules/remote_management/oneview/oneview_network_set_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager.py b/plugins/modules/remote_management/oneview/oneview_san_manager.py index 858072826b..20870a31d5 100644 --- a/plugins/modules/remote_management/oneview/oneview_san_manager.py +++ b/plugins/modules/remote_management/oneview/oneview_san_manager.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py index c80ef474cc..46ed001827 100644 --- a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py +++ 
b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/source_control/github/github_issue.py b/plugins/modules/source_control/github/github_issue.py index 88fe8f7b51..4add29f341 100644 --- a/plugins/modules/source_control/github/github_issue.py +++ b/plugins/modules/source_control/github/github_issue.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017-18, Abhijeet Kasurde # diff --git a/plugins/modules/source_control/github/github_key.py b/plugins/modules/source_control/github/github_key.py index 616636edea..2afbe29aa1 100644 --- a/plugins/modules/source_control/github/github_key.py +++ b/plugins/modules/source_control/github/github_key.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/source_control/github/github_webhook.py b/plugins/modules/source_control/github/github_webhook.py index b1f0cb7a2b..8703863fa9 100644 --- a/plugins/modules/source_control/github/github_webhook.py +++ b/plugins/modules/source_control/github/github_webhook.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/source_control/github/github_webhook_info.py b/plugins/modules/source_control/github/github_webhook_info.py index 3936cbe37b..98a7516e75 100644 --- a/plugins/modules/source_control/github/github_webhook_info.py +++ b/plugins/modules/source_control/github/github_webhook_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) 2018, 
Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/storage/emc/emc_vnx_sg_member.py b/plugins/modules/storage/emc/emc_vnx_sg_member.py index 2698f5327a..20977687fc 100644 --- a/plugins/modules/storage/emc/emc_vnx_sg_member.py +++ b/plugins/modules/storage/emc/emc_vnx_sg_member.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2018, Luca 'remix_tj' Lorenzetto # diff --git a/plugins/modules/storage/hpe3par/ss_3par_cpg.py b/plugins/modules/storage/hpe3par/ss_3par_cpg.py index 04604c0966..be4a6a02a2 100644 --- a/plugins/modules/storage/hpe3par/ss_3par_cpg.py +++ b/plugins/modules/storage/hpe3par/ss_3par_cpg.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/kernel_blacklist.py b/plugins/modules/system/kernel_blacklist.py index ff6f9c227e..d8cb4a9e9d 100644 --- a/plugins/modules/system/kernel_blacklist.py +++ b/plugins/modules/system/kernel_blacklist.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# encoding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2013, Matthias Vogelgesang # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/lbu.py b/plugins/modules/system/lbu.py index 6f850791b1..fcc3a0d940 100644 --- a/plugins/modules/system/lbu.py +++ b/plugins/modules/system/lbu.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2019, Kaarle Ritvanen # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/pids.py b/plugins/modules/system/pids.py index e7312465f1..5c7b82a794 100644 --- a/plugins/modules/system/pids.py +++ b/plugins/modules/system/pids.py @@ -1,4 +1,5 @@ #!/usr/bin/python 
+# -*- coding: utf-8 -*- # Copyright: (c) 2019, Saranya Sridharan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) diff --git a/plugins/modules/system/python_requirements_info.py b/plugins/modules/system/python_requirements_info.py index 08a9ddd64e..081826f4e6 100644 --- a/plugins/modules/system/python_requirements_info.py +++ b/plugins/modules/system/python_requirements_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/selogin.py b/plugins/modules/system/selogin.py index 53b077f954..46daf1a76a 100644 --- a/plugins/modules/system/selogin.py +++ b/plugins/modules/system/selogin.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2017, Petr Lautrbach # Based on seport.py module (c) 2014, Dan Keder diff --git a/plugins/modules/system/syspatch.py b/plugins/modules/system/syspatch.py index 6fcfaea0f5..42cb17b8a3 100644 --- a/plugins/modules/system/syspatch.py +++ b/plugins/modules/system/syspatch.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2019-2020, Andrew Klaus # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/sysupgrade.py b/plugins/modules/system/sysupgrade.py index a1956129df..333d7765d2 100644 --- a/plugins/modules/system/sysupgrade.py +++ b/plugins/modules/system/sysupgrade.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2020, Andrew Klaus # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/vdo.py b/plugins/modules/system/vdo.py index a27745510a..0b4fca306d 100644 --- a/plugins/modules/system/vdo.py +++ b/plugins/modules/system/vdo.py @@ 
-1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/jenkins_build.py b/plugins/modules/web_infrastructure/jenkins_build.py index 43dc667ace..0141185342 100644 --- a/plugins/modules/web_infrastructure/jenkins_build.py +++ b/plugins/modules/web_infrastructure/jenkins_build.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/jenkins_job.py b/plugins/modules/web_infrastructure/jenkins_job.py index 9993a996e0..88a8766133 100644 --- a/plugins/modules/web_infrastructure/jenkins_job.py +++ b/plugins/modules/web_infrastructure/jenkins_job.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/jenkins_job_info.py b/plugins/modules/web_infrastructure/jenkins_job_info.py index fc079857a6..503fbbf159 100644 --- a/plugins/modules/web_infrastructure/jenkins_job_info.py +++ b/plugins/modules/web_infrastructure/jenkins_job_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) Ansible Project # diff --git a/plugins/modules/web_infrastructure/jenkins_plugin.py b/plugins/modules/web_infrastructure/jenkins_plugin.py index a280b50aa6..6adb348156 100644 --- a/plugins/modules/web_infrastructure/jenkins_plugin.py +++ b/plugins/modules/web_infrastructure/jenkins_plugin.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# encoding: utf-8 +# -*- coding: utf-8 -*- # (c) 2016, Jiri Tyr # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git 
a/plugins/modules/web_infrastructure/jenkins_script.py b/plugins/modules/web_infrastructure/jenkins_script.py index 6d3b3d2253..3ad51a9703 100644 --- a/plugins/modules/web_infrastructure/jenkins_script.py +++ b/plugins/modules/web_infrastructure/jenkins_script.py @@ -1,6 +1,5 @@ #!/usr/bin/python - -# encoding: utf-8 +# -*- coding: utf-8 -*- # (c) 2016, James Hogarth # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py index 70a0a78fd8..e2fa6f5384 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py index d5660ab73c..ca291ba88b 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py index 81dffe223b..f05a1e6809 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Stephan Schwarz # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py index 9aa16d4aca..82eb42f620 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Stephan Schwarz # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py b/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py index 76d463ccba..4554384d2d 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py index a8b3cc1f2b..a5c2d1fd36 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Juergen Wiebe # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py index 700799ab59..fb449939fa 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py +++ 
b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Juergen Wiebe # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py index 0dd460509a..e519d3cf33 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Stephan Schwarz # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py index 6d606abf89..780bd68c92 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Sebastian Schenzel # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py index a738bfab6b..9d2bc7c6db 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py 
b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py index 62a832d7c6..b68bde633a 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py index 99d56030be..4c0abb0608 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py index 99174a89b1..eda9f6ee14 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) From 87ba15fa4589fab466231f68d5e0122ca8d312d3 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 8 Aug 2021 10:49:08 +0200 Subject: [PATCH 0256/2828] Inform contributors on changelog fragments in CONTRIBUTING.md (#3167) * Inform contributors on changelog fragments. * Mention docs-only PRs as well. 
--- CONTRIBUTING.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ba30ed1e02..a40dbd59eb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -26,6 +26,7 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which * All commits of a pull request branch will be squashed into one commit at last. That does not mean you must have only one commit on your pull request, though! * Please try not to force-push if it is not needed, so reviewers and other users looking at your pull request later can see the pull request commit history. * Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the respository checkout. +* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) ) You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst). From 85bcef3f5ac418d3244f144f3172e174ab3ba609 Mon Sep 17 00:00:00 2001 From: Sebastian Date: Sun, 8 Aug 2021 10:50:09 +0200 Subject: [PATCH 0257/2828] contributing: make expected behavior clearer (#3168) * contributing: make expected behavior clearer reformulate the preference of not having squashed commits clearer, shorter and more precise. 
https://github.com/ansible-collections/community.general/pull/3164#discussion_r684644504 * Update CONTRIBUTING.md Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- CONTRIBUTING.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a40dbd59eb..9df277591c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,8 +23,7 @@ Note that reviewing does not only mean code review, but also offering comments o Also, consider taking up a valuable, reviewed, but abandoned pull request which you could politely ask the original authors to complete yourself. * Try committing your changes with an informative but short commit message. -* All commits of a pull request branch will be squashed into one commit at last. That does not mean you must have only one commit on your pull request, though! -* Please try not to force-push if it is not needed, so reviewers and other users looking at your pull request later can see the pull request commit history. +* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge. * Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the respository checkout. * Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. 
Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) ) From 2831bc45f5f579db64e6252f9c8c88ac48b1cb91 Mon Sep 17 00:00:00 2001 From: quidame Date: Sun, 8 Aug 2021 18:34:34 +0200 Subject: [PATCH 0258/2828] ini_file: fix empty-value vs. no-value inconsistency (#3074) * fix empty-value vs. no-value inconsistency * rename changelog fragment * tests: omit value where there should be no value * add integration tests --- ...ni_file-3031-empty-value-inconsistency.yml | 4 ++++ plugins/modules/files/ini_file.py | 8 +++---- .../targets/ini_file/tasks/main.yml | 23 ++++++++++++++----- 3 files changed, 25 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/3074-ini_file-3031-empty-value-inconsistency.yml diff --git a/changelogs/fragments/3074-ini_file-3031-empty-value-inconsistency.yml b/changelogs/fragments/3074-ini_file-3031-empty-value-inconsistency.yml new file mode 100644 index 0000000000..7bfe958a12 --- /dev/null +++ b/changelogs/fragments/3074-ini_file-3031-empty-value-inconsistency.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - ini_file - fix inconsistency between empty value and no value + (https://github.com/ansible-collections/community.general/issues/3031). 
diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py index 7d6a988e85..a9c2e290b0 100644 --- a/plugins/modules/files/ini_file.py +++ b/plugins/modules/files/ini_file.py @@ -205,11 +205,11 @@ def do_ini(module, filename, section=None, option=None, value=None, for i in range(index, 0, -1): # search backwards for previous non-blank or non-comment line if not non_blank_non_comment_pattern.match(ini_lines[i - 1]): - if option and value: + if option and value is not None: ini_lines.insert(i, assignment_format % (option, value)) msg = 'option added' changed = True - elif option and not value and allow_no_value: + elif option and value is None and allow_no_value: ini_lines.insert(i, '%s\n' % option) msg = 'option added' changed = True @@ -225,7 +225,7 @@ def do_ini(module, filename, section=None, option=None, value=None, if state == 'present': # change the existing option line if match_opt(option, line): - if not value and allow_no_value: + if value is None and allow_no_value: newline = u'%s\n' % option else: newline = assignment_format % (option, value) @@ -324,7 +324,7 @@ def main(): create = module.params['create'] if state == 'present' and not allow_no_value and value is None: - module.fail_json("Parameter 'value' must not be empty if state=present and allow_no_value=False") + module.fail_json("Parameter 'value' must be defined if state=present and allow_no_value=False") (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value) diff --git a/tests/integration/targets/ini_file/tasks/main.yml b/tests/integration/targets/ini_file/tasks/main.yml index 210dafe2ca..96c6771b9e 100644 --- a/tests/integration/targets/ini_file/tasks/main.yml +++ b/tests/integration/targets/ini_file/tasks/main.yml @@ -215,10 +215,10 @@ path: "{{ output_file }}" section: mysqld option: "{{ item.o }}" - value: "{{ item.v }}" + value: "{{ item.v | d(omit) }}" allow_no_value: yes with_items: - 
- { o: "skip-name-resolve", v: null } + - { o: "skip-name-resolve" } - { o: "max_connections", v: "500" } - name: read content from output file @@ -459,12 +459,23 @@ option: like value: tea state: absent - - name: Test with empty string + + # See https://github.com/ansible-collections/community.general/issues/3031 + - name: Tests with empty strings ini_file: path: "{{ output_file }}" - section: extensions - option: evolve + section: "{{ item.section | d('extensions') }}" + option: "{{ item.option }}" value: "" + allow_no_value: "{{ item.no_value | d(omit) }}" + loop: + - option: evolve + - option: regress + - section: foobar + option: foo + no_value: true + - option: improve + no_value: true - name: read content from output file slurp: @@ -473,7 +484,7 @@ - name: set expected content and get current ini file content set_fact: - expected15: "\n[extensions]\nevolve = \n" + expected15: "\n[extensions]\nevolve = \nregress = \nimprove = \n[foobar]\nfoo = \n" content15: "{{ output_content.content | b64decode }}" - debug: var=content15 - name: Verify content of ini file is as expected From 7f96b7df60bad4bd85c787401d521717c842da23 Mon Sep 17 00:00:00 2001 From: David Hummel <6109326+hummeltech@users.noreply.github.com> Date: Sun, 8 Aug 2021 09:35:52 -0700 Subject: [PATCH 0259/2828] nmcli: writing secrets to command line is a security hole (#3160) * nmcli: use `stdin` for setting private `wifi_sec` options I.E.: * `802-11-wireless-security.leap-password` * `802-11-wireless-security.psk` * `802-11-wireless-security.wep-key0` * `802-11-wireless-security.wep-key1` * `802-11-wireless-security.wep-key2` * `802-11-wireless-security.wep-key3` * Changelog fragement formatting. 
* Update changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml Co-authored-by: Felix Fontein * Make `wifi_sec_secret_options()` into a constant * Minor cleanup `'set ' + key + ' ' + value` => `'set %s %s' % (key, value)` * Change `casing` * Change `WIFI_SEC_SECRET_OPTIONS` from `list` to `tuple` * Update `edit_connection()` to not reset `edit_commands` It will just re`set` them if `edit_connection()` is called more than once. * Do not call `edit_connection()` if `connection_update(*)` fails * Fixed `pep8` issue `E713` in tests `test for membership should be 'not in'` * Simplify `create_connection()`/`modify_connection()` logic * `WIFI_SEC_SECRET_OPTIONS`=>`SECRET_OPTIONS`, options are prefixed * Moved `if key in self.SECRET_OPTIONS` into `if value is not None` check We don't need to do anything is the value is None Co-authored-by: Felix Fontein --- ...wifi-secrets-via-stdin-to-nmcli-module.yml | 4 + plugins/modules/net_tools/nmcli.py | 26 +++- .../plugins/modules/net_tools/test_nmcli.py | 139 +++++++++++++++++- 3 files changed, 166 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml diff --git a/changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml b/changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml new file mode 100644 index 0000000000..47e1837a0b --- /dev/null +++ b/changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml @@ -0,0 +1,4 @@ +security_fixes: + - nmcli - do not pass WiFi secrets on the ``nmcli`` command line. Use ``nmcli con edit`` + instead and pass secrets as ``stdin`` + (https://github.com/ansible-collections/community.general/issues/3145). 
diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 92d1e65ef7..06b868dace 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -709,6 +709,15 @@ class Nmcli(object): platform = 'Generic' distribution = None + SECRET_OPTIONS = ( + '802-11-wireless-security.leap-password', + '802-11-wireless-security.psk', + '802-11-wireless-security.wep-key0', + '802-11-wireless-security.wep-key1', + '802-11-wireless-security.wep-key2', + '802-11-wireless-security.wep-key3' + ) + def __init__(self, module): self.module = module self.state = module.params['state'] @@ -792,6 +801,8 @@ class Nmcli(object): else: self.ipv6_method = None + self.edit_commands = [] + def execute_command(self, cmd, use_unsafe_shell=False, data=None): if isinstance(cmd, list): cmd = [to_text(item) for item in cmd] @@ -1079,12 +1090,17 @@ class Nmcli(object): # Constructing the command. for key, value in options.items(): if value is not None: + if key in self.SECRET_OPTIONS: + self.edit_commands += ['set %s %s' % (key, value)] + continue cmd.extend([key, value]) return self.execute_command(cmd) def create_connection(self): status = self.connection_update('create') + if status[0] == 0 and self.edit_commands: + status = self.edit_connection() if self.create_connection_up: status = self.up_connection() return status @@ -1105,7 +1121,15 @@ class Nmcli(object): return self.execute_command(cmd) def modify_connection(self): - return self.connection_update('modify') + status = self.connection_update('modify') + if status[0] == 0 and self.edit_commands: + status = self.edit_connection() + return status + + def edit_connection(self): + data = "\n".join(self.edit_commands + ['save', 'quit']) + cmd = [self.nmcli_bin, 'con', 'edit', self.conn_name] + return self.execute_command(cmd, data=data) def show_connection(self): cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] diff --git 
a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index c1b3e93ed4..9f131c3873 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -697,6 +697,23 @@ def mocked_ethernet_connection_dhcp_to_static(mocker): )) +@pytest.fixture +def mocked_secure_wireless_create_failure(mocker): + mocker_set(mocker, + execute_return=(1, "", "")) + + +@pytest.fixture +def mocked_secure_wireless_modify_failure(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, "", ""), + (1, "", ""), + )) + + @pytest.fixture def mocked_dummy_connection_static_unchanged(mocker): mocker_set(mocker, @@ -1652,6 +1669,52 @@ def test_create_secure_wireless(mocked_generic_connection_create, capfd): Test : Create secure wireless connection """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 2 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[0] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'add' + assert add_args[0][3] == 'type' + assert add_args[0][4] == 'wifi' + assert add_args[0][5] == 'con-name' + assert add_args[0][6] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in ['connection.interface-name', 'wireless_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + '802-11-wireless.ssid', 'Brittany', + '802-11-wireless-security.key-mgmt', 'wpa-psk']: + assert param in add_args_text + + edit_args, edit_kw = arg_list[1] + assert edit_args[0][0] == '/usr/bin/nmcli' + assert edit_args[0][1] == 'con' + assert edit_args[0][2] == 'edit' + assert edit_args[0][3] == 'non_existent_nw_device' + + edit_kw_data = edit_kw['data'].split() + for param in ['802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD', + 'save', + 'quit']: + assert param in 
edit_kw_data + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +def test_create_secure_wireless_failure(mocked_secure_wireless_create_failure, capfd): + """ + Test : Create secure wireless connection w/failure + """ + with pytest.raises(SystemExit): nmcli.main() @@ -1671,16 +1734,88 @@ def test_create_secure_wireless(mocked_generic_connection_create, capfd): for param in ['connection.interface-name', 'wireless_non_existant', 'ipv4.addresses', '10.10.10.10/24', '802-11-wireless.ssid', 'Brittany', - '802-11-wireless-security.key-mgmt', 'wpa-psk', - '802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD']: + '802-11-wireless-security.key-mgmt', 'wpa-psk']: assert param in add_args_text + out, err = capfd.readouterr() + results = json.loads(out) + assert results.get('failed') + assert 'changed' not in results + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +def test_modify_secure_wireless(mocked_generic_connection_modify, capfd): + """ + Test : Modify secure wireless connection + """ + + with pytest.raises(SystemExit): + nmcli.main() + assert nmcli.Nmcli.execute_command.call_count == 2 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[0] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'modify' + assert add_args[0][3] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in ['connection.interface-name', 'wireless_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + '802-11-wireless.ssid', 'Brittany', + '802-11-wireless-security.key-mgmt', 'wpa-psk']: + assert param in add_args_text + + edit_args, edit_kw = arg_list[1] + assert edit_args[0][0] == '/usr/bin/nmcli' + assert edit_args[0][1] 
== 'con' + assert edit_args[0][2] == 'edit' + assert edit_args[0][3] == 'non_existent_nw_device' + + edit_kw_data = edit_kw['data'].split() + for param in ['802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD', + 'save', + 'quit']: + assert param in edit_kw_data + out, err = capfd.readouterr() results = json.loads(out) assert not results.get('failed') assert results['changed'] +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +def test_modify_secure_wireless_failure(mocked_secure_wireless_modify_failure, capfd): + """ + Test : Modify secure wireless connection w/failure + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 2 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[1] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'modify' + assert add_args[0][3] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in ['connection.interface-name', 'wireless_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + '802-11-wireless.ssid', 'Brittany', + '802-11-wireless-security.key-mgmt', 'wpa-psk']: + assert param in add_args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert results.get('failed') + assert 'changed' not in results + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) def test_create_dummy_static(mocked_generic_connection_create, capfd): """ From 429359e977c40b24c421311388761fe958c60610 Mon Sep 17 00:00:00 2001 From: Roy Lenferink Date: Mon, 9 Aug 2021 16:32:57 +0200 Subject: [PATCH 0260/2828] Update the .gitignore with the latest version (#3177) This because it contains new changes, e.g. ignore development environments for Python projects. 
--- .gitignore | 81 ++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 70 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index c6fc14ad0b..c6c78b42e7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ -# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv -# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Created by https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Edit at https://www.toptal.com/developers/gitignore?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv ### dotenv ### .env @@ -88,7 +88,7 @@ flycheck_*.el .nfs* ### PyCharm+all ### -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 # User-specific stuff @@ -98,6 +98,9 @@ flycheck_*.el .idea/**/dictionaries .idea/**/shelf +# AWS User-specific +.idea/**/aws.xml + # Generated files .idea/**/contentModel.xml @@ -118,6 +121,9 @@ flycheck_*.el # When using Gradle or Maven with auto-import, you should exclude module files, # since they will be recreated, and may cause churn. Uncomment if using # auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml # .idea/modules.xml # .idea/*.iml # .idea/modules @@ -198,7 +204,6 @@ parts/ sdist/ var/ wheels/ -pip-wheel-metadata/ share/python-wheels/ *.egg-info/ .installed.cfg @@ -225,13 +230,25 @@ htmlcov/ nosetests.xml coverage.xml *.cover +*.py,cover .hypothesis/ .pytest_cache/ +cover/ # Translations *.mo *.pot +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + # Scrapy stuff: .scrapy @@ -239,9 +256,19 @@ coverage.xml docs/_build/ # PyBuilder +.pybuilder/ target/ +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + # pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: .python-version # pipenv @@ -251,12 +278,24 @@ target/ # install all needed dependencies. #Pipfile.lock -# celery beat schedule file +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff celerybeat-schedule +celerybeat.pid # SageMath parsed files *.sage.py +# Environments +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + # Spyder project settings .spyderproject .spyproject @@ -264,10 +303,6 @@ celerybeat-schedule # Rope project settings .ropeproject -# Mr Developer -.mr.developer.cfg -.project - # mkdocs documentation /site @@ -279,9 +314,16 @@ dmypy.json # Pyre type checker .pyre/ +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + ### Vim ### # Swap [._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files [._]*.sw[a-p] [._]s[a-rt-v][a-z] [._]ss[a-gi-z] @@ -299,11 +341,13 @@ tags [._]*.un~ ### WebStorm ### -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 # User-specific stuff +# AWS User-specific + # Generated files # Sensitive or high-churn files @@ -314,6 +358,9 @@ tags # When using Gradle or Maven with auto-import, you should exclude module files, # since they will be recreated, and may cause churn. Uncomment if using # auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml # .idea/modules.xml # .idea/*.iml # .idea/modules @@ -349,15 +396,27 @@ tags # *.ipr # Sonarlint plugin +# https://plugins.jetbrains.com/plugin/7973-sonarlint .idea/**/sonarlint/ # SonarQube Plugin +# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin .idea/**/sonarIssues.xml # Markdown Navigator plugin +# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced .idea/**/markdown-navigator.xml +.idea/**/markdown-navigator-enh.xml .idea/**/markdown-navigator/ +# Cache file creation bug +# See https://youtrack.jetbrains.com/issue/JBR-2257 +.idea/$CACHE_FILE$ + +# CodeStream plugin +# https://plugins.jetbrains.com/plugin/12206-codestream +.idea/codestream.xml + ### Windows ### # Windows thumbnail cache files Thumbs.db @@ -384,4 +443,4 @@ $RECYCLE.BIN/ # Windows shortcuts *.lnk -# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# End of https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv From 56b5be0630e226f20cf461b2ab9c722de5e34483 Mon Sep 17 00:00:00 2001 From: zorun Date: Mon, 9 Aug 2021 22:44:36 +0200 Subject: [PATCH 0261/2828] openbsd_pkg: Fix regexp matching crash (#3161) When a package name contains special characters (e.g. "g++"), they are interpreted as part of the regexp. This can lead to a crash with an error in the python re module, for instance with "g++": sre_constants.error: multiple repeat Fix this by escaping the package name. 
Co-authored-by: Baptiste Jonglez --- .../fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml | 2 ++ plugins/modules/packaging/os/openbsd_pkg.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml diff --git a/changelogs/fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml b/changelogs/fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml new file mode 100644 index 0000000000..bb29542c04 --- /dev/null +++ b/changelogs/fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml @@ -0,0 +1,2 @@ +bugfixes: + - openbsd_pkg - fix regexp matching crash. This bug could trigger on package names with special characters, for example ``g++`` (https://github.com/ansible-collections/community.general/pull/3161). diff --git a/plugins/modules/packaging/os/openbsd_pkg.py b/plugins/modules/packaging/os/openbsd_pkg.py index 61e2a5e52b..05c374cb4e 100644 --- a/plugins/modules/packaging/os/openbsd_pkg.py +++ b/plugins/modules/packaging/os/openbsd_pkg.py @@ -241,7 +241,7 @@ def package_present(names, pkg_spec, module): # "file:/local/package/directory/ is empty" message on stderr # while still installing the package, so we need to look for # for a message like "packagename-1.0: ok" just in case. - match = re.search(r"\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout']) + match = re.search(r"\W%s-[^:]+: ok\W" % re.escape(pkg_spec[name]['stem']), pkg_spec[name]['stdout']) if match: # It turns out we were able to install the package. 
@@ -295,7 +295,7 @@ def package_latest(names, pkg_spec, module): pkg_spec[name]['changed'] = False for installed_name in pkg_spec[name]['installed_names']: module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name) - match = re.search(r"\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout']) + match = re.search(r"\W%s->.+: ok\W" % re.escape(installed_name), pkg_spec[name]['stdout']) if match: module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name) From 1705335ba78ea301b7d3905c9a03d821503f4256 Mon Sep 17 00:00:00 2001 From: rainerleber <39616583+rainerleber@users.noreply.github.com> Date: Mon, 9 Aug 2021 22:52:44 +0200 Subject: [PATCH 0262/2828] SAP task list execution (#3169) * add sap task list execute * Apply suggestions from code review Co-authored-by: Felix Fontein * remove json out * Apply suggestions from code review Co-authored-by: Felix Fontein * change logic Co-authored-by: Rainer Leber Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/modules/sap_task_list_execute.py | 1 + .../modules/system/sap_task_list_execute.py | 341 ++++++++++++++++++ .../system/test_sap_task_list_execute.py | 89 +++++ 4 files changed, 433 insertions(+) create mode 120000 plugins/modules/sap_task_list_execute.py create mode 100644 plugins/modules/system/sap_task_list_execute.py create mode 100644 tests/unit/plugins/modules/system/test_sap_task_list_execute.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 4912a03ba4..1e982296d6 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1058,6 +1058,8 @@ files: ignore: ryansb $modules/system/runit.py: maintainers: jsumners + $modules/system/sap_task_list_execute: + maintainers: rainerleber $modules/system/sefcontext.py: maintainers: dagwieers $modules/system/selinux_permissive.py: diff --git a/plugins/modules/sap_task_list_execute.py b/plugins/modules/sap_task_list_execute.py new file mode 120000 index 0000000000..c27ac0a6ca --- 
/dev/null +++ b/plugins/modules/sap_task_list_execute.py @@ -0,0 +1 @@ +system/sap_task_list_execute.py \ No newline at end of file diff --git a/plugins/modules/system/sap_task_list_execute.py b/plugins/modules/system/sap_task_list_execute.py new file mode 100644 index 0000000000..87d6a1060d --- /dev/null +++ b/plugins/modules/system/sap_task_list_execute.py @@ -0,0 +1,341 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Rainer Leber +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: sap_task_list_execute +short_description: Perform SAP Task list execution +version_added: "3.5.0" +description: + - The C(sap_task_list_execute) module depends on C(pyrfc) Python library (version 2.4.0 and upwards). + Depending on distribution you are using, you may need to install additional packages to + have these available. + - Tasks in the task list which requires manual activities will be confirmed automatically. + - This module will use the RFC package C(STC_TM_API). + +requirements: + - pyrfc >= 2.4.0 + - xmltodict + +options: + conn_username: + description: The required username for the SAP system. + required: true + type: str + conn_password: + description: The required password for the SAP system. + required: true + type: str + host: + description: The required host for the SAP system. Can be either an FQDN or IP Address. + required: true + type: str + sysnr: + description: + - The system number of the SAP system. + - You must quote the value to ensure retaining the leading zeros. + default: '00' + type: str + client: + description: + - The client number to connect to. + - You must quote the value to ensure retaining the leading zeros. + default: '000' + type: str + task_to_execute: + description: The task list which will be executed. 
+ required: true + type: str + task_parameters: + description: + - The tasks and the parameters for execution. + - If the task list do not need any parameters. This could be empty. + - If only specific tasks from the task list should be executed. + The tasks even when no parameter is needed must be provided. + Alongside with the module parameter I(task_skip=true). + type: list + elements: dict + suboptions: + TASKNAME: + description: The name of the task in the task list. + type: str + required: true + FIELDNAME: + description: The name of the field of the task. + type: str + VALUE: + description: The value which have to be set. + type: raw + task_settings: + description: + - Setting for the execution of the task list. This can be the following as in TCODE SE80 described. + Check Mode C(CHECKRUN), Background Processing Active C(BATCH) (this is the default value), + Asynchronous Execution C(ASYNC), Trace Mode C(TRACE), Server Name C(BATCH_TARGET). + default: ['BATCH'] + type: list + elements: str + task_skip: + description: + - If this parameter is C(true) not defined tasks in I(task_parameters) are skipped. + - This could be the case when only certain tasks should run from the task list. + default: false + type: bool + +notes: + - Does not support C(check_mode). 
+author: + - Rainer Leber (@rainerleber) +''' + +EXAMPLES = r''' +# Pass in a message +- name: Test task execution + community.general.sap_task_list_execute: + conn_username: DDIC + conn_password: Passwd1234 + host: 10.1.8.10 + sysnr: '01' + client: '000' + task_to_execute: SAP_BASIS_SSL_CHECK + task_settings: batch + +- name: Pass in input parameters + community.general.sap_task_list_execute: + conn_username: DDIC + conn_password: Passwd1234 + host: 10.1.8.10 + sysnr: '00' + client: '000' + task_to_execute: SAP_BASIS_SSL_CHECK + task_parameters : + - { 'TASKNAME': 'CL_STCT_CHECK_SEC_CRYPTO', 'FIELDNAME': 'P_OPT2', 'VALUE': 'X' } + - TASKNAME: CL_STCT_CHECK_SEC_CRYPTO + FIELDNAME: P_OPT3 + VALUE: X + task_settings: batch + +# Exported environement variables. +- name: Hint if module will fail with error message like ImportError libsapnwrfc.so... + community.general.sap_task_list_execute: + conn_username: DDIC + conn_password: Passwd1234 + host: 10.1.8.10 + sysnr: '00' + client: '000' + task_to_execute: SAP_BASIS_SSL_CHECK + task_settings: batch + environment: + SAPNWRFC_HOME: /usr/local/sap/nwrfcsdk + LD_LIBRARY_PATH: /usr/local/sap/nwrfcsdk/lib +''' + +RETURN = r''' +msg: + description: A small execution description. + type: str + returned: always + sample: 'Successful' +out: + description: A complete description of the executed tasks. If this is available. 
+ type: list + elements: dict + returned: on success + sample: [...,{ + "LOG": { + "STCTM_S_LOG": [ + { + "ACTIVITY": "U_CONFIG", + "ACTIVITY_DESCR": "Configuration changed", + "DETAILS": null, + "EXEC_ID": "20210728184903.815739", + "FIELD": null, + "ID": "STC_TASK", + "LOG_MSG_NO": "000000", + "LOG_NO": null, + "MESSAGE": "For radiobutton group ICM too many options are set; choose only one option", + "MESSAGE_V1": "ICM", + "MESSAGE_V2": null, + "MESSAGE_V3": null, + "MESSAGE_V4": null, + "NUMBER": "048", + "PARAMETER": null, + "PERIOD": "M", + "PERIOD_DESCR": "Maintenance", + "ROW": "0", + "SRC_LINE": "170", + "SRC_OBJECT": "CL_STCTM_REPORT_UI IF_STCTM_UI_TASK~SET_PARAMETERS", + "SYSTEM": null, + "TIMESTMP": "20210728184903", + "TSTPNM": "DDIC", + "TYPE": "E" + },... + ]}}] +''' + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.json_utils import json +import traceback +try: + from pyrfc import Connection +except ImportError: + HAS_PYRFC_LIBRARY = False + PYRFC_LIBRARY_IMPORT_ERROR = traceback.format_exc() +else: + HAS_PYRFC_LIBRARY = True +try: + import xmltodict +except ImportError: + HAS_XMLTODICT_LIBRARY = False + XMLTODICT_LIBRARY_IMPORT_ERROR = traceback.format_exc() +else: + HAS_XMLTODICT_LIBRARY = True + + +def call_rfc_method(connection, method_name, kwargs): + # PyRFC call function + return connection.call(method_name, **kwargs) + + +def process_exec_settings(task_settings): + # processes task settings to objects + exec_settings = {} + for settings in task_settings: + temp_dict = {settings.upper(): 'X'} + for key, value in temp_dict.items(): + exec_settings[key] = value + return exec_settings + + +def xml_to_dict(xml_raw): + try: + xml_parsed = xmltodict.parse(xml_raw, dict_constructor=dict) + xml_dict = xml_parsed['asx:abap']['asx:values']['SESSION']['TASKLIST'] + except KeyError: + xml_dict = "No logs available." 
+ return xml_dict + + +def run_module(): + + params_spec = dict( + TASKNAME=dict(type='str', required=True), + FIELDNAME=dict(type='str'), + VALUE=dict(type='raw'), + ) + + # define available arguments/parameters a user can pass to the module + module = AnsibleModule( + argument_spec=dict( + # values for connection + conn_username=dict(type='str', required=True), + conn_password=dict(type='str', required=True, no_log=True), + host=dict(type='str', required=True), + sysnr=dict(type='str', default="00"), + client=dict(type='str', default="000"), + # values for execution tasks + task_to_execute=dict(type='str', required=True), + task_parameters=dict(type='list', elements='dict', options=params_spec), + task_settings=dict(type='list', elements='str', default=['BATCH']), + task_skip=dict(type='bool', default=False), + ), + supports_check_mode=False, + ) + result = dict(changed=False, msg='', out={}) + + params = module.params + + username = params['conn_username'].upper() + password = params['conn_password'] + host = params['host'] + sysnr = params['sysnr'] + client = params['client'] + + task_parameters = params['task_parameters'] + task_to_execute = params['task_to_execute'] + task_settings = params['task_settings'] + task_skip = params['task_skip'] + + if not HAS_PYRFC_LIBRARY: + module.fail_json( + msg=missing_required_lib('pyrfc'), + exception=PYRFC_LIBRARY_IMPORT_ERROR) + + if not HAS_XMLTODICT_LIBRARY: + module.fail_json( + msg=missing_required_lib('xmltodict'), + exception=XMLTODICT_LIBRARY_IMPORT_ERROR) + + # basic RFC connection with pyrfc + try: + conn = Connection(user=username, passwd=password, ashost=host, sysnr=sysnr, client=client) + except Exception as err: + result['error'] = str(err) + result['msg'] = 'Something went wrong connecting to the SAP system.' 
+ module.fail_json(**result) + + try: + raw_params = call_rfc_method(conn, 'STC_TM_SCENARIO_GET_PARAMETERS', + {'I_SCENARIO_ID': task_to_execute}) + except Exception as err: + result['error'] = str(err) + result['msg'] = 'The task list does not exsist.' + module.fail_json(**result) + exec_settings = process_exec_settings(task_settings) + # initialize session task + session_init = call_rfc_method(conn, 'STC_TM_SESSION_BEGIN', + {'I_SCENARIO_ID': task_to_execute, + 'I_INIT_ONLY': 'X'}) + # Confirm Tasks which requires manual activities from Task List Run + for task in raw_params['ET_PARAMETER']: + call_rfc_method(conn, 'STC_TM_TASK_CONFIRM', + {'I_SESSION_ID': session_init['E_SESSION_ID'], + 'I_TASKNAME': task['TASKNAME']}) + if task_skip: + for task in raw_params['ET_PARAMETER']: + call_rfc_method(conn, 'STC_TM_TASK_SKIP', + {'I_SESSION_ID': session_init['E_SESSION_ID'], + 'I_TASKNAME': task['TASKNAME'], 'I_SKIP_DEP_TASKS': 'X'}) + # unskip defined tasks and set parameters + if task_parameters is not None: + for task in task_parameters: + call_rfc_method(conn, 'STC_TM_TASK_UNSKIP', + {'I_SESSION_ID': session_init['E_SESSION_ID'], + 'I_TASKNAME': task['TASKNAME'], 'I_UNSKIP_DEP_TASKS': 'X'}) + + call_rfc_method(conn, 'STC_TM_SESSION_SET_PARAMETERS', + {'I_SESSION_ID': session_init['E_SESSION_ID'], + 'IT_PARAMETER': task_parameters}) + # start the task + try: + session_start = call_rfc_method(conn, 'STC_TM_SESSION_RESUME', + {'I_SESSION_ID': session_init['E_SESSION_ID'], + 'IS_EXEC_SETTINGS': exec_settings}) + except Exception as err: + result['error'] = str(err) + result['msg'] = 'Something went wrong. See error.' 
+ module.fail_json(**result) + # get task logs because the execution may successfully but the tasks shows errors or warnings + # returned value is ABAPXML https://help.sap.com/doc/abapdocu_755_index_htm/7.55/en-US/abenabap_xslt_asxml_general.htm + session_log = call_rfc_method(conn, 'STC_TM_SESSION_GET_LOG', + {'I_SESSION_ID': session_init['E_SESSION_ID']}) + + task_list = xml_to_dict(session_log['E_LOG']) + + result['changed'] = True + result['msg'] = session_start['E_STATUS_DESCR'] + result['out'] = task_list + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/system/test_sap_task_list_execute.py b/tests/unit/plugins/modules/system/test_sap_task_list_execute.py new file mode 100644 index 0000000000..9d2299cacb --- /dev/null +++ b/tests/unit/plugins/modules/system/test_sap_task_list_execute.py @@ -0,0 +1,89 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +sys.modules['pyrfc'] = MagicMock() +sys.modules['pyrfc.Connection'] = MagicMock() +sys.modules['xmltodict'] = MagicMock() +sys.modules['xmltodict.parse'] = MagicMock() + +from ansible_collections.community.general.plugins.modules.system import sap_task_list_execute + + +class TestSAPRfcModule(ModuleTestCase): + + def setUp(self): + super(TestSAPRfcModule, self).setUp() + self.module = sap_task_list_execute + + def tearDown(self): + super(TestSAPRfcModule, self).tearDown() + + def define_rfc_connect(self, mocker): + return mocker.patch(self.module.call_rfc_method) + + def test_without_required_parameters(self): + """Failure must 
occurs when all parameters are missing""" + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + self.module.main() + + def test_error_no_task_list(self): + """tests fail to exec task list""" + + set_module_args({ + "conn_username": "DDIC", + "conn_password": "Test1234", + "host": "10.1.8.9", + "task_to_execute": "SAP_BASIS_SSL_CHECK" + }) + + with patch.object(self.module, 'Connection') as conn: + conn.return_value = '' + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], 'The task list does not exsist.') + + def test_success(self): + """test execute task list success""" + + set_module_args({ + "conn_username": "DDIC", + "conn_password": "Test1234", + "host": "10.1.8.9", + "task_to_execute": "SAP_BASIS_SSL_CHECK" + }) + with patch.object(self.module, 'xml_to_dict') as XML: + XML.return_value = {'item': [{'TASK': {'CHECK_STATUS_DESCR': 'Check successfully', + 'STATUS_DESCR': 'Executed successfully', 'TASKNAME': 'CL_STCT_CHECK_SEC_CRYPTO', + 'LNR': '1', 'DESCRIPTION': 'Check SAP Cryptographic Library', 'DOCU_EXIST': 'X', + 'LOG_EXIST': 'X', 'ACTION_SKIP': None, 'ACTION_UNSKIP': None, 'ACTION_CONFIRM': None, + 'ACTION_MAINTAIN': None}}]} + + with self.assertRaises(AnsibleExitJson) as result: + sap_task_list_execute.main() + self.assertEqual(result.exception.args[0]['out'], {'item': [{'TASK': {'CHECK_STATUS_DESCR': 'Check successfully', + 'STATUS_DESCR': 'Executed successfully', 'TASKNAME': 'CL_STCT_CHECK_SEC_CRYPTO', + 'LNR': '1', 'DESCRIPTION': 'Check SAP Cryptographic Library', 'DOCU_EXIST': 'X', + 'LOG_EXIST': 'X', 'ACTION_SKIP': None, 'ACTION_UNSKIP': None, + 'ACTION_CONFIRM': None, 'ACTION_MAINTAIN': None}}]}) + + def test_success_no_log(self): + """test execute task list success without logs""" + + set_module_args({ + "conn_username": "DDIC", + "conn_password": "Test1234", + "host": "10.1.8.9", + "task_to_execute": "SAP_BASIS_SSL_CHECK" + }) + with patch.object(self.module, 
'xml_to_dict') as XML: + XML.return_value = "No logs available." + with self.assertRaises(AnsibleExitJson) as result: + sap_task_list_execute.main() + self.assertEqual(result.exception.args[0]['out'], 'No logs available.') From b5d6457611b88f6b9e6efdc562d39ed427758324 Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Tue, 10 Aug 2021 07:49:18 +0200 Subject: [PATCH 0263/2828] Support older version of psutil (RHEL7 and RHEL6) (#2808) * Support older version of psutil (RHEL7 and RHEL6) The psutil python module is a true mess, they changed the API twice. The function arguments, as well as the objects that are returned. The documentation does not make it clear which version supports what so the safest implementation is this waterfall approach. A better approach would be to inspect the returned information, rather than trust a version, but that would not be any more efficient. In the end it is better to have something that at least works out-of-the-box on all platforms than something that requires custom updates to system packages before it works as expected. Especially for something as basic as `pids`. * A little bit more concise * Apply suggestions from code review * Add changelog fragment. Co-authored-by: Felix Fontein --- .../fragments/2808-pids-older-psutil.yml | 2 ++ plugins/modules/system/pids.py | 19 ++++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2808-pids-older-psutil.yml diff --git a/changelogs/fragments/2808-pids-older-psutil.yml b/changelogs/fragments/2808-pids-older-psutil.yml new file mode 100644 index 0000000000..34015e3f2c --- /dev/null +++ b/changelogs/fragments/2808-pids-older-psutil.yml @@ -0,0 +1,2 @@ +bugfixes: +- "pids - avoid crashes for older ``psutil`` versions, like on RHEL6 and RHEL7 (https://github.com/ansible-collections/community.general/pull/2808)." 
diff --git a/plugins/modules/system/pids.py b/plugins/modules/system/pids.py index 5c7b82a794..622bec2500 100644 --- a/plugins/modules/system/pids.py +++ b/plugins/modules/system/pids.py @@ -79,11 +79,20 @@ def compare_lower(a, b): def get_pid(name): pids = [] - for proc in psutil.process_iter(attrs=['name', 'cmdline']): - if compare_lower(proc.info['name'], name) or \ - proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name): - pids.append(proc.pid) - + try: + for proc in psutil.process_iter(attrs=['name', 'cmdline']): + if compare_lower(proc.info['name'], name) or \ + proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name): + pids.append(proc.pid) + except TypeError: # EL6, EL7: process_iter() takes no arguments (1 given) + for proc in psutil.process_iter(): + try: # EL7 + proc_name, proc_cmdline = proc.name(), proc.cmdline() + except TypeError: # EL6: 'str' object is not callable + proc_name, proc_cmdline = proc.name, proc.cmdline + if compare_lower(proc_name, name) or \ + proc_cmdline and compare_lower(proc_cmdline[0], name): + pids.append(proc.pid) return pids From 6033ce695bc891828887019439ecf11668f58086 Mon Sep 17 00:00:00 2001 From: Sebastian Date: Thu, 12 Aug 2021 08:17:03 +0200 Subject: [PATCH 0264/2828] zypper: support transactional-updates (#3164) * zypper: support transactional-updates - Check if transactional updates are in use by checking for the existence of /var/lib/misc/transactional-update.state - Prefix zypper-commands with /sbin/transactional-update --continue --drop-if-no-change --quiet run if this is the case fixes ansible-collections/community.general#3159 * re-add get_bin_path for executables * fix typo --- .../3164-zypper-support-transactional-updates.yaml | 2 ++ plugins/modules/packaging/os/zypper.py | 8 ++++++++ 2 files changed, 10 insertions(+) create mode 100644 changelogs/fragments/3164-zypper-support-transactional-updates.yaml diff --git a/changelogs/fragments/3164-zypper-support-transactional-updates.yaml 
b/changelogs/fragments/3164-zypper-support-transactional-updates.yaml new file mode 100644 index 0000000000..d12ff9a6bf --- /dev/null +++ b/changelogs/fragments/3164-zypper-support-transactional-updates.yaml @@ -0,0 +1,2 @@ +minor_changes: + - zypper - prefix zypper commands with ``/sbin/transactional-update --continue --drop-if-no-change --quiet run`` if transactional updates are detected (https://github.com/ansible-collections/community.general/issues/3159). diff --git a/plugins/modules/packaging/os/zypper.py b/plugins/modules/packaging/os/zypper.py index 367bd8d9a0..2295b5a566 100644 --- a/plugins/modules/packaging/os/zypper.py +++ b/plugins/modules/packaging/os/zypper.py @@ -29,6 +29,7 @@ author: short_description: Manage packages on SUSE and openSUSE description: - Manage packages on SUSE and openSUSE using the zypper and rpm tools. + - Also supports transactional updates, by running zypper inside C(/sbin/transactional-update --continue --drop-if-no-change --quiet run). options: name: description: @@ -213,6 +214,7 @@ EXAMPLES = ''' ZYPP_LOCK_TIMEOUT: 20 ''' +import os.path import xml import re from xml.dom.minidom import parseString as parseXML @@ -337,6 +339,8 @@ def get_cmd(m, subcommand): is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade'] is_refresh = subcommand == 'refresh' cmd = [m.get_bin_path('zypper', required=True), '--quiet', '--non-interactive', '--xmlout'] + if transactional_updates(): + cmd = [m.get_bin_path('transactional-update', required=True), '--continue', '--drop-if-no-change', '--quiet', 'run'] + cmd if m.params['extra_args_precommand']: args_list = m.params['extra_args_precommand'].split() cmd.extend(args_list) @@ -491,6 +495,10 @@ def repo_refresh(m): return retvals + +def transactional_updates(): + return os.path.exists('/var/lib/misc/transactional-update.state') + # =========================================== # Main control flow From 1e466df863ffebd19ddab66b99ed19eec21e6c0e Mon Sep 17 00:00:00 2001 From: Ajpantuso 
Date: Thu, 12 Aug 2021 02:18:38 -0400 Subject: [PATCH 0265/2828] archive - idempotency enhancement for 4.0.0 (#3075) * Initial Commit * Comparing with tar file checksums rather than tar header checksums * Added changelog fragment * Revert "Comparing with tar file checksums rather than tar header checksums" This reverts commit bed4b171077058f1ed29785c6def52de2b1f441c. * Restricting idempotency tests by format * Applying review suggestions --- .../3075-archive-idempotency-enhancements.yml | 4 ++ plugins/modules/files/archive.py | 61 ++++++++++++++++--- .../targets/archive/tests/idempotency.yml | 21 +++---- 3 files changed, 65 insertions(+), 21 deletions(-) create mode 100644 changelogs/fragments/3075-archive-idempotency-enhancements.yml diff --git a/changelogs/fragments/3075-archive-idempotency-enhancements.yml b/changelogs/fragments/3075-archive-idempotency-enhancements.yml new file mode 100644 index 0000000000..3d0bf65fb7 --- /dev/null +++ b/changelogs/fragments/3075-archive-idempotency-enhancements.yml @@ -0,0 +1,4 @@ +--- +breaking_changes: + - archive - adding idempotency checks for changes to file names and content within the ``destination`` file + (https://github.com/ansible-collections/community.general/pull/3075). 
diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 30c4de5aa8..91dc6e5112 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -182,6 +182,7 @@ import zipfile from fnmatch import fnmatch from sys import version_info from traceback import format_exc +from zlib import crc32 from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_bytes, to_native @@ -234,10 +235,6 @@ def expand_paths(paths): return expanded_path, is_globby -def legacy_filter(path, exclusion_patterns): - return matches_exclusion_patterns(path, exclusion_patterns) - - def matches_exclusion_patterns(path, exclusion_patterns): return any(fnmatch(path, p) for p in exclusion_patterns) @@ -313,6 +310,7 @@ class Archive(object): if self.remove: self._check_removal_safety() + self.original_checksums = self.destination_checksums() self.original_size = self.destination_size() def add(self, path, archive_name): @@ -377,8 +375,16 @@ class Archive(object): msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors)) ) - def compare_with_original(self): - self.changed |= self.original_size != self.destination_size() + def is_different_from_original(self): + if self.original_checksums is None: + return self.original_size != self.destination_size() + else: + return self.original_checksums != self.destination_checksums() + + def destination_checksums(self): + if self.destination_exists() and self.destination_readable(): + return self._get_checksums(self.destination) + return None def destination_exists(self): return self.destination and os.path.exists(self.destination) @@ -494,6 +500,10 @@ class Archive(object): def _add(self, path, archive_name): pass + @abc.abstractmethod + def _get_checksums(self, path): + pass + class ZipArchive(Archive): def __init__(self, module): @@ -513,9 +523,18 @@ class ZipArchive(Archive): self.file = 
zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, True) def _add(self, path, archive_name): - if not legacy_filter(path, self.exclusion_patterns): + if not matches_exclusion_patterns(path, self.exclusion_patterns): self.file.write(path, archive_name) + def _get_checksums(self, path): + try: + archive = zipfile.ZipFile(_to_native_ascii(path), 'r') + checksums = set((info.filename, info.CRC) for info in archive.infolist()) + archive.close() + except zipfile.BadZipfile: + checksums = set() + return checksums + class TarArchive(Archive): def __init__(self, module): @@ -554,13 +573,35 @@ class TarArchive(Archive): return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo def py26_filter(path): - return legacy_filter(path, self.exclusion_patterns) + return matches_exclusion_patterns(path, self.exclusion_patterns) if PY27: self.file.add(path, archive_name, recursive=False, filter=py27_filter) else: self.file.add(path, archive_name, recursive=False, exclude=py26_filter) + def _get_checksums(self, path): + try: + if self.format == 'xz': + with lzma.open(_to_native_ascii(path), 'r') as f: + archive = tarfile.open(fileobj=f) + checksums = set((info.name, info.chksum) for info in archive.getmembers()) + archive.close() + else: + archive = tarfile.open(_to_native_ascii(path), 'r|' + self.format) + checksums = set((info.name, info.chksum) for info in archive.getmembers()) + archive.close() + except (lzma.LZMAError, tarfile.ReadError, tarfile.CompressionError): + try: + # The python implementations of gzip, bz2, and lzma do not support restoring compressed files + # to their original names so only file checksum is returned + f = self._open_compressed_file(_to_native_ascii(path), 'r') + checksums = set([(b'', crc32(f.read()))]) + f.close() + except Exception: + checksums = set() + return checksums + def get_archive(module): if module.params['format'] == 'zip': @@ -603,7 +644,7 @@ def main(): else: 
archive.add_targets() archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED - archive.compare_with_original() + archive.changed |= archive.is_different_from_original() if archive.remove: archive.remove_targets() else: @@ -613,7 +654,7 @@ def main(): else: path = archive.paths[0] archive.add_single_target(path) - archive.compare_with_original() + archive.changed |= archive.is_different_from_original() if archive.remove: archive.remove_single_target(path) diff --git a/tests/integration/targets/archive/tests/idempotency.yml b/tests/integration/targets/archive/tests/idempotency.yml index f53f768164..9262601572 100644 --- a/tests/integration/targets/archive/tests/idempotency.yml +++ b/tests/integration/targets/archive/tests/idempotency.yml @@ -19,12 +19,12 @@ format: "{{ format }}" register: file_content_idempotency_after -# After idempotency fix result will be reliably changed for all formats - name: Assert task status is changed - file content idempotency ({{ format }}) assert: that: - - file_content_idempotency_after is not changed - when: "format in ('tar', 'zip')" + - file_content_idempotency_after is changed + # Only ``zip`` archives are guaranteed to compare file content checksums rather than header checksums + when: "format == 'zip'" - name: Remove archive - file content idempotency ({{ format }}) file: @@ -54,12 +54,10 @@ format: "{{ format }}" register: file_name_idempotency_after -# After idempotency fix result will be reliably changed for all formats - name: Check task status - file name idempotency ({{ format }}) assert: that: - - file_name_idempotency_after is not changed - when: "format in ('tar', 'zip')" + - file_name_idempotency_after is changed - name: Remove archive - file name idempotency ({{ format }}) file: @@ -89,12 +87,12 @@ format: "{{ format }}" register: single_file_content_idempotency_after -# After idempotency fix result will be reliably changed for all formats - name: Assert task status is changed 
- single file content idempotency ({{ format }}) assert: that: - - single_file_content_idempotency_after is not changed - when: "format in ('tar', 'zip')" + - single_file_content_idempotency_after is changed + # ``tar`` archives are not guaranteed to identify changes to file content if the file meta properties are unchanged. + when: "format != 'tar'" - name: Remove archive - single file content idempotency ({{ format }}) file: @@ -125,11 +123,12 @@ register: single_file_name_idempotency_after -# After idempotency fix result will be reliably changed for all formats +# The gz, bz2, and xz formats do not store the original file name +# so it is not possible to identify a change in this scenario. - name: Check task status - single file name idempotency ({{ format }}) assert: that: - - single_file_name_idempotency_after is not changed + - single_file_name_idempotency_after is changed when: "format in ('tar', 'zip')" - name: Remove archive - single file name idempotency ({{ format }}) From 5855ef558a5357383e4be93606c1bd101bb48f85 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 10 Aug 2021 07:53:16 +0200 Subject: [PATCH 0266/2828] Next planned release is 3.6.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 0f19d8d443..724e76110d 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.5.0 +version: 3.6.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 1fec1d0c81449fb4fc3dea9de362a27de7c1fbda Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 12 Aug 2021 12:07:50 +0200 Subject: [PATCH 0267/2828] Fix new devel sanity errors. 
(#3194) --- changelogs/fragments/3194-sanity.yml | 14 ++++++++++++++ plugins/cache/memcached.py | 8 ++++---- plugins/callback/logdna.py | 2 +- plugins/connection/saltstack.py | 2 +- plugins/inventory/online.py | 2 +- plugins/module_utils/_netapp.py | 4 ++-- plugins/module_utils/online.py | 2 +- plugins/module_utils/scaleway.py | 2 +- plugins/modules/cloud/opennebula/one_template.py | 4 ++-- plugins/modules/cloud/packet/packet_device.py | 5 ++--- plugins/modules/cloud/packet/packet_sshkey.py | 2 +- .../modules/packaging/language/maven_artifact.py | 2 +- plugins/modules/system/launchd.py | 6 +++--- plugins/modules/system/ufw.py | 4 ++-- tests/unit/mock/loader.py | 4 ++-- 15 files changed, 38 insertions(+), 25 deletions(-) create mode 100644 changelogs/fragments/3194-sanity.yml diff --git a/changelogs/fragments/3194-sanity.yml b/changelogs/fragments/3194-sanity.yml new file mode 100644 index 0000000000..095894a685 --- /dev/null +++ b/changelogs/fragments/3194-sanity.yml @@ -0,0 +1,14 @@ +bugfixes: +- "memcached cache plugin - change function argument names to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "logdns callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3199)." +- "online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "netapp module utils - remove always-true conditional to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." 
+- "one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index ee36628f40..b7d14aa86d 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -154,12 +154,12 @@ class CacheModuleKeys(MutableSet): def __len__(self): return len(self._keyset) - def add(self, key): - self._keyset[key] = time.time() + def add(self, value): + self._keyset[value] = time.time() self._cache.set(self.PREFIX, self._keyset) - def discard(self, key): - del self._keyset[key] + def discard(self, value): + del self._keyset[value] self._cache.set(self.PREFIX, self._keyset) def remove_by_timerange(self, s_min, s_max): diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index 0c459bfac2..138b612de8 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -78,7 +78,7 @@ def get_mac(): # Getting hostname of system: def get_hostname(): - return str(socket.gethostname()).split('.local')[0] + return str(socket.gethostname()).split('.local', 1)[0] # Getting IP of system: diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index cbd85eaf3e..95870ad2d0 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -51,7 +51,7 @@ class 
Connection(ConnectionBase): self._connected = True return self - def exec_command(self, cmd, sudoable=False, in_data=None): + def exec_command(self, cmd, in_data=None, sudoable=False): """ run a command on the remote minion """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index 085c258d45..c678d3e0e5 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -235,7 +235,7 @@ class InventoryModule(BaseInventoryPlugin): self.headers = { 'Authorization': "Bearer %s" % token, - 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ')[0]), + 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]), 'Content-type': 'application/json' } diff --git a/plugins/module_utils/_netapp.py b/plugins/module_utils/_netapp.py index 126cc1bf16..8eda53b344 100644 --- a/plugins/module_utils/_netapp.py +++ b/plugins/module_utils/_netapp.py @@ -384,8 +384,8 @@ class NetAppESeriesModule(object): path = path[1:] request_url = self.url + self.DEFAULT_REST_API_PATH + path - if self.log_requests or True: - self.module.log(pformat(dict(url=request_url, data=data, method=method))) + # if self.log_requests: + self.module.log(pformat(dict(url=request_url, data=data, method=method))) return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None, timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds) diff --git a/plugins/module_utils/online.py b/plugins/module_utils/online.py index c0294abb79..b5acbcc017 100644 --- a/plugins/module_utils/online.py +++ b/plugins/module_utils/online.py @@ -101,7 +101,7 @@ class Online(object): @staticmethod def get_user_agent_string(module): - return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0]) + return "ansible %s Python %s" % 
(module.ansible_version, sys.version.split(' ', 1)[0]) def get(self, path, data=None, headers=None): return self.send('GET', path, data, headers) diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index d714fd69e8..bcada5fcb9 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -142,7 +142,7 @@ class Scaleway(object): @staticmethod def get_user_agent_string(module): - return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0]) + return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0]) def get(self, path, data=None, headers=None, params=None): return self.send(method='GET', path=path, data=data, headers=headers, params=params) diff --git a/plugins/modules/cloud/opennebula/one_template.py b/plugins/modules/cloud/opennebula/one_template.py index 3b0b601193..b1d2c69ccf 100644 --- a/plugins/modules/cloud/opennebula/one_template.py +++ b/plugins/modules/cloud/opennebula/one_template.py @@ -213,8 +213,8 @@ class TemplateModule(OpenNebulaModule): def get_template_by_id(self, template_id): return self.get_template(lambda template: (template.ID == template_id)) - def get_template_by_name(self, template_name): - return self.get_template(lambda template: (template.NAME == template_name)) + def get_template_by_name(self, name): + return self.get_template(lambda template: (template.NAME == name)) def get_template_instance(self, requested_id, requested_name): if requested_id: diff --git a/plugins/modules/cloud/packet/packet_device.py b/plugins/modules/cloud/packet/packet_device.py index 5cc8d13e9a..5912a6f46a 100644 --- a/plugins/modules/cloud/packet/packet_device.py +++ b/plugins/modules/cloud/packet/packet_device.py @@ -509,11 +509,10 @@ def wait_for_devices_active(module, packet_conn, watched_devices): def wait_for_public_IPv(module, packet_conn, created_devices): def has_public_ip(addr_list, ip_v): - return any([a['public'] and a['address_family'] == ip_v and 
- a['address'] for a in addr_list]) + return any(a['public'] and a['address_family'] == ip_v and a['address'] for a in addr_list) def all_have_public_ip(ds, ip_v): - return all([has_public_ip(d.ip_addresses, ip_v) for d in ds]) + return all(has_public_ip(d.ip_addresses, ip_v) for d in ds) address_family = module.params.get('wait_for_public_IPv') diff --git a/plugins/modules/cloud/packet/packet_sshkey.py b/plugins/modules/cloud/packet/packet_sshkey.py index 57e988630e..4800718fd0 100644 --- a/plugins/modules/cloud/packet/packet_sshkey.py +++ b/plugins/modules/cloud/packet/packet_sshkey.py @@ -168,7 +168,7 @@ def get_sshkey_selector(module): return k.key == select_dict['key'] else: # if key string not specified, all the fields must match - return all([select_dict[f] == getattr(k, f) for f in select_dict]) + return all(select_dict[f] == getattr(k, f) for f in select_dict) return selector diff --git a/plugins/modules/packaging/language/maven_artifact.py b/plugins/modules/packaging/language/maven_artifact.py index 9e2f94190f..c184830580 100644 --- a/plugins/modules/packaging/language/maven_artifact.py +++ b/plugins/modules/packaging/language/maven_artifact.py @@ -565,7 +565,7 @@ class MavenDownloader: return "Cannot find %s checksum from %s" % (checksum_alg, remote_url) try: # Check if remote checksum only contains md5/sha1 or md5/sha1 + filename - _remote_checksum = remote_checksum.split(None)[0] + _remote_checksum = remote_checksum.split(None, 1)[0] remote_checksum = _remote_checksum # remote_checksum is empty so we continue and keep original checksum string # This should not happen since we check for remote_checksum before diff --git a/plugins/modules/system/launchd.py b/plugins/modules/system/launchd.py index e8d82ff318..8c09a44f6e 100644 --- a/plugins/modules/system/launchd.py +++ b/plugins/modules/system/launchd.py @@ -141,14 +141,14 @@ class Plist: self.__changed = False self.__service = service - state, pid, dummy, dummy = LaunchCtlList(module, service).run() + 
state, pid, dummy, dummy = LaunchCtlList(module, self.__service).run() # Check if readPlist is available or not self.old_plistlib = hasattr(plistlib, 'readPlist') - self.__file = self.__find_service_plist(service) + self.__file = self.__find_service_plist(self.__service) if self.__file is None: - msg = 'Unable to infer the path of %s service plist file' % service + msg = 'Unable to infer the path of %s service plist file' % self.__service if pid is None and state == ServiceState.UNLOADED: msg += ' and it was not found among active services' module.fail_json(msg=msg) diff --git a/plugins/modules/system/ufw.py b/plugins/modules/system/ufw.py index c6df6fe63a..465df6adb5 100644 --- a/plugins/modules/system/ufw.py +++ b/plugins/modules/system/ufw.py @@ -526,8 +526,8 @@ def main(): lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()] lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher] last_number = max([no for (no, ipv6) in lines]) if lines else 0 - has_ipv4 = any([not ipv6 for (no, ipv6) in lines]) - has_ipv6 = any([ipv6 for (no, ipv6) in lines]) + has_ipv4 = any(not ipv6 for (no, ipv6) in lines) + has_ipv6 = any(ipv6 for (no, ipv6) in lines) if relative_to_cmd == 'first-ipv4': relative_to = 1 elif relative_to_cmd == 'last-ipv4': diff --git a/tests/unit/mock/loader.py b/tests/unit/mock/loader.py index 756d532e68..5389bdcb2f 100644 --- a/tests/unit/mock/loader.py +++ b/tests/unit/mock/loader.py @@ -32,8 +32,8 @@ class DictDataLoader(DataLoader): # TODO: the real _get_file_contents returns a bytestring, so we actually convert the # unicode/text it's created with to utf-8 - def _get_file_contents(self, path): - path = to_text(path) + def _get_file_contents(self, file_name): + path = to_text(file_name) if path in self._file_mapping: return (to_bytes(self._file_mapping[path]), False) else: From e123623f5c2566ef61c397e4c739664b17a59428 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 12 Aug 2021 
13:11:02 +0200 Subject: [PATCH 0268/2828] Fix PR #. --- changelogs/fragments/3194-sanity.yml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/changelogs/fragments/3194-sanity.yml b/changelogs/fragments/3194-sanity.yml index 095894a685..b6961556ce 100644 --- a/changelogs/fragments/3194-sanity.yml +++ b/changelogs/fragments/3194-sanity.yml @@ -1,14 +1,14 @@ bugfixes: -- "memcached cache plugin - change function argument names to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "logdns callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3199)." -- "online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "netapp module utils - remove always-true conditional to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." 
-- "launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "memcached cache plugin - change function argument names to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "logdns callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3194)." +- "online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "netapp module utils - remove always-true conditional to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." 
From f7dba23e50590e99ecd97d5235151e7fc3e8490b Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 12 Aug 2021 22:14:34 +0200 Subject: [PATCH 0269/2828] Remove deprecated netapp leftovers. (#3197) --- changelogs/fragments/netapp-removal.yml | 2 + plugins/doc_fragments/_netapp.py | 138 ----- plugins/module_utils/_netapp.py | 748 ------------------------ 3 files changed, 2 insertions(+), 886 deletions(-) create mode 100644 changelogs/fragments/netapp-removal.yml delete mode 100644 plugins/doc_fragments/_netapp.py delete mode 100644 plugins/module_utils/_netapp.py diff --git a/changelogs/fragments/netapp-removal.yml b/changelogs/fragments/netapp-removal.yml new file mode 100644 index 0000000000..e515e377cd --- /dev/null +++ b/changelogs/fragments/netapp-removal.yml @@ -0,0 +1,2 @@ +removed_features: +- "Removed deprecated netapp module utils and doc fragments (https://github.com/ansible-collections/community.general/pull/3197)." diff --git a/plugins/doc_fragments/_netapp.py b/plugins/doc_fragments/_netapp.py deleted file mode 100644 index c3d0d3ba06..0000000000 --- a/plugins/doc_fragments/_netapp.py +++ /dev/null @@ -1,138 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Sumit Kumar , chris Archibald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - DOCUMENTATION = r''' -options: - - See respective platform section for more details -requirements: - - See respective platform section for more details -notes: - - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire -''' - - # Documentation fragment for ONTAP (na_cdot) - ONTAP = r''' -options: - hostname: - required: true - description: - - The hostname or IP address of the ONTAP instance. 
- username: - required: true - description: - - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required. - For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/). - aliases: ['user'] - password: - required: true - description: - - Password for the specified user. - aliases: ['pass'] -requirements: - - A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3 - - Ansible 2.2 - - netapp-lib (2015.9.25). Install using 'pip install netapp-lib' - -notes: - - The modules prefixed with na\\_cdot are built to support the ONTAP storage platform. - -''' - - # Documentation fragment for SolidFire - SOLIDFIRE = r''' -options: - hostname: - required: true - description: - - The hostname or IP address of the SolidFire cluster. - username: - required: true - description: - - Please ensure that the user has the adequate permissions. For more information, please read the official documentation - U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US). - aliases: ['user'] - password: - required: true - description: - - Password for the specified user. - aliases: ['pass'] - -requirements: - - The modules were developed with SolidFire 10.1 - - solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python' - -notes: - - The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform. - -''' - - # Documentation fragment for ONTAP (na_ontap) - NA_ONTAP = r''' -options: - hostname: - description: - - The hostname or IP address of the ONTAP instance. - type: str - required: true - username: - description: - - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required. 
- For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/). - type: str - required: true - aliases: [ user ] - password: - description: - - Password for the specified user. - type: str - required: true - aliases: [ pass ] - https: - description: - - Enable and disable https - type: bool - default: no - validate_certs: - description: - - If set to C(no), the SSL certificates will not be validated. - - This should only set to C(False) used on personally controlled sites using self-signed certificates. - type: bool - default: yes - http_port: - description: - - Override the default port (80 or 443) with this port - type: int - ontapi: - description: - - The ontap api version to use - type: int - use_rest: - description: - - REST API if supported by the target system for all the resources and attributes the module requires. Otherwise will revert to ZAPI. - - Always -- will always use the REST API - - Never -- will always use the ZAPI - - Auto -- will try to use the REST Api - default: Auto - choices: ['Never', 'Always', 'Auto'] - type: str - - -requirements: - - A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward - - Ansible 2.6 - - Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib' - - Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib' - - To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;' - -notes: - - The modules prefixed with na\\_ontap are built to support the ONTAP storage platform. - -''' diff --git a/plugins/module_utils/_netapp.py b/plugins/module_utils/_netapp.py deleted file mode 100644 index 8eda53b344..0000000000 --- a/plugins/module_utils/_netapp.py +++ /dev/null @@ -1,748 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. 
-# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Copyright (c) 2017, Sumit Kumar -# Copyright (c) 2017, Michael Price -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import os -import random -import mimetypes - -from pprint import pformat -from ansible.module_utils import six -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError -from ansible.module_utils.urls import open_url -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.common.text.converters import to_native - -try: - from ansible.module_utils.ansible_release import __version__ as ansible_version -except ImportError: - ansible_version = 'unknown' - -try: - from netapp_lib.api.zapi import zapi - HAS_NETAPP_LIB = True -except ImportError: - HAS_NETAPP_LIB = False - -try: - import requests - HAS_REQUESTS = True -except ImportError: - HAS_REQUESTS = False - -import ssl -try: - from urlparse import urlparse, urlunparse -except ImportError: - from urllib.parse import urlparse, urlunparse - - -HAS_SF_SDK = False -SF_BYTE_MAP = dict( - # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000. 
- bytes=1, - b=1, - kb=1000, - mb=1000 ** 2, - gb=1000 ** 3, - tb=1000 ** 4, - pb=1000 ** 5, - eb=1000 ** 6, - zb=1000 ** 7, - yb=1000 ** 8 -) - -POW2_BYTE_MAP = dict( - # Here, 1 kb = 1024 - bytes=1, - b=1, - kb=1024, - mb=1024 ** 2, - gb=1024 ** 3, - tb=1024 ** 4, - pb=1024 ** 5, - eb=1024 ** 6, - zb=1024 ** 7, - yb=1024 ** 8 -) - -try: - from solidfire.factory import ElementFactory - from solidfire.custom.models import TimeIntervalFrequency - from solidfire.models import Schedule, ScheduleInfo - - HAS_SF_SDK = True -except Exception: - HAS_SF_SDK = False - - -def has_netapp_lib(): - return HAS_NETAPP_LIB - - -def has_sf_sdk(): - return HAS_SF_SDK - - -def na_ontap_host_argument_spec(): - - return dict( - hostname=dict(required=True, type='str'), - username=dict(required=True, type='str', aliases=['user']), - password=dict(required=True, type='str', aliases=['pass'], no_log=True), - https=dict(required=False, type='bool', default=False), - validate_certs=dict(required=False, type='bool', default=True), - http_port=dict(required=False, type='int'), - ontapi=dict(required=False, type='int'), - use_rest=dict(required=False, type='str', default='Auto', choices=['Never', 'Always', 'Auto']) - ) - - -def ontap_sf_host_argument_spec(): - - return dict( - hostname=dict(required=True, type='str'), - username=dict(required=True, type='str', aliases=['user']), - password=dict(required=True, type='str', aliases=['pass'], no_log=True) - ) - - -def aws_cvs_host_argument_spec(): - - return dict( - api_url=dict(required=True, type='str'), - validate_certs=dict(required=False, type='bool', default=True), - api_key=dict(required=True, type='str', no_log=True), - secret_key=dict(required=True, type='str', no_log=True) - ) - - -def create_sf_connection(module, port=None): - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params['password'] - - if HAS_SF_SDK and hostname and username and password: - try: - return_val = 
ElementFactory.create(hostname, username, password, port=port) - return return_val - except Exception: - raise Exception("Unable to create SF connection") - else: - module.fail_json(msg="the python SolidFire SDK module is required") - - -def setup_na_ontap_zapi(module, vserver=None): - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params['password'] - https = module.params['https'] - validate_certs = module.params['validate_certs'] - port = module.params['http_port'] - version = module.params['ontapi'] - - if HAS_NETAPP_LIB: - # set up zapi - server = zapi.NaServer(hostname) - server.set_username(username) - server.set_password(password) - if vserver: - server.set_vserver(vserver) - if version: - minor = version - else: - minor = 110 - server.set_api_version(major=1, minor=minor) - # default is HTTP - if https: - if port is None: - port = 443 - transport_type = 'HTTPS' - # HACK to bypass certificate verification - if validate_certs is False: - if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None): - ssl._create_default_https_context = ssl._create_unverified_context - else: - if port is None: - port = 80 - transport_type = 'HTTP' - server.set_transport_type(transport_type) - server.set_port(port) - server.set_server_type('FILER') - return server - else: - module.fail_json(msg="the python NetApp-Lib module is required") - - -def setup_ontap_zapi(module, vserver=None): - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params['password'] - - if HAS_NETAPP_LIB: - # set up zapi - server = zapi.NaServer(hostname) - server.set_username(username) - server.set_password(password) - if vserver: - server.set_vserver(vserver) - # Todo : Replace hard-coded values with configurable parameters. 
- server.set_api_version(major=1, minor=110) - server.set_port(80) - server.set_server_type('FILER') - server.set_transport_type('HTTP') - return server - else: - module.fail_json(msg="the python NetApp-Lib module is required") - - -def eseries_host_argument_spec(): - """Retrieve a base argument specification common to all NetApp E-Series modules""" - argument_spec = basic_auth_argument_spec() - argument_spec.update(dict( - api_username=dict(type='str', required=True), - api_password=dict(type='str', required=True, no_log=True), - api_url=dict(type='str', required=True), - ssid=dict(type='str', required=False, default='1'), - validate_certs=dict(type='bool', required=False, default=True) - )) - return argument_spec - - -class NetAppESeriesModule(object): - """Base class for all NetApp E-Series modules. - - Provides a set of common methods for NetApp E-Series modules, including version checking, mode (proxy, embedded) - verification, http requests, secure http redirection for embedded web services, and logging setup. - - Be sure to add the following lines in the module's documentation section: - extends_documentation_fragment: - - netapp.eseries - - :param dict(dict) ansible_options: dictionary of ansible option definitions - :param str web_services_version: minimally required web services rest api version (default value: "02.00.0000.0000") - :param bool supports_check_mode: whether the module will support the check_mode capabilities (default=False) - :param list(list) mutually_exclusive: list containing list(s) of mutually exclusive options (optional) - :param list(list) required_if: list containing list(s) containing the option, the option value, and then - a list of required options. (optional) - :param list(list) required_one_of: list containing list(s) of options for which at least one is required. (optional) - :param list(list) required_together: list containing list(s) of options that are required together. 
(optional) - :param bool log_requests: controls whether to log each request (default: True) - """ - DEFAULT_TIMEOUT = 60 - DEFAULT_SECURE_PORT = "8443" - DEFAULT_REST_API_PATH = "devmgr/v2/" - DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about" - DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json", - "netapp-client-type": "Ansible-%s" % ansible_version} - HTTP_AGENT = "Ansible / %s" % ansible_version - SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3, tb=1024**4, - pb=1024**5, eb=1024**6, zb=1024**7, yb=1024**8) - - def __init__(self, ansible_options, web_services_version=None, supports_check_mode=False, - mutually_exclusive=None, required_if=None, required_one_of=None, required_together=None, - log_requests=True): - argument_spec = eseries_host_argument_spec() - argument_spec.update(ansible_options) - - self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode, - mutually_exclusive=mutually_exclusive, required_if=required_if, - required_one_of=required_one_of, required_together=required_together) - - args = self.module.params - self.web_services_version = web_services_version if web_services_version else "02.00.0000.0000" - self.ssid = args["ssid"] - self.url = args["api_url"] - self.log_requests = log_requests - self.creds = dict(url_username=args["api_username"], - url_password=args["api_password"], - validate_certs=args["validate_certs"]) - - if not self.url.endswith("/"): - self.url += "/" - - self.is_embedded_mode = None - self.is_web_services_valid_cache = None - - def _check_web_services_version(self): - """Verify proxy or embedded web services meets minimum version required for module. - - The minimum required web services version is evaluated against version supplied through the web services rest - api. AnsibleFailJson exception will be raised when the minimum is not met or exceeded. 
- - This helper function will update the supplied api url if secure http is not used for embedded web services - - :raise AnsibleFailJson: raised when the contacted api service does not meet the minimum required version. - """ - if not self.is_web_services_valid_cache: - - url_parts = urlparse(self.url) - if not url_parts.scheme or not url_parts.netloc: - self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url) - - if url_parts.scheme not in ["http", "https"]: - self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url) - - self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc) - about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH - rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, **self.creds) - - if rc != 200: - self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid) - self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0] - about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH - try: - rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds) - except Exception as error: - self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]." - % (self.ssid, to_native(error))) - - major, minor, other, revision = data["version"].split(".") - minimum_major, minimum_minor, other, minimum_revision = self.web_services_version.split(".") - - if not (major > minimum_major or - (major == minimum_major and minor > minimum_minor) or - (major == minimum_major and minor == minimum_minor and revision >= minimum_revision)): - self.module.fail_json(msg="Web services version does not meet minimum version required. Current version: [%s]." - " Version required: [%s]." 
% (data["version"], self.web_services_version)) - - self.module.log("Web services rest api version met the minimum required version.") - self.is_web_services_valid_cache = True - - def is_embedded(self): - """Determine whether web services server is the embedded web services. - - If web services about endpoint fails based on an URLError then the request will be attempted again using - secure http. - - :raise AnsibleFailJson: raised when web services about endpoint failed to be contacted. - :return bool: whether contacted web services is running from storage array (embedded) or from a proxy. - """ - self._check_web_services_version() - - if self.is_embedded_mode is None: - about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH - try: - rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds) - self.is_embedded_mode = not data["runningAsProxy"] - except Exception as error: - self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]." - % (self.ssid, to_native(error))) - - return self.is_embedded_mode - - def request(self, path, data=None, method='GET', headers=None, ignore_errors=False): - """Issue an HTTP request to a url, retrieving an optional JSON response. - - :param str path: web services rest api endpoint path (Example: storage-systems/1/graph). Note that when the - full url path is specified then that will be used without supplying the protocol, hostname, port and rest path. - :param data: data required for the request (data may be json or any python structured data) - :param str method: request method such as GET, POST, DELETE. - :param dict headers: dictionary containing request headers. - :param bool ignore_errors: forces the request to ignore any raised exceptions. 
- """ - self._check_web_services_version() - - if headers is None: - headers = self.DEFAULT_HEADERS - - if not isinstance(data, str) and headers["Content-Type"] == "application/json": - data = json.dumps(data) - - if path.startswith("/"): - path = path[1:] - request_url = self.url + self.DEFAULT_REST_API_PATH + path - - # if self.log_requests: - self.module.log(pformat(dict(url=request_url, data=data, method=method))) - - return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None, - timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds) - - -def create_multipart_formdata(files, fields=None, send_8kb=False): - """Create the data for a multipart/form request. - - :param list(list) files: list of lists each containing (name, filename, path). - :param list(list) fields: list of lists each containing (key, value). - :param bool send_8kb: only sends the first 8kb of the files (default: False). 
- """ - boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)]) - data_parts = list() - data = None - - if six.PY2: # Generate payload for Python 2 - newline = "\r\n" - if fields is not None: - for key, value in fields: - data_parts.extend(["--%s" % boundary, - 'Content-Disposition: form-data; name="%s"' % key, - "", - value]) - - for name, filename, path in files: - with open(path, "rb") as fh: - value = fh.read(8192) if send_8kb else fh.read() - - data_parts.extend(["--%s" % boundary, - 'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename), - "Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream"), - "", - value]) - data_parts.extend(["--%s--" % boundary, ""]) - data = newline.join(data_parts) - - else: - newline = six.b("\r\n") - if fields is not None: - for key, value in fields: - data_parts.extend([six.b("--%s" % boundary), - six.b('Content-Disposition: form-data; name="%s"' % key), - six.b(""), - six.b(value)]) - - for name, filename, path in files: - with open(path, "rb") as fh: - value = fh.read(8192) if send_8kb else fh.read() - - data_parts.extend([six.b("--%s" % boundary), - six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)), - six.b("Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream")), - six.b(""), - value]) - data_parts.extend([six.b("--%s--" % boundary), b""]) - data = newline.join(data_parts) - - headers = { - "Content-Type": "multipart/form-data; boundary=%s" % boundary, - "Content-Length": str(len(data))} - - return headers, data - - -def request(url, data=None, headers=None, method='GET', use_proxy=True, - force=False, last_mod_time=None, timeout=10, validate_certs=True, - url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): - """Issue an HTTP request to a url, retrieving an optional JSON response.""" - - if headers is None: - headers = 
{"Content-Type": "application/json", "Accept": "application/json"} - headers.update({"netapp-client-type": "Ansible-%s" % ansible_version}) - - if not http_agent: - http_agent = "Ansible / %s" % ansible_version - - try: - r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, - force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, - url_username=url_username, url_password=url_password, http_agent=http_agent, - force_basic_auth=force_basic_auth) - except HTTPError as err: - r = err.fp - - try: - raw_data = r.read() - if raw_data: - data = json.loads(raw_data) - else: - raw_data = None - except Exception: - if ignore_errors: - pass - else: - raise Exception(raw_data) - - resp_code = r.getcode() - - if resp_code >= 400 and not ignore_errors: - raise Exception(resp_code, data) - else: - return resp_code, data - - -def ems_log_event(source, server, name="Ansible", id="12345", version=ansible_version, - category="Information", event="setup", autosupport="false"): - ems_log = zapi.NaElement('ems-autosupport-log') - # Host name invoking the API. - ems_log.add_new_child("computer-name", name) - # ID of event. A user defined event-id, range [0..2^32-2]. - ems_log.add_new_child("event-id", id) - # Name of the application invoking the API. - ems_log.add_new_child("event-source", source) - # Version of application invoking the API. - ems_log.add_new_child("app-version", version) - # Application defined category of the event. - ems_log.add_new_child("category", category) - # Description of event to log. An application defined message to log. 
- ems_log.add_new_child("event-description", event) - ems_log.add_new_child("log-level", "6") - ems_log.add_new_child("auto-support", autosupport) - server.invoke_successfully(ems_log, True) - - -def get_cserver_zapi(server): - vserver_info = zapi.NaElement('vserver-get-iter') - query_details = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'}) - query = zapi.NaElement('query') - query.add_child_elem(query_details) - vserver_info.add_child_elem(query) - result = server.invoke_successfully(vserver_info, - enable_tunneling=False) - attribute_list = result.get_child_by_name('attributes-list') - vserver_list = attribute_list.get_child_by_name('vserver-info') - return vserver_list.get_child_content('vserver-name') - - -def get_cserver(connection, is_rest=False): - if not is_rest: - return get_cserver_zapi(connection) - - params = {'fields': 'type'} - api = "private/cli/vserver" - json, error = connection.get(api, params) - if json is None or error is not None: - # exit if there is an error or no data - return None - vservers = json.get('records') - if vservers is not None: - for vserver in vservers: - if vserver['type'] == 'admin': # cluster admin - return vserver['vserver'] - if len(vservers) == 1: # assume vserver admin - return vservers[0]['vserver'] - - return None - - -class OntapRestAPI(object): - def __init__(self, module, timeout=60): - self.module = module - self.username = self.module.params['username'] - self.password = self.module.params['password'] - self.hostname = self.module.params['hostname'] - self.use_rest = self.module.params['use_rest'] - self.verify = self.module.params['validate_certs'] - self.timeout = timeout - self.url = 'https://' + self.hostname + '/api/' - self.errors = list() - self.debug_logs = list() - self.check_required_library() - - def check_required_library(self): - if not HAS_REQUESTS: - self.module.fail_json(msg=missing_required_lib('requests')) - - def send_request(self, method, api, params, 
json=None, return_status_code=False): - ''' send http request and process reponse, including error conditions ''' - url = self.url + api - status_code = None - content = None - json_dict = None - json_error = None - error_details = None - - def get_json(response): - ''' extract json, and error message if present ''' - try: - json = response.json() - except ValueError: - return None, None - error = json.get('error') - return json, error - - try: - response = requests.request(method, url, verify=self.verify, auth=(self.username, self.password), params=params, timeout=self.timeout, json=json) - content = response.content # for debug purposes - status_code = response.status_code - # If the response was successful, no Exception will be raised - response.raise_for_status() - json_dict, json_error = get_json(response) - except requests.exceptions.HTTPError as err: - __, json_error = get_json(response) - if json_error is None: - self.log_error(status_code, 'HTTP error: %s' % err) - error_details = str(err) - # If an error was reported in the json payload, it is handled below - except requests.exceptions.ConnectionError as err: - self.log_error(status_code, 'Connection error: %s' % err) - error_details = str(err) - except Exception as err: - self.log_error(status_code, 'Other error: %s' % err) - error_details = str(err) - if json_error is not None: - self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error)) - error_details = json_error - self.log_debug(status_code, content) - if return_status_code: - return status_code, error_details - return json_dict, error_details - - def get(self, api, params): - method = 'GET' - return self.send_request(method, api, params) - - def post(self, api, data, params=None): - method = 'POST' - return self.send_request(method, api, params, json=data) - - def patch(self, api, data, params=None): - method = 'PATCH' - return self.send_request(method, api, params, json=data) - - def delete(self, api, data, params=None): - 
method = 'DELETE' - return self.send_request(method, api, params, json=data) - - def _is_rest(self, used_unsupported_rest_properties=None): - if self.use_rest == "Always": - if used_unsupported_rest_properties: - error = "REST API currently does not support '%s'" % \ - ', '.join(used_unsupported_rest_properties) - return True, error - else: - return True, None - if self.use_rest == 'Never' or used_unsupported_rest_properties: - # force ZAPI if requested or if some parameter requires it - return False, None - method = 'HEAD' - api = 'cluster/software' - status_code, __ = self.send_request(method, api, params=None, return_status_code=True) - if status_code == 200: - return True, None - return False, None - - def is_rest(self, used_unsupported_rest_properties=None): - ''' only return error if there is a reason to ''' - use_rest, error = self._is_rest(used_unsupported_rest_properties) - if used_unsupported_rest_properties is None: - return use_rest - return use_rest, error - - def log_error(self, status_code, message): - self.errors.append(message) - self.debug_logs.append((status_code, message)) - - def log_debug(self, status_code, content): - self.debug_logs.append((status_code, content)) - - -class AwsCvsRestAPI(object): - def __init__(self, module, timeout=60): - self.module = module - self.api_key = self.module.params['api_key'] - self.secret_key = self.module.params['secret_key'] - self.api_url = self.module.params['api_url'] - self.verify = self.module.params['validate_certs'] - self.timeout = timeout - self.url = 'https://' + self.api_url + '/v1/' - self.check_required_library() - - def check_required_library(self): - if not HAS_REQUESTS: - self.module.fail_json(msg=missing_required_lib('requests')) - - def send_request(self, method, api, params, json=None): - ''' send http request and process reponse, including error conditions ''' - url = self.url + api - status_code = None - content = None - json_dict = None - json_error = None - error_details = None - 
headers = { - 'Content-type': "application/json", - 'api-key': self.api_key, - 'secret-key': self.secret_key, - 'Cache-Control': "no-cache", - } - - def get_json(response): - ''' extract json, and error message if present ''' - try: - json = response.json() - - except ValueError: - return None, None - success_code = [200, 201, 202] - if response.status_code not in success_code: - error = json.get('message') - else: - error = None - return json, error - try: - response = requests.request(method, url, headers=headers, timeout=self.timeout, json=json) - status_code = response.status_code - # If the response was successful, no Exception will be raised - json_dict, json_error = get_json(response) - except requests.exceptions.HTTPError as err: - __, json_error = get_json(response) - if json_error is None: - error_details = str(err) - except requests.exceptions.ConnectionError as err: - error_details = str(err) - except Exception as err: - error_details = str(err) - if json_error is not None: - error_details = json_error - - return json_dict, error_details - - # If an error was reported in the json payload, it is handled below - def get(self, api, params=None): - method = 'GET' - return self.send_request(method, api, params) - - def post(self, api, data, params=None): - method = 'POST' - return self.send_request(method, api, params, json=data) - - def patch(self, api, data, params=None): - method = 'PATCH' - return self.send_request(method, api, params, json=data) - - def put(self, api, data, params=None): - method = 'PUT' - return self.send_request(method, api, params, json=data) - - def delete(self, api, data, params=None): - method = 'DELETE' - return self.send_request(method, api, params, json=data) - - def get_state(self, jobId): - """ Method to get the state of the job """ - method = 'GET' - response, status_code = self.get('Jobs/%s' % jobId) - while str(response['state']) not in 'done': - response, status_code = self.get('Jobs/%s' % jobId) - return 'done' From 
25267b80941f1b41c280ed2d6c1ec8162fb8a62b Mon Sep 17 00:00:00 2001 From: Daniel Ziegenberg Date: Sun, 15 Aug 2021 12:59:50 +0200 Subject: [PATCH 0270/2828] ini_file: add multiple options with same name to ini file (#3033) * ini_file - prepare for fixing #273 - restructure tests - fix error message call: fail_json() takes 1 positional argument but 2 were given * ini_file - multiple values for one option (#273) - add module option 'exclusive' (boolean) for the abbility to add single option=value entries without overwriting existing options with the same name but different values - add abbility to define multiple options with the same name but different values * ini_file - add more tests for ini_file * ini_file - fix sanity tests * apply suggested changes: - rename 03-regressions.yml to 03-encoding.yml - fix typos - fix documentation * apply suggested changes: - test errors also for result is failed * apply suggested changes: - make state=absent also work with module option exclusive - add more tests for state=absent and module option exclusive * fix sanity test: - 02-values.yml:251:9: hyphens: too many spaces after hyphen * apply proposed changes * apply proposed changes from review - adjust version_added to 3.6.0 - small syntax change in changelog fragment --- ...ple_options_with_same_name_to_ini_file.yml | 3 + plugins/modules/files/ini_file.py | 265 +++-- .../targets/ini_file/tasks/main.yml | 552 +-------- .../targets/ini_file/tasks/tests/00-basic.yml | 38 + .../targets/ini_file/tasks/tests/01-value.yml | 589 ++++++++++ .../ini_file/tasks/tests/02-values.yml | 1013 +++++++++++++++++ .../ini_file/tasks/tests/03-encoding.yml | 41 + 7 files changed, 1902 insertions(+), 599 deletions(-) create mode 100644 changelogs/fragments/273-add_multiple_options_with_same_name_to_ini_file.yml create mode 100644 tests/integration/targets/ini_file/tasks/tests/00-basic.yml create mode 100644 tests/integration/targets/ini_file/tasks/tests/01-value.yml create mode 100644 
tests/integration/targets/ini_file/tasks/tests/02-values.yml create mode 100644 tests/integration/targets/ini_file/tasks/tests/03-encoding.yml diff --git a/changelogs/fragments/273-add_multiple_options_with_same_name_to_ini_file.yml b/changelogs/fragments/273-add_multiple_options_with_same_name_to_ini_file.yml new file mode 100644 index 0000000000..f32dc305b5 --- /dev/null +++ b/changelogs/fragments/273-add_multiple_options_with_same_name_to_ini_file.yml @@ -0,0 +1,3 @@ +minor_changes: + - ini_file - add module option ``exclusive`` (boolean) for the ability to add/remove single ``option=value`` entries without overwriting existing options with the same name but different values (https://github.com/ansible-collections/community.general/pull/3033). + - ini_file - add abbility to define multiple options with the same name but different values (https://github.com/ansible-collections/community.general/issues/273, https://github.com/ansible-collections/community.general/issues/1204). diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py index a9c2e290b0..f25cc063ff 100644 --- a/plugins/modules/files/ini_file.py +++ b/plugins/modules/files/ini_file.py @@ -47,7 +47,18 @@ options: description: - The string value to be associated with an I(option). - May be omitted when removing an I(option). + - Mutually exclusive with I(values). + - I(value=v) is equivalent to I(values=[v]). type: str + values: + description: + - The string value to be associated with an I(option). + - May be omitted when removing an I(option). + - Mutually exclusive with I(value). + - I(value=v) is equivalent to I(values=[v]). + type: list + elements: str + version_added: 3.6.0 backup: description: - Create a backup file including the timestamp information so you can get @@ -56,10 +67,25 @@ options: default: no state: description: - - If set to C(absent) the option or section will be removed if present instead of created. 
+ - If set to C(absent) and I(exclusive) set to C(yes) all matching I(option) lines are removed. + - If set to C(absent) and I(exclusive) set to C(no) the specified C(option=value) lines are removed, + but the other I(option)s with the same name are not touched. + - If set to C(present) and I(exclusive) set to C(no) the specified C(option=values) lines are added, + but the other I(option)s with the same name are not touched. + - If set to C(present) and I(exclusive) set to C(yes) all given C(option=values) lines will be + added and the other I(option)s with the same name are removed. type: str choices: [ absent, present ] default: present + exclusive: + description: + - If set to C(yes) (default), all matching I(option) lines are removed when I(state=absent), + or replaced when I(state=present). + - If set to C(no), only the specified I(value(s)) are added when I(state=present), + or removed when I(state=absent), and existing ones are not modified. + type: bool + default: yes + version_added: 3.6.0 no_extra_spaces: description: - Do not insert spaces before and after '=' symbol. 
@@ -103,6 +129,27 @@ EXAMPLES = r''' option: temperature value: cold backup: yes + +- name: Add "beverage=lemon juice" is in section "[drinks]" in specified file + community.general.ini_file: + path: /etc/conf + section: drinks + option: beverage + value: lemon juice + mode: '0600' + state: present + exclusive: no + +- name: Ensure multiple values "beverage=coke" and "beverage=pepsi" are in section "[drinks]" in specified file + community.general.ini_file: + path: /etc/conf + section: drinks + option: beverage + values: + - coke + - pepsi + mode: '0600' + state: present ''' import io @@ -117,24 +164,37 @@ from ansible.module_utils.common.text.converters import to_bytes, to_text def match_opt(option, line): option = re.escape(option) - return re.match('[#;]?( |\t)*%s( |\t)*(=|$)' % option, line) + return re.match('[#;]?( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) def match_active_opt(option, line): option = re.escape(option) - return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) + return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) -def do_ini(module, filename, section=None, option=None, value=None, - state='present', backup=False, no_extra_spaces=False, create=True, - allow_no_value=False): +def update_section_line(changed, section_lines, index, changed_lines, newline, msg): + option_changed = section_lines[index] != newline + changed = changed or option_changed + if option_changed: + msg = 'option changed' + section_lines[index] = newline + changed_lines[index] = 1 + return (changed, msg) + + +def do_ini(module, filename, section=None, option=None, values=None, + state='present', exclusive=True, backup=False, no_extra_spaces=False, + create=True, allow_no_value=False): if section is not None: section = to_text(section) if option is not None: option = to_text(option) - if value is not None: - value = to_text(value) + + # deduplicate entries in values + values_unique = [] + [values_unique.append(to_text(value)) for value in values if value 
not in values_unique and value is not None] + values = values_unique diff = dict( before='', @@ -145,7 +205,7 @@ def do_ini(module, filename, section=None, option=None, value=None, if not os.path.exists(filename): if not create: - module.fail_json(rc=257, msg='Destination %s does not exist !' % filename) + module.fail_json(rc=257, msg='Destination %s does not exist!' % filename) destpath = os.path.dirname(filename) if not os.path.exists(destpath) and not module.check_mode: os.makedirs(destpath) @@ -185,74 +245,134 @@ def do_ini(module, filename, section=None, option=None, value=None, section = fake_section_name within_section = not section - section_start = 0 + section_start = section_end = 0 msg = 'OK' if no_extra_spaces: assignment_format = u'%s=%s\n' else: assignment_format = u'%s = %s\n' + option_no_value_present = False + non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$')) + before = after = [] + section_lines = [] + for index, line in enumerate(ini_lines): + # find start and end of section if line.startswith(u'[%s]' % section): within_section = True section_start = index elif line.startswith(u'['): if within_section: - if state == 'present': - # insert missing option line at the end of the section - for i in range(index, 0, -1): - # search backwards for previous non-blank or non-comment line - if not non_blank_non_comment_pattern.match(ini_lines[i - 1]): - if option and value is not None: - ini_lines.insert(i, assignment_format % (option, value)) - msg = 'option added' - changed = True - elif option and value is None and allow_no_value: - ini_lines.insert(i, '%s\n' % option) - msg = 'option added' - changed = True - break - elif state == 'absent' and not option: - # remove the entire section - del ini_lines[section_start:index] - msg = 'section removed' + section_end = index + break + + before = ini_lines[0:section_start] + section_lines = ini_lines[section_start:section_end] + after = ini_lines[section_end:len(ini_lines)] + + # Keep 
track of changed section_lines + changed_lines = [0] * len(section_lines) + + # handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex + # + # 1. edit all lines where we have a option=value pair with a matching value in values[] + # 2. edit all the remaing lines where we have a matching option + # 3. delete remaining lines where we have a matching option + # 4. insert missing option line(s) at the end of the section + + if state == 'present' and option: + for index, line in enumerate(section_lines): + if match_opt(option, line): + match = match_opt(option, line) + if values and match.group(6) in values: + matched_value = match.group(6) + if not matched_value and allow_no_value: + # replace existing option with no value line(s) + newline = u'%s\n' % option + option_no_value_present = True + else: + # replace existing option=value line(s) + newline = assignment_format % (option, matched_value) + (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) + values.remove(matched_value) + elif not values and allow_no_value: + # replace existing option with no value line(s) + newline = u'%s\n' % option + (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) + option_no_value_present = True + break + + if state == 'present' and exclusive and not allow_no_value: + # override option with no value to option with value if not allow_no_value + if len(values) > 0: + for index, line in enumerate(section_lines): + if not changed_lines[index] and match_active_opt(option, section_lines[index]): + newline = assignment_format % (option, values.pop(0)) + (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) + if len(values) == 0: + break + # remove all remaining option occurrences from the rest of the section + for index in range(len(section_lines) - 1, 0, -1): + if not changed_lines[index] and 
match_active_opt(option, section_lines[index]): + del section_lines[index] + del changed_lines[index] + changed = True + msg = 'option changed' + + if state == 'present': + # insert missing option line(s) at the end of the section + for index in range(len(section_lines), 0, -1): + # search backwards for previous non-blank or non-comment line + if not non_blank_non_comment_pattern.match(section_lines[index - 1]): + if option and values: + # insert option line(s) + for element in values[::-1]: + # items are added backwards, so traverse the list backwards to not confuse the user + # otherwise some of their options might appear in reverse order for whatever fancy reason ¯\_(ツ)_/¯ + if element is not None: + # insert option=value line + section_lines.insert(index, assignment_format % (option, element)) + msg = 'option added' + changed = True + elif element is None and allow_no_value: + # insert option with no value line + section_lines.insert(index, u'%s\n' % option) + msg = 'option added' + changed = True + elif option and not values and allow_no_value and not option_no_value_present: + # insert option with no value line(s) + section_lines.insert(index, u'%s\n' % option) + msg = 'option added' changed = True break + + if state == 'absent': + if option: + if exclusive: + # delete all option line(s) with given option and ignore value + new_section_lines = [line for line in section_lines if not (match_active_opt(option, line))] + if section_lines != new_section_lines: + changed = True + msg = 'option changed' + section_lines = new_section_lines + elif not exclusive and len(values) > 0: + # delete specified option=value line(s) + new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(6) in values)] + if section_lines != new_section_lines: + changed = True + msg = 'option changed' + section_lines = new_section_lines else: - if within_section and option: - if state == 'present': - # change the existing option 
line - if match_opt(option, line): - if value is None and allow_no_value: - newline = u'%s\n' % option - else: - newline = assignment_format % (option, value) - option_changed = ini_lines[index] != newline - changed = changed or option_changed - if option_changed: - msg = 'option changed' - ini_lines[index] = newline - if option_changed: - # remove all possible option occurrences from the rest of the section - index = index + 1 - while index < len(ini_lines): - line = ini_lines[index] - if line.startswith(u'['): - break - if match_active_opt(option, line): - del ini_lines[index] - else: - index = index + 1 - break - elif state == 'absent': - # delete the existing line - if match_active_opt(option, line): - del ini_lines[index] - changed = True - msg = 'option changed' - break + # drop the entire section + section_lines = [] + msg = 'section removed' + changed = True + + # reassemble the ini_lines after manipulation + ini_lines = before + section_lines + after # remove the fake section line del ini_lines[0] @@ -261,9 +381,10 @@ def do_ini(module, filename, section=None, option=None, value=None, if not within_section and state == 'present': ini_lines.append(u'[%s]\n' % section) msg = 'section and option added' - if option and value is not None: - ini_lines.append(assignment_format % (option, value)) - elif option and value is None and allow_no_value: + if option and values: + for value in values: + ini_lines.append(assignment_format % (option, value)) + elif option and not values and allow_no_value: ini_lines.append(u'%s\n' % option) else: msg = 'only section added' @@ -303,12 +424,17 @@ def main(): section=dict(type='str', required=True), option=dict(type='str'), value=dict(type='str'), + values=dict(type='list', elements='str'), backup=dict(type='bool', default=False), state=dict(type='str', default='present', choices=['absent', 'present']), + exclusive=dict(type='bool', default=True), no_extra_spaces=dict(type='bool', default=False), 
allow_no_value=dict(type='bool', default=False), create=dict(type='bool', default=True) ), + mutually_exclusive=[ + ['value', 'values'] + ], add_file_common_args=True, supports_check_mode=True, ) @@ -317,16 +443,23 @@ def main(): section = module.params['section'] option = module.params['option'] value = module.params['value'] + values = module.params['values'] state = module.params['state'] + exclusive = module.params['exclusive'] backup = module.params['backup'] no_extra_spaces = module.params['no_extra_spaces'] allow_no_value = module.params['allow_no_value'] create = module.params['create'] - if state == 'present' and not allow_no_value and value is None: - module.fail_json("Parameter 'value' must be defined if state=present and allow_no_value=False") + if state == 'present' and not allow_no_value and value is None and not values: + module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.") - (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value) + if value is not None: + values = [value] + elif values is None: + values = [] + + (changed, backup_file, diff, msg) = do_ini(module, path, section, option, values, state, exclusive, backup, no_extra_spaces, create, allow_no_value) if not module.check_mode and os.path.exists(path): file_args = module.load_file_common_arguments(module.params) diff --git a/tests/integration/targets/ini_file/tasks/main.yml b/tests/integration/targets/ini_file/tasks/main.yml index 96c6771b9e..b3a1c85531 100644 --- a/tests/integration/targets/ini_file/tasks/main.yml +++ b/tests/integration/targets/ini_file/tasks/main.yml @@ -23,545 +23,31 @@ # along with Ansible. If not, see . 
- name: record the output directory - set_fact: output_file={{ remote_tmp_dir }}/foo.ini - -- name: add "fav=lemonade" is in section "[drinks]" in specified file - ini_file: - path: "{{ output_file }}" - section: drinks - option: fav - value: lemonade - register: result1 - -- name: verify ini_file 'changed' is true - assert: - that: - - result1 is changed - - result1.msg == 'section and option added' - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content set_fact: - expected1: | + output_file: "{{ remote_tmp_dir }}/foo.ini" + non_existing_file: "{{ remote_tmp_dir }}/bar.ini" - [drinks] - fav = lemonade - content1: "{{ output_content.content | b64decode }}" - -- name: Verify content of ini file is as expected - assert: - that: - - content1 == expected1 - -- name: add "fav=lemonade" is in section "[drinks]" again - ini_file: - path: "{{ output_file }}" - section: drinks - option: fav - value: lemonade - register: result2 - -- name: Ensure unchanged - assert: - that: - - result2 is not changed - - result2.msg == 'OK' - -- name: Ensure "beverage=coke" is in section "[drinks]" - ini_file: - path: "{{ output_file }}" - section: drinks - option: beverage - value: coke - register: result3 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - expected3: | - - [drinks] - fav = lemonade - beverage = coke - content3: "{{ output_content.content | b64decode }}" - -- name: assert 'changed' is true and content is OK - assert: - that: - - result3 is changed - - result3.msg == 'option added' - - content3 == expected3 - -- name: Remove option "beverage=coke" - ini_file: - path: "{{ output_file }}" - section: drinks - option: beverage - state: absent - register: result4 - -- name: read content from output file - slurp: - src: "{{ output_file }}" 
- register: output_content - -- name: get ini file content - set_fact: - content4: "{{ output_content.content | b64decode }}" - -- name: assert changed and content is as expected - assert: - that: - - result4 is changed - - result4.msg == 'option changed' - - content4 == expected1 - -- name: remove section 'drinks' - ini_file: - path: "{{ output_file }}" - section: drinks - state: absent - register: result5 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: get current ini file content - set_fact: - content5: "{{ output_content.content | b64decode }}" - -- name: assert changed and content is empty - assert: - that: - - result5 is changed - - result5.msg == 'section removed' - - content5 == "\n" - -# allow_no_value - -- name: test allow_no_value - ini_file: - path: "{{ output_file }}" - section: mysqld - option: skip-name - allow_no_value: yes - register: result6 - -- name: assert section and option added - assert: - that: - - result6 is changed - - result6.msg == 'section and option added' - -- name: test allow_no_value idempotency - ini_file: - path: "{{ output_file }}" - section: mysqld - option: skip-name - allow_no_value: yes - register: result6 - -- name: assert 'changed' false - assert: - that: - - result6 is not changed - - result6.msg == 'OK' - -- name: test create empty section - ini_file: - path: "{{ output_file }}" - section: new_empty_section - allow_no_value: yes - register: result6a - -- name: assert section added - assert: - that: - - result6a is changed - - result6a.msg == 'only section added' - -- name: test create empty section idempotency - ini_file: - path: "{{ output_file }}" - section: new_empty_section - allow_no_value: yes - register: result6a - -- name: assert 'changed' false - assert: - that: - - result6a is not changed - - result6a.msg == 'OK' - -- name: test remove empty section - ini_file: - state: absent - path: "{{ output_file }}" - section: new_empty_section - 
allow_no_value: yes - -- name: test allow_no_value with loop - ini_file: - path: "{{ output_file }}" - section: mysqld - option: "{{ item.o }}" - value: "{{ item.v | d(omit) }}" - allow_no_value: yes - with_items: - - { o: "skip-name-resolve" } - - { o: "max_connections", v: "500" } - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - content7: "{{ output_content.content | b64decode }}" - expected7: | - - [mysqld] - skip-name - skip-name-resolve - max_connections = 500 - -- name: Verify content of ini file is as expected - assert: - that: - - content7 == expected7 - -- name: change option with no value to option with value - ini_file: - path: "{{ output_file }}" - section: mysqld - option: skip-name - value: myvalue - register: result8 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - content8: "{{ output_content.content | b64decode }}" - expected8: | - - [mysqld] - skip-name = myvalue - skip-name-resolve - max_connections = 500 - -- name: assert 'changed' and msg 'option changed' and content is as expected - assert: - that: - - result8 is changed - - result8.msg == 'option changed' - - content8 == expected8 - -- name: change option with value to option with no value - ini_file: - path: "{{ output_file }}" - section: mysqld - option: skip-name - allow_no_value: yes - register: result9 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - content9: "{{ output_content.content | b64decode }}" - expected9: | - - [mysqld] - skip-name - skip-name-resolve - max_connections = 500 - -- name: assert 'changed' and msg 'option changed' and content is as expected - assert: - that: - - result9 
is changed - - result9.msg == 'option changed' - - content9 == expected9 - -- name: Remove option with no value - ini_file: - path: "{{ output_file }}" - section: mysqld - option: skip-name-resolve - state: absent - register: result10 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - content10: "{{ output_content.content | b64decode }}" - expected10: | - - [mysqld] - skip-name - max_connections = 500 - -- name: assert 'changed' and msg 'option changed' and content is as expected - assert: - that: - - result10 is changed - - result10.msg == 'option changed' - - content10 == expected10 - -- name: Clean test file - copy: - content: "" - dest: "{{ output_file }}" - force: yes - -- name: Ensure "beverage=coke" is created within no section - ini_file: - section: - path: "{{ output_file }}" - option: beverage - value: coke - register: result11 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - expected11: "beverage = coke\n\n" - content11: "{{ output_content.content | b64decode }}" - -- name: assert 'changed' is true and content is OK (no section) - assert: - that: - - result11 is changed - - result11.msg == 'option added' - - content11 == expected11 - -- name: Ensure "beverage=coke" is modified as "beverage=water" within no section - ini_file: - path: "{{ output_file }}" - option: beverage - value: water - section: - register: result12 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - expected12: "beverage = water\n\n" - - content12: "{{ output_content.content | b64decode }}" - -- name: assert 'changed' is true and content is OK (no section) - assert: - that: - - result12 is 
changed - - result12.msg == 'option changed' - - content12 == expected12 - -- name: remove option 'beverage' within no section - ini_file: - section: - path: "{{ output_file }}" - option: beverage - state: absent - register: result13 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: get current ini file content - set_fact: - content13: "{{ output_content.content | b64decode }}" - -- name: assert changed (no section) - assert: - that: - - result13 is changed - - result13.msg == 'option changed' - - content13 == "\n" - -- name: Check add option without section before existing section +- name: include tasks block: - - name: Add option with section - ini_file: + + - name: include tasks to perform basic tests + include_tasks: tests/00-basic.yml + + - name: reset output file + file: path: "{{ output_file }}" - section: drinks - option: beverage - value: water - - name: Add option without section - ini_file: - path: "{{ output_file }}" - section: - option: like - value: tea - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - expected14: | - like = tea - - [drinks] - beverage = water - content14: "{{ output_content.content | b64decode }}" - -- name: Verify content of ini file is as expected - assert: - that: - - content14 == expected14 - -- name: Check add option with empty string value - block: - - name: Remove drinks - ini_file: - path: "{{ output_file }}" - section: drinks - state: absent - - name: Remove tea - ini_file: - path: "{{ output_file }}" - section: - option: like - value: tea state: absent - # See https://github.com/ansible-collections/community.general/issues/3031 - - name: Tests with empty strings - ini_file: + - name: include tasks to perform tests with parameter "value" + include_tasks: tests/01-value.yml + + - name: reset output file + file: path: "{{ 
output_file }}" - section: "{{ item.section | d('extensions') }}" - option: "{{ item.option }}" - value: "" - allow_no_value: "{{ item.no_value | d(omit) }}" - loop: - - option: evolve - - option: regress - - section: foobar - option: foo - no_value: true - - option: improve - no_value: true + state: absent -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content + - name: include tasks to perform tests with parameter "values" + include_tasks: tests/02-values.yml -- name: set expected content and get current ini file content - set_fact: - expected15: "\n[extensions]\nevolve = \nregress = \nimprove = \n[foobar]\nfoo = \n" - content15: "{{ output_content.content | b64decode }}" -- debug: var=content15 -- name: Verify content of ini file is as expected - assert: - that: - - content15 == expected15 - -- name: Create starting ini file - copy: - # The content below is the following text file with BOM: - # [section1] - # var1=aaa - # var2=bbb - # [section2] - # var3=ccc - content: !!binary | - 77u/W3NlY3Rpb24xXQp2YXIxPWFhYQp2YXIyPWJiYgpbc2VjdGlvbjJdCnZhcjM9Y2NjCg== - dest: "{{ output_file }}" -- name: Test ini breakage - ini_file: - path: "{{ output_file }}" - section: section1 - option: var4 - value: 0 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - expected16: "[section1]\nvar1=aaa\nvar2=bbb\nvar4 = 0\n[section2]\nvar3=ccc\n" - content16: "{{ output_content.content | b64decode }}" -- debug: - var: content16 -- name: Verify content of ini file is as expected - assert: - that: - - content16 == expected16 - -# Regression test for https://github.com/ansible-collections/community.general/pull/2578#issuecomment-868092282 -- name: Create UTF-8 test file - copy: - content: !!binary | - W2FwcDptYWluXQphdmFpbGFibGVfbGFuZ3VhZ2VzID0gZW4gZnIgZXMgZGUgcHQgamEgbHQgemhf - 
VFcgaWQgZGEgcHRfQlIgcnUgc2wgaXQgbmxfTkwgdWsgdGEgc2kgY3MgbmIgaHUKIyBGdWxsIGxh - bmd1YWdlIG5hbWVzIGluIG5hdGl2ZSBsYW5ndWFnZSAoY29tbWEgc2VwYXJhdGVkKQphdmFpbGFi - bGVfbGFuZ3VhZ2VzX2Z1bGwgPSBFbmdsaXNoLCBGcmFuw6dhaXMsIEVzcGHDsW9sLCBEZXV0c2No - LCBQb3J0dWd1w6pzLCDml6XmnKzoqp4sIExpZXR1dm9zLCDkuK3mlocsIEluZG9uZXNpYSwgRGFu - c2ssIFBvcnR1Z3XDqnMgKEJyYXNpbCksINCg0YPRgdGB0LrQuNC5LCBTbG92ZW7FocSNaW5hLCBJ - dGFsaWFubywgTmVkZXJsYW5kcywg0KPQutGA0LDRl9C90YHRjNC60LAsIOCupOCuruCuv+CutOCv - jSwg4LeD4LeS4LaC4LeE4La9LCDEjGVza3ksIEJva23DpWwsIE1hZ3lhcgo= - dest: '{{ output_file }}' -- name: Add entries - ini_file: - section: "{{ item.section }}" - option: "{{ item.option }}" - value: "{{ item.value }}" - path: '{{ output_file }}' - create: true - loop: - - section: app:main - option: sqlalchemy.url - value: postgresql://app:secret@database/app - - section: handler_filelog - option: args - value: (sys.stderr,) - - section: handler_filelog - option: class - value: StreamHandler - - section: handler_exc_handler - option: args - value: (sys.stderr,) - - section: båz - option: fföø - value: ḃâŗ - - section: båz - option: fföø - value: bar + - name: include tasks to test regressions + include_tasks: tests/03-encoding.yml diff --git a/tests/integration/targets/ini_file/tasks/tests/00-basic.yml b/tests/integration/targets/ini_file/tasks/tests/00-basic.yml new file mode 100644 index 0000000000..8f8d345f7e --- /dev/null +++ b/tests/integration/targets/ini_file/tasks/tests/00-basic.yml @@ -0,0 +1,38 @@ +--- +## basics + +- name: test-basic 1 - specify both "value" and "values" and fail + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + value: lemonade + values: + - coke + - sprite + register: result_basic_1 + ignore_errors: true + +- name: test-basic 1 - verify error message + assert: + that: + - result_basic_1 is not changed + - result_basic_1 is failed + - "result_basic_1.msg == 'parameters are mutually exclusive: value|values'" + + +- name: test-basic 2 - set "create=no" on 
non-existing file and fail + ini_file: + path: "{{ non_existing_file }}" + section: food + create: false + value: banana + register: result_basic_2 + ignore_errors: true + +- name: test-basic 2 - verify error message + assert: + that: + - result_basic_2 is not changed + - result_basic_2 is failed + - result_basic_2.msg == "Destination {{ non_existing_file }} does not exist!" diff --git a/tests/integration/targets/ini_file/tasks/tests/01-value.yml b/tests/integration/targets/ini_file/tasks/tests/01-value.yml new file mode 100644 index 0000000000..93499cc63d --- /dev/null +++ b/tests/integration/targets/ini_file/tasks/tests/01-value.yml @@ -0,0 +1,589 @@ +--- + +## testing value + +- name: test-value 1 - set "state=present" and "value=null" and "allow_no_value=false" and fail + ini_file: + path: "{{ output_file }}" + section: cars + option: audi + value: null + allow_no_value: false + register: result_value_1 + ignore_errors: true + +- name: test-value 1 - verify error message + assert: + that: + - result_value_1 is not changed + - result_value_1 is failed + - result_value_1.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False." + + +- name: test-value 2 - set "state=present" and omit "value" and "allow_no_value=false" and fail + ini_file: + path: "{{ output_file }}" + section: cars + option: audi + allow_no_value: false + register: result_value_2 + ignore_errors: true + +- name: test-value 2 - verify error message + assert: + that: + - result_value_2 is not changed + - result_value_2 is failed + - result_value_2.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False." 
+ + +- name: test-value 3 - add "fav=lemonade" in section "[drinks]" in specified file + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + value: lemonade + register: result3 + +- name: test-value 3 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 3 - set expected content and get current ini file content + set_fact: + expected3: | + + [drinks] + fav = lemonade + content3: "{{ output_content.content | b64decode }}" + +- name: test-value 3 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result3 is changed + - result3.msg == 'section and option added' + - content3 == expected3 + + +- name: test-value 4 - add "fav=lemonade" is in section "[drinks]" again + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + value: lemonade + register: result4 + +- name: test-value 4 - Ensure unchanged + assert: + that: + - result4 is not changed + - result4.msg == 'OK' + + +- name: test-value 5 - Ensure "beverage=coke" is in section "[drinks]" + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + value: coke + register: result5 + +- name: test-value 5 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 5 - set expected content and get current ini file content + set_fact: + expected5: | + + [drinks] + fav = lemonade + beverage = coke + content5: "{{ output_content.content | b64decode }}" + +- name: test-value 5 - assert 'changed' is true and content is OK + assert: + that: + - result5 is changed + - result5.msg == 'option added' + - content5 == expected5 + + +- name: test-value 6 - Remove option "beverage=coke" + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + state: absent + register: result6 + +- name: test-value 6 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + 
+- name: test-value 6 - set expected content and get current ini file content + set_fact: + expected6: | + + [drinks] + fav = lemonade + content6: "{{ output_content.content | b64decode }}" + +- name: test-value 6 - assert 'changed' is true and content is as expected + assert: + that: + - result6 is changed + - result6.msg == 'option changed' + - content6 == expected6 + + +- name: test-value 7 - remove section 'drinks' + ini_file: + path: "{{ output_file }}" + section: drinks + state: absent + register: result7 + +- name: test-value 7 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 7 - get current ini file content + set_fact: + content7: "{{ output_content.content | b64decode }}" + +- name: test-value 7 - assert 'changed' is true and content is empty + assert: + that: + - result7 is changed + - result7.msg == 'section removed' + - content7 == "\n" + + +# allow_no_value + +- name: test-value 8 - test allow_no_value + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name + allow_no_value: yes + register: result8 + +- name: test-value 8 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 8 - set expected content and get current ini file content + set_fact: + content8: "{{ output_content.content | b64decode }}" + expected8: | + + [mysqld] + skip-name + +- name: test-value 8 - assert 'changed' is true and section and option added + assert: + that: + - result8 is changed + - result8.msg == 'section and option added' + - content8 == expected8 + + +- name: test-value 9 - test allow_no_value idempotency + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name + allow_no_value: yes + register: result9 + +- name: test-value 9 - assert 'changed' is false + assert: + that: + - result9 is not changed + - result9.msg == 'OK' + + +- name: test-value 10 - test create empty section + ini_file: + path: "{{ 
output_file }}" + section: new_empty_section + allow_no_value: yes + register: result10 + +- name: test-value 10 - assert 'changed' is true and section added + assert: + that: + - result10 is changed + - result10.msg == 'only section added' + + +- name: test-value 11 - test create empty section idempotency + ini_file: + path: "{{ output_file }}" + section: new_empty_section + allow_no_value: yes + register: result11 + +- name: test-value 11 - assert 'changed' is false + assert: + that: + - result11 is not changed + - result11.msg == 'OK' + + +- name: test-value 12 - test remove empty section + ini_file: + state: absent + path: "{{ output_file }}" + section: new_empty_section + allow_no_value: yes + +- name: test-value 12 - test allow_no_value with loop + ini_file: + path: "{{ output_file }}" + section: mysqld + option: "{{ item.o }}" + value: "{{ item.v | d(omit) }}" + allow_no_value: yes + loop: + - { o: "skip-name-resolve" } + - { o: "max_connections", v: "500" } + +- name: test-value 12 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 12 - set expected content and get current ini file content + set_fact: + content12: "{{ output_content.content | b64decode }}" + expected12: | + + [mysqld] + skip-name + skip-name-resolve + max_connections = 500 + +- name: test-value 12 - Verify content of ini file is as expected + assert: + that: + - content12 == expected12 + + +- name: test-value 13 - change option with no value to option with value + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name + value: myvalue + register: result13 + +- name: test-value 13 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 13 - set expected content and get current ini file content + set_fact: + content13: "{{ output_content.content | b64decode }}" + expected13: | + + [mysqld] + skip-name = myvalue + skip-name-resolve + 
max_connections = 500 + +- name: test-value 13 - assert 'changed' and msg 'option changed' and content is as expected + assert: + that: + - result13 is changed + - result13.msg == 'option changed' + - content13 == expected13 + + +- name: test-value 14 - change option with value to option with no value + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name + allow_no_value: yes + register: result14 + +- name: test-value 14 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 14 - set expected content and get current ini file content + set_fact: + content14: "{{ output_content.content | b64decode }}" + expected14: | + + [mysqld] + skip-name + skip-name-resolve + max_connections = 500 + +- name: test-value 14 - assert 'changed' is true and msg 'option changed' and content is as expected + assert: + that: + - result14 is changed + - result14.msg == 'option changed' + - content14 == expected14 + + +- name: test-value 15 - Remove option with no value + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name-resolve + state: absent + register: result15 + +- name: test-value 15 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 15 - set expected content and get current ini file content + set_fact: + content15: "{{ output_content.content | b64decode }}" + expected15: | + + [mysqld] + skip-name + max_connections = 500 + +- name: test-value 15 - assert 'changed' is true and msg 'option changed' and content is as expected + assert: + that: + - result15 is changed + - result15.msg == 'option changed' + - content15 == expected15 + + +- name: test-value 16 - Clean test file + copy: + content: "" + dest: "{{ output_file }}" + force: yes + +- name: test-value 16 - Ensure "beverage=coke" is created within no section + ini_file: + section: + path: "{{ output_file }}" + option: beverage + value: coke + register: 
result16 + +- name: test-value 16 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 16 - set expected content and get current ini file content + set_fact: + expected16: |+ + beverage = coke + + content16: "{{ output_content.content | b64decode }}" + +- name: test-value 16 - assert 'changed' is true and content is OK (no section) + assert: + that: + - result16 is changed + - result16.msg == 'option added' + - content16 == expected16 + + +- name: test-value 17 - Ensure "beverage=coke" is modified as "beverage=water" within no section + ini_file: + path: "{{ output_file }}" + option: beverage + value: water + section: + register: result17 + +- name: test-value 17 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 17 - set expected content and get current ini file content + set_fact: + expected17: |+ + beverage = water + + content17: "{{ output_content.content | b64decode }}" + +- name: test-value 17 - assert 'changed' is true and content is OK (no section) + assert: + that: + - result17 is changed + - result17.msg == 'option changed' + - content17 == expected17 + + +- name: test-value 18 - remove option 'beverage' within no section + ini_file: + section: + path: "{{ output_file }}" + option: beverage + state: absent + register: result18 + +- name: test-value 18 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 18 - get current ini file content + set_fact: + content18: "{{ output_content.content | b64decode }}" + +- name: test-value 18 - assert 'changed' is true and option is removed (no section) + assert: + that: + - result18 is changed + - result18.msg == 'option changed' + - content18 == "\n" + + +- name: test-value 19 - Check add option without section before existing section + block: + - name: test-value 19 - Add option with section + ini_file: + path: "{{ output_file 
}}" + section: drinks + option: beverage + value: water + - name: test-value 19 - Add option without section + ini_file: + path: "{{ output_file }}" + section: + option: like + value: tea + +- name: test-value 19 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 19 - set expected content and get current ini file content + set_fact: + expected19: | + like = tea + + [drinks] + beverage = water + content19: "{{ output_content.content | b64decode }}" + +- name: test-value 19 - Verify content of ini file is as expected + assert: + that: + - content19 == expected19 + + +- name: test-value 20 - Check add option with empty string value + block: + - name: test-value 20 - Remove drinks + ini_file: + path: "{{ output_file }}" + section: drinks + state: absent + - name: test-value 20 - Remove tea + ini_file: + path: "{{ output_file }}" + section: + option: like + value: tea + state: absent + # See https://github.com/ansible-collections/community.general/issues/3031 + - name: test-value 20 - Tests with empty strings + ini_file: + path: "{{ output_file }}" + section: "{{ item.section | d('extensions') }}" + option: "{{ item.option }}" + value: "" + allow_no_value: "{{ item.no_value | d(omit) }}" + loop: + - option: evolve + - option: regress + - section: foobar + option: foo + no_value: true + - option: improve + no_value: true + +- name: test-value 20 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 20 - set expected content and get current ini file content + set_fact: + expected20: |+ + + [extensions] + evolve = + regress = + improve = + [foobar] + foo = + content20: "{{ output_content.content | b64decode }}" + +- name: test-value 20 - Verify content of ini file is as expected + assert: + that: + - content20 == expected20 + + +- name: test-value 21 - Create starting ini file + copy: + # The content below is the following text file with BOM: + # 
[section1] + # var1=aaa + # var2=bbb + # [section2] + # var3=ccc + content: !!binary | + 77u/W3NlY3Rpb24xXQp2YXIxPWFhYQp2YXIyPWJiYgpbc2VjdGlvbjJdCnZhcjM9Y2NjCg== + dest: "{{ output_file }}" + +- name: test-value 21 - Test ini breakage + ini_file: + path: "{{ output_file }}" + section: section1 + option: var4 + value: 0 + register: result21 + +- name: test-value 21 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 21 - set expected content and get current ini file content + set_fact: + expected21: | + [section1] + var1=aaa + var2=bbb + var4 = 0 + [section2] + var3=ccc + content21: "{{ output_content.content | b64decode }}" + +- name: test-value 21 - Verify content of ini file is as expected + assert: + that: + - result21 is changed + - result21.msg == 'option added' + - content21 == expected21 diff --git a/tests/integration/targets/ini_file/tasks/tests/02-values.yml b/tests/integration/targets/ini_file/tasks/tests/02-values.yml new file mode 100644 index 0000000000..c3ef6b61a6 --- /dev/null +++ b/tests/integration/targets/ini_file/tasks/tests/02-values.yml @@ -0,0 +1,1013 @@ +--- + +## testing values + +- name: "test-values 1 - set 'state=present' and 'values=[]' and 'allow_no_value=false' and fail" + ini_file: + path: "{{ output_file }}" + section: cars + option: audi + values: [] + allow_no_value: false + register: result1 + ignore_errors: true + +- name: test-values 1 - verify error message + assert: + that: + - result1 is not changed + - result1 is failed + - result1.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False." 
+ + +- name: "test-values 2 - set 'state=present' and omit 'values' and 'allow_no_value=false' and fail" + ini_file: + path: "{{ output_file }}" + section: cars + option: audi + allow_no_value: false + register: result2 + ignore_errors: true + +- name: test-values 2 - verify error message + assert: + that: + - result2 is not changed + - result2 is failed + - result2.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False." + + +- name: "test-values 3 - ensure 'fav=lemonade' and 'fav=cocktail' is 'present' in section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + - cocktail + state: present + register: result3 + +- name: test-values 3 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 3 - set expected content and get current ini file content + set_fact: + expected3: | + + [drinks] + fav = lemonade + fav = cocktail + content3: "{{ output_content.content | b64decode }}" + +- name: test-values 3 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result3 is changed + - result3.msg == 'section and option added' + - content3 == expected3 + + +- name: "test-values 4 - remove option 'fav=lemonade' from section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: absent + exclusive: false + register: result4 + +- name: test-values 4 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 4 - set expected content and get current ini file content + set_fact: + expected4: | + + [drinks] + fav = cocktail + content4: "{{ output_content.content | b64decode }}" + +- name: test-values 4 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result4 is changed + - result4.msg == 
'option changed' + - content4 == expected4 + + +- name: "test-values 5 - add option 'fav=lemonade' in section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: present + exclusive: false + register: result5 + +- name: test-values 5 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 5 - set expected content and get current ini file content + set_fact: + expected5: | + + [drinks] + fav = cocktail + fav = lemonade + content5: "{{ output_content.content | b64decode }}" + +- name: test-values 5 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result5 is changed + - result5.msg == 'option added' + - content5 == expected5 + + +- name: "test-values 6 - ensure 'fav=lemonade' and 'fav=cocktail' is 'present' in section '[drinks]' and check for idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + - cocktail + state: present + register: result6 + +- name: test-values 6 - Ensure unchanged + assert: + that: + - result6 is not changed + - result6.msg == 'OK' + + +- name: "test-values 7 - ensure 'fav=cocktail' and 'fav=lemonade' (list reverse order) is 'present' in section '[drinks]' and check for idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + - lemonade + state: present + register: result7 + +- name: test-values 7 - Ensure unchanged + assert: + that: + - result7 is not changed + - result7.msg == 'OK' + + +- name: "test-values 8 - add option 'fav=lemonade' in section '[drinks]' again and ensure idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: present + exclusive: false + register: result8 + +- name: test-values 8 - Ensure unchanged + assert: + that: + - result8 is not changed + - result8.msg == 'OK' + 
+ +- name: "test-values 9 - ensure only 'fav=lemonade' is 'present' in section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: present + register: result9 + +- name: test-values 9 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 9 - set expected content and get current ini file content + set_fact: + expected9: | + + [drinks] + fav = lemonade + content9: "{{ output_content.content | b64decode }}" + +- name: test-values 9 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result9 is changed + - result9.msg == 'option changed' + - content9 == expected9 + + +- name: "test-values 10 - remove non-existent 'fav=cocktail' from section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + state: absent + register: result10 + +- name: test-values 10 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 10 - set expected content and get current ini file content + set_fact: + expected10: | + + [drinks] + content10: "{{ output_content.content | b64decode }}" + + +- name: test-values 10 - assert 'changed' is true and content is as expected + assert: + that: + - result10 is changed + - result10.msg == 'option changed' + - content10 == expected10 + + +- name: "test-values 11 - Ensure 'fav=lemonade' and 'beverage=coke' is 'present' in section '[drinks]'" + block: + - name: "test-values 11 - resetting ini_file: Ensure 'fav=lemonade' is 'present' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: present + - name: "test-values 11 - Ensure 'beverage=coke' is 'present' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + values: + - coke + state: present + register: 
result11 + +- name: test-values 11 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 11 - set expected content and get current ini file content + set_fact: + expected11: | + + [drinks] + fav = lemonade + beverage = coke + content11: "{{ output_content.content | b64decode }}" + +- name: test-values 11 - assert 'changed' is true and content is OK + assert: + that: + - result11 is changed + - result11.msg == 'option added' + - content11 == expected11 + + +- name: "test-values 12 - add option 'fav=lemonade' in section '[drinks]' again and ensure idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: present + exclusive: false + register: result12 + +- name: test-values 12 - Ensure unchanged + assert: + that: + - result12 is not changed + - result12.msg == 'OK' + + +- name: "test-values 13 - add option 'fav=cocktail' in section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + state: present + exclusive: false + register: result13 + +- name: test-values 13 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 13 - set expected content and get current ini file content + set_fact: + expected13: | + + [drinks] + fav = lemonade + beverage = coke + fav = cocktail + content13: "{{ output_content.content | b64decode }}" + +- name: test-values 13 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result13 is changed + - result13.msg == 'option added' + - content13 == expected13 + + +- name: "test-values 14 - Ensure 'refreshment=[water, juice, soft drink]' is 'present' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: refreshment + values: + - water + - juice + - soft drink + state: present + register: result14 + +- name: 
test-values 14 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 14 - set expected content and get current ini file content + set_fact: + expected14: | + + [drinks] + fav = lemonade + beverage = coke + fav = cocktail + refreshment = water + refreshment = juice + refreshment = soft drink + content14: "{{ output_content.content | b64decode }}" + +- name: test-values 14 - assert 'changed' is true and content is OK + assert: + that: + - result14 is changed + - result14.msg == 'option added' + - content14 == expected14 + + +- name: "test-values 15 - ensure 'fav=lemonade' and 'fav=cocktail' is 'present' in section '[drinks]' and check for idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + - cocktail + state: present + register: result15 + +- name: test-values 15 - Ensure unchanged + assert: + that: + - result15 is not changed + - result15.msg == 'OK' + + +- name: "test-values 16 - ensure 'fav=cocktail' and 'fav=lemonade' (list reverse order) is 'present' in section '[drinks]' and check for idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + - lemonade + state: present + register: result16 + +- name: test-values 16 - Ensure unchanged + assert: + that: + - result16 is not changed + - result16.msg == 'OK' + + +- name: "test-values 17 - Ensure option 'refreshment' is 'absent' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: refreshment + state: absent + register: result17 + +- name: test-values 17 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 17 - set expected content and get current ini file content + set_fact: + expected17: | + + [drinks] + fav = lemonade + beverage = coke + fav = cocktail + content17: "{{ output_content.content | b64decode }}" + +- name: test-values 17 - assert 
'changed' is true and content is as expected + assert: + that: + - result17 is changed + - result17.msg == 'option changed' + - content17 == expected17 + + +- name: "test-values 18 - Ensure 'beverage=coke' is 'abesent' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + state: absent + register: result18 + +- name: test-values 18 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 18 - set expected content and get current ini file content + set_fact: + expected18: | + + [drinks] + fav = lemonade + fav = cocktail + content18: "{{ output_content.content | b64decode }}" + +- name: test-values 18 - assert 'changed' is true and content is as expected + assert: + that: + - result18 is changed + - result18.msg == 'option changed' + - content18 == expected18 + + +- name: "test-values 19 - Ensure non-existant 'beverage=coke' is 'abesent' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + values: + - coke + state: absent + register: result19 + +- name: test-values 19 - Ensure unchanged + assert: + that: + - result19 is not changed + - result19.msg == 'OK' + + +- name: test-values 20 - remove section 'drinks' + ini_file: + path: "{{ output_file }}" + section: drinks + state: absent + register: result20 + +- name: test-values 20 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 20 - get current ini file content + set_fact: + content20: "{{ output_content.content | b64decode }}" + +- name: test-values 20 - assert 'changed' is true and content is empty + assert: + that: + - result20 is changed + - result20.msg == 'section removed' + - content20 == "\n" + + +- name: "test-values 21 - Ensure 'refreshment=[water, juice, soft drink, juice]' (duplicates removed) is 'present' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: 
drinks + option: refreshment + values: + - water + - juice + - soft drink + - juice + state: present + register: result21 + +- name: test-values 21 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 21 - set expected content and get current ini file content + set_fact: + expected21: | + + [drinks] + refreshment = water + refreshment = juice + refreshment = soft drink + content21: "{{ output_content.content | b64decode }}" + +- name: test-values 21 - assert 'changed' is true and content is OK + assert: + that: + - result21 is changed + - result21.msg == 'section and option added' + - content21 == expected21 + + +- name: test-values 22 - Create starting ini file + copy: + content: | + + # Some comment to test + [mysqld] + connect_timeout = 300 + max_connections = 1000 + [section1] + var1 = aaa + # comment in section + var2 = foo + # var2 = bar + + [section2] + var3 = ccc + # comment after section + dest: "{{ output_file }}" + +- name: "test-values 22 - Ensure 'skip-name' with 'allow_no_value' is 'present' in section '[mysqld]' test allow_no_value" + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name + allow_no_value: true + state: present + register: result22 + +- name: test-values 22 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 22 - set expected content and get current ini file content + set_fact: + expected22: | + + # Some comment to test + [mysqld] + connect_timeout = 300 + max_connections = 1000 + skip-name + [section1] + var1 = aaa + # comment in section + var2 = foo + # var2 = bar + + [section2] + var3 = ccc + # comment after section + content22: "{{ output_content.content | b64decode }}" + +- name: test-values 22 - assert 'changed' is true and content is OK and option added + assert: + that: + - result22 is changed + - result22.msg == 'option added' + - content22 == expected22 + + +- name: 
"test-values 23 - Ensure 'var2=[foo, foobar]' is 'present' in section '[section1]'" + ini_file: + path: "{{ output_file }}" + section: section1 + option: var2 + values: + - foo + - foobar + state: present + register: result23 + +- name: test-values 23 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 23 - set expected content and get current ini file content + set_fact: + content23: "{{ output_content.content | b64decode }}" + expected23: | + + # Some comment to test + [mysqld] + connect_timeout = 300 + max_connections = 1000 + skip-name + [section1] + var1 = aaa + # comment in section + var2 = foo + var2 = foobar + # var2 = bar + + [section2] + var3 = ccc + # comment after section +- name: test-values 23 - assert 'changed' and msg 'option added' and content is as expected + assert: + that: + - result23 is changed + - result23.msg == 'option added' + - content23 == expected23 + + +- name: "test-values 24 - Ensure 'var2=[foo, foobar, bar]' is 'present' in section '[section1]' replacing commented option 'var2=bar'" + ini_file: + path: "{{ output_file }}" + section: section1 + option: var2 + values: + - foo + - bar + - foobar + state: present + register: result24 + +- name: test-values 24 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 24 - set expected content and get current ini file content + set_fact: + content24: "{{ output_content.content | b64decode }}" + expected24: | + + # Some comment to test + [mysqld] + connect_timeout = 300 + max_connections = 1000 + skip-name + [section1] + var1 = aaa + # comment in section + var2 = foo + var2 = foobar + var2 = bar + + [section2] + var3 = ccc + # comment after section +- name: test-values 24 - assert 'added' and msg 'option changed' and content is as expected + assert: + that: + - result24 is changed + - result24.msg == 'option changed' + - content24 == expected24 + + +- name: 
test-values 25 - Clean test file + copy: + content: "" + dest: "{{ output_file }}" + force: yes + +- name: "test-values 25 - Ensure 'beverage=[coke, pepsi]' is created within no section" + ini_file: + section: + path: "{{ output_file }}" + option: beverage + values: + - coke + - pepsi + state: present + register: result25 + +- name: test-values 25 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 25 - set expected content and get current ini file content + set_fact: + expected25: |+ + beverage = coke + beverage = pepsi + + content25: "{{ output_content.content | b64decode }}" + +- name: test-values 25 - assert 'changed' is true and content is OK (no section) + assert: + that: + - result25 is changed + - result25.msg == 'option added' + - content25 == expected25 + + +- name: "test-values 26 - Ensure 'beverage=coke' and 'beverage=pepsi' are modified within no section" + ini_file: + path: "{{ output_file }}" + option: beverage + values: + - water + - orange juice + section: + state: present + exclusive: true + register: result26 + +- name: test-values 26 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 26 - set expected content and get current ini file content + set_fact: + expected26: |+ + beverage = water + beverage = orange juice + + content26: "{{ output_content.content | b64decode }}" + +- name: test-values 26 - assert 'changed' is true and content is OK (no section) + assert: + that: + - result26 is changed + - result26.msg == 'option changed' + - content26 == expected26 + + +- name: "test-values 27 - ensure option 'beverage' is 'absent' within no section" + ini_file: + section: + path: "{{ output_file }}" + option: beverage + state: absent + register: result27 + +- name: test-values 27 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 27 - get current ini file 
content + set_fact: + content27: "{{ output_content.content | b64decode }}" + +- name: test-values 27 - assert changed (no section) + assert: + that: + - result27 is changed + - result27.msg == 'option changed' + - content27 == "\n" + + +- name: "test-values 28 - Ensure option 'present' without section before existing section" + block: + - name: test-values 28 - ensure option present within section + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + values: + - water + - orange juice + state: present + + - name: test-values 28 - ensure option present without section + ini_file: + path: "{{ output_file }}" + section: + option: like + values: + - tea + - coffee + state: present + +- name: test-values 28 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 28 - set expected content and get current ini file content + set_fact: + expected28: | + like = tea + like = coffee + + [drinks] + beverage = water + beverage = orange juice + content28: "{{ output_content.content | b64decode }}" + +- name: test-values 28 - Verify content of ini file is as expected + assert: + that: + - content28 == expected28 + + +- name: test-value 29 - Create starting ini file + copy: + content: | + [drinks] + fav = cocktail + beverage = water + fav = lemonade + beverage = orange juice + dest: "{{ output_file }}" + +- name: "test-value 29 - Test 'state=absent' with 'exclusive=true' with multiple options in ini_file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + state: absent + register: result29 + +- name: test-value 29 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 29 - set expected content and get current ini file content + set_fact: + expected29: | + [drinks] + beverage = water + beverage = orange juice + content29: "{{ output_content.content | b64decode }}" + +- name: test-value 29 - 
Verify content of ini file is as expected + assert: + that: + - result29 is changed + - result29.msg == 'option changed' + - content29 == expected29 + + +- name: test-value 30 - Create starting ini file + copy: + content: | + [drinks] + fav = cocktail + beverage = water + fav = lemonade + beverage = orange juice + dest: "{{ output_file }}" + +- name: "test-value 30 - Test 'state=absent' with 'exclusive=false' with multiple options in ini_file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + state: absent + exclusive: false + register: result30 + +- name: test-value 30 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 30 - set expected content and get current ini file content + set_fact: + expected30: | + [drinks] + beverage = water + fav = lemonade + beverage = orange juice + content30: "{{ output_content.content | b64decode }}" + +- name: test-value 30 - Verify content of ini file is as expected + assert: + that: + - result30 is changed + - result30.msg == 'option changed' + - content30 == expected30 + + +- name: test-value 31 - Create starting ini file + copy: + content: | + [drinks] + fav = cocktail + beverage = water + fav = lemonade + beverage = orange juice + dest: "{{ output_file }}" + +- name: "test-value 31 - Test 'state=absent' with 'exclusive=true' and no value given with multiple options in ini_file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + state: absent + register: result31 + +- name: test-value 31 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 31 - set expected content and get current ini file content + set_fact: + expected31: | + [drinks] + beverage = water + beverage = orange juice + content31: "{{ output_content.content | b64decode }}" + +- name: test-value 31 - Verify content of ini file is as expected + assert: + that: + - result31 
is changed + - result31.msg == 'option changed' + - content31 == expected31 + + +- name: test-value 32 - Create starting ini file + copy: + content: | + [drinks] + fav = cocktail + beverage = water + fav = lemonade + beverage = orange juice + dest: "{{ output_file }}" + +- name: "test-value 32 - Test 'state=absent' with 'exclusive=false' and no value given with multiple options in ini_file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + state: absent + exclusive: false + register: result32 + diff: true + +- name: test-value 32 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 32 - set expected content and get current ini file content + set_fact: + expected32: | + [drinks] + fav = cocktail + beverage = water + fav = lemonade + beverage = orange juice + content32: "{{ output_content.content | b64decode }}" + +- name: test-value 32 - Verify content of ini file is as expected + assert: + that: + - result32 is not changed + - result32.msg == 'OK' + - content32 == expected32 diff --git a/tests/integration/targets/ini_file/tasks/tests/03-encoding.yml b/tests/integration/targets/ini_file/tasks/tests/03-encoding.yml new file mode 100644 index 0000000000..6280ae1ffb --- /dev/null +++ b/tests/integration/targets/ini_file/tasks/tests/03-encoding.yml @@ -0,0 +1,41 @@ +--- + +# Regression test for https://github.com/ansible-collections/community.general/pull/2578#issuecomment-868092282 +- name: Create UTF-8 test file + copy: + content: !!binary | + W2FwcDptYWluXQphdmFpbGFibGVfbGFuZ3VhZ2VzID0gZW4gZnIgZXMgZGUgcHQgamEgbHQgemhf + VFcgaWQgZGEgcHRfQlIgcnUgc2wgaXQgbmxfTkwgdWsgdGEgc2kgY3MgbmIgaHUKIyBGdWxsIGxh + bmd1YWdlIG5hbWVzIGluIG5hdGl2ZSBsYW5ndWFnZSAoY29tbWEgc2VwYXJhdGVkKQphdmFpbGFi + bGVfbGFuZ3VhZ2VzX2Z1bGwgPSBFbmdsaXNoLCBGcmFuw6dhaXMsIEVzcGHDsW9sLCBEZXV0c2No + LCBQb3J0dWd1w6pzLCDml6XmnKzoqp4sIExpZXR1dm9zLCDkuK3mlocsIEluZG9uZXNpYSwgRGFu + 
c2ssIFBvcnR1Z3XDqnMgKEJyYXNpbCksINCg0YPRgdGB0LrQuNC5LCBTbG92ZW7FocSNaW5hLCBJ + dGFsaWFubywgTmVkZXJsYW5kcywg0KPQutGA0LDRl9C90YHRjNC60LAsIOCupOCuruCuv+CutOCv + jSwg4LeD4LeS4LaC4LeE4La9LCDEjGVza3ksIEJva23DpWwsIE1hZ3lhcgo= + dest: '{{ output_file }}' +- name: Add entries + ini_file: + section: "{{ item.section }}" + option: "{{ item.option }}" + value: "{{ item.value }}" + path: '{{ output_file }}' + create: true + loop: + - section: app:main + option: sqlalchemy.url + value: postgresql://app:secret@database/app + - section: handler_filelog + option: args + value: (sys.stderr,) + - section: handler_filelog + option: class + value: StreamHandler + - section: handler_exc_handler + option: args + value: (sys.stderr,) + - section: båz + option: fföø + value: ḃâŗ + - section: båz + option: fföø + value: bar From 432c89148721b4a4598cb20d9569aa9bbdac1f2b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 15 Aug 2021 21:11:16 +1000 Subject: [PATCH 0271/2828] Add ipv4 example to linode inventory docs (#3200) * Add ipv4 example to linode inventory * Update plugins/inventory/linode.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- plugins/inventory/linode.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index c2dcac5392..177bd0a42a 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -78,6 +78,10 @@ groups: webservers: "'web' in (tags|list)" mailservers: "'mail' in (tags|list)" compose: + # By default, Ansible tries to connect to the label of the instance. 
+ # Since that might not be a valid name to connect to, you can + # replace it with the first IPv4 address of the linode as follows: + ansible_ssh_host: ipv4[0] ansible_port: 2222 ''' From 16945d3847f65319278ec460760fa1d0422b8cb5 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 16 Aug 2021 22:23:06 +1200 Subject: [PATCH 0272/2828] vdo - refactor (#3191) * refactor to vdo * adjusted if condition * added changelog fragment * Update plugins/modules/system/vdo.py Co-authored-by: Felix Fontein * adjustements per the PR * more occurrences of bool compared with yes or no * Update changelogs/fragments/3191-vdo-refactor.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/3191-vdo-refactor.yml | 4 + plugins/modules/system/vdo.py | 255 ++++++--------------- 2 files changed, 76 insertions(+), 183 deletions(-) create mode 100644 changelogs/fragments/3191-vdo-refactor.yml diff --git a/changelogs/fragments/3191-vdo-refactor.yml b/changelogs/fragments/3191-vdo-refactor.yml new file mode 100644 index 0000000000..fe3fcfe7b1 --- /dev/null +++ b/changelogs/fragments/3191-vdo-refactor.yml @@ -0,0 +1,4 @@ +minor_changes: + - vdo - minor refactoring of the code (https://github.com/ansible-collections/community.general/pull/3191). +bugfixes: + - vdo - boolean arguments now compared with proper ``true`` and ``false`` values instead of string representations like ``"yes"`` or ``"no"`` (https://github.com/ansible-collections/community.general/pull/3191). diff --git a/plugins/modules/system/vdo.py b/plugins/modules/system/vdo.py index 0b4fca306d..ab5cf4e400 100644 --- a/plugins/modules/system/vdo.py +++ b/plugins/modules/system/vdo.py @@ -315,7 +315,7 @@ except ImportError: # # @return vdolist A list of currently created VDO volumes. 
def inventory_vdos(module, vdocmd): - rc, vdostatusout, err = module.run_command("%s status" % (vdocmd)) + rc, vdostatusout, err = module.run_command([vdocmd, "status"]) # if rc != 0: # module.fail_json(msg="Inventorying VDOs failed: %s" @@ -323,15 +323,13 @@ def inventory_vdos(module, vdocmd): vdolist = [] - if (rc == 2 and - re.findall(r"vdoconf.yml does not exist", err, re.MULTILINE)): + if rc == 2 and re.findall(r"vdoconf\.yml does not exist", err, re.MULTILINE): # If there is no /etc/vdoconf.yml file, assume there are no # VDO volumes. Return an empty list of VDO volumes. return vdolist if rc != 0: - module.fail_json(msg="Inventorying VDOs failed: %s" - % vdostatusout, rc=rc, err=err) + module.fail_json(msg="Inventorying VDOs failed: %s" % vdostatusout, rc=rc, err=err) vdostatusyaml = yaml.load(vdostatusout) if vdostatusyaml is None: @@ -346,7 +344,7 @@ def inventory_vdos(module, vdocmd): def list_running_vdos(module, vdocmd): - rc, vdolistout, err = module.run_command("%s list" % (vdocmd)) + rc, vdolistout, err = module.run_command([vdocmd, "list"]) runningvdolist = filter(None, vdolistout.split('\n')) return runningvdolist @@ -360,36 +358,30 @@ def list_running_vdos(module, vdocmd): # # @return vdocmdoptions A string to be used in a 'vdo ' command. 
def start_vdo(module, vdoname, vdocmd): - rc, out, err = module.run_command("%s start --name=%s" % (vdocmd, vdoname)) + rc, out, err = module.run_command([vdocmd, "start", "--name=%s" % vdoname]) if rc == 0: module.log("started VDO volume %s" % vdoname) - return rc def stop_vdo(module, vdoname, vdocmd): - rc, out, err = module.run_command("%s stop --name=%s" % (vdocmd, vdoname)) + rc, out, err = module.run_command([vdocmd, "stop", "--name=%s" % vdoname]) if rc == 0: module.log("stopped VDO volume %s" % vdoname) - return rc def activate_vdo(module, vdoname, vdocmd): - rc, out, err = module.run_command("%s activate --name=%s" - % (vdocmd, vdoname)) + rc, out, err = module.run_command([vdocmd, "activate", "--name=%s" % vdoname]) if rc == 0: module.log("activated VDO volume %s" % vdoname) - return rc def deactivate_vdo(module, vdoname, vdocmd): - rc, out, err = module.run_command("%s deactivate --name=%s" - % (vdocmd, vdoname)) + rc, out, err = module.run_command([vdocmd, "deactivate", "--name=%s" % vdoname]) if rc == 0: module.log("deactivated VDO volume %s" % vdoname) - return rc @@ -397,32 +389,31 @@ def add_vdooptions(params): vdocmdoptions = "" options = [] - if ('logicalsize' in params) and (params['logicalsize'] is not None): + if params.get('logicalsize') is not None: options.append("--vdoLogicalSize=" + params['logicalsize']) - if (('blockmapcachesize' in params) and - (params['blockmapcachesize'] is not None)): + if params.get('blockmapcachesize') is not None: options.append("--blockMapCacheSize=" + params['blockmapcachesize']) - if ('readcache' in params) and (params['readcache'] == 'enabled'): + if params.get('readcache') == 'enabled': options.append("--readCache=enabled") - if ('readcachesize' in params) and (params['readcachesize'] is not None): + if params.get('readcachesize') is not None: options.append("--readCacheSize=" + params['readcachesize']) - if ('slabsize' in params) and (params['slabsize'] is not None): + if params.get('slabsize') is not None: 
options.append("--vdoSlabSize=" + params['slabsize']) - if ('emulate512' in params) and (params['emulate512']): + if params.get('emulate512'): options.append("--emulate512=enabled") - if ('indexmem' in params) and (params['indexmem'] is not None): + if params.get('indexmem') is not None: options.append("--indexMem=" + params['indexmem']) - if ('indexmode' in params) and (params['indexmode'] == 'sparse'): + if params.get('indexmode') == 'sparse': options.append("--sparseIndex=enabled") - if ('force' in params) and (params['force']): + if params.get('force'): options.append("--force") # Entering an invalid thread config results in a cryptic @@ -431,23 +422,21 @@ def add_vdooptions(params): # output a more helpful message, but one would have to log # onto that system to read the error. For now, heed the thread # limit warnings in the DOCUMENTATION section above. - if ('ackthreads' in params) and (params['ackthreads'] is not None): + if params.get('ackthreads') is not None: options.append("--vdoAckThreads=" + params['ackthreads']) - if ('biothreads' in params) and (params['biothreads'] is not None): + if params.get('biothreads') is not None: options.append("--vdoBioThreads=" + params['biothreads']) - if ('cputhreads' in params) and (params['cputhreads'] is not None): + if params.get('cputhreads') is not None: options.append("--vdoCpuThreads=" + params['cputhreads']) - if ('logicalthreads' in params) and (params['logicalthreads'] is not None): + if params.get('logicalthreads') is not None: options.append("--vdoLogicalThreads=" + params['logicalthreads']) - if (('physicalthreads' in params) and - (params['physicalthreads'] is not None)): + if params.get('physicalthreads') is not None: options.append("--vdoPhysicalThreads=" + params['physicalthreads']) - vdocmdoptions = ' '.join(options) return vdocmdoptions @@ -531,31 +520,24 @@ def run_module(): # Since this is a creation of a new VDO volume, it will contain all # all of the parameters given by the playbook; the rest 
will # assume default values. - options = module.params - vdocmdoptions = add_vdooptions(options) - rc, out, err = module.run_command("%s create --name=%s --device=%s %s" - % (vdocmd, desiredvdo, device, - vdocmdoptions)) + vdocmdoptions = add_vdooptions(module.params) + rc, out, err = module.run_command( + [vdocmd, "create", "--name=%s" % desiredvdo, "--device=%s" % device] + vdocmdoptions) if rc == 0: result['changed'] = True else: - module.fail_json(msg="Creating VDO %s failed." - % desiredvdo, rc=rc, err=err) + module.fail_json(msg="Creating VDO %s failed." % desiredvdo, rc=rc, err=err) - if (module.params['compression'] == 'disabled'): - rc, out, err = module.run_command("%s disableCompression --name=%s" - % (vdocmd, desiredvdo)) + if module.params['compression'] == 'disabled': + rc, out, err = module.run_command([vdocmd, "disableCompression", "--name=%s" % desiredvdo]) - if ((module.params['deduplication'] is not None) and - module.params['deduplication'] == 'disabled'): - rc, out, err = module.run_command("%s disableDeduplication " - "--name=%s" - % (vdocmd, desiredvdo)) + if module.params['deduplication'] == 'disabled': + rc, out, err = module.run_command([vdocmd, "disableDeduplication", "--name=%s" % desiredvdo]) - if module.params['activated'] == 'no': + if module.params['activated'] is False: deactivate_vdo(module, desiredvdo, vdocmd) - if module.params['running'] == 'no': + if module.params['running'] is False: stop_vdo(module, desiredvdo, vdocmd) # Print a post-run list of VDO volumes in the result object. @@ -564,8 +546,8 @@ def run_module(): module.exit_json(**result) # Modify the current parameters of a VDO that exists. 
- if (desiredvdo in vdolist) and (state == 'present'): - rc, vdostatusoutput, err = module.run_command("%s status" % (vdocmd)) + if desiredvdo in vdolist and state == 'present': + rc, vdostatusoutput, err = module.run_command([vdocmd, "status"]) vdostatusyaml = yaml.load(vdostatusoutput) # An empty dictionary to contain dictionaries of VDO statistics @@ -630,7 +612,7 @@ def run_module(): diffparams = {} # Check for differences between the playbook parameters and the - # current parameters. This will need a comparison function; + # current parameters. This will need a comparison function; # since AnsibleModule params are all strings, compare them as # strings (but if it's None; skip). for key in currentparams.keys(): @@ -641,10 +623,7 @@ def run_module(): if diffparams: vdocmdoptions = add_vdooptions(diffparams) if vdocmdoptions: - rc, out, err = module.run_command("%s modify --name=%s %s" - % (vdocmd, - desiredvdo, - vdocmdoptions)) + rc, out, err = module.run_command([vdocmd, "modify", "--name=%s" % desiredvdo] + vdocmdoptions) if rc == 0: result['changed'] = True else: @@ -653,107 +632,36 @@ def run_module(): if 'deduplication' in diffparams.keys(): dedupemod = diffparams['deduplication'] - if dedupemod == 'disabled': - rc, out, err = module.run_command("%s " - "disableDeduplication " - "--name=%s" - % (vdocmd, desiredvdo)) + dedupeparam = "disableDeduplication" if dedupemod == 'disabled' else "enableDeduplication" + rc, out, err = module.run_command([vdocmd, dedupeparam, "--name=%s" % desiredvdo]) - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing deduplication on " - "VDO volume %s failed." - % desiredvdo, rc=rc, err=err) - - if dedupemod == 'enabled': - rc, out, err = module.run_command("%s " - "enableDeduplication " - "--name=%s" - % (vdocmd, desiredvdo)) - - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing deduplication on " - "VDO volume %s failed." 
- % desiredvdo, rc=rc, err=err) + if rc == 0: + result['changed'] = True + else: + module.fail_json(msg="Changing deduplication on VDO volume %s failed." % desiredvdo, rc=rc, err=err) if 'compression' in diffparams.keys(): compressmod = diffparams['compression'] - if compressmod == 'disabled': - rc, out, err = module.run_command("%s disableCompression " - "--name=%s" - % (vdocmd, desiredvdo)) - - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing compression on " - "VDO volume %s failed." - % desiredvdo, rc=rc, err=err) - - if compressmod == 'enabled': - rc, out, err = module.run_command("%s enableCompression " - "--name=%s" - % (vdocmd, desiredvdo)) - - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing compression on " - "VDO volume %s failed." - % desiredvdo, rc=rc, err=err) + compressparam = "disableCompression" if compressmod == 'disabled' else "enableCompression" + rc, out, err = module.run_command([vdocmd, compressparam, "--name=%s" % desiredvdo]) + if rc == 0: + result['changed'] = True + else: + module.fail_json(msg="Changing compression on VDO volume %s failed." % desiredvdo, rc=rc, err=err) if 'writepolicy' in diffparams.keys(): writepolmod = diffparams['writepolicy'] - if writepolmod == 'auto': - rc, out, err = module.run_command("%s " - "changeWritePolicy " - "--name=%s " - "--writePolicy=%s" - % (vdocmd, - desiredvdo, - writepolmod)) + rc, out, err = module.run_command([ + vdocmd, + "changeWritePolicy", + "--name=%s" % desiredvdo, + "--writePolicy=%s" % writepolmod, + ]) - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing write policy on " - "VDO volume %s failed." 
- % desiredvdo, rc=rc, err=err) - - if writepolmod == 'sync': - rc, out, err = module.run_command("%s " - "changeWritePolicy " - "--name=%s " - "--writePolicy=%s" - % (vdocmd, - desiredvdo, - writepolmod)) - - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing write policy on " - "VDO volume %s failed." - % desiredvdo, rc=rc, err=err) - - if writepolmod == 'async': - rc, out, err = module.run_command("%s " - "changeWritePolicy " - "--name=%s " - "--writePolicy=%s" - % (vdocmd, - desiredvdo, - writepolmod)) - - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing write policy on " - "VDO volume %s failed." - % desiredvdo, rc=rc, err=err) + if rc == 0: + result['changed'] = True + else: + module.fail_json(msg="Changing write policy on VDO volume %s failed." % desiredvdo, rc=rc, err=err) # Process the size parameters, to determine of a growPhysical or # growLogical operation needs to occur. @@ -771,19 +679,15 @@ def run_module(): diffsizeparams = {} for key in sizeparams.keys(): - if module.params[key] is not None: - if str(sizeparams[key]) != module.params[key]: - diffsizeparams[key] = module.params[key] + if module.params[key] is not None and str(sizeparams[key]) != module.params[key]: + diffsizeparams[key] = module.params[key] if module.params['growphysical']: physdevice = module.params['device'] - rc, devsectors, err = module.run_command("blockdev --getsz %s" - % (physdevice)) + rc, devsectors, err = module.run_command([module.get_bin_path("blockdev"), "--getsz", physdevice]) devblocks = (int(devsectors) / 8) dmvdoname = ('/dev/mapper/' + desiredvdo) - currentvdostats = (processedvdos[desiredvdo] - ['VDO statistics'] - [dmvdoname]) + currentvdostats = processedvdos[desiredvdo]['VDO statistics'][dmvdoname] currentphysblocks = currentvdostats['physical blocks'] # Set a growPhysical threshold to grow only when there is @@ -795,34 +699,25 @@ def run_module(): if currentphysblocks > growthresh: result['changed'] = 
True - rc, out, err = module.run_command("%s growPhysical --name=%s" - % (vdocmd, desiredvdo)) + rc, out, err = module.run_command([vdocmd, "growPhysical", "--name=%s" % desiredvdo]) if 'logicalsize' in diffsizeparams.keys(): result['changed'] = True - vdocmdoptions = ("--vdoLogicalSize=" + - diffsizeparams['logicalsize']) - rc, out, err = module.run_command("%s growLogical --name=%s %s" - % (vdocmd, - desiredvdo, - vdocmdoptions)) + rc, out, err = module.run_command([vdocmd, "growLogical", "--name=%s" % desiredvdo, "--vdoLogicalSize=%s" % diffsizeparams['logicalsize']]) vdoactivatestatus = processedvdos[desiredvdo]['Activate'] - if ((module.params['activated'] == 'no') and - (vdoactivatestatus == 'enabled')): + if module.params['activated'] is False and vdoactivatestatus == 'enabled': deactivate_vdo(module, desiredvdo, vdocmd) if not result['changed']: result['changed'] = True - if ((module.params['activated'] == 'yes') and - (vdoactivatestatus == 'disabled')): + if module.params['activated'] and vdoactivatestatus == 'disabled': activate_vdo(module, desiredvdo, vdocmd) if not result['changed']: result['changed'] = True - if ((module.params['running'] == 'no') and - (desiredvdo in runningvdolist)): + if module.params['running'] is False and desiredvdo in runningvdolist: stop_vdo(module, desiredvdo, vdocmd) if not result['changed']: result['changed'] = True @@ -834,10 +729,7 @@ def run_module(): # the activate_vdo() operation succeeded, as 'vdoactivatestatus' # will have the activated status prior to the activate_vdo() # call. 
- if (((vdoactivatestatus == 'enabled') or - (module.params['activated'] == 'yes')) and - (module.params['running'] == 'yes') and - (desiredvdo not in runningvdolist)): + if (vdoactivatestatus == 'enabled' or module.params['activated']) and module.params['running'] and desiredvdo not in runningvdolist: start_vdo(module, desiredvdo, vdocmd) if not result['changed']: result['changed'] = True @@ -850,14 +742,12 @@ def run_module(): module.exit_json(**result) # Remove a desired VDO that currently exists. - if (desiredvdo in vdolist) and (state == 'absent'): - rc, out, err = module.run_command("%s remove --name=%s" - % (vdocmd, desiredvdo)) + if desiredvdo in vdolist and state == 'absent': + rc, out, err = module.run_command([vdocmd, "remove", "--name=%s" % desiredvdo]) if rc == 0: result['changed'] = True else: - module.fail_json(msg="Removing VDO %s failed." - % desiredvdo, rc=rc, err=err) + module.fail_json(msg="Removing VDO %s failed." % desiredvdo, rc=rc, err=err) # Print a post-run list of VDO volumes in the result object. vdolist = inventory_vdos(module, vdocmd) @@ -869,8 +759,7 @@ def run_module(): # not exist. Print a post-run list of VDO volumes in the result # object. 
vdolist = inventory_vdos(module, vdocmd) - module.log("received request to remove non-existent VDO volume %s" - % desiredvdo) + module.log("received request to remove non-existent VDO volume %s" % desiredvdo) module.exit_json(**result) From 8a4cdd2b8a23cbbbc3dd9beb22c53fe75aafaf76 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 16 Aug 2021 22:24:15 +1200 Subject: [PATCH 0273/2828] slack - minor refactoring and pythonifying (#3205) * slack - minor refactoring and pythonifying * added changelog fragment * Update changelogs/fragments/3205-slack-minor-refactor.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/3205-slack-minor-refactor.yaml | 2 ++ plugins/modules/notification/slack.py | 32 +++++++++---------- 2 files changed, 18 insertions(+), 16 deletions(-) create mode 100644 changelogs/fragments/3205-slack-minor-refactor.yaml diff --git a/changelogs/fragments/3205-slack-minor-refactor.yaml b/changelogs/fragments/3205-slack-minor-refactor.yaml new file mode 100644 index 0000000000..5337350f69 --- /dev/null +++ b/changelogs/fragments/3205-slack-minor-refactor.yaml @@ -0,0 +1,2 @@ +minor_changes: + - slack - minor refactoring (https://github.com/ansible-collections/community.general/pull/3205). 
diff --git a/plugins/modules/notification/slack.py b/plugins/modules/notification/slack.py index 197e5f9498..3023bd9d8a 100644 --- a/plugins/modules/notification/slack.py +++ b/plugins/modules/notification/slack.py @@ -264,12 +264,12 @@ def is_valid_hex_color(color_choice): def escape_quotes(text): - '''Backslash any quotes within text.''' + """Backslash any quotes within text.""" return "".join(escape_table.get(c, c) for c in text) def recursive_escape_quotes(obj, keys): - '''Recursively escape quotes inside supplied keys inside block kit objects''' + """Recursively escape quotes inside supplied keys inside block kit objects""" if isinstance(obj, dict): escaped = {} for k, v in obj.items(): @@ -284,7 +284,7 @@ def recursive_escape_quotes(obj, keys): return escaped -def build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names, +def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, parse, color, attachments, blocks, message_id): payload = {} if color == "normal" and text is not None: @@ -344,7 +344,7 @@ def build_payload_for_slack(module, text, channel, thread_id, username, icon_url return payload -def get_slack_message(module, domain, token, channel, ts): +def get_slack_message(module, token, channel, ts): headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', @@ -372,7 +372,7 @@ def do_notify_slack(module, domain, token, payload): use_webapi = False if token.count('/') >= 2: # New style webhook token - slack_uri = SLACK_INCOMING_WEBHOOK % (token) + slack_uri = SLACK_INCOMING_WEBHOOK % token elif re.match(r'^xox[abp]-\S+$', token): slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI use_webapi = True @@ -396,7 +396,7 @@ def do_notify_slack(module, domain, token, payload): if use_webapi: obscured_incoming_webhook = slack_uri else: - obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]') + 
obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % '[obscured]' module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg'])) # each API requires different handling @@ -409,21 +409,21 @@ def do_notify_slack(module, domain, token, payload): def main(): module = AnsibleModule( argument_spec=dict( - domain=dict(type='str', required=False, default=None), + domain=dict(type='str'), token=dict(type='str', required=True, no_log=True), - msg=dict(type='str', required=False, default=None), - channel=dict(type='str', default=None), - thread_id=dict(type='str', default=None), + msg=dict(type='str'), + channel=dict(type='str'), + thread_id=dict(type='str'), username=dict(type='str', default='Ansible'), icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), - icon_emoji=dict(type='str', default=None), + icon_emoji=dict(type='str'), link_names=dict(type='int', default=1, choices=[0, 1]), - parse=dict(type='str', default=None, choices=['none', 'full']), + parse=dict(type='str', choices=['none', 'full']), validate_certs=dict(default=True, type='bool'), color=dict(type='str', default='normal'), - attachments=dict(type='list', elements='dict', required=False, default=None), + attachments=dict(type='list', elements='dict'), blocks=dict(type='list', elements='dict'), - message_id=dict(type='str', default=None), + message_id=dict(type='str'), ), supports_check_mode=True, ) @@ -453,7 +453,7 @@ def main(): # if updating an existing message, we can check if there's anything to update if message_id is not None: changed = False - msg = get_slack_message(module, domain, token, channel, message_id) + msg = get_slack_message(module, token, channel, message_id) for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'): if msg.get(key) != module.params.get(key): changed = True @@ -465,7 +465,7 @@ def main(): elif module.check_mode: module.exit_json(changed=changed) - payload = build_payload_for_slack(module, 
text, channel, thread_id, username, icon_url, icon_emoji, link_names, + payload = build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, parse, color, attachments, blocks, message_id) slack_response = do_notify_slack(module, domain, token, payload) From fccae19177152817b21b87ebef223decbc83e3e8 Mon Sep 17 00:00:00 2001 From: Kellin Date: Tue, 17 Aug 2021 01:05:02 -0400 Subject: [PATCH 0274/2828] Linode inventory plugin typo fixes (#3218) - Fix a typo in the Linode inventory plugin unit tests - Fix some style issues in descriptions where punctuation was missing Signed-off-by: Kellin --- plugins/inventory/linode.py | 2 +- tests/unit/plugins/inventory/test_linode.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 177bd0a42a..5af9effd52 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -23,7 +23,7 @@ DOCUMENTATION = r''' - constructed options: plugin: - description: marks this as an instance of the 'linode' plugin + description: Marks this as an instance of the 'linode' plugin. required: true choices: ['linode', 'community.general.linode'] access_token: diff --git a/tests/unit/plugins/inventory/test_linode.py b/tests/unit/plugins/inventory/test_linode.py index ab75c6c9fc..f2627d850d 100644 --- a/tests/unit/plugins/inventory/test_linode.py +++ b/tests/unit/plugins/inventory/test_linode.py @@ -62,7 +62,7 @@ def test_empty_config_query_options(inventory): assert regions == types == tags == [] -def test_conig_query_options(inventory): +def test_config_query_options(inventory): regions, types, tags = inventory._get_query_options({ 'regions': ['eu-west', 'us-east'], 'types': ['g5-standard-2', 'g6-standard-2'], From f19e191467bdb62d35636f5989e36221a1be3503 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 17 Aug 2021 07:32:02 +0200 Subject: [PATCH 0275/2828] Temporarily disable datadog_downtime unit tests. 
(#3222) --- ...test_datadog_downtime.py => test_datadog_downtime.py.disabled} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/unit/plugins/modules/monitoring/{test_datadog_downtime.py => test_datadog_downtime.py.disabled} (100%) diff --git a/tests/unit/plugins/modules/monitoring/test_datadog_downtime.py b/tests/unit/plugins/modules/monitoring/test_datadog_downtime.py.disabled similarity index 100% rename from tests/unit/plugins/modules/monitoring/test_datadog_downtime.py rename to tests/unit/plugins/modules/monitoring/test_datadog_downtime.py.disabled From 41101e55a09c618fae5ed16c005cfff64ebe5c0c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 17 Aug 2021 20:43:18 +1200 Subject: [PATCH 0276/2828] module_helper - implemented classmethod to start the module (#3206) * module_helper - implemented classmethod to start the module plus minor change * rolled back the __changed__() method * added changelog fragment * Update plugins/module_utils/mh/base.py Co-authored-by: Sviatoslav Sydorenko * no capt Piccards allowed in the base class * removed extra piccards Co-authored-by: Sviatoslav Sydorenko --- changelogs/fragments/3206-mh-classmethod.yaml | 2 ++ plugins/module_utils/mh/base.py | 4 ++++ plugins/modules/packaging/language/cpanm.py | 3 +-- plugins/modules/system/xfconf.py | 3 +-- 4 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/3206-mh-classmethod.yaml diff --git a/changelogs/fragments/3206-mh-classmethod.yaml b/changelogs/fragments/3206-mh-classmethod.yaml new file mode 100644 index 0000000000..19cd8a6739 --- /dev/null +++ b/changelogs/fragments/3206-mh-classmethod.yaml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper module_utils - added classmethod to trigger the execution of MH modules (https://github.com/ansible-collections/community.general/pull/3206). 
diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py index a120c2556e..90c228b306 100644 --- a/plugins/module_utils/mh/base.py +++ b/plugins/module_utils/mh/base.py @@ -63,3 +63,7 @@ class ModuleHelperBase(object): if 'failed' not in output: output['failed'] = False self.module.exit_json(changed=self.has_changed(), **output) + + @classmethod + def execute(cls, module=None): + cls(module).run() diff --git a/plugins/modules/packaging/language/cpanm.py b/plugins/modules/packaging/language/cpanm.py index b8ab7e1a2f..d2c4d5a2ec 100644 --- a/plugins/modules/packaging/language/cpanm.py +++ b/plugins/modules/packaging/language/cpanm.py @@ -248,8 +248,7 @@ class CPANMinus(CmdMixin, ModuleHelper): def main(): - cpanm = CPANMinus() - cpanm.run() + CPANMinus.execute() if __name__ == '__main__': diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index 001613fc23..baf6bdd494 100644 --- a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -277,8 +277,7 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): def main(): - xfconf = XFConfProperty() - xfconf.run() + XFConfProperty.execute() if __name__ == '__main__': From 6ac410b3f617074bb7af86050d8adfa4d495a3e6 Mon Sep 17 00:00:00 2001 From: Ricky White Date: Wed, 18 Aug 2021 03:26:44 -0400 Subject: [PATCH 0277/2828] tss: added fix for bug report in issue #3192 (#3199) * Added fix for bug report in issue #3192 * Added changelog fragment * Typo fix * Added Importerror to exception - as req by linters * Moved the conditional import statement to try/except block --- ...gin-bugfix-for-backwards-compatibility.yml | 3 +++ plugins/lookup/tss.py | 26 ++++++++++++++----- 2 files changed, 22 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml diff --git a/changelogs/fragments/3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml 
b/changelogs/fragments/3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml new file mode 100644 index 0000000000..3909286487 --- /dev/null +++ b/changelogs/fragments/3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml @@ -0,0 +1,3 @@ +bugfixes: + - tss lookup plugin - fixed backwards compatibility issue with ``python-tss-sdk`` version <=0.0.5 + (https://github.com/ansible-collections/community.general/issues/3192, https://github.com/ansible-collections/community.general/pull/3199). diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index d5e6ea6dcd..65f8b114f6 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -118,15 +118,23 @@ from ansible.errors import AnsibleError, AnsibleOptionsError sdk_is_missing = False try: - from thycotic import __version__ as sdk_version - from thycotic.secrets.server import ( - SecretServer, - SecretServerError, - PasswordGrantAuthorizer, - ) + from thycotic.secrets.server import SecretServer, SecretServerError except ImportError: sdk_is_missing = True +# Added for backwards compatibility - See issue #3192 +# https://github.com/ansible-collections/community.general/issues/3192 +try: + from thycotic import __version__ as sdk_version +except ImportError: + sdk_version = "0.0.5" + +try: + from thycotic.secrets.server import PasswordGrantAuthorizer + sdK_version_below_v1 = False +except ImportError: + sdK_version_below_v1 = True + from ansible.utils.display import Display from ansible.plugins.lookup import LookupBase @@ -138,9 +146,13 @@ class LookupModule(LookupBase): @staticmethod def Client(server_parameters): - if LooseVersion(sdk_version) < LooseVersion('1.0.0'): + if LooseVersion(sdk_version) < LooseVersion('1.0.0') or sdK_version_below_v1: return SecretServer(**server_parameters) else: + # The Password Authorizer became available in v1.0.0 and beyond. + # Import only if sdk_version requires it. 
+ # from thycotic.secrets.server import PasswordGrantAuthorizer + authorizer = PasswordGrantAuthorizer( server_parameters["base_url"], server_parameters["username"], From c7fccb2c0168ed6d95de9cafe831e2b5bd4b0c9b Mon Sep 17 00:00:00 2001 From: Jacob Date: Thu, 19 Aug 2021 15:13:10 -0400 Subject: [PATCH 0278/2828] redfish_info: Include Status property for GetChassisThermals (#3233) * redfish_info: Include Status property for GetChassisThermals Include Status property for Thermal objects when querying Thermal properties via GetChassisThermals command. FIXES #3232 * fixup for rename of fragments file * Update changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml Co-authored-by: Ajpantuso Co-authored-by: Ajpantuso --- .../3233-include-thermal-sensor-status-via-redfish_info.yaml | 2 ++ plugins/module_utils/redfish_utils.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml diff --git a/changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml b/changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml new file mode 100644 index 0000000000..baed989fbf --- /dev/null +++ b/changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_info - include ``Status`` property for Thermal objects when querying Thermal properties via ``GetChassisThermals`` command (https://github.com/ansible-collections/community.general/issues/3232). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index c861820edf..0f8e6630ba 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1887,7 +1887,7 @@ class RedfishUtils(object): 'LowerThresholdCritical', 'LowerThresholdFatal', 'LowerThresholdNonCritical', 'MaxReadingRangeTemp', 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem', - 'SensorNumber'] + 'SensorNumber', 'Status'] # Go through list for chassis_uri in self.chassis_uris: From bcccf4e388b573f68bc9f93572e3679675788473 Mon Sep 17 00:00:00 2001 From: Martin Vician Date: Fri, 20 Aug 2021 12:54:29 +0100 Subject: [PATCH 0279/2828] Add option for domain authorization (#3228) Use DomainPasswordGrantAuthorizer if parameter `domain` is used. --- .../3228-tss-domain-authorization.yml | 3 ++ plugins/lookup/tss.py | 45 +++++++++++++++---- 2 files changed, 39 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/3228-tss-domain-authorization.yml diff --git a/changelogs/fragments/3228-tss-domain-authorization.yml b/changelogs/fragments/3228-tss-domain-authorization.yml new file mode 100644 index 0000000000..0a80b3dd8e --- /dev/null +++ b/changelogs/fragments/3228-tss-domain-authorization.yml @@ -0,0 +1,3 @@ +minor_changes: + - tss lookup plugin - added new parameter for domain authorization + (https://github.com/ansible-collections/community.general/pull/3228). diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index 65f8b114f6..ecc3fd6c8b 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -45,6 +45,16 @@ options: - section: tss_lookup key: password required: true + domain: + default: "" + description: The domain with which to request the OAuth2 Access Grant. 
+ env: + - name: TSS_DOMAIN + ini: + - section: tss_lookup + key: domain + required: false + version_added: 3.6.0 api_path_uri: default: /api/v1 description: The path to append to the base URL to form a valid REST @@ -130,7 +140,8 @@ except ImportError: sdk_version = "0.0.5" try: - from thycotic.secrets.server import PasswordGrantAuthorizer + from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer + sdK_version_below_v1 = False except ImportError: sdK_version_below_v1 = True @@ -138,7 +149,6 @@ except ImportError: from ansible.utils.display import Display from ansible.plugins.lookup import LookupBase - display = Display() @@ -147,18 +157,34 @@ class LookupModule(LookupBase): def Client(server_parameters): if LooseVersion(sdk_version) < LooseVersion('1.0.0') or sdK_version_below_v1: - return SecretServer(**server_parameters) - else: - # The Password Authorizer became available in v1.0.0 and beyond. - # Import only if sdk_version requires it. - # from thycotic.secrets.server import PasswordGrantAuthorizer - - authorizer = PasswordGrantAuthorizer( + return SecretServer( server_parameters["base_url"], server_parameters["username"], server_parameters["password"], + server_parameters["api_path_uri"], server_parameters["token_path_uri"], ) + else: + # The Password Authorizer and Domain Password Authorizer + # became available in v1.0.0 and beyond. + # Import only if sdk_version requires it. 
+ # from thycotic.secrets.server import PasswordGrantAuthorizer + + if server_parameters["domain"]: + authorizer = DomainPasswordGrantAuthorizer( + server_parameters["base_url"], + server_parameters["username"], + server_parameters["domain"], + server_parameters["password"], + server_parameters["token_path_uri"], + ) + else: + authorizer = PasswordGrantAuthorizer( + server_parameters["base_url"], + server_parameters["username"], + server_parameters["password"], + server_parameters["token_path_uri"], + ) return SecretServer( server_parameters["base_url"], authorizer, server_parameters["api_path_uri"] @@ -175,6 +201,7 @@ class LookupModule(LookupBase): "base_url": self.get_option("base_url"), "username": self.get_option("username"), "password": self.get_option("password"), + "domain": self.get_option("domain"), "api_path_uri": self.get_option("api_path_uri"), "token_path_uri": self.get_option("token_path_uri"), } From 8a62b79ef2e902116575f1fe266bbce5def6f9e8 Mon Sep 17 00:00:00 2001 From: David Hummel <6109326+hummeltech@users.noreply.github.com> Date: Fri, 20 Aug 2021 12:45:30 -0700 Subject: [PATCH 0280/2828] nmcli: Disallow Wi-Fi options not supported by nmcli (#3141) * nmcli: Disallow Wi-Fi options not supported by nmcli By querying nmcli directly * Added changelog fragment * Added tests * Simplify `get_available_options()` * Update changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml Co-authored-by: Felix Fontein * Remove redundant `802-11-wireless` settings from test show outputs * Update `mocked_wireless_create(mocker)` * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Ajpantuso * Address comment re. creating function & use nmcli naming conventions I.E. `setting`.`property` = `value` ``` nmcli> help set set [. ] :: set property value This command sets property value. 
Example: nmcli> set con.id My connection ``` * Added `ignore_unsupported_suboptions` option & improved `wifi(_sec)` doc * Corrected pep8 issues ``` ERROR: Found 2 pep8 issue(s) which need to be resolved: ERROR: plugins/modules/net_tools/nmcli.py:342:161: E501: line too long (236 > 160 characters) ERROR: plugins/modules/net_tools/nmcli.py:359:161: E501: line too long (237 > 160 characters) ``` * Fixed remaining sanity check issues and added even more docs * No need to split Note * Update plugins/modules/net_tools/nmcli.py 3.5.0 has already been released. Co-authored-by: Felix Fontein * Followed uniformity guideline for format macros from Ansible's dev guide * Addressed comment https://github.com/ansible-collections/community.general/pull/3141#discussion_r689098383 * Documentation cleanup continuation * Replace `NM_SETTING_*`s having a description with their numeric value * Splitting up long paragraphs. Also removed `wifi`.`seen-bssids` as it "`is only meant for reading`" * Addressed remaining comments and clarified `wake-on-lan` note * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Finishing addressing documentation comments. 
* Update plugins/modules/net_tools/nmcli.py Co-authored-by: Ajpantuso * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Ajpantuso * Update nmcli.py * Added wifi-related `list` type options to `settings_type` method * Moved `edit_commands` `execution` logic into its own method * Move `unsupported_property` deletion into `main` function * Missing `.items()` * Resolved missing proper `nmcli conn edit` arguments * Resolve pylint issue `dangerous-default-value` Co-authored-by: Felix Fontein Co-authored-by: Ajpantuso Co-authored-by: David Hummel --- ...-disallow-options-unsupported-by-nmcli.yml | 3 + plugins/modules/net_tools/nmcli.py | 378 +++++++++++++++++- .../plugins/modules/net_tools/test_nmcli.py | 190 ++++++++- 3 files changed, 546 insertions(+), 25 deletions(-) create mode 100644 changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml diff --git a/changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml b/changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml new file mode 100644 index 0000000000..e6c15c8786 --- /dev/null +++ b/changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml @@ -0,0 +1,3 @@ +minor_changes: + - nmcli - query ``nmcli`` directly to determine available WiFi options + (https://github.com/ansible-collections/community.general/pull/3141). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 06b868dace..0a7d78b681 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -332,11 +332,141 @@ options: version_added: 2.0.0 wifi_sec: description: - - 'The security configuration of the WiFi connection. The valid attributes are listed on: + - The security configuration of the WiFi connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. 
+ - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' - 'For instance to use common WPA-PSK auth with a password: C({key-mgmt: wpa-psk, psk: my_password}).' type: dict + suboptions: + auth-alg: + description: + - When WEP is used (that is, if I(key-mgmt) = C(none) or C(ieee8021x)) indicate the 802.11 authentication algorithm required by the AP here. + - One of C(open) for Open System, C(shared) for Shared Key, or C(leap) for Cisco LEAP. + - When using Cisco LEAP (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)) the I(leap-username) and I(leap-password) properties + must be specified. + type: str + choices: [ open, shared, leap ] + fils: + description: + - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection. + - One of C(0) (use global default value), C(1) (disable FILS), C(2) (enable FILS if the supplicant and the access point support it) or C(3) + (enable FILS and fail if not supported). + - When set to C(0) and no global default is set, FILS will be optionally enabled. + type: int + choices: [ 0, 1, 2, 3 ] + default: 0 + group: + description: + - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in + the list. + - For maximum compatibility leave this property empty. + type: list + elements: str + choices: [ wep40, wep104, tkip, ccmp ] + key-mgmt: + description: + - Key management used for the connection. + - One of C(none) (WEP or no password protection), C(ieee8021x) (Dynamic WEP), C(owe) (Opportunistic Wireless Encryption), C(wpa-psk) (WPA2 + + WPA3 personal), C(sae) (WPA3 personal only), C(wpa-eap) (WPA2 + WPA3 enterprise) or C(wpa-eap-suite-b-192) (WPA3 enterprise only). + - This property must be set for any Wi-Fi connection that uses security. 
+ type: str + choices: [ none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192 ] + leap-password-flags: + description: Flags indicating how to handle the I(leap-password) property. + type: list + elements: int + leap-password: + description: The login password for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)). + type: str + leap-username: + description: The login username for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)). + type: str + pairwise: + description: + - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in the + list. + - For maximum compatibility leave this property empty. + type: list + elements: str + choices: [ tkip, ccmp ] + pmf: + description: + - Indicates whether Protected Management Frames (802.11w) must be enabled for the connection. + - One of C(0) (use global default value), C(1) (disable PMF), C(2) (enable PMF if the supplicant and the access point support it) or C(3) + (enable PMF and fail if not supported). + - When set to C(0) and no global default is set, PMF will be optionally enabled. + type: int + choices: [ 0, 1, 2, 3 ] + default: 0 + proto: + description: + - List of strings specifying the allowed WPA protocol versions to use. + - Each element may be C(wpa) (allow WPA) or C(rsn) (allow WPA2/RSN). + - If not specified, both WPA and RSN connections are allowed. + type: list + elements: str + choices: [ wpa, rsn ] + psk-flags: + description: Flags indicating how to handle the I(psk) property. + type: list + elements: int + psk: + description: + - Pre-Shared-Key for WPA networks. + - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) hashed to derive the + actual key, or the key in form of 64 hexadecimal character. + - The WPA3-Personal networks use a passphrase of any length for SAE authentication. 
+ type: str + wep-key-flags: + description: Flags indicating how to handle the I(wep-key0), I(wep-key1), I(wep-key2), and I(wep-key3) properties. + type: list + elements: int + wep-key-type: + description: + - Controls the interpretation of WEP keys. + - Allowed values are C(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII + password; or C(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the + actual WEP key. + type: int + choices: [ 1, 2 ] + wep-key0: + description: + - Index 0 WEP key. This is the WEP key used in most networks. + - See the I(wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key1: + description: + - Index 1 WEP key. This WEP index is not used by most networks. + - See the I(wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key2: + description: + - Index 2 WEP key. This WEP index is not used by most networks. + - See the I(wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key3: + description: + - Index 3 WEP key. This WEP index is not used by most networks. + - See the I(wep-key-type) property for a description of how this key is interpreted. + type: str + wep-tx-keyidx: + description: + - When static WEP is used (that is, if I(key-mgmt=none)) and a non-default WEP key index is used by the AP, put that WEP key index here. + - Valid values are C(0) (default key) through C(3). + - Note that some consumer access points (like the Linksys WRT54G) number the keys C(1) - C(4). + type: int + choices: [ 0, 1, 2, 3 ] + default: 0 + wps-method: + description: + - Flags indicating which mode of WPS is to be used if any. + - There is little point in changing the default setting as NetworkManager will automatically determine whether it is feasible to start WPS + enrollment from the Access Point capabilities. 
+ - WPS can be disabled by setting this property to a value of C(1). + type: int + default: 0 version_added: 3.0.0 ssid: description: @@ -345,12 +475,162 @@ options: version_added: 3.0.0 wifi: description: - - 'The configuration of the WiFi connection. The valid attributes are listed on: + - The configuration of the WiFi connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. + - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' - 'For instance to create a hidden AP mode WiFi connection: C({hidden: true, mode: ap}).' type: dict + suboptions: + ap-isolation: + description: + - Configures AP isolation, which prevents communication between wireless devices connected to this AP. + - This property can be set to a value different from C(-1) only when the interface is configured in AP mode. + - If set to C(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks + from other clients in the network. At the same time, it prevents devices to access resources on the same wireless networks as file + shares, printers, etc. + - If set to C(0), devices can talk to each other. + - When set to C(-1), the global default is used; in case the global default is unspecified it is assumed to be C(0). + type: int + choices: [ -1, 0, 1 ] + default: -1 + assigned-mac-address: + description: + - The new field for the cloned MAC address. + - It can be either a hardware address in ASCII representation, or one of the special values C(preserve), C(permanent), C(random) or + C(stable). + - This field replaces the deprecated I(cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses. + - Note that this property only exists in D-Bus API. libnm and nmcli continue to call this property I(cloned-mac-address). 
+ type: str + band: + description: + - 802.11 frequency band of the network. + - One of C(a) for 5GHz 802.11a or C(bg) for 2.4GHz 802.11. + - This will lock associations to the Wi-Fi network to the specific band, so for example, if C(a) is specified, the device will not + associate with the same network in the 2.4GHz band even if the network's settings are compatible. + - This setting depends on specific driver capability and may not work with all drivers. + type: str + choices: [ a, bg ] + bssid: + description: + - If specified, directs the device to only associate with the given access point. + - This capability is highly driver dependent and not supported by all devices. + - Note this property does not control the BSSID used when creating an Ad-Hoc network and is unlikely to in the future. + type: str + channel: + description: + - Wireless channel to use for the Wi-Fi connection. + - The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel. + - Because channel numbers overlap between bands, this property also requires the I(band) property to be set. + type: int + default: 0 + cloned-mac-address: + description: + - This D-Bus field is deprecated in favor of I(assigned-mac-address) which is more flexible and allows specifying special variants like + C(random). + - For libnm and nmcli, this field is called I(cloned-mac-address). + type: str + generate-mac-address-mask: + description: + - With I(cloned-mac-address) setting C(random) or C(stable), by default all bits of the MAC address are scrambled and a + locally-administered, unicast MAC address is created. This property allows to specify that certain bits are fixed. + - Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address. + - If the property is C(null), it is eligible to be overwritten by a default connection setting. 
+ - If the value is still c(null) or an empty string, the default is to create a locally-administered, unicast MAC address. + - If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled with the current MAC + address of the device, while the unset bits are subject to randomization. + - Setting C(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the + C(random) or C(stable) algorithm. + - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC address to fill the bits + that shall not be randomized. + - For example, a value of C(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits are + randomized. + - A value of C(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address. + - If the value contains more than one additional MAC addresses, one of them is chosen randomly. For example, + C(02:00:00:00:00:00 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally + administered. + type: str + hidden: + description: + - If C(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP mode. + - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as probe-scanning the SSID. + However, these workarounds expose inherent insecurities with hidden SSID networks, and thus hidden SSID networks should be used with + caution. + - In AP mode, the created network does not broadcast its SSID. + - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations (in AP mode), as the + explicit probe-scans are distinctly recognizable on the air. 
+ type: bool + default: false + mac-address-blacklist: + description: + - A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply. + - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, C(00:11:22:33:44:55)). + type: list + elements: str + mac-address-randomization: + description: + - One of C(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), C(1) + (never randomize the MAC address), or C(2) (always randomize the MAC address). + - This property is deprecated for I(cloned-mac-address). + type: int + default: 0 + choices: [ 0, 1, 2 ] + mac-address: + description: + - If specified, this connection will only apply to the Wi-Fi device whose permanent MAC address matches. + - This property does not change the MAC address of the device (for example for MAC spoofing). + type: str + mode: + description: Wi-Fi network mode. If blank, C(infrastructure) is assumed. + type: str + choices: [ infrastructure, mesh, adhoc, ap ] + default: infrastructure + mtu: + description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. + type: int + default: 0 + powersave: + description: + - One of C(2) (disable Wi-Fi power saving), C(3) (enable Wi-Fi power saving), C(1) (don't touch currently configure setting) or C(0) (use + the globally configured value). + - All other values are reserved. + type: int + default: 0 + choices: [ 0, 1, 2, 3 ] + rate: + description: + - If non-zero, directs the device to only use the specified bitrate for communication with the access point. + - Units are in Kb/s, so for example C(5500) = 5.5 Mbit/s. + - This property is highly driver dependent and not all devices support setting a static bitrate. + type: int + default: 0 + tx-power: + description: + - If non-zero, directs the device to use the specified transmit power. + - Units are dBm. 
+ - This property is highly driver dependent and not all devices support setting a static transmit power. + type: int + default: 0 + wake-on-wlan: + description: + - The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options. + - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (C(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (C(0x4)), + C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (C(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (C(0x10)), + C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (C(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (C(0x40)), + C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (C(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (C(0x100)) or the special values + C(0x1) (to use global settings) and C(0x8000) (to disable management of Wake-on-LAN in NetworkManager). + - Note the option values' sum must be specified in order to combine multiple options. + type: int + default: 1 version_added: 3.5.0 + ignore_unsupported_suboptions: + description: + - Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host. + - Only I(wifi) and I(wifi_sec) options are currently affected. 
+ type: bool + default: false + version_added: 3.6.0 ''' EXAMPLES = r''' @@ -699,6 +979,7 @@ class Nmcli(object): A subclass may wish to override the following action methods:- - create_connection() - delete_connection() + - edit_connection() - modify_connection() - show_connection() - up_connection() @@ -721,6 +1002,7 @@ class Nmcli(object): def __init__(self, module): self.module = module self.state = module.params['state'] + self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] self.autoconnect = module.params['autoconnect'] self.conn_name = module.params['conn_name'] self.master = module.params['master'] @@ -810,6 +1092,12 @@ class Nmcli(object): cmd = to_text(cmd) return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) + def execute_edit_commands(self, commands, arguments): + arguments = arguments or [] + cmd = [self.nmcli_bin, 'con', 'edit'] + arguments + data = "\n".join(commands) + return self.execute_command(cmd, data=data) + def connection_options(self, detect_change=False): # Options common to multiple connection types. 
options = { @@ -920,9 +1208,6 @@ class Nmcli(object): }) if self.wifi: for name, value in self.wifi.items(): - # Disregard 'ssid' via 'wifi.ssid' - if name == 'ssid': - continue options.update({ '802-11-wireless.%s' % name: value }) @@ -1039,7 +1324,14 @@ class Nmcli(object): 'ipv4.routes', 'ipv4.route-metric' 'ipv6.dns', - 'ipv6.dns-search'): + 'ipv6.dns-search', + '802-11-wireless-security.group', + '802-11-wireless-security.leap-password-flags', + '802-11-wireless-security.pairwise', + '802-11-wireless-security.proto', + '802-11-wireless-security.psk-flags', + '802-11-wireless-security.wep-key-flags', + '802-11-wireless.mac-address-blacklist'): return list return str @@ -1127,9 +1419,8 @@ class Nmcli(object): return status def edit_connection(self): - data = "\n".join(self.edit_commands + ['save', 'quit']) - cmd = [self.nmcli_bin, 'con', 'edit', self.conn_name] - return self.execute_command(cmd, data=data) + commands = self.edit_commands + ['save', 'quit'] + return self.execute_edit_commands(commands, arguments=[self.conn_name]) def show_connection(self): cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] @@ -1173,6 +1464,60 @@ class Nmcli(object): return conn_info + def get_supported_properties(self, setting): + properties = [] + + if setting == '802-11-wireless-security': + set_property = 'psk' + set_value = 'FAKEVALUE' + commands = ['set %s.%s %s' % (setting, set_property, set_value)] + else: + commands = [] + + commands += ['print %s' % setting, 'quit', 'yes'] + + (rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type]) + + if rc != 0: + raise NmcliModuleError(err) + + for line in out.splitlines(): + prefix = '%s.' 
% setting + if (line.startswith(prefix)): + pair = line.split(':', 1) + property = pair[0].strip().replace(prefix, '') + properties.append(property) + + return properties + + def check_for_unsupported_properties(self, setting): + if setting == '802-11-wireless': + setting_key = 'wifi' + elif setting == '802-11-wireless-security': + setting_key = 'wifi_sec' + else: + setting_key = setting + + supported_properties = self.get_supported_properties(setting) + unsupported_properties = [] + + for property, value in getattr(self, setting_key).items(): + if property not in supported_properties: + unsupported_properties.append(property) + + if unsupported_properties: + msg_options = [] + for property in unsupported_properties: + msg_options.append('%s.%s' % (setting_key, property)) + + msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options) + if self.ignore_unsupported_suboptions: + self.module.warn(msg) + else: + self.module.fail_json(msg=msg) + + return unsupported_properties + def _compare_conn_params(self, conn_info, options): changed = False diff_before = dict() @@ -1230,6 +1575,7 @@ def main(): # Parsing argument file module = AnsibleModule( argument_spec=dict( + ignore_unsupported_suboptions=dict(type='bool', default=False), autoconnect=dict(type='bool', default=True), state=dict(type='str', required=True, choices=['absent', 'present']), conn_name=dict(type='str', required=True), @@ -1315,6 +1661,7 @@ def main(): ip_tunnel_dev=dict(type='str'), ip_tunnel_local=dict(type='str'), ip_tunnel_remote=dict(type='str'), + # 802-11-wireless* specific vars ssid=dict(type='str'), wifi=dict(type='dict'), wifi_sec=dict(type='dict', no_log=True), @@ -1343,6 +1690,19 @@ def main(): nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type) if nmcli.ifname is None: nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type) + if nmcli.type == 'wifi': + unsupported_properties = {} 
+ if nmcli.wifi: + if 'ssid' in nmcli.wifi: + module.warn("Ignoring option 'wifi.ssid', it must be specified with option 'ssid'") + del nmcli.wifi['ssid'] + unsupported_properties['wifi'] = nmcli.check_for_unsupported_properties('802-11-wireless') + if nmcli.wifi_sec: + unsupported_properties['wifi_sec'] = nmcli.check_for_unsupported_properties('802-11-wireless-security') + if nmcli.ignore_unsupported_suboptions and unsupported_properties: + for setting_key, properties in unsupported_properties.items(): + for property in properties: + del getattr(nmcli, setting_key)[property] try: if nmcli.state == 'absent': diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 9f131c3873..ca83044201 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -507,6 +507,51 @@ TESTCASE_SECURE_WIRELESS = [ } ] +TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT = """\ +802-11-wireless.ssid: -- +802-11-wireless.mode: infrastructure +802-11-wireless.band: -- +802-11-wireless.channel: 0 +802-11-wireless.bssid: -- +802-11-wireless.rate: 0 +802-11-wireless.tx-power: 0 +802-11-wireless.mac-address: -- +802-11-wireless.cloned-mac-address: -- +802-11-wireless.generate-mac-address-mask:-- +802-11-wireless.mac-address-blacklist: -- +802-11-wireless.mac-address-randomization:default +802-11-wireless.mtu: auto +802-11-wireless.seen-bssids: -- +802-11-wireless.hidden: no +802-11-wireless.powersave: 0 (default) +802-11-wireless.wake-on-wlan: 0x1 (default) +802-11-wireless.ap-isolation: -1 (default) +""" + +TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT = \ + TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT + """\ +802-11-wireless-security.key-mgmt: -- +802-11-wireless-security.wep-tx-keyidx: 0 +802-11-wireless-security.auth-alg: -- +802-11-wireless-security.proto: -- +802-11-wireless-security.pairwise: -- +802-11-wireless-security.group: -- +802-11-wireless-security.pmf: 0 (default) 
+802-11-wireless-security.leap-username: -- +802-11-wireless-security.wep-key0: -- +802-11-wireless-security.wep-key1: -- +802-11-wireless-security.wep-key2: -- +802-11-wireless-security.wep-key3: -- +802-11-wireless-security.wep-key-flags: 0 (none) +802-11-wireless-security.wep-key-type: unknown +802-11-wireless-security.psk: testingtestingtesting +802-11-wireless-security.psk-flags: 0 (none) +802-11-wireless-security.leap-password: -- +802-11-wireless-security.leap-password-flags:0 (none) +802-11-wireless-security.wps-method: 0x0 (default) +802-11-wireless-security.fils: 0 (default) +""" + TESTCASE_DUMMY_STATIC = [ { 'type': 'dummy', @@ -697,10 +742,48 @@ def mocked_ethernet_connection_dhcp_to_static(mocker): )) +@pytest.fixture +def mocked_wireless_create(mocker): + mocker_set(mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT, ""), + (0, "", ""), + )) + + +@pytest.fixture +def mocked_secure_wireless_create(mocker): + mocker_set(mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), + (0, "", ""), + (0, "", ""), + )) + + @pytest.fixture def mocked_secure_wireless_create_failure(mocker): mocker_set(mocker, - execute_return=(1, "", "")) + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), + (1, "", ""), + )) + + +@pytest.fixture +def mocked_secure_wireless_modify(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), + (0, "", ""), + (0, "", ""), + (0, "", ""), + )) @pytest.fixture @@ -709,6 +792,7 @@ def mocked_secure_wireless_modify_failure(mocker): connection_exists=True, execute_return=None, execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), (0, "", ""), (1, "", ""), )) @@ -1629,7 +1713,7 @@ def test_ethernet_connection_static_unchanged(mocked_ethernet_connection_static_ 
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIRELESS, indirect=['patch_ansible_module']) -def test_create_wireless(mocked_generic_connection_create, capfd): +def test_create_wireless(mocked_wireless_create, capfd): """ Test : Create wireless connection """ @@ -1637,10 +1721,22 @@ def test_create_wireless(mocked_generic_connection_create, capfd): with pytest.raises(SystemExit): nmcli.main() - assert nmcli.Nmcli.execute_command.call_count == 1 + assert nmcli.Nmcli.execute_command.call_count == 2 arg_list = nmcli.Nmcli.execute_command.call_args_list - add_args, add_kw = arg_list[0] + get_available_options_args, get_available_options_kw = arg_list[0] + assert get_available_options_args[0][0] == '/usr/bin/nmcli' + assert get_available_options_args[0][1] == 'con' + assert get_available_options_args[0][2] == 'edit' + assert get_available_options_args[0][3] == 'type' + assert get_available_options_args[0][4] == 'wifi' + + get_available_options_data = get_available_options_kw['data'].split() + for param in ['print', '802-11-wireless', + 'quit', 'yes']: + assert param in get_available_options_data + + add_args, add_kw = arg_list[1] assert add_args[0][0] == '/usr/bin/nmcli' assert add_args[0][1] == 'con' assert add_args[0][2] == 'add' @@ -1664,7 +1760,7 @@ def test_create_wireless(mocked_generic_connection_create, capfd): @pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) -def test_create_secure_wireless(mocked_generic_connection_create, capfd): +def test_create_secure_wireless(mocked_secure_wireless_create, capfd): """ Test : Create secure wireless connection """ @@ -1672,10 +1768,22 @@ def test_create_secure_wireless(mocked_generic_connection_create, capfd): with pytest.raises(SystemExit): nmcli.main() - assert nmcli.Nmcli.execute_command.call_count == 2 + assert nmcli.Nmcli.execute_command.call_count == 3 arg_list = nmcli.Nmcli.execute_command.call_args_list - add_args, add_kw = arg_list[0] + 
get_available_options_args, get_available_options_kw = arg_list[0] + assert get_available_options_args[0][0] == '/usr/bin/nmcli' + assert get_available_options_args[0][1] == 'con' + assert get_available_options_args[0][2] == 'edit' + assert get_available_options_args[0][3] == 'type' + assert get_available_options_args[0][4] == 'wifi' + + get_available_options_data = get_available_options_kw['data'].split() + for param in ['print', '802-11-wireless-security', + 'quit', 'yes']: + assert param in get_available_options_data + + add_args, add_kw = arg_list[1] assert add_args[0][0] == '/usr/bin/nmcli' assert add_args[0][1] == 'con' assert add_args[0][2] == 'add' @@ -1691,7 +1799,7 @@ def test_create_secure_wireless(mocked_generic_connection_create, capfd): '802-11-wireless-security.key-mgmt', 'wpa-psk']: assert param in add_args_text - edit_args, edit_kw = arg_list[1] + edit_args, edit_kw = arg_list[2] assert edit_args[0][0] == '/usr/bin/nmcli' assert edit_args[0][1] == 'con' assert edit_args[0][2] == 'edit' @@ -1718,10 +1826,22 @@ def test_create_secure_wireless_failure(mocked_secure_wireless_create_failure, c with pytest.raises(SystemExit): nmcli.main() - assert nmcli.Nmcli.execute_command.call_count == 1 + assert nmcli.Nmcli.execute_command.call_count == 2 arg_list = nmcli.Nmcli.execute_command.call_args_list - add_args, add_kw = arg_list[0] + get_available_options_args, get_available_options_kw = arg_list[0] + assert get_available_options_args[0][0] == '/usr/bin/nmcli' + assert get_available_options_args[0][1] == 'con' + assert get_available_options_args[0][2] == 'edit' + assert get_available_options_args[0][3] == 'type' + assert get_available_options_args[0][4] == 'wifi' + + get_available_options_data = get_available_options_kw['data'].split() + for param in ['print', '802-11-wireless-security', + 'quit', 'yes']: + assert param in get_available_options_data + + add_args, add_kw = arg_list[1] assert add_args[0][0] == '/usr/bin/nmcli' assert add_args[0][1] == 'con' 
assert add_args[0][2] == 'add' @@ -1744,17 +1864,36 @@ def test_create_secure_wireless_failure(mocked_secure_wireless_create_failure, c @pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) -def test_modify_secure_wireless(mocked_generic_connection_modify, capfd): +def test_modify_secure_wireless(mocked_secure_wireless_modify, capfd): """ Test : Modify secure wireless connection """ with pytest.raises(SystemExit): nmcli.main() - assert nmcli.Nmcli.execute_command.call_count == 2 + assert nmcli.Nmcli.execute_command.call_count == 4 arg_list = nmcli.Nmcli.execute_command.call_args_list - add_args, add_kw = arg_list[0] + get_available_options_args, get_available_options_kw = arg_list[0] + assert get_available_options_args[0][0] == '/usr/bin/nmcli' + assert get_available_options_args[0][1] == 'con' + assert get_available_options_args[0][2] == 'edit' + assert get_available_options_args[0][3] == 'type' + assert get_available_options_args[0][4] == 'wifi' + + get_available_options_data = get_available_options_kw['data'].split() + for param in ['print', '802-11-wireless-security', + 'quit', 'yes']: + assert param in get_available_options_data + + show_args, show_kw = arg_list[1] + assert show_args[0][0] == '/usr/bin/nmcli' + assert show_args[0][1] == '--show-secrets' + assert show_args[0][2] == 'con' + assert show_args[0][3] == 'show' + assert show_args[0][4] == 'non_existent_nw_device' + + add_args, add_kw = arg_list[2] assert add_args[0][0] == '/usr/bin/nmcli' assert add_args[0][1] == 'con' assert add_args[0][2] == 'modify' @@ -1767,7 +1906,7 @@ def test_modify_secure_wireless(mocked_generic_connection_modify, capfd): '802-11-wireless-security.key-mgmt', 'wpa-psk']: assert param in add_args_text - edit_args, edit_kw = arg_list[1] + edit_args, edit_kw = arg_list[3] assert edit_args[0][0] == '/usr/bin/nmcli' assert edit_args[0][1] == 'con' assert edit_args[0][2] == 'edit' @@ -1794,10 +1933,29 @@ def 
test_modify_secure_wireless_failure(mocked_secure_wireless_modify_failure, c with pytest.raises(SystemExit): nmcli.main() - assert nmcli.Nmcli.execute_command.call_count == 2 + assert nmcli.Nmcli.execute_command.call_count == 3 arg_list = nmcli.Nmcli.execute_command.call_args_list - add_args, add_kw = arg_list[1] + get_available_options_args, get_available_options_kw = arg_list[0] + assert get_available_options_args[0][0] == '/usr/bin/nmcli' + assert get_available_options_args[0][1] == 'con' + assert get_available_options_args[0][2] == 'edit' + assert get_available_options_args[0][3] == 'type' + assert get_available_options_args[0][4] == 'wifi' + + get_available_options_data = get_available_options_kw['data'].split() + for param in ['print', '802-11-wireless-security', + 'quit', 'yes']: + assert param in get_available_options_data + + show_args, show_kw = arg_list[1] + assert show_args[0][0] == '/usr/bin/nmcli' + assert show_args[0][1] == '--show-secrets' + assert show_args[0][2] == 'con' + assert show_args[0][3] == 'show' + assert show_args[0][4] == 'non_existent_nw_device' + + add_args, add_kw = arg_list[2] assert add_args[0][0] == '/usr/bin/nmcli' assert add_args[0][1] == 'con' assert add_args[0][2] == 'modify' From 23e7ef025529b7c79fab284121f0b4b5045e45fa Mon Sep 17 00:00:00 2001 From: Matt 'Archer' Vaughn Date: Sat, 21 Aug 2021 15:57:28 -0400 Subject: [PATCH 0281/2828] Add option for retry_servfail (#3247) * Add option for retry_servfail cf. https://dnspython.readthedocs.io/en/latest/resolver-class.html#dns.resolver.Resolver.retry_servfail Setting this option to `True` allows for the possibility of the lookup plugin to retry and thereby recover from potentially transient lookup failures, which would otherwise cause the task or play to bail with an unrecoverable exception. 
* Create 3247-retry_servfail-for-dig * documentation for `retry_servfail` option * Rename 3247-retry_servfail-for-dig to 3247-retry_servfail-for-dig.yaml * fix whitespace * Update plugins/lookup/dig.py Co-authored-by: Ajpantuso * Update plugins/lookup/dig.py Co-authored-by: Ajpantuso * rm try/except block Co-authored-by: Ajpantuso --- changelogs/fragments/3247-retry_servfail-for-dig.yaml | 3 +++ plugins/lookup/dig.py | 11 +++++++++++ 2 files changed, 14 insertions(+) create mode 100644 changelogs/fragments/3247-retry_servfail-for-dig.yaml diff --git a/changelogs/fragments/3247-retry_servfail-for-dig.yaml b/changelogs/fragments/3247-retry_servfail-for-dig.yaml new file mode 100644 index 0000000000..1e4a00384f --- /dev/null +++ b/changelogs/fragments/3247-retry_servfail-for-dig.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - dig lookup plugin - add ``retry_servfail`` option (https://github.com/ansible-collections/community.general/pull/3247). diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index f5156b4d1e..19ded61de7 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -35,6 +35,11 @@ DOCUMENTATION = ''' flat: description: If 0 each record is returned as a dictionary, otherwise a string default: 1 + retry_servfail: + description: Retry a nameserver if it returns SERVFAIL. + default: false + type: bool + version_added: 3.6.0 notes: - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary. - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary. @@ -73,6 +78,10 @@ EXAMPLES = """ - ansible.builtin.debug: msg: "XMPP service for gmail.com. 
is available at {{ item.target }} on port {{ item.port }}" with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}" + +- name: Retry nameservers that return SERVFAIL + ansible.builtin.debug: + msg: "{{ lookup('community.general.dig', 'example.org./A', 'retry_servfail=True') }}" """ RETURN = """ @@ -300,6 +309,8 @@ class LookupModule(LookupBase): rdclass = dns.rdataclass.from_text(arg) except Exception as e: raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e)) + elif opt == 'retry_servfail': + myres.retry_servfail = bool(arg) continue From 1ca9c350109966f7f901f447b7b9c483bd5006cf Mon Sep 17 00:00:00 2001 From: zerotens Date: Mon, 23 Aug 2021 06:24:05 +0200 Subject: [PATCH 0282/2828] nmcli: allow IPv4/IPv6 configuration on ipip and sit devices (#3239) * Allow IPv4/IPv6 configuration on mode "sit" tunnel devices * Update Unit Test for Allow IPv4/IPv6 configuration on mode "sit" tunnel devices * Add changelog for Allow IPv4/IPv6 configuration on mode "sit" tunnel devices * Update changelogs/fragments/3239-nmcli-sit-ip-config-bugfix.yaml Co-authored-by: Ajpantuso * Added ip4/ip6 configuration arguments for ipip tunnels Co-authored-by: Ajpantuso --- .../fragments/3239-nmcli-sit-ipip-config-bugfix.yaml | 2 ++ plugins/modules/net_tools/nmcli.py | 2 ++ tests/unit/plugins/modules/net_tools/test_nmcli.py | 12 ++++++++++++ 3 files changed, 16 insertions(+) create mode 100644 changelogs/fragments/3239-nmcli-sit-ipip-config-bugfix.yaml diff --git a/changelogs/fragments/3239-nmcli-sit-ipip-config-bugfix.yaml b/changelogs/fragments/3239-nmcli-sit-ipip-config-bugfix.yaml new file mode 100644 index 0000000000..78a172342e --- /dev/null +++ b/changelogs/fragments/3239-nmcli-sit-ipip-config-bugfix.yaml @@ -0,0 +1,2 @@ +bugfixes: + - "nmcli - added ip4/ip6 configuration arguments for ``sit`` and ``ipip`` tunnels (https://github.com/ansible-collections/community.general/issues/3238, 
https://github.com/ansible-collections/community.general/pull/3239)." diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 0a7d78b681..cce9e44ee4 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -1248,6 +1248,8 @@ class Nmcli(object): 'ethernet', 'generic', 'infiniband', + 'ipip', + 'sit', 'team', 'vlan', 'wifi' diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index ca83044201..f81b636a81 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -388,6 +388,12 @@ TESTCASE_IPIP_SHOW_OUTPUT = """\ connection.id: non_existent_nw_device connection.interface-name: ipip-existent_nw_device connection.autoconnect: yes +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no ip-tunnel.mode: ipip ip-tunnel.parent: non_existent_ipip_device ip-tunnel.local: 192.168.225.5 @@ -411,6 +417,12 @@ TESTCASE_SIT_SHOW_OUTPUT = """\ connection.id: non_existent_nw_device connection.interface-name: sit-existent_nw_device connection.autoconnect: yes +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no ip-tunnel.mode: sit ip-tunnel.parent: non_existent_sit_device ip-tunnel.local: 192.168.225.5 From f2fa56b485bab467250185c0dd3ee6b777ee0044 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 25 Aug 2021 06:36:19 +0200 Subject: [PATCH 0283/2828] Fix apache2_module a2enmod/a2dismod detection and error message if not found. 
(#3258) --- changelogs/fragments/3258-apache2_module.yml | 2 ++ plugins/modules/web_infrastructure/apache2_module.py | 10 ++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/3258-apache2_module.yml diff --git a/changelogs/fragments/3258-apache2_module.yml b/changelogs/fragments/3258-apache2_module.yml new file mode 100644 index 0000000000..a60f2125a4 --- /dev/null +++ b/changelogs/fragments/3258-apache2_module.yml @@ -0,0 +1,2 @@ +bugfixes: +- "apache2_module - fix ``a2enmod``/``a2dismod`` detection, and error message when not found (https://github.com/ansible-collections/community.general/issues/3253)." diff --git a/plugins/modules/web_infrastructure/apache2_module.py b/plugins/modules/web_infrastructure/apache2_module.py index c75dc1c30c..44327fe13c 100644 --- a/plugins/modules/web_infrastructure/apache2_module.py +++ b/plugins/modules/web_infrastructure/apache2_module.py @@ -202,15 +202,17 @@ def _set_state(module, state): result=success_msg, warnings=module.warnings) - a2mod_binary = [module.get_bin_path(a2mod_binary)] - if a2mod_binary is None: + a2mod_binary_path = module.get_bin_path(a2mod_binary) + if a2mod_binary_path is None: module.fail_json(msg="%s not found. 
Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary)) + a2mod_binary_cmd = [a2mod_binary_path] + if not want_enabled and force: # force exists only for a2dismod on debian - a2mod_binary.append('-f') + a2mod_binary_cmd.append('-f') - result, stdout, stderr = module.run_command(a2mod_binary + [name]) + result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name]) if _module_is_enabled(module) == want_enabled: module.exit_json(changed=True, From cbcb942b0efacde22d7310b1d66205face469b75 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Wed, 25 Aug 2021 00:41:05 -0400 Subject: [PATCH 0284/2828] tss_lookup_plugin - Refactor and decoupling (#3252) * Initial commit * Adding changelog fragment * Applying initial review suggestions * Increasing unit coverage * Removed unneccessary constant * Improving test readability * Cleanup constants --- .../3252-tss_lookup_plugin-refactor.yml | 4 + plugins/lookup/tss.py | 159 ++++++++++-------- tests/unit/plugins/lookup/test_tss.py | 104 ++++++++++-- 3 files changed, 187 insertions(+), 80 deletions(-) create mode 100644 changelogs/fragments/3252-tss_lookup_plugin-refactor.yml diff --git a/changelogs/fragments/3252-tss_lookup_plugin-refactor.yml b/changelogs/fragments/3252-tss_lookup_plugin-refactor.yml new file mode 100644 index 0000000000..6e8ccb29f8 --- /dev/null +++ b/changelogs/fragments/3252-tss_lookup_plugin-refactor.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - tss lookup plugin - refactored to decouple the supporting third-party library (``python-tss-sdk``) + (https://github.com/ansible-collections/community.general/pull/3252). diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index ecc3fd6c8b..fe6042e130 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -47,7 +47,9 @@ options: required: true domain: default: "" - description: The domain with which to request the OAuth2 Access Grant. + description: + - The domain with which to request the OAuth2 Access Grant. 
+ - Requires C(python-tss-sdk) version 1.0.0 or greater. env: - name: TSS_DOMAIN ini: @@ -122,100 +124,125 @@ EXAMPLES = r""" - ansible.builtin.debug: msg: the password is {{ secret_password }} """ -from distutils.version import LooseVersion -from ansible.errors import AnsibleError, AnsibleOptionsError -sdk_is_missing = False +import abc + +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.module_utils import six +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display try: from thycotic.secrets.server import SecretServer, SecretServerError -except ImportError: - sdk_is_missing = True -# Added for backwards compatability - See issue #3192 -# https://github.com/ansible-collections/community.general/issues/3192 -try: - from thycotic import __version__ as sdk_version + HAS_TSS_SDK = True except ImportError: - sdk_version = "0.0.5" + SecretServer = None + SecretServerError = None + HAS_TSS_SDK = False try: from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer - sdK_version_below_v1 = False + HAS_TSS_AUTHORIZER = True except ImportError: - sdK_version_below_v1 = True + PasswordGrantAuthorizer = None + DomainPasswordGrantAuthorizer = None + HAS_TSS_AUTHORIZER = False -from ansible.utils.display import Display -from ansible.plugins.lookup import LookupBase display = Display() -class LookupModule(LookupBase): - @staticmethod - def Client(server_parameters): +@six.add_metaclass(abc.ABCMeta) +class TSSClient(object): + def __init__(self): + self._client = None - if LooseVersion(sdk_version) < LooseVersion('1.0.0') or sdK_version_below_v1: - return SecretServer( + @staticmethod + def from_params(**server_parameters): + if HAS_TSS_AUTHORIZER: + return TSSClientV1(**server_parameters) + else: + return TSSClientV0(**server_parameters) + + def get_secret(self, term): + display.debug("tss_lookup term: %s" % term) + + secret_id = self._term_to_secret_id(term) + display.vvv(u"Secret Server 
lookup of Secret with ID %d" % secret_id) + + return self._client.get_secret_json(secret_id) + + @staticmethod + def _term_to_secret_id(term): + try: + return int(term) + except ValueError: + raise AnsibleOptionsError("Secret ID must be an integer") + + +class TSSClientV0(TSSClient): + def __init__(self, **server_parameters): + super(TSSClientV0, self).__init__() + + if server_parameters.get("domain"): + raise AnsibleError("The 'domain' option requires 'python-tss-sdk' version 1.0.0 or greater") + + self._client = SecretServer( + server_parameters["base_url"], + server_parameters["username"], + server_parameters["password"], + server_parameters["api_path_uri"], + server_parameters["token_path_uri"], + ) + + +class TSSClientV1(TSSClient): + def __init__(self, **server_parameters): + super(TSSClientV1, self).__init__() + + authorizer = self._get_authorizer(**server_parameters) + self._client = SecretServer( + server_parameters["base_url"], authorizer, server_parameters["api_path_uri"] + ) + + @staticmethod + def _get_authorizer(**server_parameters): + if server_parameters.get("domain"): + return DomainPasswordGrantAuthorizer( server_parameters["base_url"], server_parameters["username"], + server_parameters["domain"], server_parameters["password"], - server_parameters["api_path_uri"], server_parameters["token_path_uri"], ) - else: - # The Password Authorizer and Domain Password Authorizer - # became available in v1.0.0 and beyond. - # Import only if sdk_version requires it. 
- # from thycotic.secrets.server import PasswordGrantAuthorizer - if server_parameters["domain"]: - authorizer = DomainPasswordGrantAuthorizer( - server_parameters["base_url"], - server_parameters["username"], - server_parameters["domain"], - server_parameters["password"], - server_parameters["token_path_uri"], - ) - else: - authorizer = PasswordGrantAuthorizer( - server_parameters["base_url"], - server_parameters["username"], - server_parameters["password"], - server_parameters["token_path_uri"], - ) + return PasswordGrantAuthorizer( + server_parameters["base_url"], + server_parameters["username"], + server_parameters["password"], + server_parameters["token_path_uri"], + ) - return SecretServer( - server_parameters["base_url"], authorizer, server_parameters["api_path_uri"] - ) +class LookupModule(LookupBase): def run(self, terms, variables, **kwargs): - if sdk_is_missing: + if not HAS_TSS_SDK: raise AnsibleError("python-tss-sdk must be installed to use this plugin") self.set_options(var_options=variables, direct=kwargs) - secret_server = LookupModule.Client( - { - "base_url": self.get_option("base_url"), - "username": self.get_option("username"), - "password": self.get_option("password"), - "domain": self.get_option("domain"), - "api_path_uri": self.get_option("api_path_uri"), - "token_path_uri": self.get_option("token_path_uri"), - } + tss = TSSClient.from_params( + base_url=self.get_option("base_url"), + username=self.get_option("username"), + password=self.get_option("password"), + domain=self.get_option("domain"), + api_path_uri=self.get_option("api_path_uri"), + token_path_uri=self.get_option("token_path_uri"), ) - result = [] - for term in terms: - display.debug("tss_lookup term: %s" % term) - try: - id = int(term) - display.vvv(u"Secret Server lookup of Secret with ID %d" % id) - result.append(secret_server.get_secret_json(id)) - except ValueError: - raise AnsibleOptionsError("Secret ID must be an integer") - except SecretServerError as error: - raise 
AnsibleError("Secret Server lookup failure: %s" % error.message) - return result + try: + return [tss.get_secret(term) for term in terms] + except SecretServerError as error: + raise AnsibleError("Secret Server lookup failure: %s" % error.message) diff --git a/tests/unit/plugins/lookup/test_tss.py b/tests/unit/plugins/lookup/test_tss.py index cca2f6ff5f..97073d34be 100644 --- a/tests/unit/plugins/lookup/test_tss.py +++ b/tests/unit/plugins/lookup/test_tss.py @@ -10,12 +10,25 @@ __metaclass__ = type from ansible_collections.community.general.tests.unit.compat.unittest import TestCase from ansible_collections.community.general.tests.unit.compat.mock import ( patch, + DEFAULT, MagicMock, ) from ansible_collections.community.general.plugins.lookup import tss from ansible.plugins.loader import lookup_loader +TSS_IMPORT_PATH = 'ansible_collections.community.general.plugins.lookup.tss' + + +def make_absolute(name): + return '.'.join([TSS_IMPORT_PATH, name]) + + +class SecretServerError(Exception): + def __init__(self): + self.message = '' + + class MockSecretServer(MagicMock): RESPONSE = '{"foo": "bar"}' @@ -23,21 +36,84 @@ class MockSecretServer(MagicMock): return self.RESPONSE -class TestLookupModule(TestCase): +class MockFaultySecretServer(MagicMock): + def get_secret_json(self, path): + raise SecretServerError + + +@patch(make_absolute('SecretServer'), MockSecretServer()) +class TestTSSClient(TestCase): + def setUp(self): + self.server_params = { + 'base_url': '', + 'username': '', + 'domain': '', + 'password': '', + 'api_path_uri': '', + 'token_path_uri': '', + } + + def test_from_params(self): + with patch(make_absolute('HAS_TSS_AUTHORIZER'), False): + self.assert_client_version('v0') + + with patch.dict(self.server_params, {'domain': 'foo'}): + with self.assertRaises(tss.AnsibleError): + self._get_client() + + with patch.multiple(TSS_IMPORT_PATH, + HAS_TSS_AUTHORIZER=True, + PasswordGrantAuthorizer=DEFAULT, + DomainPasswordGrantAuthorizer=DEFAULT): + + 
self.assert_client_version('v1') + + with patch.dict(self.server_params, {'domain': 'foo'}): + self.assert_client_version('v1') + + def assert_client_version(self, version): + version_to_class = { + 'v0': tss.TSSClientV0, + 'v1': tss.TSSClientV1 + } + + client = self._get_client() + self.assertIsInstance(client, version_to_class[version]) + + def _get_client(self): + return tss.TSSClient.from_params(**self.server_params) + + +class TestLookupModule(TestCase): + VALID_TERMS = [1] + INVALID_TERMS = ['foo'] + def setUp(self): - tss.sdk_is_missing = False self.lookup = lookup_loader.get("community.general.tss") - @patch( - "ansible_collections.community.general.plugins.lookup.tss.LookupModule.Client", - MockSecretServer(), - ) + @patch.multiple(TSS_IMPORT_PATH, + HAS_TSS_SDK=False, + SecretServer=MockSecretServer) + def test_missing_sdk(self): + with self.assertRaises(tss.AnsibleError): + self._run_lookup(self.VALID_TERMS) + + @patch.multiple(TSS_IMPORT_PATH, + HAS_TSS_SDK=True, + SecretServerError=SecretServerError) def test_get_secret_json(self): - self.assertListEqual( - [MockSecretServer.RESPONSE], - self.lookup.run( - [1], - [], - **{"base_url": "dummy", "username": "dummy", "password": "dummy", } - ), - ) + with patch(make_absolute('SecretServer'), MockSecretServer): + self.assertListEqual([MockSecretServer.RESPONSE], self._run_lookup(self.VALID_TERMS)) + + with self.assertRaises(tss.AnsibleOptionsError): + self._run_lookup(self.INVALID_TERMS) + + with patch(make_absolute('SecretServer'), MockFaultySecretServer): + with self.assertRaises(tss.AnsibleError): + self._run_lookup(self.VALID_TERMS) + + def _run_lookup(self, terms, variables=None, **kwargs): + variables = variables or [] + kwargs = kwargs or {"base_url": "dummy", "username": "dummy", "password": "dummy"} + + return self.lookup.run(terms, variables, **kwargs) From e40aa69e77752fe3571ec500ce749c67f7a50a13 Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Thu, 26 Aug 2021 08:09:26 +0200 Subject: [PATCH 
0285/2828] Stop notifications for apache2_module for me (#3261) --- .github/BOTMETA.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 1e982296d6..6055224145 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1101,7 +1101,8 @@ files: $modules/web_infrastructure/apache2_mod_proxy.py: maintainers: oboukili $modules/web_infrastructure/apache2_module.py: - maintainers: berendt n0trax robinro + maintainers: berendt n0trax + ignore: robinro $modules/web_infrastructure/deploy_helper.py: maintainers: ramondelafuente $modules/web_infrastructure/django_manage.py: From b8a081b9b23ae6b858115b8890ad5f5e8c0a0e11 Mon Sep 17 00:00:00 2001 From: zerotens Date: Thu, 26 Aug 2021 08:16:36 +0200 Subject: [PATCH 0286/2828] nmcli: Support gre tunnels (#3262) * Add gre tunnel support * Add gre tunnel support * Fix Blank Lines * Fix unit test Add changelog fragment * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update Docs * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Ajpantuso * Update Docs Co-authored-by: Felix Fontein Co-authored-by: Ajpantuso --- .../3262-nmcli-add-gre-tunnel-support.yaml | 2 + plugins/modules/net_tools/nmcli.py | 41 +++++- .../plugins/modules/net_tools/test_nmcli.py | 123 ++++++++++++++++++ 3 files changed, 162 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/3262-nmcli-add-gre-tunnel-support.yaml diff --git a/changelogs/fragments/3262-nmcli-add-gre-tunnel-support.yaml b/changelogs/fragments/3262-nmcli-add-gre-tunnel-support.yaml new file mode 100644 index 0000000000..e3f6bef7bc --- /dev/null +++ b/changelogs/fragments/3262-nmcli-add-gre-tunnel-support.yaml @@ -0,0 +1,2 @@ +minor_changes: + - "nmcli - add ``gre`` tunnel support (https://github.com/ansible-collections/community.general/issues/3105, https://github.com/ansible-collections/community.general/pull/3262)." 
diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index cce9e44ee4..7bc8a6b775 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -55,7 +55,7 @@ options: - Type C(generic) is added in Ansible 2.5. - Type C(infiniband) is added in community.general 2.0.0. type: str - choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] + choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] mode: description: - This is the type of device or network connection that you wish to create for a bond or bridge. @@ -314,16 +314,28 @@ options: type: str ip_tunnel_dev: description: - - This is used with IPIP/SIT - parent device this IPIP/SIT tunnel, can use ifname. + - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname. type: str ip_tunnel_remote: description: - - This is used with IPIP/SIT - IPIP/SIT destination IP address. + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address. type: str ip_tunnel_local: description: - - This is used with IPIP/SIT - IPIP/SIT local IP address. + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address. type: str + ip_tunnel_input_key: + description: + - The key used for tunnel input packets. + - Only used when I(type=gre). + type: str + version_added: 3.6.0 + ip_tunnel_output_key: + description: + - The key used for tunnel output packets. + - Only used when I(type=gre). + type: str + version_added: 3.6.0 zone: description: - The trust level of the connection. 
@@ -896,6 +908,14 @@ EXAMPLES = r''' vxlan_local: 192.168.1.2 vxlan_remote: 192.168.1.5 + - name: Add gre + community.general.nmcli: + type: gre + conn_name: gre_test1 + ip_tunnel_dev: eth0 + ip_tunnel_local: 192.168.1.2 + ip_tunnel_remote: 192.168.1.5 + - name: Add ipip community.general.nmcli: type: ipip @@ -1058,6 +1078,8 @@ class Nmcli(object): self.ip_tunnel_dev = module.params['ip_tunnel_dev'] self.ip_tunnel_local = module.params['ip_tunnel_local'] self.ip_tunnel_remote = module.params['ip_tunnel_remote'] + self.ip_tunnel_input_key = module.params['ip_tunnel_input_key'] + self.ip_tunnel_output_key = module.params['ip_tunnel_output_key'] self.nmcli_bin = self.module.get_bin_path('nmcli', True) self.dhcp_client_id = module.params['dhcp_client_id'] self.zone = module.params['zone'] @@ -1190,6 +1212,11 @@ class Nmcli(object): 'ip-tunnel.parent': self.ip_tunnel_dev, 'ip-tunnel.remote': self.ip_tunnel_remote, }) + if self.type == 'gre': + options.update({ + 'ip-tunnel.input-key': self.ip_tunnel_input_key, + 'ip-tunnel.output-key': self.ip_tunnel_output_key + }) elif self.type == 'vlan': options.update({ 'vlan.id': self.vlanid, @@ -1247,6 +1274,7 @@ class Nmcli(object): 'dummy', 'ethernet', 'generic', + 'gre', 'infiniband', 'ipip', 'sit', @@ -1293,6 +1321,7 @@ class Nmcli(object): @property def tunnel_conn_type(self): return self.type in ( + 'gre', 'ipip', 'sit', ) @@ -1592,6 +1621,7 @@ def main(): 'dummy', 'ethernet', 'generic', + 'gre', 'infiniband', 'ipip', 'sit', @@ -1663,6 +1693,9 @@ def main(): ip_tunnel_dev=dict(type='str'), ip_tunnel_local=dict(type='str'), ip_tunnel_remote=dict(type='str'), + # ip-tunnel type gre specific vars + ip_tunnel_input_key=dict(type='str', no_log=True), + ip_tunnel_output_key=dict(type='str', no_log=True), # 802-11-wireless* specific vars ssid=dict(type='str'), wifi=dict(type='dict'), diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index f81b636a81..9277bd5fb6 
100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -62,6 +62,12 @@ TESTCASE_CONNECTION = [ 'state': 'absent', '_ansible_check_mode': True, }, + { + 'type': 'gre', + 'conn_name': 'non_existent_nw_device', + 'state': 'absent', + '_ansible_check_mode': True, + }, { 'type': 'ipip', 'conn_name': 'non_existent_nw_device', @@ -371,6 +377,39 @@ vxlan.local: 192.168.225.5 vxlan.remote: 192.168.225.6 """ +TESTCASE_GRE = [ + { + 'type': 'gre', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'gre-existent_nw_device', + 'ip_tunnel_dev': 'non_existent_gre_device', + 'ip_tunnel_local': '192.168.225.5', + 'ip_tunnel_remote': '192.168.225.6', + 'ip_tunnel_input_key': '1', + 'ip_tunnel_output_key': '2', + 'state': 'present', + '_ansible_check_mode': False, + } +] + +TESTCASE_GRE_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_device +connection.interface-name: gre-existent_nw_device +connection.autoconnect: yes +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no +ip-tunnel.mode: gre +ip-tunnel.parent: non_existent_gre_device +ip-tunnel.local: 192.168.225.5 +ip-tunnel.remote: 192.168.225.6 +ip-tunnel.input-key: 1 +ip-tunnel.output-key: 2 +""" + TESTCASE_IPIP = [ { 'type': 'ipip', @@ -708,6 +747,13 @@ def mocked_vxlan_connection_unchanged(mocker): execute_return=(0, TESTCASE_VXLAN_SHOW_OUTPUT, "")) +@pytest.fixture +def mocked_gre_connection_unchanged(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=(0, TESTCASE_GRE_SHOW_OUTPUT, "")) + + @pytest.fixture def mocked_ipip_connection_unchanged(mocker): mocker_set(mocker, @@ -1630,6 +1676,83 @@ def test_eth_dhcp_client_id_con_create(mocked_generic_connection_create, capfd): assert results['changed'] +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module']) +def 
test_create_gre(mocked_generic_connection_create, capfd): + """ + Test if gre created + """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'add' + assert args[0][3] == 'type' + assert args[0][4] == 'ip-tunnel' + assert args[0][5] == 'con-name' + assert args[0][6] == 'non_existent_nw_device' + + args_text = list(map(to_text, args[0])) + for param in ['connection.interface-name', 'gre-existent_nw_device', + 'ip-tunnel.local', '192.168.225.5', + 'ip-tunnel.mode', 'gre', + 'ip-tunnel.parent', 'non_existent_gre_device', + 'ip-tunnel.remote', '192.168.225.6', + 'ip-tunnel.input-key', '1', + 'ip-tunnel.output-key', '2']: + assert param in args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module']) +def test_gre_mod(mocked_generic_connection_modify, capfd): + """ + Test if gre modified + """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'modify' + assert args[0][3] == 'non_existent_nw_device' + + args_text = list(map(to_text, args[0])) + for param in ['ip-tunnel.local', '192.168.225.5', 'ip-tunnel.remote', '192.168.225.6']: + assert param in args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module']) +def 
test_gre_connection_unchanged(mocked_gre_connection_unchanged, capfd): + """ + Test : GRE connection unchanged + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert not results['changed'] + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_DHCP, indirect=['patch_ansible_module']) def test_ethernet_connection_dhcp_unchanged(mocked_ethernet_connection_dhcp_unchanged, capfd): """ From cc458f7c376d59455bc4028c25d20a850cb7fd82 Mon Sep 17 00:00:00 2001 From: Nicolas Karolak Date: Fri, 27 Aug 2021 06:08:54 +0200 Subject: [PATCH 0287/2828] parse scw-cli config file for oauth_token (#3250) If `api_token` is not set and config file exists, it will try to fetch the value from the activated profile and fallback on default. This should not break existing workflows. --- .../fragments/3250-parse-scw-config.yml | 2 + plugins/inventory/scaleway.py | 41 ++++++++++++++++++- 2 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3250-parse-scw-config.yml diff --git a/changelogs/fragments/3250-parse-scw-config.yml b/changelogs/fragments/3250-parse-scw-config.yml new file mode 100644 index 0000000000..8c96c55e47 --- /dev/null +++ b/changelogs/fragments/3250-parse-scw-config.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway plugin inventory - parse scw-cli config file for ``oauth_token`` (https://github.com/ansible-collections/community.general/pull/3250). diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 86140124c5..fa65eae321 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -13,6 +13,8 @@ DOCUMENTATION = r''' short_description: Scaleway inventory source description: - Get inventory hosts from Scaleway. + requirements: + - PyYAML options: plugin: description: Token that ensures this is a source file for the 'scaleway' plugin. 
@@ -30,9 +32,10 @@ DOCUMENTATION = r''' description: Filter results on a specific tag. type: list oauth_token: - required: True description: - Scaleway OAuth token. + - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file + (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)). - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). env: # in order of precedence @@ -95,13 +98,22 @@ variables: ansible_user: "'admin'" ''' +import os import json +try: + import yaml +except ImportError as exc: + YAML_IMPORT_ERROR = exc +else: + YAML_IMPORT_ERROR = None + from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link from ansible.module_utils.urls import open_url from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.six import raise_from import ansible.module_utils.six.moves.urllib.parse as urllib_parse @@ -278,13 +290,38 @@ class InventoryModule(BaseInventoryPlugin, Constructable): # Composed variables self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False) + def get_oauth_token(self): + oauth_token = self.get_option('oauth_token') + + if 'SCW_CONFIG_PATH' in os.environ: + scw_config_path = os.getenv('SCW_CONFIG_PATH') + elif 'XDG_CONFIG_HOME' in os.environ: + scw_config_path = os.path.join(os.getenv('XDG_CONFIG_HOME'), 'scw', 'config.yaml') + else: + scw_config_path = os.path.join(os.path.expanduser('~'), '.config', 'scw', 'config.yaml') + + if not oauth_token and os.path.exists(scw_config_path): + with open(scw_config_path) as fh: + scw_config = yaml.safe_load(fh) + active_profile = scw_config.get('active_profile', 'default') + if active_profile == 'default': + oauth_token = 
scw_config.get('secret_key') + else: + oauth_token = scw_config['profiles'][active_profile].get('secret_key') + + return oauth_token + def parse(self, inventory, loader, path, cache=True): + if YAML_IMPORT_ERROR: + raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR) super(InventoryModule, self).parse(inventory, loader, path) self._read_config_data(path=path) config_zones = self.get_option("regions") tags = self.get_option("tags") - token = self.get_option("oauth_token") + token = self.get_oauth_token() + if not token: + raise AnsibleError("'oauth_token' value is null, you must configure it either in inventory, envvars or scaleway-cli config.") hostname_preference = self.get_option("hostnames") for zone in self._get_zones(config_zones): From 825e17c1cfc33571b273984f195166f268b0850c Mon Sep 17 00:00:00 2001 From: Laurent Paumier <30328363+laurpaum@users.noreply.github.com> Date: Fri, 27 Aug 2021 06:17:04 +0200 Subject: [PATCH 0288/2828] Fix keycloak_realm module (#3231) * fix events_listeners element type add events_enabled parameter * Update plugins/modules/identity/keycloak/keycloak_realm.py Co-authored-by: Felix Fontein * add changelog * Update changelogs/fragments/3231-fix-keycloak-realm-events.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/3231-fix-keycloak-realm-events.yml Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_realm.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/3231-fix-keycloak-realm-events.yml | 5 +++++ plugins/modules/identity/keycloak/keycloak_realm.py | 12 ++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3231-fix-keycloak-realm-events.yml diff --git a/changelogs/fragments/3231-fix-keycloak-realm-events.yml b/changelogs/fragments/3231-fix-keycloak-realm-events.yml new file mode 100644 index 0000000000..9950ed2c59 --- /dev/null +++ b/changelogs/fragments/3231-fix-keycloak-realm-events.yml 
@@ -0,0 +1,5 @@ +bugfixes: + - keycloak_realm - element type for ``events_listeners`` parameter should be ``string`` instead of ``dict`` (https://github.com/ansible-collections/community.general/pull/3231). +minor_changes: + - keycloak_realm - add ``events_enabled`` parameter to allow activation or deactivation of login events (https://github.com/ansible-collections/community.general/pull/3231). + \ No newline at end of file diff --git a/plugins/modules/identity/keycloak/keycloak_realm.py b/plugins/modules/identity/keycloak/keycloak_realm.py index 95f79704ef..da37fa2723 100644 --- a/plugins/modules/identity/keycloak/keycloak_realm.py +++ b/plugins/modules/identity/keycloak/keycloak_realm.py @@ -242,6 +242,13 @@ options: - enabledEventTypes type: list elements: str + events_enabled: + description: + - Enables or disables login events for this realm. + aliases: + - eventsEnabled + type: bool + version_added: 3.6.0 events_expiration: description: - The realm events expiration. @@ -254,7 +261,7 @@ options: aliases: - eventsListeners type: list - elements: dict + elements: str failure_factor: description: - The realm failure factor. 
@@ -626,8 +633,9 @@ def main(): email_theme=dict(type='str', aliases=['emailTheme']), enabled=dict(type='bool'), enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']), + events_enabled=dict(type='bool', aliases=['eventsEnabled']), events_expiration=dict(type='int', aliases=['eventsExpiration']), - events_listeners=dict(type='list', elements='dict', aliases=['eventsListeners']), + events_listeners=dict(type='list', elements='str', aliases=['eventsListeners']), failure_factor=dict(type='int', aliases=['failureFactor']), internationalization_enabled=dict(type='bool', aliases=['internationalizationEnabled']), login_theme=dict(type='str', aliases=['loginTheme']), From e77adff0b76e13ce932dde6ab26ea320335d7476 Mon Sep 17 00:00:00 2001 From: Kellin Date: Fri, 27 Aug 2021 00:20:04 -0400 Subject: [PATCH 0289/2828] Linode Inventory can use full IP data from APIv4 (#3203) * Linode Inventory can use full IP data from APIv4 - The Linode dynamic inventory module does not currently distinguish between private and public IP addresses even though the Linode APIv4 contains this information. This change keeps the current behavior as the default and adds an option to set `ip_style: api`. When set, this option allows administrators to differentiate between private, public, slaac, local_link, and pool network addresses providing a more nuanced and granular view of the remote host's network information. 
Signed-off-by: Kellin * Review - amend changelog details - Adds a link back to this pull request - Uses markdown styles for easier to read publishing in the changelogs - Amends the wording style to match the existing changelog styles Co-authored-by: Felix Fontein * Add scope to example invocation - Adds the `community.general` scope to invocation example Co-authored-by: Felix Fontein * Convert lamda to list comprehension - Change the ip type filter from a lambda to a list comprehension Co-authored-by: Felix Fontein * Add punctuation to description sentence - Adds a period to the end of the description sentence Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...node-inventory-return-full-api-ip-data.yml | 2 + plugins/inventory/linode.py | 46 +++++++++++++++++++ tests/unit/plugins/inventory/test_linode.py | 1 + 3 files changed, 49 insertions(+) create mode 100644 changelogs/fragments/3203-linode-inventory-return-full-api-ip-data.yml diff --git a/changelogs/fragments/3203-linode-inventory-return-full-api-ip-data.yml b/changelogs/fragments/3203-linode-inventory-return-full-api-ip-data.yml new file mode 100644 index 0000000000..fa7581e820 --- /dev/null +++ b/changelogs/fragments/3203-linode-inventory-return-full-api-ip-data.yml @@ -0,0 +1,2 @@ +minor_changes: + - "linode inventory plugin - adds the ``ip_style`` configuration key. Set to ``api`` to get more detailed network details back from the remote Linode host (https://github.com/ansible-collections/community.general/pull/3203)." diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 5af9effd52..0ce510852a 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -26,6 +26,15 @@ DOCUMENTATION = r''' description: Marks this as an instance of the 'linode' plugin. required: true choices: ['linode', 'community.general.linode'] + ip_style: + description: Populate hostvars with all information available from the Linode APIv4. 
+ type: string + default: + - plain + choices: + - plain + - api + version_added: 3.6.0 access_token: description: The Linode account personal access token. required: true @@ -83,6 +92,13 @@ compose: # replace it with the first IPv4 address of the linode as follows: ansible_ssh_host: ipv4[0] ansible_port: 2222 + +# Example where control traffic limited to internal network +plugin: community.general.linode +access_token: foobar +ip_style: api +compose: + ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first" ''' import os @@ -170,14 +186,44 @@ class InventoryModule(BaseInventoryPlugin, Constructable): def _add_hostvars_for_instances(self): """Add hostvars for instances in the dynamic inventory.""" + ip_style = self.get_option('ip_style') for instance in self.instances: hostvars = instance._raw_json for hostvar_key in hostvars: + if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']: + continue self.inventory.set_variable( instance.label, hostvar_key, hostvars[hostvar_key] ) + if ip_style == 'api': + ips = instance.ips.ipv4.public + instance.ips.ipv4.private + ips += [instance.ips.ipv6.slaac, instance.ips.ipv6.link_local] + ips += instance.ips.ipv6.pools + + for ip_type in set(ip.type for ip in ips): + self.inventory.set_variable( + instance.label, + ip_type, + self._ip_data([ip for ip in ips if ip.type == ip_type]) + ) + + def _ip_data(self, ip_list): + data = [] + for ip in list(ip_list): + data.append( + { + 'address': ip.address, + 'subnet_mask': ip.subnet_mask, + 'gateway': ip.gateway, + 'public': ip.public, + 'prefix': ip.prefix, + 'rdns': ip.rdns, + 'type': ip.type + } + ) + return data def _validate_option(self, name, desired_type, option_value): """Validate user specified configuration data against types.""" diff --git a/tests/unit/plugins/inventory/test_linode.py b/tests/unit/plugins/inventory/test_linode.py index f2627d850d..501f95b1f2 100644 --- a/tests/unit/plugins/inventory/test_linode.py +++ 
b/tests/unit/plugins/inventory/test_linode.py @@ -49,6 +49,7 @@ def test_access_token_lookup(inventory): def test_validate_option(inventory): assert ['eu-west'] == inventory._validate_option('regions', list, 'eu-west') assert ['eu-west'] == inventory._validate_option('regions', list, ['eu-west']) + assert 'api' == inventory._validate_option('ip_style', str, 'api') def test_validation_option_bad_option(inventory): From 4e2d4e3c68c078e1b04ef69bc9a4a5f8588f3b7a Mon Sep 17 00:00:00 2001 From: Atlas974 <43972908+Atlas974@users.noreply.github.com> Date: Fri, 27 Aug 2021 18:48:32 +0200 Subject: [PATCH 0290/2828] Fixed incorrect VMID: cloning to an existing VM (#3266) * Fixed incorrect VMID: cloning to an existing VM During a cloning operation, if the destination VM already exists the VMID returned is not correct. The VMID returned should be that of the destination VM and not that of the source VM (consistent with line 1230). A playbook that relies on the returned VMID, for example, to perform other operations on the destination VM, will not work properly if it is unexpectedly interrupted. * Add files via upload * moved 3266-vmid-existing-target-clone.yml to changelogs/fragments/ replaced line separator CRLF -> LF * storing vmid list in variable to avoid multiple API calls --- changelogs/fragments/3266-vmid-existing-target-clone.yml | 3 +++ plugins/modules/cloud/misc/proxmox_kvm.py | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3266-vmid-existing-target-clone.yml diff --git a/changelogs/fragments/3266-vmid-existing-target-clone.yml b/changelogs/fragments/3266-vmid-existing-target-clone.yml new file mode 100644 index 0000000000..5ff59f5311 --- /dev/null +++ b/changelogs/fragments/3266-vmid-existing-target-clone.yml @@ -0,0 +1,3 @@ +bugfixes: + - proxmox_kvm - clone operation should return the VMID of the target VM and not that of the source VM. 
+ This was failing when the target VM with the chosen name already existed (https://github.com/ansible-collections/community.general/pull/3266). \ No newline at end of file diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 159968ce6e..25b29b369b 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -1201,8 +1201,9 @@ def main(): module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid) # Ensure the choosen VM name doesn't already exist when cloning - if get_vmid(proxmox, name): - module.exit_json(changed=False, vmid=vmid, msg="VM with name <%s> already exists" % name) + existing_vmid = get_vmid(proxmox, name) + if existing_vmid: + module.exit_json(changed=False, vmid=existing_vmid[0], msg="VM with name <%s> already exists" % name) # Ensure the choosen VM id doesn't already exist when cloning if get_vm(proxmox, newid): From 69641d36e18c5a62d966dc24db28d2a0d28b9bda Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 29 Aug 2021 07:50:09 +1200 Subject: [PATCH 0291/2828] openwrt_init - improvements (#3284) * improvements on openwrt_init * added changelog fragment --- .../3284-openwrt_init-improvements.yaml | 4 +++ plugins/modules/system/openwrt_init.py | 35 ++++++------------- 2 files changed, 14 insertions(+), 25 deletions(-) create mode 100644 changelogs/fragments/3284-openwrt_init-improvements.yaml diff --git a/changelogs/fragments/3284-openwrt_init-improvements.yaml b/changelogs/fragments/3284-openwrt_init-improvements.yaml new file mode 100644 index 0000000000..99a60dfce8 --- /dev/null +++ b/changelogs/fragments/3284-openwrt_init-improvements.yaml @@ -0,0 +1,4 @@ +minor_changes: + - openwrt_init - minor refactoring (https://github.com/ansible-collections/community.general/pull/3284). 
+bugfixes: + - openwrt_init - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3284). diff --git a/plugins/modules/system/openwrt_init.py b/plugins/modules/system/openwrt_init.py index afc3c3a956..fa9488ecb2 100644 --- a/plugins/modules/system/openwrt_init.py +++ b/plugins/modules/system/openwrt_init.py @@ -70,9 +70,7 @@ RETURN = ''' ''' import os -import glob from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes, to_native module = None init_script = None @@ -81,15 +79,12 @@ init_script = None # =============================== # Check if service is enabled def is_enabled(): - (rc, out, err) = module.run_command("%s enabled" % init_script) - if rc == 0: - return True - return False + rc, dummy, dummy = module.run_command([init_script, 'enabled']) + return rc == 0 # =========================================== # Main control flow - def main(): global module, init_script # init @@ -98,22 +93,19 @@ def main(): name=dict(required=True, type='str', aliases=['service']), state=dict(type='str', choices=['started', 'stopped', 'restarted', 'reloaded']), enabled=dict(type='bool'), - pattern=dict(type='str', required=False, default=None), + pattern=dict(type='str'), ), supports_check_mode=True, - required_one_of=[['state', 'enabled']], + required_one_of=[('state', 'enabled')], ) # initialize service = module.params['name'] init_script = '/etc/init.d/' + service - rc = 0 - out = err = '' result = { 'name': service, 'changed': False, } - # check if service exists if not os.path.exists(init_script): module.fail_json(msg='service %s does not exist' % service) @@ -129,13 +121,10 @@ def main(): # Change enable/disable if needed if enabled != module.params['enabled']: result['changed'] = True - if module.params['enabled']: - action = 'enable' - else: - action = 'disable' + action = 'enable' if module.params['enabled'] else 'disable' if 
not module.check_mode: - (rc, out, err) = module.run_command("%s %s" % (init_script, action)) + rc, dummy, err = module.run_command([init_script, action]) # openwrt init scripts can return a non-zero exit code on a successful 'enable' # command if the init script doesn't contain a STOP value, so we ignore the exit # code and explicitly check if the service is now in the desired state @@ -153,17 +142,13 @@ def main(): psbin = module.get_bin_path('ps', True) # this should be busybox ps, so we only want/need to the 'w' option - (rc, psout, pserr) = module.run_command('%s w' % psbin) + rc, psout, dummy = module.run_command([psbin, 'w']) # If rc is 0, set running as appropriate if rc == 0: lines = psout.split("\n") - for line in lines: - if module.params['pattern'] in line and "pattern=" not in line: - # so as to not confuse ./hacking/test-module.py - running = True - break + running = any((module.params['pattern'] in line and "pattern=" not in line) for line in lines) else: - (rc, out, err) = module.run_command("%s running" % init_script) + rc, dummy, dummy = module.run_command([init_script, 'running']) if rc == 0: running = True @@ -187,7 +172,7 @@ def main(): if action: if not module.check_mode: - (rc, out, err) = module.run_command("%s %s" % (init_script, action)) + rc, dummy, err = module.run_command([init_script, action]) if rc != 0: module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err)) From a91eb6ae4f9b2ca3143c953ed65303e94c806a1d Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 29 Aug 2021 07:54:39 +1200 Subject: [PATCH 0292/2828] snap - improved error handling (#3211) * snap - improved error handling * added changelog fragment * removed experiments left overs * rolled back the smaller list of params for commands other than install --- .../fragments/3211-snap-error-handling.yml | 2 + plugins/modules/packaging/os/snap.py | 70 ++++--------------- 2 files changed, 17 insertions(+), 55 
deletions(-) create mode 100644 changelogs/fragments/3211-snap-error-handling.yml diff --git a/changelogs/fragments/3211-snap-error-handling.yml b/changelogs/fragments/3211-snap-error-handling.yml new file mode 100644 index 0000000000..d361b99f01 --- /dev/null +++ b/changelogs/fragments/3211-snap-error-handling.yml @@ -0,0 +1,2 @@ +minor_changes: + - snap - improved module error handling, especially for the case when snap server is down (https://github.com/ansible-collections/community.general/issues/2970). diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py index de6fedccdc..a62be76425 100644 --- a/plugins/modules/packaging/os/snap.py +++ b/plugins/modules/packaging/os/snap.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright: (c) 2021, Alexei Znamensky (russoz) # Copyright: (c) 2018, Stanislas Lange (angristan) # Copyright: (c) 2018, Victor Carceler @@ -12,17 +13,13 @@ __metaclass__ = type DOCUMENTATION = ''' --- module: snap - short_description: Manages snaps - - description: - "Manages snaps packages." - options: name: description: - - Name of the snap to install or remove. Can be a list of snaps. + - Name of the snaps. 
required: true type: list elements: str @@ -117,10 +114,10 @@ from ansible_collections.community.general.plugins.module_utils.module_helper im __state_map = dict( present='install', absent='remove', - info='info', # not public - list='list', # not public enabled='enable', disabled='disable', + info='info', # not public + list='list', # not public ) @@ -171,9 +168,6 @@ class Snap(CmdStateModuleHelper): '\n'.join(results[3]), ] - def snap_exists(self, snap_name): - return 0 == self.run_command(params=[{'state': 'info'}, {'name': snap_name}])[0] - def is_snap_installed(self, snap_name): return 0 == self.run_command(params=[{'state': 'list'}, {'name': snap_name}])[0] @@ -188,14 +182,7 @@ class Snap(CmdStateModuleHelper): notes = match.group('notes') return "disabled" not in notes.split(',') - def validate_input_snaps(self): - """Ensure that all exist.""" - for snap_name in self.vars.name: - if not self.snap_exists(snap_name): - raise ModuleHelperException(msg="No snap matching '%s' available." 
% snap_name) - def state_present(self): - self.validate_input_snaps() # if snap doesnt exist, it will explode when trying to install self.vars.meta('classic').set(output=True) self.vars.meta('channel').set(output=True) actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)] @@ -227,59 +214,32 @@ class Snap(CmdStateModuleHelper): "error output for more details.".format(cmd=self.vars.cmd) raise ModuleHelperException(msg=msg) - def state_absent(self): - self.validate_input_snaps() # if snap doesnt exist, it will be absent by definition - actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)] + def _generic_state_action(self, actionable_func, actionable_var, params=None): + actionable_snaps = [s for s in self.vars.name if actionable_func(s)] if not actionable_snaps: return self.changed = True - self.vars.snaps_removed = actionable_snaps + self.vars[actionable_var] = actionable_snaps if self.module.check_mode: return - params = ['classic', 'channel', 'state'] # get base cmd parts + if params is None: + params = ['state'] commands = [params + [{'actionable_snaps': actionable_snaps}]] self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) if rc == 0: return - msg = "Ooops! Snap removal failed while executing '{cmd}', please examine logs and " \ + msg = "Ooops! 
Snap operation failed while executing '{cmd}', please examine logs and " \ "error output for more details.".format(cmd=self.vars.cmd) raise ModuleHelperException(msg=msg) + def state_absent(self): + self._generic_state_action(self.is_snap_installed, "snaps_removed", ['classic', 'channel', 'state']) + def state_enabled(self): - self.validate_input_snaps() - actionable_snaps = [s for s in self.vars.name if self.is_snap_enabled(s) is False] - if not actionable_snaps: - return - self.changed = True - self.vars.snaps_enabled = actionable_snaps - if self.module.check_mode: - return - params = ['classic', 'channel', 'state'] # get base cmd parts - commands = [params + [{'actionable_snaps': actionable_snaps}]] - self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) - if rc == 0: - return - msg = "Ooops! Snap enabling failed while executing '{cmd}', please examine logs and " \ - "error output for more details.".format(cmd=self.vars.cmd) - raise ModuleHelperException(msg=msg) + self._generic_state_action(lambda s: not self.is_snap_enabled(s), "snaps_enabled", ['classic', 'channel', 'state']) def state_disabled(self): - self.validate_input_snaps() - actionable_snaps = [s for s in self.vars.name if self.is_snap_enabled(s) is True] - if not actionable_snaps: - return - self.changed = True - self.vars.snaps_enabled = actionable_snaps - if self.module.check_mode: - return - params = ['classic', 'channel', 'state'] # get base cmd parts - commands = [params + [{'actionable_snaps': actionable_snaps}]] - self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) - if rc == 0: - return - msg = "Ooops! 
Snap disabling failed while executing '{cmd}', please examine logs and " \ - "error output for more details.".format(cmd=self.vars.cmd) - raise ModuleHelperException(msg=msg) + self._generic_state_action(self.is_snap_enabled, "snaps_disabled", ['classic', 'channel', 'state']) def main(): From cf433567535d2a854335a4e252aa6320752208b3 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 29 Aug 2021 10:15:34 +0200 Subject: [PATCH 0293/2828] Fix unit tests (#3289) * Force new enough requests version. * Revert "Force new enough requests version." This reverts commit 339d40bef7d10e19b4d8beb885eb7e414b5c7354. * Make sure we don't install a too new python-gitlab for Ansible 2.10. * Change requirement instead of appending new one. * Fix quoting. * Try to skip if import fails. * Revert "Try to skip if import fails." This reverts commit 254bbd8548c08be4d49aca2e2fcedf23e1d23436. * Make other Python versions happy... * Update tests/utils/shippable/units.sh Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- tests/utils/shippable/shippable.sh | 2 +- tests/utils/shippable/units.sh | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/utils/shippable/shippable.sh b/tests/utils/shippable/shippable.sh index 3a00812f12..5f94d9fff5 100755 --- a/tests/utils/shippable/shippable.sh +++ b/tests/utils/shippable/shippable.sh @@ -232,4 +232,4 @@ fi ansible-test env --dump --show --timeout "${timeout}" --color -v if [ "${SHIPPABLE_BUILD_ID:-}" ]; then "tests/utils/shippable/check_matrix.py"; fi -"tests/utils/shippable/${script}.sh" "${test}" +"tests/utils/shippable/${script}.sh" "${test}" "${ansible_version}" diff --git a/tests/utils/shippable/units.sh b/tests/utils/shippable/units.sh index 38e79935e7..88db336d26 100755 --- a/tests/utils/shippable/units.sh +++ b/tests/utils/shippable/units.sh @@ -22,6 +22,11 @@ esac ansible-test env --timeout "${timeout}" --color -v 
+if [ "$2" == "2.10" ]; then + sed -i -E 's/^python-gitlab($| .*)/python-gitlab < 2.10.1 ; python_version >= '\'3.6\''/g' tests/unit/requirements.txt + echo "python-gitlab ; python_version < '3.6'" >> tests/unit/requirements.txt +fi + # shellcheck disable=SC2086 ansible-test units --color -v --docker default --python "${version}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \ "${options[@]:+${options[@]}}" \ From df8fdcda7901a40d7d48c17ec9ce35c3d43ecbfe Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 29 Aug 2021 23:03:15 +1200 Subject: [PATCH 0294/2828] mh CmdMixin - added ArgFormat.BOOLEAN_NOT and logic (#3290) * mh CmdMixin - added ArgFormat.BOOLEAN_NOT and logic * added changelog fragment --- changelogs/fragments/3290-mh-cmd-boolean-not.yaml | 2 ++ plugins/module_utils/mh/mixins/cmd.py | 5 ++++- tests/unit/plugins/module_utils/test_module_helper.py | 10 ++++++++-- 3 files changed, 14 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/3290-mh-cmd-boolean-not.yaml diff --git a/changelogs/fragments/3290-mh-cmd-boolean-not.yaml b/changelogs/fragments/3290-mh-cmd-boolean-not.yaml new file mode 100644 index 0000000000..ab34539f15 --- /dev/null +++ b/changelogs/fragments/3290-mh-cmd-boolean-not.yaml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper cmd module utils - added the ``ArgFormat`` style ``BOOLEAN_NOT``, to add CLI parameters when the module argument is false-ish (https://github.com/ansible-collections/community.general/pull/3290). 
diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py index aed4174c4f..51e5ae9873 100644 --- a/plugins/module_utils/mh/mixins/cmd.py +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -16,6 +16,7 @@ class ArgFormat(object): BOOLEAN = 0 PRINTF = 1 FORMAT = 2 + BOOLEAN_NOT = 3 @staticmethod def stars_deco(num): @@ -50,12 +51,14 @@ class ArgFormat(object): _fmts = { ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []), + ArgFormat.BOOLEAN_NOT: lambda _fmt, v: ([] if bool(v) else [_fmt]), ArgFormat.PRINTF: printf_fmt, ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)], } self.name = name self.stars = stars + self.style = style if fmt is None: fmt = "{0}" @@ -76,7 +79,7 @@ class ArgFormat(object): self.arg_format = (self.stars_deco(stars))(self.arg_format) def to_text(self, value): - if value is None: + if value is None and self.style != ArgFormat.BOOLEAN_NOT: return [] func = self.arg_format return [str(p) for p in func(value)] diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index f40a0f10ee..00667fcea3 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -24,6 +24,12 @@ ARG_FORMATS = dict( False, []), simple_boolean_none=("--superflag", ArgFormat.BOOLEAN, 0, None, []), + simple_boolean_not_true=("--superflag", ArgFormat.BOOLEAN_NOT, 0, + True, []), + simple_boolean_not_false=("--superflag", ArgFormat.BOOLEAN_NOT, 0, + False, ["--superflag"]), + simple_boolean_not_none=("--superflag", ArgFormat.BOOLEAN_NOT, 0, + None, ["--superflag"]), single_printf=("--param=%s", ArgFormat.PRINTF, 0, "potatoes", ["--param=potatoes"]), single_printf_no_substitution=("--param", ArgFormat.PRINTF, 0, @@ -65,7 +71,7 @@ def test_arg_format(fmt, style, stars, value, expected): af = ArgFormat('name', fmt, style, stars) actual = af.to_text(value) print("formatted string = {0}".format(actual)) - assert 
actual == expected + assert actual == expected, "actual = {0}".format(actual) ARG_FORMATS_FAIL = dict( @@ -218,7 +224,7 @@ CAUSE_CHG_DECO_IDS = sorted(CAUSE_CHG_DECO.keys()) @pytest.mark.parametrize(CAUSE_CHG_DECO_PARAMS, [[CAUSE_CHG_DECO[tc][param] - for param in CAUSE_CHG_DECO_PARAMS] + for param in CAUSE_CHG_DECO_PARAMS] for tc in CAUSE_CHG_DECO_IDS], ids=CAUSE_CHG_DECO_IDS) def test_cause_changes_deco(method, expect_exception, expect_changed): From 1ce79db7633ce2a8caf71681222466a95e18de18 Mon Sep 17 00:00:00 2001 From: Reto Kupferschmid Date: Sun, 29 Aug 2021 13:20:46 +0200 Subject: [PATCH 0295/2828] add deprecation warning for python-dnsimple 1 (#3267) * add deprecation warning for python-dnsimple 1 * add changelog fragment * Update changelogs/fragments/3267-dnsimple1-deprecation.yml Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * fix typo Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * Update changelogs/fragments/3267-dnsimple1-deprecation.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/3267-dnsimple1-deprecation.yml | 3 +++ plugins/modules/net_tools/dnsimple.py | 7 +++++++ 2 files changed, 10 insertions(+) create mode 100644 changelogs/fragments/3267-dnsimple1-deprecation.yml diff --git a/changelogs/fragments/3267-dnsimple1-deprecation.yml b/changelogs/fragments/3267-dnsimple1-deprecation.yml new file mode 100644 index 0000000000..dadc1d2901 --- /dev/null +++ b/changelogs/fragments/3267-dnsimple1-deprecation.yml @@ -0,0 +1,3 @@ +--- +deprecated_features: + - "dnsimple - python-dnsimple < 2.0.0 is deprecated and support for it will be removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2946#discussion_r667624693)." 
diff --git a/plugins/modules/net_tools/dnsimple.py b/plugins/modules/net_tools/dnsimple.py index 188f9fd64a..3681348f4e 100644 --- a/plugins/modules/net_tools/dnsimple.py +++ b/plugins/modules/net_tools/dnsimple.py @@ -82,6 +82,8 @@ options: version_added: 3.5.0 requirements: - "dnsimple >= 1.0.0" +notes: + - "Support for C(dnsimple < 2) is deprecated and will be removed in community.general 5.0.0." author: "Alex Coomans (@drcapulet)" ''' @@ -402,6 +404,11 @@ def main(): if DNSIMPLE_MAJOR_VERSION > 1: ds = DNSimpleV2(account_email, account_api_token, sandbox, module) else: + module.deprecate( + 'Support for python-dnsimple < 2 is deprecated. ' + 'Update python-dnsimple to version >= 2.0.0', + version='5.0.0', collection_name='community.general' + ) ds = DNSimpleV1(account_email, account_api_token, sandbox, module) # Let's figure out what operation we want to do # No domain, return a list From d9dcdcbbe469d140de61cfb4b5643d5bddfadb4e Mon Sep 17 00:00:00 2001 From: Sebastian Damm Date: Mon, 30 Aug 2021 06:53:30 +0200 Subject: [PATCH 0296/2828] udm_dns_record: Fix handling of PTR records (#3244) (#3256) * udm_dns_record: Fix handling of PTR records (#3244) Before, it was not possible to manage PTR records in Univention DNS, due to broken zone lookups and improper used parameters of the object. This patch fixes the PTR handling, allowing both v4 and v6 entries. 
* udm_dns_record: [doc] add changelog fragment * udm_dns_record: [fix] validation errors * udm_dns_record: import ipaddress module conditionally (#3244) * udm_dns_record: fix sanity check error, improve doc (#3244) * udm_dns_record: Improve changes to meet community standards (#3244) --- ...256-fix-ptr-handling-in-udm_dns_record.yml | 2 + .../cloud/univention/udm_dns_record.py | 60 +++++++++++++++++-- 2 files changed, 57 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/3256-fix-ptr-handling-in-udm_dns_record.yml diff --git a/changelogs/fragments/3256-fix-ptr-handling-in-udm_dns_record.yml b/changelogs/fragments/3256-fix-ptr-handling-in-udm_dns_record.yml new file mode 100644 index 0000000000..141a31349f --- /dev/null +++ b/changelogs/fragments/3256-fix-ptr-handling-in-udm_dns_record.yml @@ -0,0 +1,2 @@ +bugfixes: + - udm_dns_record - fixed managing of PTR records, which can never have worked before (https://github.com/ansible-collections/community.general/pull/3256). diff --git a/plugins/modules/cloud/univention/udm_dns_record.py b/plugins/modules/cloud/univention/udm_dns_record.py index 0c56970dd3..4e7aa70b32 100644 --- a/plugins/modules/cloud/univention/udm_dns_record.py +++ b/plugins/modules/cloud/univention/udm_dns_record.py @@ -21,6 +21,7 @@ description: requirements: - Python >= 2.6 - Univention + - ipaddress (for I(type=ptr_record)) options: state: type: str @@ -34,11 +35,13 @@ options: description: - "Name of the record, this is also the DNS record. E.g. www for www.example.com." + - For PTR records this has to be the IP address. zone: type: str required: true description: - Corresponding DNS zone for this record, e.g. example.com. + - For PTR records this has to be the full reverse zone (for example C(1.1.192.in-addr.arpa)). 
type: type: str required: true @@ -66,12 +69,29 @@ EXAMPLES = ''' a: - 192.0.2.1 - 2001:0db8::42 + +- name: Create a DNS v4 PTR record on a UCS + community.general.udm_dns_record: + name: 192.0.2.1 + zone: 2.0.192.in-addr.arpa + type: ptr_record + data: + ptr_record: "www.example.com." + +- name: Create a DNS v6 PTR record on a UCS + community.general.udm_dns_record: + name: 2001:db8:0:0:0:ff00:42:8329 + zone: 2.4.0.0.0.0.f.f.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa + type: ptr_record + data: + ptr_record: "www.example.com." ''' RETURN = '''#''' HAVE_UNIVENTION = False +HAVE_IPADDRESS = False try: from univention.admin.handlers.dns import ( forward_zone, @@ -82,6 +102,7 @@ except ImportError: pass from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import missing_required_lib from ansible_collections.community.general.plugins.module_utils.univention_umc import ( umc_module_for_add, umc_module_for_edit, @@ -90,6 +111,11 @@ from ansible_collections.community.general.plugins.module_utils.univention_umc i config, uldap, ) +try: + import ipaddress + HAVE_IPADDRESS = True +except ImportError: + pass def main(): @@ -124,14 +150,30 @@ def main(): changed = False diff = None + workname = name + if type == 'ptr_record': + if not HAVE_IPADDRESS: + module.fail_json(msg=missing_required_lib('ipaddress')) + try: + if 'arpa' not in zone: + raise Exception("Zone must be reversed zone for ptr_record. (e.g. 
1.1.192.in-addr.arpa)") + ipaddr_rev = ipaddress.ip_address(name).reverse_pointer + subnet_offset = ipaddr_rev.find(zone) + if subnet_offset == -1: + raise Exception("reversed IP address {0} is not part of zone.".format(ipaddr_rev)) + workname = ipaddr_rev[0:subnet_offset - 1] + except Exception as e: + module.fail_json( + msg='handling PTR record for {0} in zone {1} failed: {2}'.format(name, zone, e) + ) + obj = list(ldap_search( - '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, name), + '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, workname), attr=['dNSZone'] )) - exists = bool(len(obj)) container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn()) - dn = 'relativeDomainName={0},{1}'.format(name, container) + dn = 'relativeDomainName={0},{1}'.format(workname, container) if state == 'present': try: @@ -144,13 +186,21 @@ def main(): ) or reverse_zone.lookup( config(), uldap(), - '(zone={0})'.format(zone), + '(zoneName={0})'.format(zone), scope='domain', ) + if len(so) == 0: + raise Exception("Did not find zone '{0}' in Univention".format(zone)) obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0]) else: obj = umc_module_for_edit('dns/{0}'.format(type), dn) - obj['name'] = name + + if type == 'ptr_record': + obj['ip'] = name + obj['address'] = workname + else: + obj['name'] = name + for k, v in data.items(): obj[k] = v diff = obj.diff() From 97e2c3dec9fee886a37d029b08a554228809eaa2 Mon Sep 17 00:00:00 2001 From: Laurent Paumier <30328363+laurpaum@users.noreply.github.com> Date: Tue, 31 Aug 2021 07:07:53 +0200 Subject: [PATCH 0297/2828] Keycloak: add identity providers management (#3210) * init new module * update * add mappers * improve mappers * tests * fix tests * fix tests * Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein 
* Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein * fix typos * update botmeta * improve change detection * fix tests * add integration tests * remove updateProfileFirstLoginMode parameter Co-authored-by: Laurent PAUMIER Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + .../identity/keycloak/keycloak.py | 164 +++++ .../keycloak/keycloak_identity_provider.py | 608 ++++++++++++++++++ plugins/modules/keycloak_identity_provider.py | 1 + .../keycloak_identity_provider/aliases | 1 + .../keycloak_identity_provider/tasks/main.yml | 171 +++++ .../keycloak_identity_provider/vars/main.yml | 7 + .../test_keycloak_identity_provider.py | 495 ++++++++++++++ 8 files changed, 1449 insertions(+) create mode 100644 plugins/modules/identity/keycloak/keycloak_identity_provider.py create mode 120000 plugins/modules/keycloak_identity_provider.py create mode 100644 tests/integration/targets/keycloak_identity_provider/aliases create mode 100644 tests/integration/targets/keycloak_identity_provider/tasks/main.yml create mode 100644 tests/integration/targets/keycloak_identity_provider/vars/main.yml create mode 100644 tests/unit/plugins/modules/identity/keycloak/test_keycloak_identity_provider.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 6055224145..0d2922182b 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -506,6 +506,8 @@ files: maintainers: Gaetan2907 $modules/identity/keycloak/keycloak_group.py: maintainers: adamgoossens + $modules/identity/keycloak/keycloak_identity_provider.py: + maintainers: laurpaum $modules/identity/keycloak/keycloak_realm.py: maintainers: kris2kris 
$modules/identity/keycloak/keycloak_role.py: diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index d53a29ba10..5ddb1320b9 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -78,6 +78,11 @@ URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authen URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority" URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}" +URL_IDENTITY_PROVIDERS = "{url}/admin/realms/{realm}/identity-provider/instances" +URL_IDENTITY_PROVIDER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}" +URL_IDENTITY_PROVIDER_MAPPERS = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers" +URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers/{id}" + def keycloak_argument_spec(): """ @@ -1437,3 +1442,162 @@ class KeycloakAPI(object): except Exception as e: self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s' % (config["alias"], realm, str(e))) + + def get_identity_providers(self, realm='master'): + """ Fetch representations for identity providers in a realm + :param realm: realm to be queried + :return: list of representations for identity providers + """ + idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) + try: + return json.loads(to_native(open_url(idps_url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s' + % (realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain list of identity providers for realm %s: %s' + % (realm, str(e))) + + def 
get_identity_provider(self, alias, realm='master'): + """ Fetch identity provider representation from a realm using the idp's alias. + If the identity provider does not exist, None is returned. + :param alias: Alias of the identity provider to fetch. + :param realm: Realm in which the identity provider resides; default 'master'. + """ + idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) + try: + return json.loads(to_native(open_url(idp_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' + % (alias, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' + % (alias, realm, str(e))) + + def create_identity_provider(self, idprep, realm='master'): + """ Create an identity provider. + :param idprep: Identity provider representation of the idp to be created. + :param realm: Realm in which this identity provider resides, default "master". + :return: HTTPResponse object on success + """ + idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) + try: + return open_url(idps_url, method='POST', headers=self.restheaders, + data=json.dumps(idprep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not create identity provider %s in realm %s: %s' + % (idprep['alias'], realm, str(e))) + + def update_identity_provider(self, idprep, realm='master'): + """ Update an existing identity provider. + :param idprep: Identity provider representation of the idp to be updated. + :param realm: Realm in which this identity provider resides, default "master". 
+ :return HTTPResponse object on success + """ + idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias']) + try: + return open_url(idp_url, method='PUT', headers=self.restheaders, + data=json.dumps(idprep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not update identity provider %s in realm %s: %s' + % (idprep['alias'], realm, str(e))) + + def delete_identity_provider(self, alias, realm='master'): + """ Delete an identity provider. + :param alias: Alias of the identity provider. + :param realm: Realm in which this identity provider resides, default "master". + """ + idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) + try: + return open_url(idp_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Unable to delete identity provider %s in realm %s: %s' + % (alias, realm, str(e))) + + def get_identity_provider_mappers(self, alias, realm='master'): + """ Fetch representations for identity provider mappers + :param alias: Alias of the identity provider. 
+ :param realm: realm to be queried + :return: list of representations for identity provider mappers + """ + mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) + try: + return json.loads(to_native(open_url(mappers_url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s' + % (alias, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s' + % (alias, realm, str(e))) + + def get_identity_provider_mapper(self, mid, alias, realm='master'): + """ Fetch identity provider representation from a realm using the idp's alias. + If the identity provider does not exist, None is returned. + :param mid: Unique ID of the mapper to fetch. + :param alias: Alias of the identity provider. + :param realm: Realm in which the identity provider resides; default 'master'. + """ + mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) + try: + return json.loads(to_native(open_url(mapper_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) + + def create_identity_provider_mapper(self, mapper, alias, realm='master'): + """ Create an identity provider mapper. + :param mapper: IdentityProviderMapperRepresentation of the mapper to be created. + :param alias: Alias of the identity provider. 
+ :param realm: Realm in which this identity provider resides, default "master". + :return: HTTPResponse object on success + """ + mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) + try: + return open_url(mappers_url, method='POST', headers=self.restheaders, + data=json.dumps(mapper), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not create identity provider mapper %s for idp %s in realm %s: %s' + % (mapper['name'], alias, realm, str(e))) + + def update_identity_provider_mapper(self, mapper, alias, realm='master'): + """ Update an existing identity provider. + :param mapper: IdentityProviderMapperRepresentation of the mapper to be updated. + :param alias: Alias of the identity provider. + :param realm: Realm in which this identity provider resides, default "master". + :return HTTPResponse object on success + """ + mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id']) + try: + return open_url(mapper_url, method='PUT', headers=self.restheaders, + data=json.dumps(mapper), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not update mapper %s for identity provider %s in realm %s: %s' + % (mapper['id'], alias, realm, str(e))) + + def delete_identity_provider_mapper(self, mid, alias, realm='master'): + """ Delete an identity provider. + :param mid: Unique ID of the mapper to delete. + :param alias: Alias of the identity provider. + :param realm: Realm in which this identity provider resides, default "master". 
+ """ + mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) + try: + return open_url(mapper_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Unable to delete mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) diff --git a/plugins/modules/identity/keycloak/keycloak_identity_provider.py b/plugins/modules/identity/keycloak/keycloak_identity_provider.py new file mode 100644 index 0000000000..f56aeb9067 --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_identity_provider.py @@ -0,0 +1,608 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_identity_provider + +short_description: Allows administration of Keycloak identity providers via Keycloak API + +version_added: 3.6.0 + +description: + - This module allows you to add, remove or modify Keycloak identity providers via the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html). + + +options: + state: + description: + - State of the identity provider. + - On C(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide. 
+ - On C(absent), the identity provider will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + description: + - The Keycloak realm under which this identity provider resides. + default: 'master' + type: str + + alias: + description: + - The alias uniquely identifies an identity provider and it is also used to build the redirect URI. + required: true + type: str + + display_name: + description: + - Friendly name for identity provider. + aliases: + - displayName + type: str + + enabled: + description: + - Enable/disable this identity provider. + type: bool + + store_token: + description: + - Enable/disable whether tokens must be stored after authenticating users. + aliases: + - storeToken + type: bool + + add_read_token_role_on_create: + description: + - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role. + aliases: + - addReadTokenRoleOnCreate + type: bool + + trust_email: + description: + - If enabled, email provided by this provider is not verified even if verification is enabled for the realm. + aliases: + - trustEmail + type: bool + + link_only: + description: + - If true, users cannot log in through this provider. They can only link to this provider. + This is useful if you don't want to allow login from the provider, but want to integrate with a provider. + aliases: + - linkOnly + type: bool + + first_broker_login_flow_alias: + description: + - Alias of authentication flow, which is triggered after first login with this identity provider. + aliases: + - firstBrokerLoginFlowAlias + type: str + + post_broker_login_flow_alias: + description: + - Alias of authentication flow, which is triggered after each login with this identity provider. + aliases: + - postBrokerLoginFlowAlias + type: str + + authenticate_by_default: + description: + - Specifies if this identity provider should be used by default for authentication even before displaying login screen. 
+ aliases: + - authenticateByDefault + type: bool + + provider_id: + description: + - Protocol used by this provider (supported values are C(oidc) or C(saml)). + aliases: + - providerId + type: str + + config: + description: + - Dict specifying the configuration options for the provider; the contents differ depending on the value of I(providerId). + Examples are given below for C(oidc) and C(saml). It is easiest to obtain valid config values by dumping an already-existing + identity provider configuration through check-mode in the I(existing) field. + type: dict + suboptions: + hide_on_login_page: + description: + - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) parameter. + aliases: + - hideOnLoginPage + type: bool + + gui_order: + description: + - Number defining order of the provider in GUI (for example, on Login page). + aliases: + - guiOrder + type: int + + sync_mode: + description: + - Default sync mode for all mappers. The sync mode determines when user data will be synced using the mappers. + aliases: + - syncMode + type: str + + issuer: + description: + - The issuer identifier for the issuer of the response. If not provided, no validation will be performed. + type: str + + authorizationUrl: + description: + - The Authorization URL. + type: str + + tokenUrl: + description: + - The Token URL. + type: str + + logoutUrl: + description: + - End session endpoint to use to logout user from external IDP. + type: str + + userInfoUrl: + description: + - The User Info URL. + type: str + + clientAuthMethod: + description: + - The client authentication method. + type: str + + clientId: + description: + - The client or client identifier registered within the identity provider. + type: str + + clientSecret: + description: + - The client or client secret registered within the identity provider. + type: str + + defaultScope: + description: + - The scopes to be sent when asking for authorization. 
+ type: str + + validateSignature: + description: + - Enable/disable signature validation of external IDP signatures. + type: bool + + useJwksUrl: + description: + - If the switch is on, identity provider public keys will be downloaded from given JWKS URL. + type: bool + + jwksUrl: + description: + - URL where identity provider keys in JWK format are stored. See JWK specification for more details. + type: str + + entityId: + description: + - The Entity ID that will be used to uniquely identify this SAML Service Provider. + type: str + + singleSignOnServiceUrl: + description: + - The URL that must be used to send authentication requests (SAML AuthnRequest). + type: str + + singleLogoutServiceUrl: + description: + - The URL that must be used to send logout requests. + type: str + + backchannelSupported: + description: + - Does the external IDP support backchannel logout? + type: str + + nameIDPolicyFormat: + description: + - Specifies the URI reference corresponding to a name identifier format. + type: str + + principalType: + description: + - Way to identify and track external users from the assertion. + type: str + + mappers: + description: + - A list of dicts defining mappers associated with this Identity Provider. + type: list + elements: dict + suboptions: + id: + description: + - Unique ID of this mapper. + type: str + + name: + description: + - Name of the mapper. + type: str + + identityProviderAlias: + description: + - Alias of the identity provider for this mapper. + type: str + + identityProviderMapper: + description: + - Type of mapper. + type: str + + config: + description: + - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper). 
+ type: dict + +extends_documentation_fragment: +- community.general.keycloak + +author: + - Laurent Paumier (@laurpaum) +''' + +EXAMPLES = ''' +- name: Create OIDC identity provider, authentication with credentials + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + auth_password: admin + realm: myrealm + alias: oidc-idp + display_name: OpenID Connect IdP + enabled: true + provider_id: oidc + config: + issuer: https://idp.example.com + authorizationUrl: https://idp.example.com/auth + tokenUrl: https://idp.example.com/token + userInfoUrl: https://idp.example.com/userinfo + clientAuthMethod: client_secret_post + clientId: my-client + clientSecret: secret + +- name: Create SAML identity provider, authentication with credentials + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + auth_password: admin + realm: myrealm + alias: saml-idp + display_name: SAML IdP + enabled: true + provider_id: saml + config: + entityId: https://auth.example.com/auth/realms/myrealm + singleSignOnServiceUrl: https://idp.example.com/login + wantAuthnRequestsSigned: true + wantAssertionsSigned: true +''' + +RETURN = ''' +msg: + description: Message as to what action was taken + returned: always + type: str + sample: "Identity provider my-idp has been created" + +proposed: + description: Representation of proposed changes to identity provider + returned: always + type: dict + sample: { + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "providerId": "oidc" + } + +existing: + 
description: Representation of existing identity provider + returned: always + type: dict + sample: { + "addReadTokenRoleOnCreate": false, + "alias": "my-idp", + "authenticateByDefault": false, + "config": { + "authorizationUrl": "https://old.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://old.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://old.example.com/token", + "userInfoUrl": "https://old.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": true, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", + "linkOnly": false, + "providerId": "oidc", + "storeToken": false, + "trustEmail": false, + } + +end_state: + description: Representation of identity provider after module execution + returned: always + type: dict + sample: { + "addReadTokenRoleOnCreate": false, + "alias": "my-idp", + "authenticateByDefault": false, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://idp.example.com", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": true, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", + "linkOnly": false, + "providerId": "oidc", + "storeToken": false, + "trustEmail": false, + } + +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def sanitize(idp): + result = idp.copy() + if 'config' in result: + result['config'] = sanitize(result['config']) + if 'clientSecret' in result: + 
result['clientSecret'] = '**********' + return result + + +def get_identity_provider_with_mappers(kc, alias, realm): + idp = kc.get_identity_provider(alias, realm) + if idp is not None: + idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name')) + if idp is None: + idp = dict() + return idp + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + mapper_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + identityProviderAlias=dict(type='str'), + identityProviderMapper=dict(type='str'), + config=dict(type='dict'), + ) + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + realm=dict(type='str', default='master'), + alias=dict(type='str', required=True), + add_read_token_role_on_create=dict(type='bool', aliases=['addReadTokenRoleOnCreate']), + authenticate_by_default=dict(type='bool', aliases=['authenticateByDefault']), + config=dict(type='dict'), + display_name=dict(type='str', aliases=['displayName']), + enabled=dict(type='bool'), + first_broker_login_flow_alias=dict(type='str', aliases=['firstBrokerLoginFlowAlias']), + link_only=dict(type='bool', aliases=['linkOnly']), + post_broker_login_flow_alias=dict(type='str', aliases=['postBrokerLoginFlowAlias']), + provider_id=dict(type='str', aliases=['providerId']), + store_token=dict(type='bool', aliases=['storeToken']), + trust_email=dict(type='bool', aliases=['trustEmail']), + mappers=dict(type='list', elements='dict', options=mapper_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header 
= get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + alias = module.params.get('alias') + state = module.params.get('state') + + # convert module parameters to client representation parameters (if they belong in there) + idp_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and + module.params.get(x) is not None] + + # does the identity provider already exist? + before_idp = get_identity_provider_with_mappers(kc, alias, realm) + + # build a changeset + changeset = dict() + + for param in idp_params: + new_param_value = module.params.get(param) + old_value = before_idp[camel(param)] if camel(param) in before_idp else None + if new_param_value != old_value: + changeset[camel(param)] = new_param_value + + # special handling of mappers list to allow change detection + changeset['mappers'] = before_idp.get('mappers', list()) + if module.params.get('mappers') is not None: + for new_mapper in module.params.get('mappers'): + old_mapper = next((x for x in changeset['mappers'] if x['name'] == new_mapper['name']), None) + new_mapper = dict((k, v) for k, v in new_mapper.items() if new_mapper[k] is not None) + if old_mapper is not None: + old_mapper.update(new_mapper) + else: + changeset['mappers'].append(new_mapper) + # remove mappers if not present in module params + changeset['mappers'] = [x for x in changeset['mappers'] + if [y for y in module.params.get('mappers', []) if y['name'] == x['name']] != []] + + # prepare the new representation + updated_idp = before_idp.copy() + updated_idp.update(changeset) + + result['proposed'] = sanitize(changeset) + result['existing'] = sanitize(before_idp) + + # if before_idp is none, the identity provider doesn't exist. + if before_idp == dict(): + if state == 'absent': + # nothing to do. 
+ if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = dict() + result['msg'] = 'Identity provider does not exist; doing nothing.' + module.exit_json(**result) + + # for 'present', create a new identity provider. + result['changed'] = True + + if module._diff: + result['diff'] = dict(before='', after=sanitize(updated_idp)) + + if module.check_mode: + module.exit_json(**result) + + # do it for real! + updated_idp = updated_idp.copy() + mappers = updated_idp.pop('mappers', []) + kc.create_identity_provider(updated_idp, realm) + for mapper in mappers: + kc.create_identity_provider_mapper(mapper, alias, realm) + after_idp = get_identity_provider_with_mappers(kc, alias, realm) + + result['end_state'] = sanitize(after_idp) + + result['msg'] = 'Identity provider {alias} has been created'.format(alias=alias) + module.exit_json(**result) + + else: + if state == 'present': + # no changes + if updated_idp == before_idp: + result['changed'] = False + result['end_state'] = sanitize(updated_idp) + result['msg'] = "No changes required to identity provider {alias}.".format(alias=alias) + module.exit_json(**result) + + # update the existing role + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_idp), after=sanitize(updated_idp)) + + if module.check_mode: + module.exit_json(**result) + + # do the update + updated_idp = updated_idp.copy() + updated_mappers = updated_idp.pop('mappers', []) + kc.update_identity_provider(updated_idp, realm) + for mapper in updated_mappers: + if mapper.get('id') is not None: + kc.update_identity_provider_mapper(mapper, alias, realm) + else: + kc.create_identity_provider_mapper(mapper, alias, realm) + for mapper in [x for x in before_idp['mappers'] + if [y for y in updated_mappers if y["name"] == x['name']] == []]: + kc.delete_identity_provider_mapper(mapper['id'], alias, realm) + + after_idp = get_identity_provider_with_mappers(kc, alias, realm) + + 
result['end_state'] = sanitize(after_idp) + + result['msg'] = "Identity provider {alias} has been updated".format(alias=alias) + module.exit_json(**result) + + elif state == 'absent': + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_idp), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete for real + kc.delete_identity_provider(alias, realm) + + result['end_state'] = dict() + + result['msg'] = "Identity provider {alias} has been deleted".format(alias=alias) + module.exit_json(**result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py new file mode 120000 index 0000000000..6beed321db --- /dev/null +++ b/plugins/modules/keycloak_identity_provider.py @@ -0,0 +1 @@ +./identity/keycloak/keycloak_identity_provider.py \ No newline at end of file diff --git a/tests/integration/targets/keycloak_identity_provider/aliases b/tests/integration/targets/keycloak_identity_provider/aliases new file mode 100644 index 0000000000..ad7ccf7ada --- /dev/null +++ b/tests/integration/targets/keycloak_identity_provider/aliases @@ -0,0 +1 @@ +unsupported diff --git a/tests/integration/targets/keycloak_identity_provider/tasks/main.yml b/tests/integration/targets/keycloak_identity_provider/tasks/main.yml new file mode 100644 index 0000000000..5bc0bc3fa0 --- /dev/null +++ b/tests/integration/targets/keycloak_identity_provider/tasks/main.yml @@ -0,0 +1,171 @@ +--- +- name: Create realm + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + id: "{{ realm }}" + realm: "{{ realm }}" + state: present + +- name: Create new identity provider + community.general.keycloak_identity_provider: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ 
admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + alias: "{{ idp }}" + display_name: OpenID Connect IdP + enabled: true + provider_id: oidc + config: + issuer: https://idp.example.com + authorizationUrl: https://idp.example.com/auth + tokenUrl: https://idp.example.com/token + userInfoUrl: https://idp.example.com/userinfo + clientAuthMethod: client_secret_post + clientId: clientid + clientSecret: clientsecret + syncMode: FORCE + mappers: + - name: "first_name" + identityProviderAlias: "oidc-idp" + identityProviderMapper: "oidc-user-attribute-idp-mapper" + config: + claim: "first_name" + user.attribute: "first_name" + syncMode: "INHERIT" + - name: "last_name" + identityProviderAlias: "oidc-idp" + identityProviderMapper: "oidc-user-attribute-idp-mapper" + config: + claim: "last_name" + user.attribute: "last_name" + syncMode: "INHERIT" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert identity provider created + assert: + that: + - result is changed + - result.existing == {} + - result.end_state.alias == "{{ idp }}" + - result.end_state.mappers != [] + +- name: Update existing identity provider (no change) + community.general.keycloak_identity_provider: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + alias: "{{ idp }}" + enabled: true + provider_id: oidc + config: + issuer: https://idp.example.com + authorizationUrl: https://idp.example.com/auth + tokenUrl: https://idp.example.com/token + userInfoUrl: https://idp.example.com/userinfo + clientAuthMethod: client_secret_post + clientId: clientid + clientSecret: "**********" + syncMode: FORCE + mappers: + - name: "first_name" + identityProviderAlias: "oidc-idp" + identityProviderMapper: "oidc-user-attribute-idp-mapper" + config: + claim: "first_name" + user.attribute: "first_name" + syncMode: "INHERIT" + - name: "last_name" + 
identityProviderAlias: "oidc-idp" + identityProviderMapper: "oidc-user-attribute-idp-mapper" + config: + claim: "last_name" + user.attribute: "last_name" + syncMode: "INHERIT" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert identity provider unchanged + assert: + that: + - result is not changed + +- name: Update existing identity provider (with change) + community.general.keycloak_identity_provider: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + alias: "{{ idp }}" + enabled: false + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert identity provider updated + assert: + that: + - result is changed + - result.existing.enabled == true + - result.end_state.enabled == false + +- name: Delete existing identity provider + community.general.keycloak_identity_provider: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + alias: "{{ idp }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert identity provider deleted + assert: + that: + - result is changed + - result.end_state == {} + +- name: Delete absent identity provider + community.general.keycloak_identity_provider: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + alias: "{{ idp }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert identity provider unchanged + assert: + that: + - result is not changed + - result.end_state == {} diff --git a/tests/integration/targets/keycloak_identity_provider/vars/main.yml b/tests/integration/targets/keycloak_identity_provider/vars/main.yml new file mode 100644 index 
0000000000..bd37149b31 --- /dev/null +++ b/tests/integration/targets/keycloak_identity_provider/vars/main.yml @@ -0,0 +1,7 @@ +--- +url: http://localhost:8080/auth +admin_realm: master +admin_user: admin +admin_password: password +realm: myrealm +idp: myidp diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_identity_provider.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_identity_provider.py new file mode 100644 index 0000000000..8666b61759 --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_identity_provider.py @@ -0,0 +1,495 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_identity_provider + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_identity_provider, create_identity_provider=None, update_identity_provider=None, delete_identity_provider=None, + get_identity_provider_mappers=None, create_identity_provider_mapper=None, update_identity_provider_mapper=None, + delete_identity_provider_mapper=None): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that 
patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + obj = keycloak_identity_provider.KeycloakAPI + with patch.object(obj, 'get_identity_provider', side_effect=get_identity_provider) \ + as mock_get_identity_provider: + with patch.object(obj, 'create_identity_provider', side_effect=create_identity_provider) \ + as mock_create_identity_provider: + with patch.object(obj, 'update_identity_provider', side_effect=update_identity_provider) \ + as mock_update_identity_provider: + with patch.object(obj, 'delete_identity_provider', side_effect=delete_identity_provider) \ + as mock_delete_identity_provider: + with patch.object(obj, 'get_identity_provider_mappers', side_effect=get_identity_provider_mappers) \ + as mock_get_identity_provider_mappers: + with patch.object(obj, 'create_identity_provider_mapper', side_effect=create_identity_provider_mapper) \ + as mock_create_identity_provider_mapper: + with patch.object(obj, 'update_identity_provider_mapper', side_effect=update_identity_provider_mapper) \ + as mock_update_identity_provider_mapper: + with patch.object(obj, 'delete_identity_provider_mapper', side_effect=delete_identity_provider_mapper) \ + as mock_delete_identity_provider_mapper: + yield mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, \ + mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, \ + mock_update_identity_provider_mapper, mock_delete_identity_provider_mapper + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + 
object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakIdentityProvider(ModuleTestCase): + def setUp(self): + super(TestKeycloakIdentityProvider, self).setUp() + self.module = keycloak_identity_provider + + def test_create_when_absent(self): + """Add a new identity provider""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'alias': 'oidc-idp', + 'display_name': 'OpenID Connect IdP', + 'enabled': True, + 'provider_id': 'oidc', + 'config': { + 'issuer': 'https://idp.example.com', + 'authorizationUrl': 'https://idp.example.com/auth', + 'tokenUrl': 'https://idp.example.com/token', + 'userInfoUrl': 'https://idp.example.com/userinfo', + 'clientAuthMethod': 'client_secret_post', + 'clientId': 'my-client', + 'clientSecret': 'secret', + 'syncMode': "FORCE", + }, + 'mappers': [{ + 'name': "first_name", + 
'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'config': { + 'claim': "first_name", + 'user.attribute': "first_name", + 'syncMode': "INHERIT", + } + }, { + 'name': "last_name", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'config': { + 'claim': "last_name", + 'user.attribute': "last_name", + 'syncMode': "INHERIT", + } + }] + } + return_value_idp_get = [ + None, + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "no_log", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + } + ] + return_value_mappers_get = [ + [{ + "config": { + "claim": "first_name", + "syncMode": "INHERIT", + "user.attribute": "first_name" + }, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "first_name" + }, { + "config": { + "claim": "last_name", + "syncMode": "INHERIT", + "user.attribute": "last_name" + }, + "id": "f00c61e0-34d9-4bed-82d1-7e45acfefc09", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "last_name" + }] + ] + return_value_idp_created = [None] + return_value_mapper_created = [None, None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with 
patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + create_identity_provider=return_value_idp_created, create_identity_provider_mapper=return_value_mapper_created) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_identity_provider.mock_calls), 2) + self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1) + self.assertEqual(len(mock_create_identity_provider.mock_calls), 1) + self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 2) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present(self): + """Update existing identity provider""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'alias': 'oidc-idp', + 'display_name': 'OpenID Connect IdP', + 'enabled': True, + 'provider_id': 'oidc', + 'config': { + 'issuer': 'https://idp.example.com', + 'authorizationUrl': 'https://idp.example.com/auth', + 'tokenUrl': 'https://idp.example.com/token', + 'userInfoUrl': 'https://idp.example.com/userinfo', + 'clientAuthMethod': 'client_secret_post', + 'clientId': 'my-client', + 'clientSecret': 'secret', + 'syncMode': "FORCE" + }, + 'mappers': [{ + 'name': "first_name", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'config': { + 'claim': "first_name", + 'user.attribute': "first_name", + 
'syncMode': "INHERIT", + } + }, { + 'name': "last_name", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'config': { + 'claim': "last_name", + 'user.attribute': "last_name", + 'syncMode': "INHERIT", + } + }] + } + return_value_idp_get = [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "no_log", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP changeme", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + }, + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "no_log", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + } + ] + return_value_mappers_get = [ + [{ + "config": { + "claim": "first_name_changeme", + "syncMode": "INHERIT", + "user.attribute": "first_name_changeme" + }, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", 
+ "name": "first_name" + }], + [{ + "config": { + "claim": "first_name", + "syncMode": "INHERIT", + "user.attribute": "first_name" + }, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "first_name" + }, { + "config": { + "claim": "last_name", + "syncMode": "INHERIT", + "user.attribute": "last_name" + }, + "id": "f00c61e0-34d9-4bed-82d1-7e45acfefc09", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "last_name" + }] + ] + return_value_idp_updated = [None] + return_value_mapper_updated = [None] + return_value_mapper_created = [None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, + create_identity_provider_mapper=return_value_mapper_created) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_identity_provider.mock_calls), 2) + self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 2) + self.assertEqual(len(mock_update_identity_provider.mock_calls), 1) + self.assertEqual(len(mock_update_identity_provider_mapper.mock_calls), 1) + self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def 
test_delete_when_absent(self): + """Remove an absent identity provider""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'alias': 'oidc-idp', + 'state': 'absent', + } + return_value_idp_get = [None] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_identity_provider.mock_calls), 1) + self.assertEqual(len(mock_delete_identity_provider.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_present(self): + """Remove an existing identity provider""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'alias': 'oidc-idp', + 'state': 'absent', + } + return_value_idp_get = [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "no_log", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": 
"https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + }, + None + ] + return_value_mappers_get = [ + [{ + "config": { + "claim": "email", + "syncMode": "INHERIT", + "user.attribute": "email" + }, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "email" + }] + ] + return_value_idp_deleted = [None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + delete_identity_provider=return_value_idp_deleted) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_identity_provider.mock_calls), 1) + self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1) + self.assertEqual(len(mock_delete_identity_provider.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 58c6f6c95af9c7f27d9dac67c3d05c8c7f07d546 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Tue, 31 Aug 2021 01:10:10 -0400 Subject: [PATCH 0298/2828] Initial commit (#3300) --- plugins/modules/cloud/misc/proxmox.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git 
a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py index c777564186..136829d13f 100644 --- a/plugins/modules/cloud/misc/proxmox.py +++ b/plugins/modules/cloud/misc/proxmox.py @@ -32,7 +32,14 @@ options: type: str disk: description: - - hard disk size in GB for instance + - This option was previously described as "hard disk size in GB for instance" however several formats describing + a lxc mount are permitted. + - Older versions of Proxmox will accept a numeric value for size using the I(storage) parameter to automatically + choose which storage to allocate from, however new versions enforce the C(:) syntax. + - "Additional options are available by using some combination of the following key-value pairs as a + comma-delimited list C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] + [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=])." + - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description. - If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this option has a default of C(3). Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0. From baa721ac2281f9e628821863798f030c9efd4c9d Mon Sep 17 00:00:00 2001 From: froebela <32922546+froebela@users.noreply.github.com> Date: Tue, 31 Aug 2021 07:11:58 +0200 Subject: [PATCH 0299/2828] zfs.py: treated received properties as local and added diff mode support (#502) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * zfs.py: treated received properties as local and added diff mode support If you use "zfs set" to explicitly set ZFS properties, they are marked as from source "local". If ZFS properties are implicitly set by using "zfs send" and "zfs receive", for example as part of a template based installation, they are marked as from source "received". 
But as there is no technical difference between both types of them, the “received” ZFS properties should also be considered “local”. Otherwise Ansible would detect changes, which aren’t actual changes. Therefore I changed line 202/207 to reflect this. For us it’s quite important, that Ansible modules support the diff mode in order to qualify changes. Therefore I added some code lines to address this. * added changelog fragment for PR #502 * fixed typos in changelog fragment for PR #502 * minor changes in changelog fragment for PR #502 * added link to pull request in changelog fragment for PR #502 * extended the diff data structure to always include the name of the zfs filesystem * added code to also maintain the diff data structure after a change * reverted back some code lines for better code readability * added an extra dict in the diff data structure to hold the zfs properties --- .../502-zfs_bugfix_and_diff_mode_support.yaml | 4 ++++ plugins/modules/storage/zfs/zfs.py | 22 +++++++++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/502-zfs_bugfix_and_diff_mode_support.yaml diff --git a/changelogs/fragments/502-zfs_bugfix_and_diff_mode_support.yaml b/changelogs/fragments/502-zfs_bugfix_and_diff_mode_support.yaml new file mode 100644 index 0000000000..1ba7727c7c --- /dev/null +++ b/changelogs/fragments/502-zfs_bugfix_and_diff_mode_support.yaml @@ -0,0 +1,4 @@ +bugfixes: + - zfs - treated received properties as local (https://github.com/ansible-collections/community.general/pull/502). +minor_changes: + - zfs - added diff mode support (https://github.com/ansible-collections/community.general/pull/502). 
diff --git a/plugins/modules/storage/zfs/zfs.py b/plugins/modules/storage/zfs/zfs.py index 2d5d4487dd..a804753a16 100644 --- a/plugins/modules/storage/zfs/zfs.py +++ b/plugins/modules/storage/zfs/zfs.py @@ -194,12 +194,16 @@ class Zfs(object): self.module.fail_json(msg=err) def set_properties_if_changed(self): + diff = {'before': {'extra_zfs_properties': {}}, 'after': {'extra_zfs_properties': {}}} current_properties = self.get_current_properties() for prop, value in self.properties.items(): - if current_properties.get(prop, None) != value: + current_value = current_properties.get(prop, None) + if current_value != value: self.set_property(prop, value) + diff['before']['extra_zfs_properties'][prop] = current_value + diff['after']['extra_zfs_properties'][prop] = value if self.module.check_mode: - return + return diff updated_properties = self.get_current_properties() for prop in self.properties: value = updated_properties.get(prop, None) @@ -207,6 +211,9 @@ class Zfs(object): self.module.fail_json(msg="zfsprop was not present after being successfully set: %s" % prop) if current_properties.get(prop, None) != value: self.changed = True + if prop in diff['after']['extra_zfs_properties']: + diff['after']['extra_zfs_properties'][prop] = value + return diff def get_current_properties(self): cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,value,source"] @@ -220,7 +227,7 @@ class Zfs(object): # include source '-' so that creation-only properties are not removed # to avoids errors when the dataset already exists and the property is not changed # this scenario is most likely when the same playbook is run more than once - if source == 'local' or source == '-': + if source == 'local' or source == 'received' or source == '-': properties[prop] = value # Add alias for enhanced sharing properties if self.enhanced_sharing: @@ -266,13 +273,20 @@ def main(): if state == 'present': if zfs.exists(): - zfs.set_properties_if_changed() + result['diff'] = 
zfs.set_properties_if_changed() else: zfs.create() + result['diff'] = {'before': {'state': 'absent'}, 'after': {'state': state}} elif state == 'absent': if zfs.exists(): zfs.destroy() + result['diff'] = {'before': {'state': 'present'}, 'after': {'state': state}} + else: + result['diff'] = {} + + result['diff']['before_header'] = name + result['diff']['after_header'] = name result.update(zfs.properties) result['changed'] = zfs.changed From b2bb7e3f9c2e4225ac8f6c6867ab4051c3538993 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 31 Aug 2021 17:14:08 +1200 Subject: [PATCH 0300/2828] django_manage - added splitting the command parameter for running (#3283) * added splitting the command parameter for running * added changelog fragment * refactored variable names for improved readability --- ...3-django_manage-fix-command-splitting.yaml | 2 + .../web_infrastructure/django_manage.py | 38 ++++++++++--------- .../simple_project/p1/p1/settings.py | 1 + .../targets/django_manage/tasks/main.yaml | 6 +++ 4 files changed, 29 insertions(+), 18 deletions(-) create mode 100644 changelogs/fragments/3283-django_manage-fix-command-splitting.yaml diff --git a/changelogs/fragments/3283-django_manage-fix-command-splitting.yaml b/changelogs/fragments/3283-django_manage-fix-command-splitting.yaml new file mode 100644 index 0000000000..ba8b4efd69 --- /dev/null +++ b/changelogs/fragments/3283-django_manage-fix-command-splitting.yaml @@ -0,0 +1,2 @@ +bugfixes: + - django_manage - argument ``command`` is being splitted again as it should (https://github.com/ansible-collections/community.general/issues/3215). 
diff --git a/plugins/modules/web_infrastructure/django_manage.py b/plugins/modules/web_infrastructure/django_manage.py index 98ffdc446b..0c8126c457 100644 --- a/plugins/modules/web_infrastructure/django_manage.py +++ b/plugins/modules/web_infrastructure/django_manage.py @@ -158,6 +158,7 @@ EXAMPLES = """ import os import sys +import shlex from ansible.module_utils.basic import AnsibleModule @@ -273,61 +274,62 @@ def main(): ), ) - command = module.params['command'] + command_split = shlex.split(module.params['command']) + command_bin = command_split[0] project_path = module.params['project_path'] virtualenv = module.params['virtualenv'] for param in specific_params: value = module.params[param] - if value and param not in command_allowed_param_map[command]: - module.fail_json(msg='%s param is incompatible with command=%s' % (param, command)) + if value and param not in command_allowed_param_map[command_bin]: + module.fail_json(msg='%s param is incompatible with command=%s' % (param, command_bin)) - for param in command_required_param_map.get(command, ()): + for param in command_required_param_map.get(command_bin, ()): if not module.params[param]: - module.fail_json(msg='%s param is required for command=%s' % (param, command)) + module.fail_json(msg='%s param is required for command=%s' % (param, command_bin)) _ensure_virtualenv(module) - cmd = ["./manage.py", command] + run_cmd_args = ["./manage.py"] + command_split - if command in noinput_commands: - cmd.append("--noinput") + if command_bin in noinput_commands and '--noinput' not in command_split: + run_cmd_args.append("--noinput") for param in general_params: if module.params[param]: - cmd.append('--%s=%s' % (param, module.params[param])) + run_cmd_args.append('--%s=%s' % (param, module.params[param])) for param in specific_boolean_params: if module.params[param]: - cmd.append('--%s' % param) + run_cmd_args.append('--%s' % param) # these params always get tacked on the end of the command for param in 
end_of_command_params: if module.params[param]: - cmd.append(module.params[param]) + run_cmd_args.append(module.params[param]) - rc, out, err = module.run_command(cmd, cwd=project_path) + rc, out, err = module.run_command(run_cmd_args, cwd=project_path) if rc != 0: - if command == 'createcachetable' and 'table' in err and 'already exists' in err: + if command_bin == 'createcachetable' and 'table' in err and 'already exists' in err: out = 'already exists.' else: if "Unknown command:" in err: - _fail(module, cmd, err, "Unknown django command: %s" % command) - _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path) + _fail(module, run_cmd_args, err, "Unknown django command: %s" % command_bin) + _fail(module, run_cmd_args, out, err, path=os.environ["PATH"], syspath=sys.path) changed = False lines = out.split('\n') - filt = globals().get(command + "_filter_output", None) + filt = globals().get(command_bin + "_filter_output", None) if filt: filtered_output = list(filter(filt, lines)) if len(filtered_output): changed = True - check_changed = globals().get("{0}_check_changed".format(command), None) + check_changed = globals().get("{0}_check_changed".format(command_bin), None) if check_changed: changed = check_changed(out) - module.exit_json(changed=changed, out=out, cmd=cmd, app_path=project_path, project_path=project_path, + module.exit_json(changed=changed, out=out, cmd=run_cmd_args, app_path=project_path, project_path=project_path, virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath']) diff --git a/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py b/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py index 0a11583aba..f2472c1fe8 100644 --- a/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py +++ b/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py @@ -121,3 
+121,4 @@ USE_TZ = True # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATIC_URL = '/static/' +STATIC_ROOT = '/tmp/django-static' diff --git a/tests/integration/targets/django_manage/tasks/main.yaml b/tests/integration/targets/django_manage/tasks/main.yaml index ed305ca96b..0421739acc 100644 --- a/tests/integration/targets/django_manage/tasks/main.yaml +++ b/tests/integration/targets/django_manage/tasks/main.yaml @@ -48,3 +48,9 @@ pythonpath: "{{ tmp_django_root.path }}/1045-single-app-project/" command: check virtualenv: "{{ tmp_django_root.path }}/venv" + +- name: Run collectstatic --noinput on simple project + community.general.django_manage: + project_path: "{{ tmp_django_root.path }}/simple_project/p1" + command: collectstatic --noinput + virtualenv: "{{ tmp_django_root.path }}/venv" From 2d6816e11e1672df5b2aa485e8af9eaa45d7c5be Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Tue, 31 Aug 2021 04:21:53 -0400 Subject: [PATCH 0301/2828] proxmox inventory plugin - Update examples documentation (#3299) * Initial commit * Update plugins/inventory/proxmox.py Co-authored-by: Felix Fontein --- plugins/inventory/proxmox.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 33a564f333..f52f0f1bb3 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -88,13 +88,24 @@ DOCUMENTATION = ''' ''' EXAMPLES = ''' +# Minimal example which will not gather additional facts for QEMU/LXC guests +# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006 # my.proxmox.yml plugin: community.general.proxmox -url: http://localhost:8006 user: ansible@pve password: secure -validate_certs: no + +# More complete example demonstrating the use of 'want_facts' and the constructed options +# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true' +# my.proxmox.yml +plugin: 
community.general.proxmox +url: http://pve.domain.com:8006 +user: ansible@pve +password: secure +validate_certs: false +want_facts: true keyed_groups: + # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true' - key: proxmox_tags_parsed separator: "" prefix: group From edd7b84285dd944f8c3e736928ef6a56a563748b Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 31 Aug 2021 22:34:57 +1200 Subject: [PATCH 0302/2828] pamd - fixed issue+minor refactorings (#3285) * pamd - fixed issue+minor refactorings * added changelog fragment * added unit test suggested in issue * Update tests/integration/targets/pamd/tasks/main.yml * fixed per PR + additional adjustment Co-authored-by: Felix Fontein --- .../3285-pamd-updated-with-empty-args.yaml | 4 ++ plugins/modules/system/pamd.py | 46 +++++++++---------- tests/integration/targets/pamd/tasks/main.yml | 31 ++++++++++--- .../unit/plugins/modules/system/test_pamd.py | 8 ++++ 4 files changed, 59 insertions(+), 30 deletions(-) create mode 100644 changelogs/fragments/3285-pamd-updated-with-empty-args.yaml diff --git a/changelogs/fragments/3285-pamd-updated-with-empty-args.yaml b/changelogs/fragments/3285-pamd-updated-with-empty-args.yaml new file mode 100644 index 0000000000..1c176dfdc3 --- /dev/null +++ b/changelogs/fragments/3285-pamd-updated-with-empty-args.yaml @@ -0,0 +1,4 @@ +bugfixes: + - pamd - code for ``state=updated`` when dealing with the pam module arguments, made no distinction between ``None`` and an empty list (https://github.com/ansible-collections/community.general/issues/3260). +minor_changes: + - pamd - minor refactorings (https://github.com/ansible-collections/community.general/pull/3285). 
diff --git a/plugins/modules/system/pamd.py b/plugins/modules/system/pamd.py index 738a23ee43..dda504974d 100644 --- a/plugins/modules/system/pamd.py +++ b/plugins/modules/system/pamd.py @@ -274,8 +274,7 @@ RULE_REGEX = re.compile(r"""(?P-?(?:auth|account|session|password))\s (?P\[.*\]|\S*)\s+ (?P\S*)\s* (?P.*)\s*""", re.X) - -RULE_ARG_REGEX = re.compile(r"""(\[.*\]|\S*)""") +RULE_ARG_REGEX = re.compile(r"(\[.*\]|\S*)") VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session'] @@ -358,11 +357,9 @@ class PamdRule(PamdLine): # Method to check if a rule matches the type, control and path. def matches(self, rule_type, rule_control, rule_path, rule_args=None): - if (rule_type == self.rule_type and + return (rule_type == self.rule_type and rule_control == self.rule_control and - rule_path == self.rule_path): - return True - return False + rule_path == self.rule_path) @classmethod def rule_from_string(cls, line): @@ -507,25 +504,25 @@ class PamdService(object): # Get a list of rules we want to change rules_to_find = self.get(rule_type, rule_control, rule_path) - new_args = parse_module_arguments(new_args) + new_args = parse_module_arguments(new_args, return_none=True) changes = 0 for current_rule in rules_to_find: rule_changed = False if new_type: - if(current_rule.rule_type != new_type): + if current_rule.rule_type != new_type: rule_changed = True current_rule.rule_type = new_type if new_control: - if(current_rule.rule_control != new_control): + if current_rule.rule_control != new_control: rule_changed = True current_rule.rule_control = new_control if new_path: - if(current_rule.rule_path != new_path): + if current_rule.rule_path != new_path: rule_changed = True current_rule.rule_path = new_path - if new_args: - if(current_rule.rule_args != new_args): + if new_args is not None: + if current_rule.rule_args != new_args: rule_changed = True current_rule.rule_args = new_args @@ -724,8 +721,9 @@ class PamdService(object): 
current_line = self._head while current_line is not None: - if not current_line.validate()[0]: - return current_line.validate() + curr_validate = current_line.validate() + if not curr_validate[0]: + return curr_validate current_line = current_line.next return True, "Module is valid" @@ -750,22 +748,25 @@ class PamdService(object): return '\n'.join(lines) + '\n' -def parse_module_arguments(module_arguments): - # Return empty list if we have no args to parse - if not module_arguments: - return [] - elif isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]: +def parse_module_arguments(module_arguments, return_none=False): + # If args is None, return empty list by default. + # But if return_none is True, then return None + if module_arguments is None: + return None if return_none else [] + if isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]: return [] if not isinstance(module_arguments, list): module_arguments = [module_arguments] - parsed_args = list() + # From this point on, module_arguments is guaranteed to be a list, empty or not + parsed_args = [] + re_clear_spaces = re.compile(r"\s*=\s*") for arg in module_arguments: for item in filter(None, RULE_ARG_REGEX.findall(arg)): if not item.startswith("["): - re.sub("\\s*=\\s*", "=", item) + re_clear_spaces.sub("=", item) parsed_args.append(item) return parsed_args @@ -861,8 +862,7 @@ def main(): fd.write(str(service)) except IOError: - module.fail_json(msg='Unable to create temporary \ - file %s' % temp_file) + module.fail_json(msg='Unable to create temporary file %s' % temp_file) module.atomic_move(temp_file.name, os.path.realpath(fname)) diff --git a/tests/integration/targets/pamd/tasks/main.yml b/tests/integration/targets/pamd/tasks/main.yml index 3e0fb4ee32..3835ff9db0 100644 --- a/tests/integration/targets/pamd/tasks/main.yml +++ b/tests/integration/targets/pamd/tasks/main.yml @@ -5,11 +5,10 @@ set_fact: test_pamd_file: 
"/tmp/pamd_file" -- name: Copy temporary pam.d file +- name: Create temporary pam.d file copy: content: "session required pam_lastlog.so silent showfailed" dest: "{{ test_pamd_file }}" - - name: Test working on a single-line file works (2925) community.general.pamd: path: /tmp @@ -20,17 +19,37 @@ module_arguments: silent state: args_absent register: pamd_file_output - - name: Check if changes made assert: that: - pamd_file_output is changed -- name: Copy temporary pam.d file +- name: Test removing all arguments from an entry (3260) + community.general.pamd: + path: /tmp + name: pamd_file + type: session + control: required + module_path: pam_lastlog.so + module_arguments: "" + state: updated + register: pamd_file_output_noargs +- name: Read back the file (3260) + slurp: + src: "{{ test_pamd_file }}" + register: pamd_file_slurp_noargs +- name: Check if changes made (3260) + vars: + line_array: "{{ (pamd_file_slurp_noargs.content|b64decode).split('\n')[2].split() }}" + assert: + that: + - pamd_file_output_noargs is changed + - line_array == ['session', 'required', 'pam_lastlog.so'] + +- name: Create temporary pam.d file copy: content: "" dest: "{{ test_pamd_file }}" - # This test merely demonstrates that, as-is, module will not perform any changes on an empty file # All the existing values for "state" will first search for a rule matching type, control, module_path # and will not perform any change whatsoever if no existing rules match. 
@@ -43,12 +62,10 @@ module_path: pam_lastlog.so module_arguments: silent register: pamd_file_output_empty - - name: Read back the file slurp: src: "{{ test_pamd_file }}" register: pamd_file_slurp - - name: Check if changes made assert: that: diff --git a/tests/unit/plugins/modules/system/test_pamd.py b/tests/unit/plugins/modules/system/test_pamd.py index e7a6883564..19c9d7352a 100644 --- a/tests/unit/plugins/modules/system/test_pamd.py +++ b/tests/unit/plugins/modules/system/test_pamd.py @@ -218,6 +218,14 @@ auth required pam_deny.so test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass') self.assertNotIn(str(test_rule), str(self.pamd)) + def test_update_rule_remove_module_args(self): + self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so', new_args='')) + test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', '') + self.assertIn(str(test_rule), str(self.pamd)) + + test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass') + self.assertNotIn(str(test_rule), str(self.pamd)) + def test_update_first_three(self): self.assertTrue(self.pamd.update_rule('auth', 'required', 'pam_env.so', new_type='one', new_control='two', new_path='three')) From 1f5345881d1af3429573f53b07d2684537626089 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 31 Aug 2021 23:09:29 +1200 Subject: [PATCH 0303/2828] open_iscsi - minor refactoring (#3286) * open_iscsi - minor refactoring * added changelog fragment --- .../3286-open_iscsi-improvements.yaml | 4 + plugins/modules/system/open_iscsi.py | 109 +++++++----------- 2 files changed, 47 insertions(+), 66 deletions(-) create mode 100644 changelogs/fragments/3286-open_iscsi-improvements.yaml diff --git a/changelogs/fragments/3286-open_iscsi-improvements.yaml b/changelogs/fragments/3286-open_iscsi-improvements.yaml new file mode 100644 index 0000000000..860a5f7811 --- /dev/null +++ 
b/changelogs/fragments/3286-open_iscsi-improvements.yaml @@ -0,0 +1,4 @@ +minor_changes: + - open_iscsi - minor refactoring (https://github.com/ansible-collections/community.general/pull/3286). +bugfixes: + - open_iscsi - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3286). diff --git a/plugins/modules/system/open_iscsi.py b/plugins/modules/system/open_iscsi.py index 570925f6a4..2d255356e6 100644 --- a/plugins/modules/system/open_iscsi.py +++ b/plugins/modules/system/open_iscsi.py @@ -125,6 +125,7 @@ import time from ansible.module_utils.basic import AnsibleModule ISCSIADM = 'iscsiadm' +iscsiadm_cmd = None def compare_nodelists(l1, l2): @@ -134,12 +135,12 @@ def compare_nodelists(l1, l2): def iscsi_get_cached_nodes(module, portal=None): - cmd = '%s --mode node' % iscsiadm_cmd - (rc, out, err) = module.run_command(cmd) + cmd = [iscsiadm_cmd, '--mode', 'node'] + rc, out, err = module.run_command(cmd) + nodes = [] if rc == 0: lines = out.splitlines() - nodes = [] for line in lines: # line format is "ip:port,target_portal_group_tag targetname" parts = line.split() @@ -156,7 +157,7 @@ def iscsi_get_cached_nodes(module, portal=None): # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details # err can contain [N|n]o records... 
elif rc == 21 or (rc == 255 and "o records found" in err): - nodes = [] + pass else: module.fail_json(cmd=cmd, rc=rc, msg=err) @@ -164,16 +165,13 @@ def iscsi_get_cached_nodes(module, portal=None): def iscsi_discover(module, portal, port): - cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + cmd = [iscsiadm_cmd, '--mode', 'discovery', '--type', 'sendtargets', '--portal', '%s:%s' % (portal, port)] + module.run_command(cmd, check_rc=True) def target_loggedon(module, target, portal=None, port=None): - cmd = '%s --mode session' % iscsiadm_cmd - (rc, out, err) = module.run_command(cmd) + cmd = [iscsiadm_cmd, '--mode', 'session'] + rc, out, err = module.run_command(cmd) if portal is None: portal = "" @@ -199,30 +197,23 @@ def target_login(module, target, portal=None, port=None): ('node.session.auth.username', node_user), ('node.session.auth.password', node_pass)] for (name, value) in params: - cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value) - (rc, out, err) = module.run_command(cmd) - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value] + module.run_command(cmd, check_rc=True) - cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login'] if portal is not None and port is not None: - cmd += ' --portal %s:%s' % (portal, port) + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + module.run_command(cmd, check_rc=True) def target_logout(module, target): - cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target) - (rc, out, err) = 
module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--logout'] + module.run_command(cmd, check_rc=True) -def target_device_node(module, target): +def target_device_node(target): # if anyone know a better way to find out which devicenodes get created for # a given target... @@ -239,51 +230,39 @@ def target_device_node(module, target): def target_isauto(module, target, portal=None, port=None): - cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target] - if portal is not None: - if port is not None: - portal = '%s:%s' % (portal, port) - cmd = '%s --portal %s' % (cmd, portal) + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) - (rc, out, err) = module.run_command(cmd) + dummy, out, dummy = module.run_command(cmd, check_rc=True) - if rc == 0: - lines = out.splitlines() - for line in lines: - if 'node.startup' in line: - return 'automatic' in line - return False - else: - module.fail_json(cmd=cmd, rc=rc, msg=err) + lines = out.splitlines() + for line in lines: + if 'node.startup' in line: + return 'automatic' in line + return False def target_setauto(module, target, portal=None, port=None): - cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'automatic'] - if portal is not None: - if port is not None: - portal = '%s:%s' % (portal, port) - cmd = '%s --portal %s' % (cmd, portal) + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + module.run_command(cmd, check_rc=True) def target_setmanual(module, target, 
portal=None, port=None): - cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'manual'] - if portal is not None: - if port is not None: - portal = '%s:%s' % (portal, port) - cmd = '%s --portal %s' % (cmd, portal) + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + module.run_command(cmd, check_rc=True) def main(): @@ -308,6 +287,7 @@ def main(): ), required_together=[['node_user', 'node_pass']], + required_if=[('discover', True, ['portal'])], supports_check_mode=True, ) @@ -335,13 +315,10 @@ def main(): cached = iscsi_get_cached_nodes(module, portal) # return json dict - result = {} - result['changed'] = False + result = {'changed': False} if discover: - if portal is None: - module.fail_json(msg="Need to specify at least the portal (ip) to discover") - elif check: + if check: nodes = cached else: iscsi_discover(module, portal, port) @@ -376,13 +353,13 @@ def main(): if (login and loggedon) or (not login and not loggedon): result['changed'] |= False if login: - result['devicenodes'] = target_device_node(module, target) + result['devicenodes'] = target_device_node(target) elif not check: if login: target_login(module, target, portal, port) # give udev some time time.sleep(1) - result['devicenodes'] = target_device_node(module, target) + result['devicenodes'] = target_device_node(target) else: target_logout(module, target) result['changed'] |= True From fce562ad6dee6d43ce9db070a8908000ffefc23d Mon Sep 17 00:00:00 2001 From: Max Bidlingmaier Date: Tue, 31 Aug 2021 15:07:52 +0200 Subject: [PATCH 0304/2828] Enhancement to gitlab_group_members to accept user lists as input (#3047) * - fix to issue 3041 - add func to work with user lists - 
add func to set members to the ones give * Added version_added to new parameter * fixed elements in definition of gitlab_users nad wrong import in gitlab_users * linter issues fixed * added list elelements to argument_spec * More whitspeaces for the linter * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * adapted changelog * removed bugfix (other PR), changes due to review * changed input handling according to review * Fixed test findings * Added list of dict to allow for specifying user/access_level tuples * corrected doc section * fixed parameter definitions * removed strange additional import * Update changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * fixed required if * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Added suggestions from PR * fixed identation problem * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Zainab Alsaffar * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Zainab Alsaffar * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Recommended changes from discussionst * Fixed issues from automatic tests * added missing metaclass due to test finding * added integration tests * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix 
Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * fixed optimization for only one user * Reverted gitlab_project_members to original version - changes will be done in a separate branch * added examples for new functionality * - fixed changelog after reverting gitlab_project_memebers - fully reverted gitlab_project_members * Fixed error handling: when single users are not updateable in bulk mode the exception should not stop the code flow but document the problem in the result. 
* Better error handling * on error give username, not gitlab numeric userid * Fixed broken check_mode * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Change from review Co-authored-by: Max Bidlingmaier Co-authored-by: Felix Fontein Co-authored-by: Zainab Alsaffar --- ...1-gitlab_x_members_fix_and_enhancement.yml | 3 + .../gitlab/gitlab_group_members.py | 330 ++++++++++++++---- .../gitlab_group_members/tasks/main.yml | 47 ++- .../gitlab_group_members/vars/main.yml | 10 +- 4 files changed, 311 insertions(+), 79 deletions(-) create mode 100644 changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml diff --git a/changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml b/changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml new file mode 100644 index 0000000000..ce558e1f84 --- /dev/null +++ b/changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml @@ -0,0 +1,3 @@ +minor_changes: +- gitlab_group_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3047). +- gitlab_group_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3047). diff --git a/plugins/modules/source_control/gitlab/gitlab_group_members.py b/plugins/modules/source_control/gitlab/gitlab_group_members.py index 50779e6445..b526873d30 100644 --- a/plugins/modules/source_control/gitlab/gitlab_group_members.py +++ b/plugins/modules/source_control/gitlab/gitlab_group_members.py @@ -32,15 +32,38 @@ options: type: str gitlab_user: description: - - The username of the member to add to/remove from the GitLab group. - required: true - type: str + - A username or a list of usernames to add to/remove from the GitLab group. + - Mutually exclusive with I(gitlab_users_access). + type: list + elements: str access_level: description: - The access level for the user. 
- Required if I(state=present), user state is set to present. + - Mutually exclusive with I(gitlab_users_access). type: str choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] + gitlab_users_access: + description: + - Provide a list of user to access level mappings. + - Every dictionary in this list specifies a user (by username) and the access level the user should have. + - Mutually exclusive with I(gitlab_user) and I(access_level). + - Use together with I(purge_users) to remove all users not specified here from the group. + type: list + elements: dict + suboptions: + name: + description: A username or a list of usernames to add to/remove from the GitLab group. + type: str + required: true + access_level: + description: + - The access level for the user. + - Required if I(state=present), user state is set to present. + type: str + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] + required: true + version_added: 3.6.0 state: description: - State of the member in the group. @@ -49,6 +72,15 @@ options: choices: ['present', 'absent'] default: 'present' type: str + purge_users: + description: + - Adds/remove users of the given access_level to match the given gitlab_user/gitlab_users_access list. + If omitted do not purge orphaned members. + - Is only used when I(state=present). + type: list + elements: str + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] + version_added: 3.6.0 notes: - Supports C(check_mode). 
''' @@ -70,6 +102,51 @@ EXAMPLES = r''' gitlab_group: groupname gitlab_user: username state: absent + +- name: Add a list of Users to A GitLab Group + gitlab_group_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + gitlab_group: groupname + gitlab_user: + - user1 + - user2 + access_level: developer + state: present + +- name: Add a list of Users with Dedicated Access Levels to A GitLab Group + gitlab_group_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + gitlab_group: groupname + gitlab_users_access: + - name: user1 + access_level: developer + - name: user2 + access_level: maintainer + state: present + +- name: Add a user, remove all others which might be on this access level + gitlab_group_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + gitlab_group: groupname + gitlab_user: username + access_level: developer + pruge_users: developer + state: present + +- name: Remove a list of Users with Dedicated Access Levels to A GitLab Group + gitlab_group_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + gitlab_group: groupname + gitlab_users_access: + - name: user1 + access_level: developer + - name: user2 + access_level: maintainer + state: absent ''' RETURN = r''' # ''' @@ -111,6 +188,17 @@ class GitLabGroup(object): group = self._gitlab.groups.get(gitlab_group_id) return group.members.list(all=True) + # get single member in a group by user name + def get_member_in_a_group(self, gitlab_group_id, gitlab_user_id): + member = None + group = self._gitlab.groups.get(gitlab_group_id) + try: + member = group.members.get(gitlab_user_id) + if member: + return member + except gitlab.exceptions.GitlabGetError as e: + return None + # check if the user is a member of the group def is_user_a_member(self, members, gitlab_user_id): for member in members: @@ -120,27 +208,14 @@ class GitLabGroup(object): # add user to a group def 
add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level): - try: - group = self._gitlab.groups.get(gitlab_group_id) - add_member = group.members.create( - {'user_id': gitlab_user_id, 'access_level': access_level}) - - if add_member: - return add_member.username - - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json( - msg="Failed to add member to the Group, Group ID %s: %s" % (gitlab_group_id, e)) + group = self._gitlab.groups.get(gitlab_group_id) + add_member = group.members.create( + {'user_id': gitlab_user_id, 'access_level': access_level}) # remove user from a group def remove_user_from_group(self, gitlab_user_id, gitlab_group_id): - try: - group = self._gitlab.groups.get(gitlab_group_id) - group.members.delete(gitlab_user_id) - - except (gitlab.exceptions.GitlabDeleteError) as e: - self._module.fail_json( - msg="Failed to remove member from GitLab group, ID %s: %s" % (gitlab_group_id, e)) + group = self._gitlab.groups.get(gitlab_group_id) + group.members.delete(gitlab_user_id) # get user's access level def get_user_access_level(self, members, gitlab_user_id): @@ -152,12 +227,8 @@ class GitLabGroup(object): def update_user_access_level(self, members, gitlab_user_id, access_level): for member in members: if member.id == gitlab_user_id: - try: - member.access_level = access_level - member.save() - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json( - msg="Failed to update the access level for the member, %s: %s" % (gitlab_user_id, e)) + member.access_level = access_level + member.save() def main(): @@ -165,9 +236,18 @@ def main(): argument_spec.update(dict( api_token=dict(type='str', required=True, no_log=True), gitlab_group=dict(type='str', required=True), - gitlab_user=dict(type='str', required=True), + gitlab_user=dict(type='list', elements='str'), state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', required=False, choices=['guest', 
'reporter', 'developer', 'maintainer', 'owner']) + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), + purge_users=dict(type='list', elements='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), + gitlab_users_access=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str', required=True), + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True), + ) + ), )) module = AnsibleModule( @@ -175,15 +255,19 @@ def main(): mutually_exclusive=[ ['api_username', 'api_token'], ['api_password', 'api_token'], + ['gitlab_user', 'gitlab_users_access'], + ['access_level', 'gitlab_users_access'], ], required_together=[ ['api_username', 'api_password'], + ['gitlab_user', 'access_level'], ], required_one_of=[ ['api_username', 'api_token'], + ['gitlab_user', 'gitlab_users_access'], ], required_if=[ - ['state', 'present', ['access_level']], + ['state', 'present', ['access_level', 'gitlab_users_access'], True], ], supports_check_mode=True, ) @@ -191,72 +275,166 @@ def main(): if not HAS_PY_GITLAB: module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR) + access_level_int = { + 'guest': gitlab.GUEST_ACCESS, + 'reporter': gitlab.REPORTER_ACCESS, + 'developer': gitlab.DEVELOPER_ACCESS, + 'maintainer': gitlab.MAINTAINER_ACCESS, + 'owner': gitlab.OWNER_ACCESS + } + gitlab_group = module.params['gitlab_group'] - gitlab_user = module.params['gitlab_user'] state = module.params['state'] access_level = module.params['access_level'] + purge_users = module.params['purge_users'] - # convert access level string input to int - if access_level: - access_level_int = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, - 'owner': gitlab.OWNER_ACCESS - } - - access_level = 
access_level_int[access_level] + if purge_users: + purge_users = [access_level_int[level] for level in purge_users] # connect to gitlab server gl = gitlabAuthentication(module) group = GitLabGroup(module, gl) - gitlab_user_id = group.get_user_id(gitlab_user) gitlab_group_id = group.get_group_id(gitlab_group) # group doesn't exist if not gitlab_group_id: module.fail_json(msg="group '%s' not found." % gitlab_group) - # user doesn't exist - if not gitlab_user_id: - if state == 'absent': - module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the group" % gitlab_user) - else: - module.fail_json(msg="user '%s' not found." % gitlab_user) + members = [] + if module.params['gitlab_user'] is not None: + gitlab_users_access = [] + gitlab_users = module.params['gitlab_user'] + for gl_user in gitlab_users: + gitlab_users_access.append({'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None}) + elif module.params['gitlab_users_access'] is not None: + gitlab_users_access = module.params['gitlab_users_access'] + for user_level in gitlab_users_access: + user_level['access_level'] = access_level_int[user_level['access_level']] - members = group.get_members_in_a_group(gitlab_group_id) - is_user_a_member = group.is_user_a_member(members, gitlab_user_id) - - # check if the user is a member in the group - if not is_user_a_member: - if state == 'present': - # add user to the group - if not module.check_mode: - group.add_member_to_group(gitlab_user_id, gitlab_group_id, access_level) - module.exit_json(changed=True, result="Successfully added user '%s' to the group." % gitlab_user) - # state as absent - else: - module.exit_json(changed=False, result="User, '%s', is not a member in the group. 
No change to report" % gitlab_user) - # in case that a user is a member + if len(gitlab_users_access) == 1 and not purge_users: + # only single user given + members = [group.get_member_in_a_group(gitlab_group_id, group.get_user_id(gitlab_users_access[0]['name']))] + if members[0] is None: + members = [] + elif len(gitlab_users_access) > 1 or purge_users: + # list of users given + members = group.get_members_in_a_group(gitlab_group_id) else: - if state == 'present': - # compare the access level - user_access_level = group.get_user_access_level(members, gitlab_user_id) - if user_access_level == access_level: - module.exit_json(changed=False, result="User, '%s', is already a member in the group. No change to report" % gitlab_user) + module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.", + result_data=[]) + + changed = False + error = False + changed_users = [] + changed_data = [] + + for gitlab_user in gitlab_users_access: + gitlab_user_id = group.get_user_id(gitlab_user['name']) + + # user doesn't exist + if not gitlab_user_id: + if state == 'absent': + changed_users.append("user '%s' not found, and thus also not part of the group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "user '%s' not found, and thus also not part of the group" % gitlab_user['name']}) else: - # update the access level for the user - if not module.check_mode: - group.update_user_access_level(members, gitlab_user_id, access_level) - module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user) + error = True + changed_users.append("user '%s' not found." % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "user '%s' not found." 
% gitlab_user['name']}) + continue + + is_user_a_member = group.is_user_a_member(members, gitlab_user_id) + + # check if the user is a member in the group + if not is_user_a_member: + if state == 'present': + # add user to the group + try: + if not module.check_mode: + group.add_member_to_group(gitlab_user_id, gitlab_group_id, gitlab_user['access_level']) + changed = True + changed_users.append("Successfully added user '%s' to group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully added user '%s' to group" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabCreateError) as e: + error = True + changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)}) + # state as absent + else: + changed_users.append("User, '%s', is not a member in the group. No change to report" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "User, '%s', is not a member in the group. No change to report" % gitlab_user['name']}) + # in case that a user is a member else: - # remove the user from the group - if not module.check_mode: - group.remove_user_from_group(gitlab_user_id, gitlab_group_id) - module.exit_json(changed=True, result="Successfully removed user, '%s', from the group" % gitlab_user) + if state == 'present': + # compare the access level + user_access_level = group.get_user_access_level(members, gitlab_user_id) + if user_access_level == gitlab_user['access_level']: + changed_users.append("User, '%s', is already a member in the group. No change to report" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "User, '%s', is already a member in the group. 
No change to report" % gitlab_user['name']}) + else: + # update the access level for the user + try: + if not module.check_mode: + group.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level']) + changed = True + changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabUpdateError) as e: + error = True + changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)}) + else: + # remove the user from the group + try: + if not module.check_mode: + group.remove_user_from_group(gitlab_user_id, gitlab_group_id) + changed = True + changed_users.append("Successfully removed user, '%s', from the group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully removed user, '%s', from the group" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabDeleteError) as e: + error = True + changed_users.append("Failed to removed user, '%s', from the group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)}) + + # if state = present and purge_users set delete users which are in members having give access level but not in gitlab_users + if state == 'present' and purge_users: + uppercase_names_in_gitlab_users_access = [] + for name in gitlab_users_access: + uppercase_names_in_gitlab_users_access.append(name['name'].upper()) + + for member in members: + if member.access_level in purge_users and 
member.username.upper() not in uppercase_names_in_gitlab_users_access: + try: + if not module.check_mode: + group.remove_user_from_group(member.id, gitlab_group_id) + changed = True + changed_users.append("Successfully removed user '%s', from group. Was not in given list" % member.username) + changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED', + 'msg': "Successfully removed user '%s', from group. Was not in given list" % member.username}) + except (gitlab.exceptions.GitlabDeleteError) as e: + error = True + changed_users.append("Failed to removed user, '%s', from the group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)}) + + if len(gitlab_users_access) == 1 and error: + # if single user given and an error occurred return error for list errors will be per user + module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data) + elif error: + module.fail_json(msg='FAILED: At least one given user/permission could not be set', result_data=changed_data) + + module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data) if __name__ == '__main__': diff --git a/tests/integration/targets/gitlab_group_members/tasks/main.yml b/tests/integration/targets/gitlab_group_members/tasks/main.yml index 4d4f1168d0..109a0f2bdb 100644 --- a/tests/integration/targets/gitlab_group_members/tasks/main.yml +++ b/tests/integration/targets/gitlab_group_members/tasks/main.yml @@ -13,7 +13,7 @@ state: present - name: Add a User to A GitLab Group - gitlab_group_members: + gitlab_group_members: api_url: '{{ gitlab_server_url }}' api_token: '{{ gitlab_api_access_token }}' gitlab_group: '{{ gitlab_group_name }}' @@ -27,4 +27,47 @@ api_token: '{{ gitlab_api_access_token }}' gitlab_group: '{{ gitlab_group_name }}' gitlab_user: '{{ username }}' - state: absent 
\ No newline at end of file + state: absent + +- name: Add a list of Users to A GitLab Group + gitlab_group_members: + api_url: '{{ gitlab_server_url }}' + api_token: '{{ gitlab_api_access_token }}' + gitlab_group: '{{ gitlab_group_name }}' + gitlab_user: '{{ userlist }}' + access_level: '{{ gitlab_access_level }}' + state: present + +- name: Remove a list of Users to A GitLab Group + gitlab_group_members: + api_url: '{{ gitlab_server_url }}' + api_token: '{{ gitlab_api_access_token }}' + gitlab_group: '{{ gitlab_group_name }}' + gitlab_user: '{{ userlist }}' + state: absent + +- name: Add a list of Users with Dedicated Access Levels to A GitLab Group + gitlab_group_members: + api_url: '{{ gitlab_server_url }}' + api_token: '{{ gitlab_api_access_token }}' + gitlab_group: '{{ gitlab_group_name }}' + gitlab_users_access: '{{ dedicated_access_users }}' + state: present + +- name: Remove a list of Users with Dedicated Access Levels to A GitLab Group + gitlab_group_members: + api_url: '{{ gitlab_server_url }}' + api_token: '{{ gitlab_api_access_token }}' + gitlab_group: '{{ gitlab_group_name }}' + gitlab_users_access: '{{ dedicated_access_users }}' + state: absent + +- name: Add a user, remove all others which might be on this access level + gitlab_group_members: + api_url: '{{ gitlab_server_url }}' + api_token: '{{ gitlab_api_access_token }}' + gitlab_group: '{{ gitlab_group_name }}' + gitlab_user: '{{ username }}' + access_level: '{{ gitlab_access_level }}' + pruge_users: '{{ gitlab_access_level }}' + state: present diff --git a/tests/integration/targets/gitlab_group_members/vars/main.yml b/tests/integration/targets/gitlab_group_members/vars/main.yml index 7f68893cf9..6a6b17319d 100644 --- a/tests/integration/targets/gitlab_group_members/vars/main.yml +++ b/tests/integration/targets/gitlab_group_members/vars/main.yml @@ -2,4 +2,12 @@ gitlab_server_url: https://gitlabserver.example.com gitlab_api_access_token: 126hngbscx890cv09b gitlab_group_name: groupname1 username: 
username1 -gitlab_access_level: developer \ No newline at end of file +gitlab_access_level: developer +userlist: + - username1 + - username2 +dedicated_access_users: + - name: username1 + access_level: "developer" + - name: username2 + access_level: "maintainer" From 135faf44216febd34f19a1b39b941c967a7375fe Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Tue, 31 Aug 2021 12:19:29 -0400 Subject: [PATCH 0305/2828] django_manage: Remove scottanderson42 and tastychutney as maintainers. (#3314) Note: tastychutney is another github account of mine that was also added as a maintainer. --- .github/BOTMETA.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 0d2922182b..b07f95e8cc 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1108,7 +1108,8 @@ files: $modules/web_infrastructure/deploy_helper.py: maintainers: ramondelafuente $modules/web_infrastructure/django_manage.py: - maintainers: scottanderson42 russoz tastychutney + maintainers: russoz + ignore: scottanderson42 tastychutney labels: django_manage $modules/web_infrastructure/ejabberd_user.py: maintainers: privateip From bf8df21d27b81dd8e0c406cde63ffbf108529a8f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 31 Aug 2021 18:22:08 +0200 Subject: [PATCH 0306/2828] Next expected release is 3.7.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 724e76110d..5b08ca814a 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.6.0 +version: 3.7.0 readme: README.md authors: - Ansible (https://github.com/ansible) From c121e8685fec9bb47e1cd744213d5e1bc9e3d7b4 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 1 Sep 2021 07:33:22 +0200 Subject: [PATCH 0307/2828] Fix documentation bugs. 
(#3321) --- .../source_control/gitlab/gitlab_group_members.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/modules/source_control/gitlab/gitlab_group_members.py b/plugins/modules/source_control/gitlab/gitlab_group_members.py index b526873d30..d11e72d92f 100644 --- a/plugins/modules/source_control/gitlab/gitlab_group_members.py +++ b/plugins/modules/source_control/gitlab/gitlab_group_members.py @@ -74,7 +74,7 @@ options: type: str purge_users: description: - - Adds/remove users of the given access_level to match the given gitlab_user/gitlab_users_access list. + - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. If omitted do not purge orphaned members. - Is only used when I(state=present). type: list @@ -104,7 +104,7 @@ EXAMPLES = r''' state: absent - name: Add a list of Users to A GitLab Group - gitlab_group_members: + community.general.gitlab_group_members: api_url: 'https://gitlab.example.com' api_token: 'Your-Private-Token' gitlab_group: groupname @@ -115,7 +115,7 @@ EXAMPLES = r''' state: present - name: Add a list of Users with Dedicated Access Levels to A GitLab Group - gitlab_group_members: + community.general.gitlab_group_members: api_url: 'https://gitlab.example.com' api_token: 'Your-Private-Token' gitlab_group: groupname @@ -127,7 +127,7 @@ EXAMPLES = r''' state: present - name: Add a user, remove all others which might be on this access level - gitlab_group_members: + community.general.gitlab_group_members: api_url: 'https://gitlab.example.com' api_token: 'Your-Private-Token' gitlab_group: groupname @@ -137,7 +137,7 @@ EXAMPLES = r''' state: present - name: Remove a list of Users with Dedicated Access Levels to A GitLab Group - gitlab_group_members: + community.general.gitlab_group_members: api_url: 'https://gitlab.example.com' api_token: 'Your-Private-Token' gitlab_group: groupname From 7c493eb4e5eef63832762eac4978909d93a61808 Mon Sep 17 00:00:00 2001 From: 
Silvie Chlupova <33493796+schlupov@users.noreply.github.com> Date: Wed, 1 Sep 2021 22:58:10 +0200 Subject: [PATCH 0308/2828] Fix copr integration tests (#3237) Fixes: #2084 --- changelogs/fragments/3237-copr-fix_chroot_naming.yml | 2 ++ plugins/modules/packaging/os/copr.py | 11 ++++++----- tests/integration/targets/copr/aliases | 1 - tests/integration/targets/copr/tasks/main.yml | 8 ++++---- 4 files changed, 12 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/3237-copr-fix_chroot_naming.yml diff --git a/changelogs/fragments/3237-copr-fix_chroot_naming.yml b/changelogs/fragments/3237-copr-fix_chroot_naming.yml new file mode 100644 index 0000000000..7a942bc94e --- /dev/null +++ b/changelogs/fragments/3237-copr-fix_chroot_naming.yml @@ -0,0 +1,2 @@ +bugfixes: + - copr - fix chroot naming issues, ``centos-stream`` changed naming to ``centos-stream-`` (for exmaple ``centos-stream-8``) (https://github.com/ansible-collections/community.general/issues/2084, https://github.com/ansible-collections/community.general/pull/3237). \ No newline at end of file diff --git a/plugins/modules/packaging/os/copr.py b/plugins/modules/packaging/os/copr.py index 4bf665e045..cb31e8c9fb 100644 --- a/plugins/modules/packaging/os/copr.py +++ b/plugins/modules/packaging/os/copr.py @@ -120,8 +120,7 @@ class CoprModule(object): @property def short_chroot(self): """str: Chroot (distribution-version-architecture) shorten to distribution-version.""" - chroot_parts = self.chroot.split("-") - return "{0}-{1}".format(chroot_parts[0], chroot_parts[1]) + return self.chroot.rsplit('-', 1)[0] @property def arch(self): @@ -193,18 +192,20 @@ class CoprModule(object): Returns: Information about the repository. 
""" - distribution, version = self.short_chroot.split("-") + distribution, version = self.short_chroot.split('-', 1) chroot = self.short_chroot while True: repo_info, status_code = self._get(chroot) if repo_info: return repo_info if distribution == "rhel": - chroot = "centos-stream" + chroot = "centos-stream-8" distribution = "centos" elif distribution == "centos": - if version == "stream": + if version == "stream-8": version = "8" + elif version == "stream-9": + version = "9" chroot = "epel-{0}".format(version) distribution = "epel" else: diff --git a/tests/integration/targets/copr/aliases b/tests/integration/targets/copr/aliases index fbe7da85db..0ad5e1c80c 100644 --- a/tests/integration/targets/copr/aliases +++ b/tests/integration/targets/copr/aliases @@ -3,4 +3,3 @@ needs/root skip/macos skip/osx skip/freebsd -disabled # FIXME diff --git a/tests/integration/targets/copr/tasks/main.yml b/tests/integration/targets/copr/tasks/main.yml index 32ce67208d..1c8afd992f 100644 --- a/tests/integration/targets/copr/tasks/main.yml +++ b/tests/integration/targets/copr/tasks/main.yml @@ -6,7 +6,7 @@ host: copr.fedorainfracloud.org state: enabled name: '@copr/integration_tests' - chroot: centos-stream-x86_64 + chroot: fedora-rawhide-x86_64 register: result - name: assert that the copr project was enabled @@ -21,7 +21,7 @@ copr: state: enabled name: '@copr/integration_tests' - chroot: centos-stream-x86_64 + chroot: fedora-rawhide-x86_64 register: result - name: assert that the copr project was enabled @@ -46,7 +46,7 @@ copr: state: disabled name: '@copr/integration_tests' - chroot: centos-stream-x86_64 + chroot: fedora-rawhide-x86_64 register: result - name: assert that the copr project was disabled @@ -61,4 +61,4 @@ host: copr.fedorainfracloud.org state: absent name: '@copr/integration_tests' - chroot: centos-stream-x86_64 + chroot: fedora-rawhide-x86_64 From 3502f3b48690500c8d60942ca85bb3527f856e4e Mon Sep 17 00:00:00 2001 From: Kyle Williams 
<36274986+kyle-williams-1@users.noreply.github.com> Date: Wed, 1 Sep 2021 14:59:27 -0600 Subject: [PATCH 0309/2828] redfish: clean etag of quotes before patch (#3296) * Some vendors surround header etag with quotes, which need to be cleaned before sending a patch * Minor change fragment * Add etag strip quote option * Rebase * Cleanup fragment * Apply suggestions from code review Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein * Description update * Update plugins/modules/remote_management/redfish/redfish_config.py Co-authored-by: Felix Fontein Co-authored-by: Kyle Williams Co-authored-by: Felix Fontein --- changelogs/fragments/3296-clean-etag.yaml | 2 ++ plugins/module_utils/redfish_utils.py | 5 ++++- .../redfish/redfish_command.py | 17 +++++++++++++++-- .../remote_management/redfish/redfish_config.py | 17 +++++++++++++++-- 4 files changed, 36 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/3296-clean-etag.yaml diff --git a/changelogs/fragments/3296-clean-etag.yaml b/changelogs/fragments/3296-clean-etag.yaml new file mode 100644 index 0000000000..317772cb15 --- /dev/null +++ b/changelogs/fragments/3296-clean-etag.yaml @@ -0,0 +1,2 @@ +minor_changes: + - "redfish_command and redfish_config and redfish_utils module utils - add parameter to strip etag of quotes before patch, since some vendors do not properly ``If-Match`` etag with quotes (https://github.com/ansible-collections/community.general/pull/3296)." 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 0f8e6630ba..b4d0dba015 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -29,7 +29,7 @@ FAIL_MSG = 'Issuing a data modification command without specifying the '\ class RedfishUtils(object): def __init__(self, creds, root_uri, timeout, module, resource_id=None, - data_modification=False): + data_modification=False, strip_etag_quotes=False): self.root_uri = root_uri self.creds = creds self.timeout = timeout @@ -37,6 +37,7 @@ class RedfishUtils(object): self.service_root = '/redfish/v1/' self.resource_id = resource_id self.data_modification = data_modification + self.strip_etag_quotes = strip_etag_quotes self._init_session() def _auth_params(self, headers): @@ -121,6 +122,8 @@ class RedfishUtils(object): if not etag: etag = r['data'].get('@odata.etag') if etag: + if self.strip_etag_quotes: + etag = etag.strip('"') req_headers['If-Match'] = etag username, password, basic_auth = self._auth_params(req_headers) try: diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index 72392ec9f3..e79308f2d7 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -207,6 +207,15 @@ options: description: - The transfer method to use with the image type: str + strip_etag_quotes: + description: + - Removes surrounding quotes of etag used in C(If-Match) header + of C(PATCH) requests. + - Only use this option to resolve bad vendor implementation where + C(If-Match) only matches the unquoted etag string. 
+ type: bool + default: false + version_added: 3.7.0 author: "Jose Delarosa (@jose-delarosa)" ''' @@ -631,7 +640,8 @@ def main(): transfer_protocol_type=dict(), transfer_method=dict(), ) - ) + ), + strip_etag_quotes=dict(type='bool', default=False), ), required_together=[ ('username', 'password'), @@ -686,10 +696,13 @@ def main(): # VirtualMedia options virtual_media = module.params['virtual_media'] + # Etag options + strip_etag_quotes = module.params['strip_etag_quotes'] + # Build root URI root_uri = "https://" + module.params['baseuri'] rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True) + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py index 9b15a3e63e..ff4b15487e 100644 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ b/plugins/modules/remote_management/redfish/redfish_config.py @@ -91,6 +91,15 @@ options: - setting dict of EthernetInterface on OOB controller type: dict version_added: '0.2.0' + strip_etag_quotes: + description: + - Removes surrounding quotes of etag used in C(If-Match) header + of C(PATCH) requests. + - Only use this option to resolve bad vendor implementation where + C(If-Match) only matches the unquoted etag string. 
+ type: bool + default: false + version_added: 3.7.0 author: "Jose Delarosa (@jose-delarosa)" ''' @@ -237,7 +246,8 @@ def main(): nic_config=dict( type='dict', default={} - ) + ), + strip_etag_quotes=dict(type='bool', default=False), ), required_together=[ ('username', 'password'), @@ -275,10 +285,13 @@ def main(): nic_addr = module.params['nic_addr'] nic_config = module.params['nic_config'] + # Etag options + strip_etag_quotes = module.params['strip_etag_quotes'] + # Build root URI root_uri = "https://" + module.params['baseuri'] rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True) + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: From 76317d1f6473a82490927095f14f6df5422e3bed Mon Sep 17 00:00:00 2001 From: Manuel Gayer <85677493+nm-mga@users.noreply.github.com> Date: Sun, 5 Sep 2021 18:28:04 +0200 Subject: [PATCH 0310/2828] nmcli: Support GSM connections (#3313) * nmcli: Support GSM connections * Add GSM support * Add GSM unit test * nmcli: Add changelog fragment * nmcli: Fix GSM unit test * Fix copy-paste error in test_gsm_mod * nmcli: Fix yaml formatting * nmcli: Fix yaml formatting * nmcli: Fix typeerror * type must be str not string * nmcli: Fix gsm_show_output * gsm.username did not match input * nmcli: Fix gsm_show_output * doublechecked generated output with test-client * nmcli: GSM fix unit test * Removed `mocked_gsm_connection_unchanged` * Revert "nmcli: GSM fix unit test" This reverts commit 2d112b779aab03865731377919c509b8e88ad56a. 
* nmcli: gsm fix unit test * Add needed output to `TESTCASE_GSM_SHOW_OUTPUT` * Move `mocked_gsm_connection_unchanged`to sort correctly * nmcli: gsm fix _compare_conn_params * Strip double-qoutes of gsm.apn if exist * nmcli: GSM apply suggestions from code review Co-authored-by: Ajpantuso * nmcli: GSM: Fix documentation * Shorten too long lines * nmcli: GSM apply suggestions from code review Co-authored-by: Ajpantuso * nmcli: GSM add version Co-authored-by: Felix Fontein Co-authored-by: Ajpantuso Co-authored-by: Felix Fontein --- .../fragments/3313-nmcli-add_gsm_support.yml | 2 + plugins/modules/net_tools/nmcli.py | 129 ++++++++++++++++- .../plugins/modules/net_tools/test_nmcli.py | 137 ++++++++++++++++++ 3 files changed, 265 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/3313-nmcli-add_gsm_support.yml diff --git a/changelogs/fragments/3313-nmcli-add_gsm_support.yml b/changelogs/fragments/3313-nmcli-add_gsm_support.yml new file mode 100644 index 0000000000..9986bca675 --- /dev/null +++ b/changelogs/fragments/3313-nmcli-add_gsm_support.yml @@ -0,0 +1,2 @@ +minor_changes: + - "nmcli - add ``gsm`` support (https://github.com/ansible-collections/community.general/pull/3313)." diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 7bc8a6b775..843e8bd8ef 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -54,8 +54,9 @@ options: - Type C(dummy) is added in community.general 3.5.0. - Type C(generic) is added in Ansible 2.5. - Type C(infiniband) is added in community.general 2.0.0. + - Type C(gsm) is added in community.general 3.7.0. 
type: str - choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] + choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi, gsm ] mode: description: - This is the type of device or network connection that you wish to create for a bond or bridge. @@ -183,7 +184,7 @@ options: mtu: description: - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created. - - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband) + - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, gsm, pppoe, infiniband) - This parameter defaults to C(1500) when unset. type: int dhcp_client_id: @@ -643,6 +644,101 @@ options: type: bool default: false version_added: 3.6.0 + gsm: + description: + - The configuration of the GSM connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. + - 'An up-to-date list of supported attributes can be found here: + U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).' + - 'For instance to use apn, pin, username and password: + C({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).' + type: dict + version_added: 3.7.0 + suboptions: + apn: + description: + - The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network. + - The APN often determines how the user will be billed for their network usage and whether the user has access to the Internet or + just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile broadband plan. + - The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9. 
+ type: str + auto-config: + description: When C(true), the settings such as I(gsm.apn), I(gsm.username), or I(gsm.password) will default to values that match the network + the modem will register to in the Mobile Broadband Provider database. + type: bool + default: false + device-id: + description: + - The device unique identifier (as given by the C(WWAN) management service) which this connection applies to. + - If given, the connection will only apply to the specified device. + type: str + home-only: + description: + - When C(true), only connections to the home network will be allowed. + - Connections to roaming networks will not be made. + type: bool + default: false + mtu: + description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. + type: int + default: 0 + network-id: + description: + - The Network ID (GSM LAI format, ie MCC-MNC) to force specific network registration. + - If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified network. + - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise possible. + type: str + number: + description: Legacy setting that used to help establishing PPP data sessions for GSM-based modems. + type: str + password: + description: + - The password used to authenticate with the network, if required. + - Many providers do not require a password, or accept any password. + - But if a password is required, it is specified here. + type: str + password-flags: + description: + - NMSettingSecretFlags indicating how to handle the I(password) property. 
+ - 'Following choices are allowed: + C(0) B(NONE): The system is responsible for providing and storing this secret (default), + C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be + asked to retrieve it + C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed + C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required + (some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.' + type: int + choices: [ 0, 1, 2 , 4 ] + default: 0 + pin: + description: + - If the SIM is locked with a PIN it must be unlocked before any other operations are requested. + - Specify the PIN here to allow operation of the device. + type: str + pin-flags: + description: + - NMSettingSecretFlags indicating how to handle the I(gsm.pin) property. + - See I(gsm.password-flags) for NMSettingSecretFlags choices. + type: int + choices: [ 0, 1, 2 , 4 ] + default: 0 + sim-id: + description: + - The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to. + - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) which contains a SIM card matching + the given identifier.' + type: str + sim-operator-id: + description: + - A MCC/MNC string like C(310260) or C(21601I) identifying the specific mobile network operator which this connection applies to. + - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) and I(gsm.sim-id) which contains a SIM card + provisioned by the given operator.' + type: str + username: + description: + - The username used to authenticate with the network, if required. + - Many providers do not require a username, or accept any username. + - But if a username is required, it is specified here. 
''' EXAMPLES = r''' @@ -979,6 +1075,19 @@ EXAMPLES = r''' autoconnect: true state: present +- name: Create a gsm connection + community.general.nmcli: + type: gsm + conn_name: my-gsm-provider + ifname: cdc-wdm0 + gsm: + apn: my.provider.apn + username: my-provider-username + password: my-provider-password + pin: my-sim-pin + autoconnect: true + state: present + ''' RETURN = r"""# @@ -1086,6 +1195,7 @@ class Nmcli(object): self.ssid = module.params['ssid'] self.wifi = module.params['wifi'] self.wifi_sec = module.params['wifi_sec'] + self.gsm = module.params['gsm'] if self.method4: self.ipv4_method = self.method4 @@ -1243,6 +1353,12 @@ class Nmcli(object): options.update({ '802-11-wireless-security.%s' % name: value }) + elif self.type == 'gsm': + if self.gsm: + for name, value in self.gsm.items(): + options.update({ + 'gsm.%s' % name: value, + }) # Convert settings values based on the situation. for setting, value in options.items(): setting_type = self.settings_type(setting) @@ -1280,7 +1396,8 @@ class Nmcli(object): 'sit', 'team', 'vlan', - 'wifi' + 'wifi', + 'gsm', ) @property @@ -1573,6 +1690,10 @@ class Nmcli(object): value = value.upper() # ensure current_value is also converted to uppercase in case nmcli changes behaviour current_value = current_value.upper() + if key == 'gsm.apn': + # Depending on version nmcli adds double-qoutes to gsm.apn + # Need to strip them in order to compare both + current_value = current_value.strip('"') else: # parameter does not exist current_value = None @@ -1630,6 +1751,7 @@ def main(): 'vlan', 'vxlan', 'wifi', + 'gsm', ]), ip4=dict(type='str'), gw4=dict(type='str'), @@ -1700,6 +1822,7 @@ def main(): ssid=dict(type='str'), wifi=dict(type='dict'), wifi_sec=dict(type='dict', no_log=True), + gsm=dict(type='dict'), ), mutually_exclusive=[['never_default4', 'gw4']], required_if=[("type", "wifi", [("ssid")])], diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 
9277bd5fb6..bf2977e81d 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -86,6 +86,12 @@ TESTCASE_CONNECTION = [ 'state': 'absent', '_ansible_check_mode': True, }, + { + 'type': 'gsm', + 'conn_name': 'non_existent_nw_device', + 'state': 'absent', + '_ansible_check_mode': True, + }, ] TESTCASE_GENERIC = [ @@ -603,6 +609,7 @@ TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT = \ 802-11-wireless-security.fils: 0 (default) """ + TESTCASE_DUMMY_STATIC = [ { 'type': 'dummy', @@ -638,6 +645,53 @@ ipv6.addresses: 2001:db8::1/128 """ +TESTCASE_GSM = [ + { + 'type': 'gsm', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'gsm_non_existant', + 'gsm': { + 'apn': 'internet.telekom', + 'username': 't-mobile', + 'password': 'tm', + 'pin': '1234', + }, + 'method4': 'auto', + 'state': 'present', + '_ansible_check_mode': False, + } +] + +TESTCASE_GSM_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_device +connection.type: gsm +connection.interface-name: gsm_non_existant +connection.autoconnect: yes +ipv4.method: auto +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no +gsm.auto-config: no +gsm.number: -- +gsm.username: t-mobile +gsm.password: tm +gsm.password-flags: 0 (none) +gsm.apn: "internet.telekom" +gsm.network-id: -- +gsm.pin: 1234 +gsm.pin-flags: 0 (none) +gsm.home-only: no +gsm.device-id: -- +gsm.sim-id: -- +gsm.sim-operator-id: -- +gsm.mtu: auto +""" + + def mocker_set(mocker, connection_exists=False, execute_return=(0, "", ""), @@ -863,6 +917,13 @@ def mocked_dummy_connection_static_unchanged(mocker): execute_return=(0, TESTCASE_DUMMY_STATIC_SHOW_OUTPUT, "")) +@pytest.fixture +def mocked_gsm_connection_unchanged(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=(0, TESTCASE_GSM_SHOW_OUTPUT, "")) + + @pytest.mark.parametrize('patch_ansible_module', 
TESTCASE_BOND, indirect=['patch_ansible_module']) def test_bond_connection_create(mocked_generic_connection_create, capfd): """ @@ -2162,3 +2223,79 @@ def test_dummy_connection_static_unchanged(mocked_dummy_connection_static_unchan results = json.loads(out) assert not results.get('failed') assert not results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module']) +def test_create_gsm(mocked_generic_connection_create, capfd): + """ + Test if gsm created + """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'add' + assert args[0][3] == 'type' + assert args[0][4] == 'gsm' + assert args[0][5] == 'con-name' + assert args[0][6] == 'non_existent_nw_device' + + args_text = list(map(to_text, args[0])) + for param in ['connection.interface-name', 'gsm_non_existant', + 'gsm.apn', 'internet.telekom', + 'gsm.username', 't-mobile', + 'gsm.password', 'tm', + 'gsm.pin', '1234']: + assert param in args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module']) +def test_gsm_mod(mocked_generic_connection_modify, capfd): + """ + Test if gsm modified + """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'modify' + assert args[0][3] == 'non_existent_nw_device' + + args_text = list(map(to_text, args[0])) + for param in ['gsm.username', 't-mobile', + 'gsm.password', 'tm']: + assert param in 
args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module']) +def test_gsm_connection_unchanged(mocked_gsm_connection_unchanged, capfd): + """ + Test if gsm connection unchanged + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert not results['changed'] From 0f9311c3d9d1d1f6eeff43ff40b5d524555e0fe6 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 7 Sep 2021 05:23:33 +1200 Subject: [PATCH 0311/2828] zpool_facts - pythonification (#3332) * pythonification * added changelog fragment * adjustment per PR --- .../fragments/3332-zpool_facts-pythonify.yaml | 2 + plugins/modules/storage/zfs/zpool_facts.py | 46 +++++++------------ 2 files changed, 19 insertions(+), 29 deletions(-) create mode 100644 changelogs/fragments/3332-zpool_facts-pythonify.yaml diff --git a/changelogs/fragments/3332-zpool_facts-pythonify.yaml b/changelogs/fragments/3332-zpool_facts-pythonify.yaml new file mode 100644 index 0000000000..ddb29b9efb --- /dev/null +++ b/changelogs/fragments/3332-zpool_facts-pythonify.yaml @@ -0,0 +1,2 @@ +minor_changes: + - zpool_facts - minor refactoring (https://github.com/ansible-collections/community.general/pull/3332). 
diff --git a/plugins/modules/storage/zfs/zpool_facts.py b/plugins/modules/storage/zfs/zpool_facts.py index ed3d6cf965..b7a66255c6 100644 --- a/plugins/modules/storage/zfs/zpool_facts.py +++ b/plugins/modules/storage/zfs/zpool_facts.py @@ -125,23 +125,16 @@ class ZPoolFacts(object): def __init__(self, module): self.module = module - self.name = module.params['name'] self.parsable = module.params['parsable'] self.properties = module.params['properties'] - self._pools = defaultdict(dict) self.facts = [] def pool_exists(self): cmd = [self.module.get_bin_path('zpool'), 'list', self.name] - - (rc, out, err) = self.module.run_command(cmd) - - if rc == 0: - return True - else: - return False + rc, dummy, dummy = self.module.run_command(cmd) + return rc == 0 def get_facts(self): cmd = [self.module.get_bin_path('zpool'), 'get', '-H'] @@ -153,41 +146,36 @@ class ZPoolFacts(object): if self.name: cmd.append(self.name) - (rc, out, err) = self.module.run_command(cmd) + rc, out, err = self.module.run_command(cmd, check_rc=True) - if rc == 0: - for line in out.splitlines(): - pool, property, value = line.split('\t') + for line in out.splitlines(): + pool, prop, value = line.split('\t') - self._pools[pool].update({property: value}) + self._pools[pool].update({prop: value}) - for k, v in iteritems(self._pools): - v.update({'name': k}) - self.facts.append(v) + for k, v in iteritems(self._pools): + v.update({'name': k}) + self.facts.append(v) - return {'ansible_zfs_pools': self.facts} - else: - self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name, - stderr=err, - rc=rc) + return {'ansible_zfs_pools': self.facts} def main(): module = AnsibleModule( argument_spec=dict( - name=dict(required=False, aliases=['pool', 'zpool'], type='str'), - parsable=dict(required=False, default=False, type='bool'), - properties=dict(required=False, default='all', type='str'), + name=dict(aliases=['pool', 'zpool'], type='str'), + parsable=dict(default=False, 
type='bool'), + properties=dict(default='all', type='str'), ), supports_check_mode=True ) zpool_facts = ZPoolFacts(module) - result = {} - result['changed'] = False - result['name'] = zpool_facts.name - + result = { + 'changed': False, + 'name': zpool_facts.name, + } if zpool_facts.parsable: result['parsable'] = zpool_facts.parsable From a20862797ecea1e4d90ceccbdbe2cd656776dc71 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 6 Sep 2021 22:37:10 +0200 Subject: [PATCH 0312/2828] Fix default value of new option. (#3338) --- changelogs/fragments/3337-linode-fix.yml | 2 ++ plugins/inventory/linode.py | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3337-linode-fix.yml diff --git a/changelogs/fragments/3337-linode-fix.yml b/changelogs/fragments/3337-linode-fix.yml new file mode 100644 index 0000000000..06887b1901 --- /dev/null +++ b/changelogs/fragments/3337-linode-fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - "linode inventory plugin - fix default value of new option ``ip_style`` (https://github.com/ansible-collections/community.general/issues/3337)." diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 0ce510852a..4bbd79a303 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -29,8 +29,7 @@ DOCUMENTATION = r''' ip_style: description: Populate hostvars with all information available from the Linode APIv4. 
type: string - default: - - plain + default: plain choices: - plain - api From dd25c0d3bfcd2a41a8ec6970180c16caa04087b9 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 7 Sep 2021 16:22:46 +1200 Subject: [PATCH 0313/2828] django_manage - split params (#3334) * django_manage - fix fixures * docs formatting adjustments * param apps also in need of splitting * oops, the splitted version was not being properly added to the command args * added changelog fragment * check for None * moving to shlex.split() * Update changelogs/fragments/3334-django_manage-split-params.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/3334-django_manage-split-params.yaml | 2 ++ plugins/modules/web_infrastructure/django_manage.py | 11 +++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/3334-django_manage-split-params.yaml diff --git a/changelogs/fragments/3334-django_manage-split-params.yaml b/changelogs/fragments/3334-django_manage-split-params.yaml new file mode 100644 index 0000000000..38ec68a532 --- /dev/null +++ b/changelogs/fragments/3334-django_manage-split-params.yaml @@ -0,0 +1,2 @@ +bugfixes: + - django_manage - parameters ``apps`` and ``fixtures`` are now splitted instead of being used as a single argument (https://github.com/ansible-collections/community.general/issues/3333). diff --git a/plugins/modules/web_infrastructure/django_manage.py b/plugins/modules/web_infrastructure/django_manage.py index 0c8126c457..4ced7452bb 100644 --- a/plugins/modules/web_infrastructure/django_manage.py +++ b/plugins/modules/web_infrastructure/django_manage.py @@ -62,7 +62,7 @@ options: clear: description: - Clear the existing files before trying to copy or link the original file. - - Used only with the 'collectstatic' command. The C(--noinput) argument will be added automatically. + - Used only with the C(collectstatic) command. 
The C(--noinput) argument will be added automatically. required: false default: no type: bool @@ -109,9 +109,9 @@ options: required: false aliases: [test_runner] notes: - - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter + - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the I(virtualenv) parameter is specified. - - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already + - This module will create a virtualenv if the I(virtualenv) parameter is specified and a virtual environment does not already exist at the given location. - This module assumes English error messages for the C(createcachetable) command to detect table existence, unfortunately. @@ -306,7 +306,10 @@ def main(): # these params always get tacked on the end of the command for param in end_of_command_params: if module.params[param]: - run_cmd_args.append(module.params[param]) + if param in ('fixtures', 'apps'): + run_cmd_args.extend(shlex.split(module.params[param])) + else: + run_cmd_args.append(module.params[param]) rc, out, err = module.run_command(run_cmd_args, cwd=project_path) if rc != 0: From 6b207bce4ce4929a8979bf22f3f3543c597e5ef5 Mon Sep 17 00:00:00 2001 From: Andreas Botzner Date: Wed, 8 Sep 2021 07:14:37 +0200 Subject: [PATCH 0314/2828] Adds redis_data_info module (#3227) * Added redis_data_info module Added: - redis_data_info module and suggested 'exists' return flag. - module_utils for redis with a base class that handles database connections. 
- inhereited unit tests and added some new ones for the exit flag * Docfix and sanity * typo * Suggested doc changes and ssl option * TLS and validate_certs fix * Set support_check_mode for info plugin * Docfix and import errors * Redis versioning Fix * version bump and append fixes --- .github/BOTMETA.yml | 2 + plugins/doc_fragments/redis.py | 57 +++++++++ plugins/module_utils/redis.py | 93 ++++++++++++++ .../modules/database/misc/redis_data_info.py | 111 +++++++++++++++++ plugins/modules/redis_data_info.py | 1 + .../database/misc/test_redis_data_info.py | 113 ++++++++++++++++++ 6 files changed, 377 insertions(+) create mode 100644 plugins/doc_fragments/redis.py create mode 100644 plugins/module_utils/redis.py create mode 100644 plugins/modules/database/misc/redis_data_info.py create mode 120000 plugins/modules/redis_data_info.py create mode 100644 tests/unit/plugins/modules/database/misc/test_redis_data_info.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index b07f95e8cc..5b55449a67 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -458,6 +458,8 @@ files: maintainers: slok $modules/database/misc/redis_info.py: maintainers: levonet + $modules/database/misc/redis_data_info.py: + maintainers: paginabianca $modules/database/misc/riak.py: maintainers: drewkerrigan jsmartin $modules/database/mssql/mssql_db.py: diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py new file mode 100644 index 0000000000..e7af25ec8f --- /dev/null +++ b/plugins/doc_fragments/redis.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + # Common parameters for Redis modules + DOCUMENTATION = r''' +options: + login_host: + description: + - Specify the target host running the database. 
+ default: localhost + type: str + login_port: + description: + - Specify the port to connect to. + default: 6379 + type: int + login_user: + description: + - Specify the user to authenticate with. + - Requires L(redis,https://pypi.org/project/redis) >= 3.4.0. + type: str + login_password: + description: + - Specify the password to authenticate with. + - Usually not used when target is localhost. + type: str + tls: + description: + - Specify whether or not to use TLS for the connection. + type: bool + default: true + validate_certs: + description: + - Specify whether or not to validate TLS certificates. + - This should only be turned off for personally controlled sites or with + C(localhost) as target. + type: bool + default: true + ca_certs: + description: + - Path to root certificates file. If not set and I(tls) is + set to C(true), certifi ca-certificates will be used. + type: str +requirements: [ "redis", "certifi" ] + +notes: + - Requires the C(redis) Python package on the remote host. You can + install it with pip (C(pip install redis)) or with a package manager. + Information on the library can be found at U(https://github.com/andymccurdy/redis-py). 
+''' diff --git a/plugins/module_utils/redis.py b/plugins/module_utils/redis.py new file mode 100644 index 0000000000..9d55aecad0 --- /dev/null +++ b/plugins/module_utils/redis.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +from ansible.module_utils.basic import missing_required_lib +__metaclass__ = type + +import traceback + +REDIS_IMP_ERR = None +try: + from redis import Redis + from redis import __version__ as redis_version + HAS_REDIS_PACKAGE = True +except ImportError: + REDIS_IMP_ERR = traceback.format_exc() + HAS_REDIS_PACKAGE = False + +try: + import certifi + HAS_CERTIFI_PACKAGE = True +except ImportError: + CERTIFI_IMPORT_ERROR = traceback.format_exc() + HAS_CERTIFI_PACKAGE = False + + +def fail_imports(module): + errors = [] + traceback = [] + if not HAS_REDIS_PACKAGE: + errors.append(missing_required_lib('redis')) + traceback.append(REDIS_IMP_ERR) + if not HAS_CERTIFI_PACKAGE: + errors.append(missing_required_lib('certifi')) + traceback.append(CERTIFI_IMPORT_ERROR) + if errors: + module.fail_json(errors=errors, traceback='\n'.join(traceback)) + + +def redis_auth_argument_spec(): + return dict( + login_host=dict(type='str', + default='localhost',), + login_user=dict(type='str'), + login_password=dict(type='str', + no_log=True + ), + login_port=dict(type='int', default=6379), + tls=dict(type='bool', + default=True), + validate_certs=dict(type='bool', + default=True + ), + ca_certs=dict(type='str') + ) + + +class RedisAnsible(object): + '''Base class for Redis module''' + + def __init__(self, module): + self.module = module + self.connection = self._connect() + + def _connect(self): + login_host = self.module.params['login_host'] + login_user = self.module.params['login_user'] + login_password = self.module.params['login_password'] + login_port = 
self.module.params['login_port'] + tls = self.module.params['tls'] + validate_certs = 'required' if self.module.params['validate_certs'] else None + ca_certs = self.module.params['ca_certs'] + if tls and ca_certs is None: + ca_certs = str(certifi.where()) + if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None: + self.module.fail_json( + msg='The option `username` in only supported with redis >= 3.4.0.') + params = {'host': login_host, + 'port': login_port, + 'password': login_password, + 'ssl_ca_certs': ca_certs, + 'ssl_cert_reqs': validate_certs, + 'ssl': tls} + if login_user is not None: + params['username'] = login_user + try: + return Redis(**params) + except Exception as e: + self.module.fail_json(msg='{0}'.format(str(e))) + return None diff --git a/plugins/modules/database/misc/redis_data_info.py b/plugins/modules/database/misc/redis_data_info.py new file mode 100644 index 0000000000..866bda62d1 --- /dev/null +++ b/plugins/modules/database/misc/redis_data_info.py @@ -0,0 +1,111 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: redis_data_info +short_description: Get value of key in Redis database +version_added: 3.7.0 +description: + - Get value of keys in Redis database. +author: "Andreas Botzner (@paginabianca)" +options: + key: + description: + - Database key. 
+ type: str + required: true + +extends_documentation_fragment: + - community.general.redis + +seealso: + - module: community.general.redis_info + - module: community.general.redis +''' + +EXAMPLES = ''' +- name: Get key foo=bar from loalhost with no username + community.general.redis_data_info: + login_host: localhost + login_password: supersecret + key: foo + +- name: Get key foo=bar on redishost with custom ca-cert file + community.general.redis_data_info: + login_host: redishost + login_password: supersecret + login_user: somuser + validate_certs: true + ssl_ca_certs: /path/to/ca/certs + key: foo +''' + +RETURN = ''' +exists: + description: If they key exists in the database. + returned: on success + type: bool +value: + description: Value key was set to. + returned: if existing + type: str + sample: 'value_of_some_key' +msg: + description: A short message. + returned: always + type: str + sample: 'Got key: foo with value: bar' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redis import ( + fail_imports, redis_auth_argument_spec, RedisAnsible) + + +def main(): + redis_auth_args = redis_auth_argument_spec() + module_args = dict( + key=dict(type='str', required=True, no_log=False), + ) + module_args.update(redis_auth_args) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + fail_imports(module) + + redis = RedisAnsible(module) + + key = module.params['key'] + result = {'changed': False} + + value = None + try: + value = redis.connection.get(key) + except Exception as e: + msg = 'Failed to get value of key "{0}" with exception: {1}'.format( + key, str(e)) + result['msg'] = msg + module.fail_json(**result) + + if value is None: + msg = 'Key "{0}" does not exist in database'.format(key) + result['exists'] = False + else: + msg = 'Got key "{0}"'.format(key) + result['value'] = value + result['exists'] = True + result['msg'] = msg + 
module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/redis_data_info.py b/plugins/modules/redis_data_info.py new file mode 120000 index 0000000000..14c54fb2d3 --- /dev/null +++ b/plugins/modules/redis_data_info.py @@ -0,0 +1 @@ +database/misc/redis_data_info.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/database/misc/test_redis_data_info.py b/tests/unit/plugins/modules/database/misc/test_redis_data_info.py new file mode 100644 index 0000000000..808c583e37 --- /dev/null +++ b/tests/unit/plugins/modules/database/misc/test_redis_data_info.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import pytest +import json +from redis import __version__ + +from ansible_collections.community.general.plugins.modules.database.misc import ( + redis_data_info) +from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args + + +HAS_REDIS_USERNAME_OPTION = True +if tuple(map(int, __version__.split('.'))) < (3, 4, 0): + HAS_REDIS_USERNAME_OPTION = False + + +def test_redis_data_info_without_arguments(capfd): + set_module_args({}) + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + assert not err + assert json.loads(out)['failed'] + + +@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") +def test_redis_data_info_existing_key(capfd, mocker): + set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False}) + mocker.patch('redis.Redis.get', return_value='bar') + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + print(out) + assert not err + assert 
json.loads(out)['exists'] + assert json.loads(out)['value'] == 'bar' + + +@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") +def test_redis_data_info_absent_key(capfd, mocker): + set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False}) + mocker.patch('redis.Redis.get', return_value=None) + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + print(out) + assert not err + assert not json.loads(out)['exists'] + assert 'value' not in json.loads(out) + + +@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") +def test_redis_data_fail_username(capfd, mocker): + set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False}) + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + print(out) + assert not err + assert json.loads(out)['failed'] + assert json.loads( + out)['msg'] == 'The option `username` in only supported with redis >= 3.4.0.' 
+ + +@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") +def test_redis_data_info_absent_key_no_username(capfd, mocker): + set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False}) + mocker.patch('redis.Redis.get', return_value=None) + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + print(out) + assert not err + assert not json.loads(out)['exists'] + assert 'value' not in json.loads(out) + + +@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") +def test_redis_data_info_existing_key_no_username(capfd, mocker): + set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False}) + mocker.patch('redis.Redis.get', return_value='bar') + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + print(out) + assert not err + assert json.loads(out)['exists'] + assert json.loads(out)['value'] == 'bar' From 7c43cc3faa51bfb873e5c2a6336478f9594f14de Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 9 Sep 2021 07:31:44 +0200 Subject: [PATCH 0315/2828] Improve CI (#3348) * Remove superfluous test. * Use remote_temp_dir instead of output_dir on remote. * Read certificate from correct place. * Adjust more places. * Fix boolean. * Improve cryptography setup. * Fix java_keystore changes. * Need to copy binary from remote. * Use correct Python for serve script. * Sleep before downloading. * Use correct Python interpreter. * Avoid failing shebang test. * Fix permission error with macOS 11.1. * Avoid shebang trouble. 
--- .../ansible_galaxy_install/meta/main.yml | 2 + .../ansible_galaxy_install/tasks/main.yml | 2 +- .../integration/targets/archive/meta/main.yml | 1 + .../targets/archive/tasks/main.yml | 2 +- .../targets/archive/tests/broken-link.yml | 12 +- .../targets/archive/tests/core.yml | 58 ++++---- .../targets/archive/tests/exclusions.yml | 18 +-- .../targets/archive/tests/idempotency.yml | 56 ++++---- .../targets/archive/tests/remove.yml | 70 +++++----- .../integration/targets/consul/meta/main.yml | 1 + .../integration/targets/consul/tasks/main.yml | 28 ++-- .../targets/deploy_helper/meta/main.yml | 2 + .../targets/deploy_helper/tasks/main.yml | 2 +- .../targets/filter_random_mac/meta/main.yml | 2 + .../targets/filter_random_mac/tasks/main.yml | 3 - .../targets/flatpak/tasks/setup.yml | 2 +- tests/integration/targets/gem/meta/main.yml | 1 + tests/integration/targets/gem/tasks/main.yml | 18 +-- .../targets/git_config/meta/main.yml | 2 + .../tasks/get_set_state_present_file.yml | 5 +- .../git_config/tasks/setup_no_value.yml | 3 +- .../targets/git_config/tasks/setup_value.yml | 3 +- tests/integration/targets/hg/meta/main.yml | 1 + .../targets/hg/tasks/run-tests.yml | 6 +- .../targets/iso_create/meta/main.yml | 1 + .../targets/iso_create/tasks/main.yml | 54 ++++---- .../iso_create/tasks/prepare_dest_dir.yml | 4 +- .../targets/iso_extract/meta/main.yml | 1 + .../targets/iso_extract/tasks/main.yml | 2 +- .../targets/iso_extract/tasks/prepare.yml | 6 +- .../targets/iso_extract/tasks/tests.yml | 8 +- .../targets/java_cert/defaults/main.yml | 18 +-- .../targets/java_cert/meta/main.yml | 1 + .../targets/java_cert/tasks/main.yml | 22 +-- .../targets/java_cert/tasks/state_change.yml | 8 +- .../targets/java_keystore/meta/main.yml | 1 + .../targets/java_keystore/tasks/prepare.yml | 14 +- .../targets/java_keystore/tasks/tests.yml | 128 ++++++++++++++---- tests/integration/targets/mail/meta/main.yml | 2 + tests/integration/targets/mail/tasks/main.yml | 4 +- 
tests/integration/targets/nomad/meta/main.yml | 1 + .../integration/targets/nomad/tasks/main.yml | 22 +-- tests/integration/targets/npm/meta/main.yml | 1 + tests/integration/targets/npm/tasks/main.yml | 2 +- tests/integration/targets/npm/tasks/setup.yml | 4 +- tests/integration/targets/pids/meta/main.yml | 2 + tests/integration/targets/pids/tasks/main.yml | 18 ++- .../targets/setup_openssl/tasks/main.yml | 21 +++ .../targets/setup_openssl/vars/Debian.yml | 2 + .../targets/setup_openssl/vars/FreeBSD.yml | 2 + .../targets/setup_openssl/vars/RedHat.yml | 2 + .../targets/setup_openssl/vars/Suse.yml | 2 + .../targets/ssh_config/meta/main.yml | 1 + .../targets/ssh_config/tasks/main.yml | 8 +- .../targets/supervisorctl/meta/main.yml | 1 + .../targets/supervisorctl/tasks/main.yml | 2 +- .../targets/synchronize-buildah/aliases | 3 - .../targets/synchronize-buildah/inventory | 1 - .../files/normal_file.txt | 1 - .../test_buildah_synchronize/tasks/main.yml | 71 ---------- .../targets/synchronize-buildah/runme.sh | 15 -- .../test_synchronize_buildah.yml | 8 -- .../targets/xattr/defaults/main.yml | 2 +- tests/integration/targets/xattr/meta/main.yml | 1 + tests/integration/targets/yarn/meta/main.yml | 1 + tests/integration/targets/yarn/tasks/run.yml | 28 ++-- .../integration/targets/zypper/meta/main.yml | 2 + .../targets/zypper/tasks/zypper.yml | 36 ++--- 68 files changed, 440 insertions(+), 394 deletions(-) create mode 100644 tests/integration/targets/ansible_galaxy_install/meta/main.yml create mode 100644 tests/integration/targets/deploy_helper/meta/main.yml create mode 100644 tests/integration/targets/filter_random_mac/meta/main.yml create mode 100644 tests/integration/targets/git_config/meta/main.yml create mode 100644 tests/integration/targets/mail/meta/main.yml create mode 100644 tests/integration/targets/pids/meta/main.yml delete mode 100644 tests/integration/targets/synchronize-buildah/aliases delete mode 100644 tests/integration/targets/synchronize-buildah/inventory 
delete mode 100644 tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt delete mode 100644 tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml delete mode 100644 tests/integration/targets/synchronize-buildah/runme.sh delete mode 100644 tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml create mode 100644 tests/integration/targets/zypper/meta/main.yml diff --git a/tests/integration/targets/ansible_galaxy_install/meta/main.yml b/tests/integration/targets/ansible_galaxy_install/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/ansible_galaxy_install/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml index 232c96aff5..276dab3a30 100644 --- a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml +++ b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml @@ -50,7 +50,7 @@ ################################################### - name: set_fact: - reqs_file: '{{ output_dir }}/reqs.yaml' + reqs_file: '{{ remote_tmp_dir }}/reqs.yaml' - name: Copy requirements file copy: diff --git a/tests/integration/targets/archive/meta/main.yml b/tests/integration/targets/archive/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/archive/meta/main.yml +++ b/tests/integration/targets/archive/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml index 1e2c9f9c27..e7b6c44175 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -75,7 +75,7 @@ register: backports_lzma_pip - name: prep our files - copy: src={{ item 
}} dest={{output_dir}}/{{ item }} + copy: src={{ item }} dest={{remote_tmp_dir}}/{{ item }} with_items: - foo.txt - bar.txt diff --git a/tests/integration/targets/archive/tests/broken-link.yml b/tests/integration/targets/archive/tests/broken-link.yml index cc1e07aaf1..677ebe0bf7 100644 --- a/tests/integration/targets/archive/tests/broken-link.yml +++ b/tests/integration/targets/archive/tests/broken-link.yml @@ -3,29 +3,29 @@ - name: Create link - broken link ({{ format }}) file: src: /nowhere - dest: "{{ output_dir }}/nowhere.txt" + dest: "{{ remote_tmp_dir }}/nowhere.txt" state: link force: yes - name: Archive - broken link ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_broken_link.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}" format: "{{ format }}" - name: Verify archive exists - broken link ({{ format }}) file: - path: "{{ output_dir }}/archive_broken_link.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}" state: file - name: Remove archive - broken link ({{ format }}) file: - path: "{{ output_dir }}/archive_broken_link.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}" state: absent - name: Remove link - broken link ({{ format }}) file: - path: "{{ output_dir }}/nowhere.txt" + path: "{{ remote_tmp_dir }}/nowhere.txt" state: absent # 'zip' does not support symlink's when: format != 'zip' diff --git a/tests/integration/targets/archive/tests/core.yml b/tests/integration/targets/archive/tests/core.yml index d008e9c122..f3ae906429 100644 --- a/tests/integration/targets/archive/tests/core.yml +++ b/tests/integration/targets/archive/tests/core.yml @@ -25,14 +25,14 @@ # Core functionality tests - name: Archive - no options ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_no_opts.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir 
}}/archive_no_opts.{{ format }}" format: "{{ format }}" register: archive_no_options - name: Verify that archive exists - no options ({{ format }}) file: - path: "{{output_dir}}/archive_no_opts.{{ format }}" + path: "{{remote_tmp_dir}}/archive_no_opts.{{ format }}" state: file - name: Verify that archive result is changed and includes all files - no options ({{ format }}) @@ -44,20 +44,20 @@ - name: Remove the archive - no options ({{ format }}) file: - path: "{{ output_dir }}/archive_no_options.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_no_options.{{ format }}" state: absent - name: Archive - file options ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_file_options.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_file_options.{{ format }}" format: "{{ format }}" mode: "u+rwX,g-rwx,o-rwx" register: archive_file_options - name: Retrieve archive file information - file options ({{ format }}) stat: - path: "{{ output_dir }}/archive_file_options.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_file_options.{{ format }}" register: archive_file_options_stat - name: Test that the file modes were changed @@ -69,19 +69,19 @@ - name: Remove the archive - file options ({{ format }}) file: - path: "{{ output_dir }}/archive_file_options.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_file_options.{{ format }}" state: absent - name: Archive - non-ascii ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_nonascii_くらとみ.{{ format }}" format: "{{ format }}" register: archive_nonascii - name: Retrieve archive file information - non-ascii ({{ format }}) stat: - path: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_nonascii_くらとみ.{{ format }}" register: archive_nonascii_stat - name: Test that archive exists 
- non-ascii ({{ format }}) @@ -92,13 +92,13 @@ - name: Remove the archive - non-ascii ({{ format }}) file: - path: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_nonascii_くらとみ.{{ format }}" state: absent - name: Archive - single target ({{ format }}) archive: - path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/archive_single_target.{{ format }}" + path: "{{ remote_tmp_dir }}/foo.txt" + dest: "{{ remote_tmp_dir }}/archive_single_target.{{ format }}" format: "{{ format }}" register: archive_single_target @@ -117,7 +117,7 @@ - block: - name: Retrieve contents of archive - single target ({{ format }}) ansible.builtin.unarchive: - src: "{{ output_dir }}/archive_single_target.{{ format }}" + src: "{{ remote_tmp_dir }}/archive_single_target.{{ format }}" dest: . list_files: true check_mode: true @@ -135,22 +135,22 @@ - name: Remove archive - single target ({{ format }}) file: - path: "{{ output_dir }}/archive_single_target.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_single_target.{{ format }}" state: absent - name: Archive - path list ({{ format }}) archive: path: - - "{{ output_dir }}/empty.txt" - - "{{ output_dir }}/foo.txt" - - "{{ output_dir }}/bar.txt" - dest: "{{ output_dir }}/archive_path_list.{{ format }}" + - "{{ remote_tmp_dir }}/empty.txt" + - "{{ remote_tmp_dir }}/foo.txt" + - "{{ remote_tmp_dir }}/bar.txt" + dest: "{{ remote_tmp_dir }}/archive_path_list.{{ format }}" format: "{{ format }}" register: archive_path_list - name: Verify that archive exists - path list ({{ format }}) file: - path: "{{output_dir}}/archive_path_list.{{ format }}" + path: "{{remote_tmp_dir}}/archive_path_list.{{ format }}" state: file - name: Assert that archive contains all files - path list ({{ format }}) @@ -161,16 +161,16 @@ - name: Remove archive - path list ({{ format }}) file: - path: "{{ output_dir }}/archive_path_list.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_path_list.{{ format }}" state: absent - name: 
Archive - missing paths ({{ format }}) archive: path: - - "{{ output_dir }}/*.txt" - - "{{ output_dir }}/dne.txt" - exclude_path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/archive_missing_paths.{{ format }}" + - "{{ remote_tmp_dir }}/*.txt" + - "{{ remote_tmp_dir }}/dne.txt" + exclude_path: "{{ remote_tmp_dir }}/foo.txt" + dest: "{{ remote_tmp_dir }}/archive_missing_paths.{{ format }}" format: "{{ format }}" register: archive_missing_paths @@ -179,10 +179,10 @@ that: - archive_missing_paths is changed - "archive_missing_paths.dest_state == 'incomplete'" - - "'{{ output_dir }}/dne.txt' in archive_missing_paths.missing" - - "'{{ output_dir }}/foo.txt' not in archive_missing_paths.missing" + - "'{{ remote_tmp_dir }}/dne.txt' in archive_missing_paths.missing" + - "'{{ remote_tmp_dir }}/foo.txt' not in archive_missing_paths.missing" - name: Remove archive - missing paths ({{ format }}) file: - path: "{{ output_dir }}/archive_missing_paths.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_missing_paths.{{ format }}" state: absent diff --git a/tests/integration/targets/archive/tests/exclusions.yml b/tests/integration/targets/archive/tests/exclusions.yml index 0b65f85851..b2a8c7b890 100644 --- a/tests/integration/targets/archive/tests/exclusions.yml +++ b/tests/integration/targets/archive/tests/exclusions.yml @@ -1,8 +1,8 @@ --- - name: Archive - exclusion patterns ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_exclusion_patterns.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_exclusion_patterns.{{ format }}" format: "{{ format }}" exclusion_patterns: b?r.* register: archive_exclusion_patterns @@ -15,26 +15,26 @@ - name: Remove archive - exclusion patterns ({{ format }}) file: - path: "{{ output_dir }}/archive_exclusion_patterns.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_exclusion_patterns.{{ format }}" state: absent - name: Archive - exclude path ({{ format }}) 
archive: path: - - "{{ output_dir }}/sub/subfile.txt" - - "{{ output_dir }}" + - "{{ remote_tmp_dir }}/sub/subfile.txt" + - "{{ remote_tmp_dir }}" exclude_path: - - "{{ output_dir }}" - dest: "{{ output_dir }}/archive_exclude_paths.{{ format }}" + - "{{ remote_tmp_dir }}" + dest: "{{ remote_tmp_dir }}/archive_exclude_paths.{{ format }}" format: "{{ format }}" register: archive_excluded_paths - name: Assert that excluded paths do not influence archive root - exclude path ({{ format }}) assert: that: - - archive_excluded_paths.arcroot != output_dir + - archive_excluded_paths.arcroot != remote_tmp_dir - name: Remove archive - exclude path ({{ format }}) file: - path: "{{ output_dir }}/archive_exclude_paths.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_exclude_paths.{{ format }}" state: absent diff --git a/tests/integration/targets/archive/tests/idempotency.yml b/tests/integration/targets/archive/tests/idempotency.yml index 9262601572..5a44922adb 100644 --- a/tests/integration/targets/archive/tests/idempotency.yml +++ b/tests/integration/targets/archive/tests/idempotency.yml @@ -1,8 +1,8 @@ --- - name: Archive - file content idempotency ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_file_content_idempotency.{{ format }}" format: "{{ format }}" register: file_content_idempotency_before @@ -10,12 +10,12 @@ lineinfile: line: bar.txt regexp: "^foo.txt$" - path: "{{ output_dir }}/foo.txt" + path: "{{ remote_tmp_dir }}/foo.txt" - name: Archive second time - file content idempotency ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_file_content_idempotency.{{ format }}" format: "{{ format }}" register: file_content_idempotency_after @@ -28,29 +28,29 @@ - name: 
Remove archive - file content idempotency ({{ format }}) file: - path: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_file_content_idempotency.{{ format }}" state: absent - name: Modify file back - file content idempotency ({{ format }}) lineinfile: line: foo.txt regexp: "^bar.txt$" - path: "{{ output_dir }}/foo.txt" + path: "{{ remote_tmp_dir }}/foo.txt" - name: Archive - file name idempotency ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_file_name_idempotency.{{ format }}" format: "{{ format }}" register: file_name_idempotency_before - name: Rename file - file name idempotency ({{ format }}) - command: "mv {{ output_dir}}/foo.txt {{ output_dir }}/fii.txt" + command: "mv {{ remote_tmp_dir }}/foo.txt {{ remote_tmp_dir }}/fii.txt" - name: Archive again - file name idempotency ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_file_name_idempotency.{{ format }}" format: "{{ format }}" register: file_name_idempotency_after @@ -61,16 +61,16 @@ - name: Remove archive - file name idempotency ({{ format }}) file: - path: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_file_name_idempotency.{{ format }}" state: absent - name: Rename file back - file name idempotency ({{ format }}) - command: "mv {{ output_dir }}/fii.txt {{ output_dir }}/foo.txt" + command: "mv {{ remote_tmp_dir }}/fii.txt {{ remote_tmp_dir }}/foo.txt" - name: Archive - single file content idempotency ({{ format }}) archive: - path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/foo.txt" + dest: "{{ 
remote_tmp_dir }}/archive_single_file_content_idempotency.{{ format }}" format: "{{ format }}" register: single_file_content_idempotency_before @@ -78,12 +78,12 @@ lineinfile: line: bar.txt regexp: "^foo.txt$" - path: "{{ output_dir }}/foo.txt" + path: "{{ remote_tmp_dir }}/foo.txt" - name: Archive second time - single file content idempotency ({{ format }}) archive: - path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/foo.txt" + dest: "{{ remote_tmp_dir }}/archive_single_file_content_idempotency.{{ format }}" format: "{{ format }}" register: single_file_content_idempotency_after @@ -96,29 +96,29 @@ - name: Remove archive - single file content idempotency ({{ format }}) file: - path: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_single_file_content_idempotency.{{ format }}" state: absent - name: Modify file back - single file content idempotency ({{ format }}) lineinfile: line: foo.txt regexp: "^bar.txt$" - path: "{{ output_dir }}/foo.txt" + path: "{{ remote_tmp_dir }}/foo.txt" - name: Archive - single file name idempotency ({{ format }}) archive: - path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/foo.txt" + dest: "{{ remote_tmp_dir }}/archive_single_file_name_idempotency.{{ format }}" format: "{{ format }}" register: single_file_name_idempotency_before - name: Rename file - single file name idempotency ({{ format }}) - command: "mv {{ output_dir}}/foo.txt {{ output_dir }}/fii.txt" + command: "mv {{ remote_tmp_dir }}/foo.txt {{ remote_tmp_dir }}/fii.txt" - name: Archive again - single file name idempotency ({{ format }}) archive: - path: "{{ output_dir }}/fii.txt" - dest: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/fii.txt" + dest: "{{ remote_tmp_dir 
}}/archive_single_file_name_idempotency.{{ format }}" format: "{{ format }}" register: single_file_name_idempotency_after @@ -133,8 +133,8 @@ - name: Remove archive - single file name idempotency ({{ format }}) file: - path: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_single_file_name_idempotency.{{ format }}" state: absent - name: Rename file back - single file name idempotency ({{ format }}) - command: "mv {{ output_dir }}/fii.txt {{ output_dir }}/foo.txt" + command: "mv {{ remote_tmp_dir }}/fii.txt {{ remote_tmp_dir }}/foo.txt" diff --git a/tests/integration/targets/archive/tests/remove.yml b/tests/integration/targets/archive/tests/remove.yml index 26849ac850..08f16e98da 100644 --- a/tests/integration/targets/archive/tests/remove.yml +++ b/tests/integration/targets/archive/tests/remove.yml @@ -1,15 +1,15 @@ --- - name: Archive - remove source files ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_remove_source_files.{{ format }}" format: "{{ format }}" remove: yes register: archive_remove_source_files - name: Verify archive exists - remove source files ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_files.{{ format }}" state: file - name: Verify all files were archived - remove source files ({{ format }}) @@ -20,13 +20,13 @@ - name: Remove Archive - remove source files ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_files.{{ format }}" state: absent - name: Assert that source files were removed - remove source files ({{ format }}) assert: that: - - "'{{ output_dir }}/{{ item }}' is not exists" + - "'{{ remote_tmp_dir }}/{{ item }}' is not exists" with_items: - foo.txt - 
bar.txt @@ -35,7 +35,7 @@ - name: Copy source files - remove source directory ({{ format }}) copy: src: "{{ item }}" - dest: "{{ output_dir }}/{{ item }}" + dest: "{{ remote_tmp_dir }}/{{ item }}" with_items: - foo.txt - bar.txt @@ -43,13 +43,13 @@ - name: Create temporary directory - remove source directory ({{ format }}) file: - path: "{{ output_dir }}/tmpdir" + path: "{{ remote_tmp_dir }}/tmpdir" state: directory - name: Copy source files to temporary directory - remove source directory ({{ format }}) copy: src: "{{ item }}" - dest: "{{ output_dir }}/tmpdir/{{ item }}" + dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}" with_items: - foo.txt - bar.txt @@ -57,15 +57,15 @@ - name: Archive - remove source directory ({{ format }}) archive: - path: "{{ output_dir }}/tmpdir" - dest: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + path: "{{ remote_tmp_dir }}/tmpdir" + dest: "{{ remote_tmp_dir }}/archive_remove_source_directory.{{ format }}" format: "{{ format }}" remove: yes register: archive_remove_source_directory - name: Verify archive exists - remove source directory ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_directory.{{ format }}" state: file - name: Verify archive contains all files - remove source directory ({{ format }}) @@ -76,23 +76,23 @@ - name: Remove archive - remove source directory ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_directory.{{ format }}" state: absent - name: Verify source directory was removed - remove source directory ({{ format }}) assert: that: - - "'{{ output_dir }}/tmpdir' is not exists" + - "'{{ remote_tmp_dir }}/tmpdir' is not exists" - name: Create temporary directory - remove source excluding path ({{ format }}) file: - path: "{{ output_dir }}/tmpdir" + path: "{{ remote_tmp_dir }}/tmpdir" state: directory - name: Copy 
source files to temporary directory - remove source excluding path ({{ format }}) copy: src: "{{ item }}" - dest: "{{ output_dir }}/tmpdir/{{ item }}" + dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}" with_items: - foo.txt - bar.txt @@ -100,16 +100,16 @@ - name: Archive - remove source excluding path ({{ format }}) archive: - path: "{{ output_dir }}/tmpdir/*" - dest: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + path: "{{ remote_tmp_dir }}/tmpdir/*" + dest: "{{ remote_tmp_dir }}/archive_remove_source_excluding_path.{{ format }}" format: "{{ format }}" remove: yes - exclude_path: "{{ output_dir }}/tmpdir/empty.txt" + exclude_path: "{{ remote_tmp_dir }}/tmpdir/empty.txt" register: archive_remove_source_excluding_path - name: Verify archive exists - remove source excluding path ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_path.{{ format }}" state: file - name: Verify all files except excluded are archived - remove source excluding path ({{ format }}) @@ -120,18 +120,18 @@ - name: Remove archive - remove source excluding path ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_path.{{ format }}" state: absent - name: Verify that excluded file still exists - remove source excluding path ({{ format }}) file: - path: "{{ output_dir }}/tmpdir/empty.txt" + path: "{{ remote_tmp_dir }}/tmpdir/empty.txt" state: file - name: Copy source files to temporary directory - remove source excluding sub path ({{ format }}) copy: src: "{{ item }}" - dest: "{{ output_dir }}/tmpdir/{{ item }}" + dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}" with_items: - foo.txt - bar.txt @@ -142,33 +142,33 @@ - name: Archive - remove source excluding sub path ({{ format }}) archive: path: - - "{{ output_dir }}/tmpdir/*.txt" - - "{{ output_dir }}/tmpdir/sub/*" - 
dest: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + - "{{ remote_tmp_dir }}/tmpdir/*.txt" + - "{{ remote_tmp_dir }}/tmpdir/sub/*" + dest: "{{ remote_tmp_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" format: "{{ format }}" remove: yes - exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + exclude_path: "{{ remote_tmp_dir }}/tmpdir/sub/subfile.txt" register: archive_remove_source_excluding_sub_path - name: Verify archive exists - remove source excluding sub path ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" state: file - name: Remove archive - remove source excluding sub path ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" state: absent - name: Verify that sub path still exists - remove source excluding sub path ({{ format }}) file: - path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + path: "{{ remote_tmp_dir }}/tmpdir/sub/subfile.txt" state: file - name: Copy source files to temporary directory - remove source with nested paths ({{ format }}) copy: src: "{{ item }}" - dest: "{{ output_dir }}/tmpdir/{{ item }}" + dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}" with_items: - foo.txt - bar.txt @@ -178,20 +178,20 @@ - name: Archive - remove source with nested paths ({{ format }}) archive: - path: "{{ output_dir }}/tmpdir/" - dest: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + path: "{{ remote_tmp_dir }}/tmpdir/" + dest: "{{ remote_tmp_dir }}/archive_remove_source_nested_paths.{{ format }}" format: "{{ format }}" remove: yes register: archive_remove_nested_paths - name: Verify archive exists - remove source with nested paths ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + path: 
"{{ remote_tmp_dir }}/archive_remove_source_nested_paths.{{ format }}" state: file - name: Verify source files were removed - remove source with nested paths ({{ format }}) file: - path: "{{ output_dir }}/tmpdir" + path: "{{ remote_tmp_dir }}/tmpdir" state: absent register: archive_remove_nested_paths_status @@ -203,5 +203,5 @@ - name: Remove archive - remove source with nested paths ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_nested_paths.{{ format }}" state: absent diff --git a/tests/integration/targets/consul/meta/main.yml b/tests/integration/targets/consul/meta/main.yml index f4c99a2ad7..f9bb8406a4 100644 --- a/tests/integration/targets/consul/meta/main.yml +++ b/tests/integration/targets/consul/meta/main.yml @@ -2,3 +2,4 @@ dependencies: - setup_pkg_mgr - setup_openssl + - setup_remote_tmp_dir diff --git a/tests/integration/targets/consul/tasks/main.yml b/tests/integration/targets/consul/tasks/main.yml index 4de2d332e5..7f216f81f0 100644 --- a/tests/integration/targets/consul/tasks/main.yml +++ b/tests/integration/targets/consul/tasks/main.yml @@ -7,7 +7,7 @@ vars: consul_version: 1.5.0 consul_uri: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/consul/consul_{{ consul_version }}_{{ ansible_system | lower }}_{{ consul_arch }}.zip - consul_cmd: '{{ output_dir }}/consul' + consul_cmd: '{{ remote_tmp_dir }}/consul' block: - name: register pyOpenSSL version command: '{{ ansible_python_interpreter }} -c ''import OpenSSL; print(OpenSSL.__version__)''' @@ -27,19 +27,19 @@ block: - name: Generate privatekey community.crypto.openssl_privatekey: - path: '{{ output_dir }}/privatekey.pem' + path: '{{ remote_tmp_dir }}/privatekey.pem' - name: Generate CSR community.crypto.openssl_csr: - path: '{{ output_dir }}/csr.csr' - privatekey_path: '{{ output_dir }}/privatekey.pem' + path: '{{ remote_tmp_dir }}/csr.csr' + privatekey_path: '{{ remote_tmp_dir 
}}/privatekey.pem' subject: commonName: localhost - name: Generate selfsigned certificate register: selfsigned_certificate community.crypto.openssl_certificate: - path: '{{ output_dir }}/cert.pem' - csr_path: '{{ output_dir }}/csr.csr' - privatekey_path: '{{ output_dir }}/privatekey.pem' + path: '{{ remote_tmp_dir }}/cert.pem' + csr_path: '{{ remote_tmp_dir }}/csr.csr' + privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem' provider: selfsigned selfsigned_digest: sha256 - name: Install unzip @@ -59,21 +59,21 @@ - name: Download consul binary unarchive: src: '{{ consul_uri }}' - dest: '{{ output_dir }}' + dest: '{{ remote_tmp_dir }}' remote_src: true register: result until: result is success - vars: - remote_dir: '{{ echo_output_dir.stdout }}' + remote_dir: '{{ echo_remote_tmp_dir.stdout }}' block: - - command: echo {{ output_dir }} - register: echo_output_dir + - command: echo {{ remote_tmp_dir }} + register: echo_remote_tmp_dir - name: Create configuration file template: src: consul_config.hcl.j2 - dest: '{{ output_dir }}/consul_config.hcl' + dest: '{{ remote_tmp_dir }}/consul_config.hcl' - name: Start Consul (dev mode enabled) - shell: nohup {{ consul_cmd }} agent -dev -config-file {{ output_dir }}/consul_config.hcl /dev/null 2>&1 & + shell: nohup {{ consul_cmd }} agent -dev -config-file {{ remote_tmp_dir }}/consul_config.hcl /dev/null 2>&1 & - name: Create some data command: '{{ consul_cmd }} kv put data/value{{ item }} foo{{ item }}' loop: @@ -83,5 +83,5 @@ - import_tasks: consul_session.yml always: - name: Kill consul process - shell: kill $(cat {{ output_dir }}/consul.pid) + shell: kill $(cat {{ remote_tmp_dir }}/consul.pid) ignore_errors: true diff --git a/tests/integration/targets/deploy_helper/meta/main.yml b/tests/integration/targets/deploy_helper/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/deploy_helper/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git 
a/tests/integration/targets/deploy_helper/tasks/main.yml b/tests/integration/targets/deploy_helper/tasks/main.yml index a61ab2a075..6d03b8da0e 100644 --- a/tests/integration/targets/deploy_helper/tasks/main.yml +++ b/tests/integration/targets/deploy_helper/tasks/main.yml @@ -5,7 +5,7 @@ #################################################################### - name: record the output directory - set_fact: deploy_helper_test_root={{output_dir}}/deploy_helper_test_root + set_fact: deploy_helper_test_root={{remote_tmp_dir}}/deploy_helper_test_root - name: State=query with default parameters deploy_helper: path={{ deploy_helper_test_root }} state=query diff --git a/tests/integration/targets/filter_random_mac/meta/main.yml b/tests/integration/targets/filter_random_mac/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/filter_random_mac/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/filter_random_mac/tasks/main.yml b/tests/integration/targets/filter_random_mac/tasks/main.yml index 782b6e5c95..e09017c6fb 100644 --- a/tests/integration/targets/filter_random_mac/tasks/main.yml +++ b/tests/integration/targets/filter_random_mac/tasks/main.yml @@ -8,9 +8,6 @@ # Copyright: (c) 2019, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -- set_fact: - output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}" - - name: Test random_mac filter bad argument type debug: var: "0 | community.general.random_mac" diff --git a/tests/integration/targets/flatpak/tasks/setup.yml b/tests/integration/targets/flatpak/tasks/setup.yml index 8fc0a23566..decf20d166 100644 --- a/tests/integration/targets/flatpak/tasks/setup.yml +++ b/tests/integration/targets/flatpak/tasks/setup.yml @@ -57,7 +57,7 @@ mode: '0755' - name: Start HTTP server - command: '{{ remote_tmp_dir }}/serve.py 127.0.0.1 8000 /tmp/flatpak/' + command: '{{ 
ansible_python.executable }} {{ remote_tmp_dir }}/serve.py 127.0.0.1 8000 /tmp/flatpak/' async: 120 poll: 0 register: webserver_status diff --git a/tests/integration/targets/gem/meta/main.yml b/tests/integration/targets/gem/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/gem/meta/main.yml +++ b/tests/integration/targets/gem/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/gem/tasks/main.yml b/tests/integration/targets/gem/tasks/main.yml index 499057775c..4674fb1075 100644 --- a/tests/integration/targets/gem/tasks/main.yml +++ b/tests/integration/targets/gem/tasks/main.yml @@ -122,7 +122,7 @@ gem: name: gist state: present - install_dir: "{{ output_dir }}/gems" + install_dir: "{{ remote_tmp_dir }}/gems" ignore_errors: yes register: install_gem_fail_result @@ -141,12 +141,12 @@ name: gist state: present user_install: no - install_dir: "{{ output_dir }}/gems" + install_dir: "{{ remote_tmp_dir }}/gems" register: install_gem_result - name: Find gems in custom directory find: - paths: "{{ output_dir }}/gems/gems" + paths: "{{ remote_tmp_dir }}/gems/gems" file_type: directory contains: gist register: gem_search @@ -163,12 +163,12 @@ name: gist state: absent user_install: no - install_dir: "{{ output_dir }}/gems" + install_dir: "{{ remote_tmp_dir }}/gems" register: install_gem_result - name: Find gems in custom directory find: - paths: "{{ output_dir }}/gems/gems" + paths: "{{ remote_tmp_dir }}/gems/gems" file_type: directory contains: gist register: gem_search @@ -184,14 +184,14 @@ gem: name: gist state: present - bindir: "{{ output_dir }}/custom_bindir" + bindir: "{{ remote_tmp_dir }}/custom_bindir" norc: yes user_install: no # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL register: install_gem_result - name: Get stats of gem executable stat: - path: "{{ output_dir }}/custom_bindir/gist" + path: "{{ 
remote_tmp_dir }}/custom_bindir/gist" register: gem_bindir_stat - name: Ensure gem executable was installed in custom directory @@ -204,14 +204,14 @@ gem: name: gist state: absent - bindir: "{{ output_dir }}/custom_bindir" + bindir: "{{ remote_tmp_dir }}/custom_bindir" norc: yes user_install: no # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL register: install_gem_result - name: Get stats of gem executable stat: - path: "{{ output_dir }}/custom_bindir/gist" + path: "{{ remote_tmp_dir }}/custom_bindir/gist" register: gem_bindir_stat - name: Ensure gem executable was removed from custom directory diff --git a/tests/integration/targets/git_config/meta/main.yml b/tests/integration/targets/git_config/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/git_config/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml b/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml index 20946ac393..5d46ed35c5 100644 --- a/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml +++ b/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml @@ -6,7 +6,7 @@ name: "{{ option_name }}" value: "{{ option_value }}" scope: "file" - file: "{{ output_dir }}/gitconfig_file" + file: "{{ remote_tmp_dir }}/gitconfig_file" state: present register: result @@ -14,7 +14,7 @@ git_config: name: "{{ option_name }}" scope: "file" - file: "{{ output_dir }}/gitconfig_file" + file: "{{ remote_tmp_dir }}/gitconfig_file" state: present register: get_result @@ -26,4 +26,3 @@ - set_result.diff.after == option_value + "\n" - get_result is not changed - get_result.config_value == option_value -... 
diff --git a/tests/integration/targets/git_config/tasks/setup_no_value.yml b/tests/integration/targets/git_config/tasks/setup_no_value.yml index d5552450cf..7bccfc0368 100644 --- a/tests/integration/targets/git_config/tasks/setup_no_value.yml +++ b/tests/integration/targets/git_config/tasks/setup_no_value.yml @@ -8,6 +8,5 @@ - name: set up without value (file) file: - path: "{{ output_dir }}/gitconfig_file" + path: "{{ remote_tmp_dir }}/gitconfig_file" state: absent -... diff --git a/tests/integration/targets/git_config/tasks/setup_value.yml b/tests/integration/targets/git_config/tasks/setup_value.yml index 3eff9c423a..748e838b3d 100644 --- a/tests/integration/targets/git_config/tasks/setup_value.yml +++ b/tests/integration/targets/git_config/tasks/setup_value.yml @@ -9,5 +9,4 @@ - name: set up with value (file) copy: src: gitconfig - dest: "{{ output_dir }}/gitconfig_file" -... + dest: "{{ remote_tmp_dir }}/gitconfig_file" diff --git a/tests/integration/targets/hg/meta/main.yml b/tests/integration/targets/hg/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/hg/meta/main.yml +++ b/tests/integration/targets/hg/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/hg/tasks/run-tests.yml b/tests/integration/targets/hg/tasks/run-tests.yml index 775b297817..0818f4f466 100644 --- a/tests/integration/targets/hg/tasks/run-tests.yml +++ b/tests/integration/targets/hg/tasks/run-tests.yml @@ -6,14 +6,14 @@ - name: set where to extract the repo set_fact: - checkout_dir: "{{ output_dir }}/hg_project_test" + checkout_dir: "{{ remote_tmp_dir }}/hg_project_test" - name: set what repo to use set_fact: repo: "http://hg.pf.osdn.net/view/a/ak/akasurde/hg_project_test" -- name: clean out the output_dir - shell: rm -rf {{ output_dir }}/* +- name: clean out the remote_tmp_dir + shell: rm -rf {{ remote_tmp_dir }}/* - name: verify that mercurial is installed so this test can continue 
shell: which hg diff --git a/tests/integration/targets/iso_create/meta/main.yml b/tests/integration/targets/iso_create/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/iso_create/meta/main.yml +++ b/tests/integration/targets/iso_create/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/iso_create/tasks/main.yml b/tests/integration/targets/iso_create/tasks/main.yml index 4a0df3b818..0e21e01aef 100644 --- a/tests/integration/targets/iso_create/tasks/main.yml +++ b/tests/integration/targets/iso_create/tasks/main.yml @@ -14,15 +14,23 @@ - debug: var=install_pycdlib - set_fact: - output_dir_test: '{{ output_dir }}/test_iso_create' + output_test_dir: '{{ remote_tmp_dir }}/test_iso_create' # - include_tasks: prepare_dest_dir.yml +- name: Copy files and directories + copy: + src: '{{ item }}' + dest: '{{ remote_tmp_dir }}/{{ item }}' + loop: + - test1.cfg + - test_dir + - name: Test check mode iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test.iso" interchange_level: 3 register: iso_result check_mode: yes @@ -30,7 +38,7 @@ - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test.iso" + path: "{{ output_test_dir }}/test.iso" register: iso_file - debug: var=iso_file - assert: @@ -41,15 +49,15 @@ - name: Create iso file with a specified file iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test.iso" interchange_level: 3 register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test.iso" + path: "{{ output_test_dir }}/test.iso" register: iso_file - assert: @@ -60,16 +68,16 @@ - name: Create iso file with a specified file and folder 
iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - - "{{ role_path }}/files/test_dir" - dest_iso: "{{ output_dir_test }}/test1.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + - "{{ remote_tmp_dir }}/test_dir" + dest_iso: "{{ output_test_dir }}/test1.iso" interchange_level: 3 register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test1.iso" + path: "{{ output_test_dir }}/test1.iso" register: iso_file - assert: @@ -80,15 +88,15 @@ - name: Create iso file with volume identification string iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test2.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test2.iso" vol_ident: "OEMDRV" register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test2.iso" + path: "{{ output_test_dir }}/test2.iso" register: iso_file - assert: @@ -99,15 +107,15 @@ - name: Create iso file with Rock Ridge extention iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test3.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test3.iso" rock_ridge: "1.09" register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test3.iso" + path: "{{ output_test_dir }}/test3.iso" register: iso_file - assert: @@ -118,15 +126,15 @@ - name: Create iso file with Joliet extention iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test4.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test4.iso" joliet: 3 register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test4.iso" + path: "{{ output_test_dir }}/test4.iso" register: iso_file - assert: @@ -137,15 +145,15 @@ - name: Create iso file with UDF enabled iso_create: src_files: - 
- "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test5.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test5.iso" udf: True register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test5.iso" + path: "{{ output_test_dir }}/test5.iso" register: iso_file - assert: diff --git a/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml b/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml index 94c529d52a..8320c3942e 100644 --- a/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml +++ b/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml @@ -3,10 +3,10 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - name: Make sure our testing sub-directory does not exist file: - path: '{{ output_dir_test }}' + path: '{{ output_test_dir }}' state: absent - name: Create our testing sub-directory file: - path: '{{ output_dir_test }}' + path: '{{ output_test_dir }}' state: directory diff --git a/tests/integration/targets/iso_extract/meta/main.yml b/tests/integration/targets/iso_extract/meta/main.yml index 0e51c36ebd..07990bd4ef 100644 --- a/tests/integration/targets/iso_extract/meta/main.yml +++ b/tests/integration/targets/iso_extract/meta/main.yml @@ -1,3 +1,4 @@ dependencies: - setup_pkg_mgr - setup_epel + - setup_remote_tmp_dir diff --git a/tests/integration/targets/iso_extract/tasks/main.yml b/tests/integration/targets/iso_extract/tasks/main.yml index 1eb279a3d2..18fd9b37a9 100644 --- a/tests/integration/targets/iso_extract/tasks/main.yml +++ b/tests/integration/targets/iso_extract/tasks/main.yml @@ -23,7 +23,7 @@ # along with Ansible. If not, see . 
- set_fact: - output_dir_test: '{{ output_dir }}/test_iso_extract' + output_test_dir: '{{ remote_tmp_dir }}/test_iso_extract' - name: Install 7zip import_tasks: 7zip.yml diff --git a/tests/integration/targets/iso_extract/tasks/prepare.yml b/tests/integration/targets/iso_extract/tasks/prepare.yml index 78c06ad52c..4e240caca6 100644 --- a/tests/integration/targets/iso_extract/tasks/prepare.yml +++ b/tests/integration/targets/iso_extract/tasks/prepare.yml @@ -19,15 +19,15 @@ - name: Make sure our testing sub-directory does not exist file: - path: '{{ output_dir_test }}' + path: '{{ output_test_dir }}' state: absent - name: Create our testing sub-directory file: - path: '{{ output_dir_test }}' + path: '{{ output_test_dir }}' state: directory - name: copy the iso to the test dir copy: src: test.iso - dest: '{{ output_dir_test }}' + dest: '{{ output_test_dir }}' diff --git a/tests/integration/targets/iso_extract/tasks/tests.yml b/tests/integration/targets/iso_extract/tasks/tests.yml index 18f22422ce..1475027adf 100644 --- a/tests/integration/targets/iso_extract/tasks/tests.yml +++ b/tests/integration/targets/iso_extract/tasks/tests.yml @@ -19,8 +19,8 @@ - name: Extract the iso iso_extract: - image: '{{ output_dir_test }}/test.iso' - dest: '{{ output_dir_test }}' + image: '{{ output_test_dir }}/test.iso' + dest: '{{ output_test_dir }}' files: - 1.txt - 2.txt @@ -32,8 +32,8 @@ - name: Extract the iso again iso_extract: - image: '{{ output_dir_test }}/test.iso' - dest: '{{ output_dir_test }}' + image: '{{ output_test_dir }}/test.iso' + dest: '{{ output_test_dir }}' files: - 1.txt - 2.txt diff --git a/tests/integration/targets/java_cert/defaults/main.yml b/tests/integration/targets/java_cert/defaults/main.yml index 8e63493600..b391eeff2d 100644 --- a/tests/integration/targets/java_cert/defaults/main.yml +++ b/tests/integration/targets/java_cert/defaults/main.yml @@ -1,15 +1,15 @@ --- test_pkcs12_path: testpkcs.p12 test_keystore_path: keystore.jks -test_keystore2_path: "{{ 
output_dir }}/keystore2.jks" +test_keystore2_path: "{{ remote_tmp_dir }}/keystore2.jks" test_keystore2_password: changeit -test_cert_path: "{{ output_dir }}/cert.pem" -test_key_path: "{{ output_dir }}/key.pem" -test_csr_path: "{{ output_dir }}/req.csr" -test_cert2_path: "{{ output_dir }}/cert2.pem" -test_key2_path: "{{ output_dir }}/key2.pem" -test_csr2_path: "{{ output_dir }}/req2.csr" -test_pkcs_path: "{{ output_dir }}/cert.p12" -test_pkcs2_path: "{{ output_dir }}/cert2.p12" +test_cert_path: "{{ remote_tmp_dir }}/cert.pem" +test_key_path: "{{ remote_tmp_dir }}/key.pem" +test_csr_path: "{{ remote_tmp_dir }}/req.csr" +test_cert2_path: "{{ remote_tmp_dir }}/cert2.pem" +test_key2_path: "{{ remote_tmp_dir }}/key2.pem" +test_csr2_path: "{{ remote_tmp_dir }}/req2.csr" +test_pkcs_path: "{{ remote_tmp_dir }}/cert.p12" +test_pkcs2_path: "{{ remote_tmp_dir }}/cert2.p12" test_ssl: setupSSLServer.py test_ssl_port: 21500 diff --git a/tests/integration/targets/java_cert/meta/main.yml b/tests/integration/targets/java_cert/meta/main.yml index 9bc23ac67f..1d78393199 100644 --- a/tests/integration/targets/java_cert/meta/main.yml +++ b/tests/integration/targets/java_cert/meta/main.yml @@ -1,3 +1,4 @@ dependencies: - setup_java_keytool - setup_openssl + - setup_remote_tmp_dir diff --git a/tests/integration/targets/java_cert/tasks/main.yml b/tests/integration/targets/java_cert/tasks/main.yml index 20550740da..2088e3bfda 100644 --- a/tests/integration/targets/java_cert/tasks/main.yml +++ b/tests/integration/targets/java_cert/tasks/main.yml @@ -9,15 +9,15 @@ - name: prep pkcs12 file ansible.builtin.copy: src: "{{ test_pkcs12_path }}" - dest: "{{ output_dir }}/{{ test_pkcs12_path }}" + dest: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}" - name: import pkcs12 community.general.java_cert: - pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" + pkcs12_path: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}" pkcs12_password: changeit pkcs12_alias: default cert_alias: default - keystore_path: 
"{{ output_dir }}/{{ test_keystore_path }}" + keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present @@ -30,11 +30,11 @@ - name: import pkcs12 with wrong password community.general.java_cert: - pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" + pkcs12_path: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}" pkcs12_password: wrong_pass pkcs12_alias: default cert_alias: default_new - keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" + keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present @@ -49,9 +49,9 @@ - name: test fail on mutually exclusive params community.general.java_cert: cert_path: ca.crt - pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" + pkcs12_path: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}" cert_alias: default - keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" + keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present @@ -65,7 +65,7 @@ - name: test fail on missing required params community.general.java_cert: - keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" + keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore state: absent ignore_errors: true @@ -78,7 +78,7 @@ - name: delete object based on cert_alias parameter community.general.java_cert: - keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" + keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore cert_alias: default state: absent @@ -98,8 +98,8 @@ path: "{{ item }}" state: absent loop: - - "{{ output_dir }}/{{ test_pkcs12_path }}" - - "{{ output_dir }}/{{ test_keystore_path }}" + - "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}" + - "{{ remote_tmp_dir }}/{{ test_keystore_path }}" - "{{ test_keystore2_path }}" - "{{ test_cert_path }}" - "{{ 
test_key_path }}" diff --git a/tests/integration/targets/java_cert/tasks/state_change.yml b/tests/integration/targets/java_cert/tasks/state_change.yml index 38ef62cd0f..c0b92c8d2a 100644 --- a/tests/integration/targets/java_cert/tasks/state_change.yml +++ b/tests/integration/targets/java_cert/tasks/state_change.yml @@ -239,13 +239,17 @@ - name: Copy the ssl server script copy: src: "setupSSLServer.py" - dest: "{{ output_dir }}" + dest: "{{ remote_tmp_dir }}" - name: Create an SSL server that we will use for testing URL imports - command: python {{ output_dir }}/setupSSLServer.py {{ output_dir }} {{ test_ssl_port }} + command: "{{ ansible_python.executable }} {{ remote_tmp_dir }}/setupSSLServer.py {{ remote_tmp_dir }} {{ test_ssl_port }}" async: 10 poll: 0 +- name: "Wait for one second to make sure that the serve script has actually been started" + pause: + seconds: 1 + - name: | Download the original cert.pem from our temporary server. The current cert should contain cert2.pem. Importing this cert should return a status of changed diff --git a/tests/integration/targets/java_keystore/meta/main.yml b/tests/integration/targets/java_keystore/meta/main.yml index 9bc23ac67f..1d78393199 100644 --- a/tests/integration/targets/java_keystore/meta/main.yml +++ b/tests/integration/targets/java_keystore/meta/main.yml @@ -1,3 +1,4 @@ dependencies: - setup_java_keytool - setup_openssl + - setup_remote_tmp_dir diff --git a/tests/integration/targets/java_keystore/tasks/prepare.yml b/tests/integration/targets/java_keystore/tasks/prepare.yml index f8811c03ed..04b7cbd9d8 100644 --- a/tests/integration/targets/java_keystore/tasks/prepare.yml +++ b/tests/integration/targets/java_keystore/tasks/prepare.yml @@ -1,12 +1,12 @@ --- - name: Create test directory ansible.builtin.file: - path: "{{ output_dir }}" + path: "{{ remote_tmp_dir }}" state: directory - name: Create private keys community.crypto.openssl_privatekey: - path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ 
'.key' }}" + path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" size: 2048 # this should work everywhere # The following is more efficient, but might not work everywhere: # type: ECC @@ -17,17 +17,17 @@ - name: Create CSRs community.crypto.openssl_csr: - path: "{{ output_dir ~ '/' ~ item.name ~ '.csr' }}" - privatekey_path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" + path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.csr' }}" + privatekey_path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" privatekey_passphrase: "{{ item.passphrase | default(omit) }}" commonName: "{{ item.commonName }}" loop: "{{ java_keystore_certs + java_keystore_new_certs }}" - name: Create certificates community.crypto.x509_certificate: - path: "{{ output_dir ~ '/' ~ item.name ~ '.pem' }}" - csr_path: "{{ output_dir ~ '/' ~ item.name ~ '.csr' }}" - privatekey_path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" + path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" + csr_path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.csr' }}" + privatekey_path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" privatekey_passphrase: "{{ item.passphrase | default(omit) }}" provider: selfsigned loop: "{{ java_keystore_certs + java_keystore_new_certs }}" diff --git a/tests/integration/targets/java_keystore/tasks/tests.yml b/tests/integration/targets/java_keystore/tasks/tests.yml index 8510a64165..07b30ad97d 100644 --- a/tests/integration/targets/java_keystore/tasks/tests.yml +++ b/tests/integration/targets/java_keystore/tasks/tests.yml @@ -1,199 +1,273 @@ --- - name: Create test directory ansible.builtin.file: - path: "{{ output_dir }}" + path: "{{ remote_tmp_dir }}" state: directory - name: Ensure the Java keystore does not exist (cleanup between tests) ansible.builtin.file: - path: "{{ output_dir ~ '/' ~ item.name ~ '.jks' }}" + path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ 
'.jks' }}" state: absent loop: "{{ java_keystore_certs }}" loop_control: - label: "{{ output_dir ~ '/' ~ item.name ~ '.jks' }}" + label: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.jks' }}" +- name: Read certificates + slurp: + src: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" + loop: "{{ java_keystore_certs }}" + when: not remote_cert + register: certificates + +- name: Read certificate keys + slurp: + src: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" + loop: "{{ java_keystore_certs }}" + when: not remote_cert + register: certificate_keys + - name: Create a Java keystore for the given ({{ 'remote' if remote_cert else 'local' }}) certificates (check mode) community.general.java_keystore: &java_keystore_params name: example - dest: "{{ output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" - certificate: "{{ omit if remote_cert else lookup('file', output_dir ~ '/' ~ item.name ~ '.pem') }}" - private_key: "{{ omit if remote_cert else lookup('file', output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key') }}" - certificate_path: "{{ omit if not remote_cert else output_dir ~ '/' ~ item.name ~ '.pem' }}" - private_key_path: "{{ omit if not remote_cert else output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" + dest: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + certificate: "{{ omit if remote_cert else (certificates.results[loop_index].content | b64decode) }}" + private_key: "{{ omit if remote_cert else (certificate_keys.results[loop_index].content | b64decode) }}" + certificate_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" + private_key_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" private_key_passphrase: "{{ item.passphrase | d(omit) }}" password: changeit ssl_backend: "{{ ssl_backend }}" keystore_type: "{{ item.keystore_type | d(omit) }}" loop: "{{ java_keystore_certs }}" + loop_control: + index_var: 
loop_index check_mode: yes register: result_check - name: Create a Java keystore for the given certificates community.general.java_keystore: *java_keystore_params loop: "{{ java_keystore_certs }}" + loop_control: + index_var: loop_index register: result - name: Create a Java keystore for the given certificates (idempotency, check mode) community.general.java_keystore: *java_keystore_params loop: "{{ java_keystore_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_idem_check - name: Create a Java keystore for the given certificates (idempotency) community.general.java_keystore: *java_keystore_params loop: "{{ java_keystore_certs }}" + loop_control: + index_var: loop_index register: result_idem -- name: Create a Java keystore for the given certificates (certificate changed, check mode) - community.general.java_keystore: *java_keystore_params +- name: Read certificates (new) + slurp: + src: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" loop: "{{ java_keystore_new_certs }}" + when: not remote_cert + register: certificates_new + +- name: Read certificate keys (new) + slurp: + src: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" + loop: "{{ java_keystore_new_certs }}" + when: not remote_cert + register: certificate_keys_new + +- name: Create a Java keystore for the given certificates (certificate changed, check mode) + community.general.java_keystore: &java_keystore_params_new_certs + name: example + dest: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + certificate: "{{ omit if remote_cert else (certificates_new.results[loop_index].content | b64decode) }}" + private_key: "{{ omit if remote_cert else (certificate_keys_new.results[loop_index].content | b64decode) }}" + certificate_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" + private_key_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" + 
private_key_passphrase: "{{ item.passphrase | d(omit) }}" + password: changeit + ssl_backend: "{{ ssl_backend }}" + keystore_type: "{{ item.keystore_type | d(omit) }}" + loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_change_check - name: Create a Java keystore for the given certificates (certificate changed) - community.general.java_keystore: *java_keystore_params + community.general.java_keystore: *java_keystore_params_new_certs loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_change - name: Create a Java keystore for the given certificates (alias changed, check mode) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_alias_change_check - name: Create a Java keystore for the given certificates (alias changed) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_alias_change - name: Create a Java keystore for the given certificates (password changed, check mode) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_pw_change_check - name: Create a Java keystore for the given certificates (password changed) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_pw_change - name: Create a Java keystore for the given certificates (force keystore type pkcs12, check mode) 
community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 keystore_type: pkcs12 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_type_pkcs12_check - name: Create a Java keystore for the given certificates (force keystore type jks, check mode) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 keystore_type: jks loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_type_jks_check - name: Create a Java keystore for the given certificates (force keystore type jks) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 keystore_type: jks loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_type_jks - name: Stat keystore (before failure) ansible.builtin.stat: - path: "{{ output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" loop: "{{ java_keystore_new_certs }}" register: result_stat_before - name: Fail to create a Java keystore for the given certificates (password too short) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: short keystore_type: jks loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_fail_jks ignore_errors: true - name: Stat keystore (after failure) ansible.builtin.stat: - path: "{{ output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" loop: "{{ java_keystore_new_certs }}" register: result_stat_after - name: Create a Java keystore for the given certificates (keystore type changed, check mode) 
community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 keystore_type: pkcs12 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_type_change_check - name: Create a Java keystore for the given certificates (keystore type changed) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 keystore_type: pkcs12 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_type_change - name: Create a Java keystore for the given certificates (omit keystore type, check mode) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_type_omit_check - name: Create a Java keystore for the given certificates (omit keystore type) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_type_omit - name: Check that the remote certificates have not been removed ansible.builtin.file: - path: "{{ output_dir ~ '/' ~ item.name ~ '.pem' }}" + path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" state: file loop: "{{ java_keystore_certs + java_keystore_new_certs }}" when: remote_cert - name: Check that the remote private keys have not been removed ansible.builtin.file: - path: "{{ output_dir ~ '/' ~ item.name ~ '.key' }}" + path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.key' }}" state: file loop: "{{ java_keystore_certs }}" when: remote_cert diff --git a/tests/integration/targets/mail/meta/main.yml b/tests/integration/targets/mail/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- 
/dev/null +++ b/tests/integration/targets/mail/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/mail/tasks/main.yml b/tests/integration/targets/mail/tasks/main.yml index 714b662dfd..dbde6743d2 100644 --- a/tests/integration/targets/mail/tasks/main.yml +++ b/tests/integration/targets/mail/tasks/main.yml @@ -16,7 +16,7 @@ - name: Install test smtpserver copy: src: '{{ item }}' - dest: '{{ output_dir }}/{{ item }}' + dest: '{{ remote_tmp_dir }}/{{ item }}' loop: - smtpserver.py - smtpserver.crt @@ -25,7 +25,7 @@ # FIXME: Verify the mail after it was send would be nice # This would require either dumping the content, or registering async task output - name: Start test smtpserver - shell: '{{ ansible_python.executable }} {{ output_dir }}/smtpserver.py 10025:10465' + shell: '{{ ansible_python.executable }} {{ remote_tmp_dir }}/smtpserver.py 10025:10465' async: 30 poll: 0 register: smtpserver diff --git a/tests/integration/targets/nomad/meta/main.yml b/tests/integration/targets/nomad/meta/main.yml index f4c99a2ad7..f9bb8406a4 100644 --- a/tests/integration/targets/nomad/meta/main.yml +++ b/tests/integration/targets/nomad/meta/main.yml @@ -2,3 +2,4 @@ dependencies: - setup_pkg_mgr - setup_openssl + - setup_remote_tmp_dir diff --git a/tests/integration/targets/nomad/tasks/main.yml b/tests/integration/targets/nomad/tasks/main.yml index 1e42e7b2f6..81833684f0 100644 --- a/tests/integration/targets/nomad/tasks/main.yml +++ b/tests/integration/targets/nomad/tasks/main.yml @@ -6,7 +6,7 @@ vars: nomad_version: 0.12.4 nomad_uri: https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_{{ ansible_system | lower }}_{{ nomad_arch }}.zip - nomad_cmd: '{{ output_dir }}/nomad' + nomad_cmd: '{{ remote_tmp_dir }}/nomad' block: - name: register pyOpenSSL version @@ -36,21 +36,21 @@ block: - name: Generate privatekey community.crypto.openssl_privatekey: - path: '{{ output_dir }}/privatekey.pem' + path: '{{ 
remote_tmp_dir }}/privatekey.pem' - name: Generate CSR community.crypto.openssl_csr: - path: '{{ output_dir }}/csr.csr' - privatekey_path: '{{ output_dir }}/privatekey.pem' + path: '{{ remote_tmp_dir }}/csr.csr' + privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem' subject: commonName: localhost - name: Generate selfsigned certificate register: selfsigned_certificate community.crypto.openssl_certificate: - path: '{{ output_dir }}/cert.pem' - csr_path: '{{ output_dir }}/csr.csr' - privatekey_path: '{{ output_dir }}/privatekey.pem' + path: '{{ remote_tmp_dir }}/cert.pem' + csr_path: '{{ remote_tmp_dir }}/csr.csr' + privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem' provider: selfsigned selfsigned_digest: sha256 @@ -75,17 +75,17 @@ - name: Download nomad binary unarchive: src: '{{ nomad_uri }}' - dest: '{{ output_dir }}' + dest: '{{ remote_tmp_dir }}' remote_src: true register: result until: result is success - vars: - remote_dir: '{{ echo_output_dir.stdout }}' + remote_dir: '{{ echo_remote_tmp_dir.stdout }}' block: - - command: echo {{ output_dir }} - register: echo_output_dir + - command: echo {{ remote_tmp_dir }} + register: echo_remote_tmp_dir - name: Run tests integration block: diff --git a/tests/integration/targets/npm/meta/main.yml b/tests/integration/targets/npm/meta/main.yml index 392c359035..230548b160 100644 --- a/tests/integration/targets/npm/meta/main.yml +++ b/tests/integration/targets/npm/meta/main.yml @@ -1,3 +1,4 @@ dependencies: - setup_pkg_mgr - setup_gnutar + - setup_remote_tmp_dir diff --git a/tests/integration/targets/npm/tasks/main.yml b/tests/integration/targets/npm/tasks/main.yml index ed5a16a624..c3971fd91d 100644 --- a/tests/integration/targets/npm/tasks/main.yml +++ b/tests/integration/targets/npm/tasks/main.yml @@ -25,7 +25,7 @@ # Setup steps # expand remote path -- command: 'echo {{ output_dir }}' +- command: 'echo {{ remote_tmp_dir }}' register: echo - set_fact: remote_dir: '{{ echo.stdout }}' diff --git 
a/tests/integration/targets/npm/tasks/setup.yml b/tests/integration/targets/npm/tasks/setup.yml index 4e0d908e33..a463b1f8b7 100644 --- a/tests/integration/targets/npm/tasks/setup.yml +++ b/tests/integration/targets/npm/tasks/setup.yml @@ -1,6 +1,6 @@ - name: 'Download NPM' unarchive: src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/npm/{{ nodejs_path }}.tar.gz' - dest: '{{ output_dir }}' + dest: '{{ remote_tmp_dir }}' remote_src: yes - creates: '{{ output_dir }}/{{ nodejs_path }}.tar.gz' + creates: '{{ remote_tmp_dir }}/{{ nodejs_path }}.tar.gz' diff --git a/tests/integration/targets/pids/meta/main.yml b/tests/integration/targets/pids/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/pids/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/pids/tasks/main.yml b/tests/integration/targets/pids/tasks/main.yml index b56093cf0c..823d588561 100644 --- a/tests/integration/targets/pids/tasks/main.yml +++ b/tests/integration/targets/pids/tasks/main.yml @@ -31,13 +31,21 @@ register: find_sleep - name: "Copying 'sleep' binary" + command: cp {{ find_sleep.stdout }} {{ remote_tmp_dir }}/{{ random_name }} + # The following does not work on macOS 11.1 (it uses shutil.copystat, and that will die with a PermissionError): + # copy: + # src: "{{ find_sleep.stdout }}" + # dest: "{{ remote_tmp_dir }}/{{ random_name }}" + # mode: "0777" + # remote_src: true + +- name: Copy helper script copy: - src: "{{ find_sleep.stdout }}" - dest: "{{ output_dir }}/{{ random_name }}" - mode: "0777" + src: obtainpid.sh + dest: "{{ remote_tmp_dir }}/obtainpid.sh" - name: "Running the copy of 'sleep' binary" - command: "sh {{ role_path }}/files/obtainpid.sh '{{ output_dir }}/{{ random_name }}' '{{ output_dir }}/obtainpid.txt'" + command: "sh {{ remote_tmp_dir }}/obtainpid.sh '{{ remote_tmp_dir }}/{{ random_name }}' '{{ remote_tmp_dir }}/obtainpid.txt'" async: 100 
poll: 0 @@ -74,7 +82,7 @@ - name: "Reading pid from the file" slurp: - src: "{{ output_dir }}/obtainpid.txt" + src: "{{ remote_tmp_dir }}/obtainpid.txt" register: newpid - name: "Verify that the Process IDs (PIDs) returned is not empty and also equal to the PIDs obtained in console" diff --git a/tests/integration/targets/setup_openssl/tasks/main.yml b/tests/integration/targets/setup_openssl/tasks/main.yml index 62df7dd5f6..27d485a83f 100644 --- a/tests/integration/targets/setup_openssl/tasks/main.yml +++ b/tests/integration/targets/setup_openssl/tasks/main.yml @@ -33,6 +33,27 @@ extra_args: "-c {{ remote_constraints }}" when: ansible_os_family == 'Darwin' +- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6'] + block: + - name: Install cryptography (Python 3) + become: true + package: + name: '{{ cryptography_package_name_python3 }}' + when: not ansible_os_family == 'Darwin' and ansible_python_version is version('3.0', '>=') + + - name: Install cryptography (Python 2) + become: true + package: + name: '{{ cryptography_package_name }}' + when: not ansible_os_family == 'Darwin' and ansible_python_version is version('3.0', '<') + + - name: Install cryptography (Darwin) + become: true + pip: + name: cryptography>=3.3 + extra_args: "-c {{ remote_constraints }}" + when: ansible_os_family == 'Darwin' + - name: register pyOpenSSL version command: "{{ ansible_python.executable }} -c 'import OpenSSL; print(OpenSSL.__version__)'" register: pyopenssl_version diff --git a/tests/integration/targets/setup_openssl/vars/Debian.yml b/tests/integration/targets/setup_openssl/vars/Debian.yml index 755c7a083c..7254d00a5f 100644 --- a/tests/integration/targets/setup_openssl/vars/Debian.yml +++ b/tests/integration/targets/setup_openssl/vars/Debian.yml @@ -1,3 +1,5 @@ +cryptography_package_name: python-cryptography +cryptography_package_name_python3: python3-cryptography pyopenssl_package_name: python-openssl 
pyopenssl_package_name_python3: python3-openssl openssl_package_name: openssl diff --git a/tests/integration/targets/setup_openssl/vars/FreeBSD.yml b/tests/integration/targets/setup_openssl/vars/FreeBSD.yml index 4fef270602..c34b3646f4 100644 --- a/tests/integration/targets/setup_openssl/vars/FreeBSD.yml +++ b/tests/integration/targets/setup_openssl/vars/FreeBSD.yml @@ -1,3 +1,5 @@ +cryptography_package_name: py27-cryptography +cryptography_package_name_python3: "py{{ ansible_python.version.major }}{{ ansible_python.version.minor }}-cryptography" pyopenssl_package_name: py27-openssl pyopenssl_package_name_python3: "py{{ ansible_python.version.major }}{{ ansible_python.version.minor }}-openssl" openssl_package_name: openssl diff --git a/tests/integration/targets/setup_openssl/vars/RedHat.yml b/tests/integration/targets/setup_openssl/vars/RedHat.yml index 2959932cd7..5e077d732f 100644 --- a/tests/integration/targets/setup_openssl/vars/RedHat.yml +++ b/tests/integration/targets/setup_openssl/vars/RedHat.yml @@ -1,3 +1,5 @@ +cryptography_package_name: python-cryptography +cryptography_package_name_python3: python3-cryptography pyopenssl_package_name: pyOpenSSL pyopenssl_package_name_python3: python3-pyOpenSSL openssl_package_name: openssl diff --git a/tests/integration/targets/setup_openssl/vars/Suse.yml b/tests/integration/targets/setup_openssl/vars/Suse.yml index 2d5200f341..ec2c556bee 100644 --- a/tests/integration/targets/setup_openssl/vars/Suse.yml +++ b/tests/integration/targets/setup_openssl/vars/Suse.yml @@ -1,3 +1,5 @@ +cryptography_package_name: python-cryptography +cryptography_package_name_python3: python3-cryptography pyopenssl_package_name: python-pyOpenSSL pyopenssl_package_name_python3: python3-pyOpenSSL openssl_package_name: openssl diff --git a/tests/integration/targets/ssh_config/meta/main.yml b/tests/integration/targets/ssh_config/meta/main.yml index 91a63627f6..4c6838dbe1 100644 --- a/tests/integration/targets/ssh_config/meta/main.yml +++ 
b/tests/integration/targets/ssh_config/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_remote_constraints + - setup_remote_tmp_dir diff --git a/tests/integration/targets/ssh_config/tasks/main.yml b/tests/integration/targets/ssh_config/tasks/main.yml index bd5acc9e04..74a6f02fd2 100644 --- a/tests/integration/targets/ssh_config/tasks/main.yml +++ b/tests/integration/targets/ssh_config/tasks/main.yml @@ -9,15 +9,15 @@ extra_args: "-c {{ remote_constraints }}" - set_fact: - output_dir_test: '{{ output_dir }}/test_ssh_config' + output_test_dir: '{{ remote_tmp_dir }}/test_ssh_config' - set_fact: - ssh_config_test: '{{ output_dir_test }}/ssh_config_test' - ssh_private_key: '{{ output_dir_test }}/fake_id_rsa' + ssh_config_test: '{{ output_test_dir }}/ssh_config_test' + ssh_private_key: '{{ output_test_dir }}/fake_id_rsa' - name: create a temporary directory file: - path: "{{ output_dir_test }}" + path: "{{ output_test_dir }}" state: directory - name: Copy sample config file diff --git a/tests/integration/targets/supervisorctl/meta/main.yml b/tests/integration/targets/supervisorctl/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/supervisorctl/meta/main.yml +++ b/tests/integration/targets/supervisorctl/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/supervisorctl/tasks/main.yml b/tests/integration/targets/supervisorctl/tasks/main.yml index 2a7ecdcfc0..0c3dd31b76 100644 --- a/tests/integration/targets/supervisorctl/tasks/main.yml +++ b/tests/integration/targets/supervisorctl/tasks/main.yml @@ -9,7 +9,7 @@ suffix: supervisorctl-tests register: supervisord_sock_path - - command: 'echo {{ output_dir }}' + - command: 'echo {{ remote_tmp_dir }}' register: echo - set_fact: remote_dir: '{{ echo.stdout }}' diff --git a/tests/integration/targets/synchronize-buildah/aliases b/tests/integration/targets/synchronize-buildah/aliases deleted file mode 100644 index 
30b10b7ccb..0000000000 --- a/tests/integration/targets/synchronize-buildah/aliases +++ /dev/null @@ -1,3 +0,0 @@ -non_local -needs/root -unsupported diff --git a/tests/integration/targets/synchronize-buildah/inventory b/tests/integration/targets/synchronize-buildah/inventory deleted file mode 100644 index 2eeaf31350..0000000000 --- a/tests/integration/targets/synchronize-buildah/inventory +++ /dev/null @@ -1 +0,0 @@ -buildah-container ansible_host=buildah-container ansible_connection=buildah diff --git a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt b/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt deleted file mode 100644 index 33257a92c0..0000000000 --- a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt +++ /dev/null @@ -1 +0,0 @@ -abnormal content diff --git a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml b/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml deleted file mode 100644 index a80e218921..0000000000 --- a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml +++ /dev/null @@ -1,71 +0,0 @@ -#################################################################### -# WARNING: These are designed specifically for Ansible tests # -# and should not be used as examples of how to write Ansible roles # -#################################################################### - -# test code for the synchronize module -# (c) 2014, James Tanner - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -- name: cleanup old files - file: - path: '{{ output_dir }}' - state: absent - -- name: ensure the target directory exists - file: - path: '{{ output_dir }}' - state: directory - -- name: synchronize file to new filename - synchronize: - src: normal_file.txt - dest: '{{ output_dir }}/remote_file.txt' - register: sync_result - -- assert: - that: - - "'changed' in sync_result" - - sync_result is changed - - "'cmd' in sync_result" - - "'rsync' in sync_result.cmd" - - "'msg' in sync_result" - - "sync_result.msg.startswith('/dev/null 2>/dev/null - -set -e - -buildah from --name $CONTAINER_NAME docker.io/library/centos:7 -trap '{ buildah rm $CONTAINER_NAME; }' EXIT -buildah run $CONTAINER_NAME -- yum install -y rsync - -ansible-playbook test_synchronize_buildah.yml -c buildah -i inventory -vv diff --git a/tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml b/tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml deleted file mode 100644 index e1cc96657e..0000000000 --- a/tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- hosts: buildah-container - connection: buildah - gather_facts: no - vars: - output_dir: /tmp/ansible_test_synchronize_buildah - roles: - - test_buildah_synchronize diff --git a/tests/integration/targets/xattr/defaults/main.yml b/tests/integration/targets/xattr/defaults/main.yml index af18fb8474..c208bf6fb9 100644 --- a/tests/integration/targets/xattr/defaults/main.yml +++ b/tests/integration/targets/xattr/defaults/main.yml @@ -1 +1 @@ -test_file: "{{ output_dir }}/foo.txt" +test_file: "{{ remote_tmp_dir 
}}/foo.txt" diff --git a/tests/integration/targets/xattr/meta/main.yml b/tests/integration/targets/xattr/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/xattr/meta/main.yml +++ b/tests/integration/targets/xattr/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/yarn/meta/main.yml b/tests/integration/targets/yarn/meta/main.yml index 392c359035..230548b160 100644 --- a/tests/integration/targets/yarn/meta/main.yml +++ b/tests/integration/targets/yarn/meta/main.yml @@ -1,3 +1,4 @@ dependencies: - setup_pkg_mgr - setup_gnutar + - setup_remote_tmp_dir diff --git a/tests/integration/targets/yarn/tasks/run.yml b/tests/integration/targets/yarn/tasks/run.yml index bd17d7ffeb..906880797f 100644 --- a/tests/integration/targets/yarn/tasks/run.yml +++ b/tests/integration/targets/yarn/tasks/run.yml @@ -6,31 +6,31 @@ - name: 'Download Nodejs' unarchive: src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yarn/{{ nodejs_path }}.tar.gz' - dest: '{{ output_dir }}' + dest: '{{ remote_tmp_dir }}' remote_src: yes - creates: '{{ output_dir }}/{{ nodejs_path }}.tar.gz' + creates: '{{ remote_tmp_dir }}/{{ nodejs_path }}.tar.gz' - name: 'Download Yarn' unarchive: src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yarn/yarn-v{{yarn_version}}.tar.gz' - dest: '{{ output_dir }}' + dest: '{{ remote_tmp_dir }}' remote_src: yes - creates: '{{ output_dir }}/yarn-v{{yarn_version}}_pkg.tar.gz' + creates: '{{ remote_tmp_dir }}/yarn-v{{yarn_version}}_pkg.tar.gz' - name: 'Copy node to directory created earlier' - command: "mv {{ output_dir }}/{{ nodejs_path }} /usr/local/lib/nodejs/{{nodejs_path}}" + command: "mv {{ remote_tmp_dir }}/{{ nodejs_path }} /usr/local/lib/nodejs/{{nodejs_path}}" # Clean up before running tests - name: Remove any previous Nodejs modules file: - path: '{{output_dir}}/node_modules' + path: '{{remote_tmp_dir}}/node_modules' state: 
absent # Set vars for our test harness - vars: #node_bin_path: "/usr/local/lib/nodejs/node-v{{nodejs_version}}/bin" node_bin_path: "/usr/local/lib/nodejs/{{ nodejs_path }}/bin" - yarn_bin_path: "{{ output_dir }}/yarn-v{{ yarn_version }}/bin" + yarn_bin_path: "{{ remote_tmp_dir }}/yarn-v{{ yarn_version }}/bin" package: 'iconv-lite' environment: PATH: "{{ node_bin_path }}:{{ansible_env.PATH}}" @@ -45,11 +45,11 @@ - name: 'Create dummy package.json' copy: src: templates/package.j2 - dest: '{{ output_dir }}/package.json' + dest: '{{ remote_tmp_dir }}/package.json' - name: 'Install all packages.' yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' state: present environment: @@ -57,7 +57,7 @@ - name: 'Install the same package from package.json again.' yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' name: '{{ package }}' state: present @@ -71,7 +71,7 @@ - name: 'Install all packages in check mode.' 
yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' state: present environment: @@ -89,7 +89,7 @@ - name: 'Install package with explicit version (older version of package)' yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' name: left-pad version: 1.1.0 @@ -104,7 +104,7 @@ - name: 'Upgrade old package' yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' name: left-pad state: latest @@ -118,7 +118,7 @@ - name: 'Remove a package' yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' name: '{{ package }}' state: absent diff --git a/tests/integration/targets/zypper/meta/main.yml b/tests/integration/targets/zypper/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/zypper/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/zypper/tasks/zypper.yml b/tests/integration/targets/zypper/tasks/zypper.yml index eed27ca3b2..30c4f991bc 100644 --- a/tests/integration/targets/zypper/tasks/zypper.yml +++ b/tests/integration/targets/zypper/tasks/zypper.yml @@ -150,17 +150,17 @@ # INSTALL broken local package - name: create directory file: - path: "{{output_dir | expanduser}}/zypper1" + path: "{{remote_tmp_dir | expanduser}}/zypper1" state: directory - name: fake rpm package file: - path: "{{output_dir | expanduser}}/zypper1/broken.rpm" + path: "{{remote_tmp_dir | expanduser}}/zypper1/broken.rpm" state: touch - name: install broken rpm zypper: - name: "{{output_dir | expanduser}}/zypper1/broken.rpm" + name: "{{remote_tmp_dir | expanduser}}/zypper1/broken.rpm" state: present register: zypper_result ignore_errors: yes @@ -191,29 +191,29 @@ - name: create directory file: - path: "{{output_dir | expanduser}}/zypper2" + path: "{{remote_tmp_dir | expanduser}}/zypper2" state: 
directory - name: copy spec file copy: src: empty.spec - dest: "{{ output_dir | expanduser }}/zypper2/empty.spec" + dest: "{{ remote_tmp_dir | expanduser }}/zypper2/empty.spec" - name: build rpm command: | rpmbuild -bb \ - --define "_topdir {{output_dir | expanduser }}/zypper2/rpm-build" + --define "_topdir {{remote_tmp_dir | expanduser }}/zypper2/rpm-build" --define "_builddir %{_topdir}" \ --define "_rpmdir %{_topdir}" \ --define "_srcrpmdir %{_topdir}" \ - --define "_specdir {{output_dir | expanduser}}/zypper2" \ + --define "_specdir {{remote_tmp_dir | expanduser}}/zypper2" \ --define "_sourcedir %{_topdir}" \ - {{ output_dir }}/zypper2/empty.spec + {{ remote_tmp_dir }}/zypper2/empty.spec register: rpm_build_result - name: install empty rpm zypper: - name: "{{ output_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm" + name: "{{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm" disable_gpg_check: yes register: zypper_result @@ -236,13 +236,13 @@ - name: extract from rpm zypper: - name: "{{ output_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm" + name: "{{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm" state: installed disable_gpg_check: yes - extra_args_precommand: --root {{ output_dir | expanduser }}/testdir/ + extra_args_precommand: --root {{ remote_tmp_dir | expanduser }}/testdir/ - name: check that dir var is exist - stat: path={{ output_dir | expanduser }}/testdir/var + stat: path={{ remote_tmp_dir | expanduser }}/testdir/var register: stat_result - name: check that we extract rpm package in testdir folder and folder var is exist @@ -458,25 +458,25 @@ - name: Deploy spec files to build 2 packages with duplicate files. 
template: src: duplicate.spec.j2 - dest: "{{ output_dir | expanduser }}/zypper2/duplicate{{ item }}.spec" + dest: "{{ remote_tmp_dir | expanduser }}/zypper2/duplicate{{ item }}.spec" loop: "{{ looplist }}" - name: build rpms with duplicate files command: | rpmbuild -bb \ - --define "_topdir {{output_dir | expanduser }}/zypper2/rpm-build" + --define "_topdir {{remote_tmp_dir | expanduser }}/zypper2/rpm-build" --define "_builddir %{_topdir}" \ --define "_rpmdir %{_topdir}" \ --define "_srcrpmdir %{_topdir}" \ - --define "_specdir {{output_dir | expanduser}}/zypper2" \ + --define "_specdir {{remote_tmp_dir | expanduser}}/zypper2" \ --define "_sourcedir %{_topdir}" \ - {{ output_dir | expanduser }}/zypper2/duplicate{{ item }}.spec + {{ remote_tmp_dir | expanduser }}/zypper2/duplicate{{ item }}.spec loop: "{{ looplist }}" - name: install duplicate rpms zypper: name: >- - {{ output_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm + {{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm disable_gpg_check: true ignore_errors: true register: zypper_duplicate_result @@ -499,7 +499,7 @@ - name: install duplicate rpms zypper: name: >- - {{ output_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm + {{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm disable_gpg_check: true replacefiles: true ignore_errors: true From 05fe587a3ea2643bacd4dea07112d18464eae6bf Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Thu, 9 Sep 2021 07:58:21 -0400 Subject: [PATCH 0316/2828] Adding new maintainer (#3349) --- .github/BOTMETA.yml | 2 +- commit-rights.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 5b55449a67..3dfca22e73 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,7 +1,7 @@ automerge: true files: plugins/: - supershipit: quidame Ajpantuso + supershipit: quidame 
changelogs/: {} changelogs/fragments/: support: community diff --git a/commit-rights.md b/commit-rights.md index 58743e5048..43836350c5 100644 --- a/commit-rights.md +++ b/commit-rights.md @@ -69,5 +69,6 @@ Individuals who have been asked to become a part of this group have generally be | ------------------- | -------------------- | ------------------ | -------------------- | | Alexei Znamensky | russoz | russoz | | | Andrew Klychkov | andersson007 | andersson007_ | | +| Andrew Pantuso | Ajpantuso | ajpantuso | | | Felix Fontein | felixfontein | felixfontein | | | John R Barker | gundalow | gundalow | | From 58d018ebbd60e2cebcd8050cd61b0a80a6e4b1a1 Mon Sep 17 00:00:00 2001 From: mkschuel <77283980+mkschuel@users.noreply.github.com> Date: Fri, 10 Sep 2021 23:38:26 +0200 Subject: [PATCH 0317/2828] Adds few more gitlab group options (#3248) * Adds few more gitlab group options * Update plugins/modules/source_control/gitlab/gitlab_group.py Co-authored-by: Felix Fontein * Removes default for new string options * Removes default from argument_spec * Adds changelog fragment * Update plugins/modules/source_control/gitlab/gitlab_group.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group.py Co-authored-by: Felix Fontein Co-authored-by: Maik Schueller Co-authored-by: Felix Fontein --- ...248-adds-few-more-gitlab-group-options.yml | 2 + .../source_control/gitlab/gitlab_group.py | 53 +++++++++++++++++-- .../targets/gitlab_group/tasks/main.yml | 25 +++++++++ .../modules/source_control/gitlab/gitlab.py | 7 ++- .../gitlab/test_gitlab_group.py | 17 ++++-- 5 files changed, 96 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/3248-adds-few-more-gitlab-group-options.yml diff --git a/changelogs/fragments/3248-adds-few-more-gitlab-group-options.yml b/changelogs/fragments/3248-adds-few-more-gitlab-group-options.yml new file mode 100644 
index 0000000000..f565fea565 --- /dev/null +++ b/changelogs/fragments/3248-adds-few-more-gitlab-group-options.yml @@ -0,0 +1,2 @@ +minor_changes: + - gitlab_group - add new options ``project_creation_level``, ``auto_devops_enabled``, ``subgroup_creation_level`` (https://github.com/ansible-collections/community.general/pull/3248). diff --git a/plugins/modules/source_control/gitlab/gitlab_group.py b/plugins/modules/source_control/gitlab/gitlab_group.py index 42e1801a81..cdf0f41b65 100644 --- a/plugins/modules/source_control/gitlab/gitlab_group.py +++ b/plugins/modules/source_control/gitlab/gitlab_group.py @@ -61,6 +61,23 @@ options: choices: ["private", "internal", "public"] default: private type: str + project_creation_level: + description: + - Determine if developers can create projects in the group. + choices: ["developer", "maintainer", "noone"] + type: str + version_added: 3.7.0 + auto_devops_enabled: + description: + - Default to Auto DevOps pipeline for all projects within this group. + type: bool + version_added: 3.7.0 + subgroup_creation_level: + description: + - Allowed to create subgroups. 
+ choices: ["maintainer", "owner"] + type: str + version_added: 3.7.0 ''' EXAMPLES = ''' @@ -93,6 +110,20 @@ EXAMPLES = ''' path: my_first_group state: present parent: "super_parent/parent" + +# Other group which only allows sub-groups - no projects +- name: "Create GitLab Group for SubGroups only" + community.general.gitlab_group: + api_url: https://gitlab.example.com/ + validate_certs: True + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_main_group + path: my_main_group + state: present + project_creation_level: noone + auto_devops_enabled: false + subgroup_creation_level: maintainer ''' RETURN = ''' @@ -166,7 +197,10 @@ class GitLabGroup(object): 'name': name, 'path': options['path'], 'parent_id': parent_id, - 'visibility': options['visibility'] + 'visibility': options['visibility'], + 'project_creation_level': options['project_creation_level'], + 'auto_devops_enabled': options['auto_devops_enabled'], + 'subgroup_creation_level': options['subgroup_creation_level'], } if options.get('description'): payload['description'] = options['description'] @@ -176,7 +210,11 @@ class GitLabGroup(object): changed, group = self.updateGroup(self.groupObject, { 'name': name, 'description': options['description'], - 'visibility': options['visibility']}) + 'visibility': options['visibility'], + 'project_creation_level': options['project_creation_level'], + 'auto_devops_enabled': options['auto_devops_enabled'], + 'subgroup_creation_level': options['subgroup_creation_level'], + }) self.groupObject = group if changed: @@ -258,6 +296,9 @@ def main(): state=dict(type='str', default="present", choices=["absent", "present"]), parent=dict(type='str'), visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), + project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), + auto_devops_enabled=dict(type='bool'), + subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), )) module = 
AnsibleModule( @@ -281,6 +322,9 @@ def main(): state = module.params['state'] parent_identifier = module.params['parent'] group_visibility = module.params['visibility'] + project_creation_level = module.params['project_creation_level'] + auto_devops_enabled = module.params['auto_devops_enabled'] + subgroup_creation_level = module.params['subgroup_creation_level'] if not HAS_GITLAB_PACKAGE: module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) @@ -314,7 +358,10 @@ def main(): if gitlab_group.createOrUpdateGroup(group_name, parent_group, { "path": group_path, "description": description, - "visibility": group_visibility}): + "visibility": group_visibility, + "project_creation_level": project_creation_level, + "auto_devops_enabled": auto_devops_enabled, + "subgroup_creation_level": subgroup_creation_level}): module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.groupObject._attrs) else: module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.groupObject._attrs) diff --git a/tests/integration/targets/gitlab_group/tasks/main.yml b/tests/integration/targets/gitlab_group/tasks/main.yml index 34444134c2..fbf8de29a0 100644 --- a/tests/integration/targets/gitlab_group/tasks/main.yml +++ b/tests/integration/targets/gitlab_group/tasks/main.yml @@ -72,3 +72,28 @@ assert: that: - gitlab_group_state_desc.group.description == "My Test Group" + +- name: Cleanup GitLab Group for project_creation_level Test + gitlab_group: + api_url: "{{ gitlab_host }}" + validate_certs: false + api_token: "{{ gitlab_login_token }}" + name: ansible_test_group + path: ansible_test_group + state: absent + +- name: Create GitLab Group for project_creation_level Test + gitlab_group: + api_url: "{{ gitlab_host }}" + validate_certs: false + api_token: "{{ gitlab_login_token }}" + name: ansible_test_group + path: ansible_test_group + project_creation_level: noone + state: 
present + register: gitlab_group_state_pcl + +- name: Test group created with project_creation_level + assert: + that: + - gitlab_group_state_pcl.group.project_creation_level == "noone" diff --git a/tests/unit/plugins/modules/source_control/gitlab/gitlab.py b/tests/unit/plugins/modules/source_control/gitlab/gitlab.py index 5feff78b43..cca9ab5ae6 100644 --- a/tests/unit/plugins/modules/source_control/gitlab/gitlab.py +++ b/tests/unit/plugins/modules/source_control/gitlab/gitlab.py @@ -194,6 +194,7 @@ def resp_get_group(url, request): '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' '"full_name": "Foobar Group", "full_path": "foo-bar",' + '"project_creation_level": "maintainer", "subgroup_creation_level": "maintainer",' '"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",' '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' @@ -225,7 +226,8 @@ def resp_create_group(url, request): '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' '"full_name": "Foobar Group", "full_path": "foo-bar",' - '"file_template_project_id": 1, "parent_id": null}') + '"file_template_project_id": 1, "parent_id": null,' + '"project_creation_level": "developer", "subgroup_creation_level": "maintainer"}') content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @@ -238,7 +240,8 @@ def resp_create_subgroup(url, request): '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",' '"web_url": "http://localhost:3000/groups/foo-bar/bar-foo", "request_access_enabled": false,' '"full_name": "BarFoo Group", "full_path": 
"foo-bar/bar-foo",' - '"file_template_project_id": 1, "parent_id": 1}') + '"file_template_project_id": 1, "parent_id": 1,' + '"project_creation_level": "noone"}') content = content.encode("utf-8") return response(200, content, headers, None, 5, request) diff --git a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py index abf49860f9..0b05f8a7ff 100644 --- a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py +++ b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py @@ -68,32 +68,43 @@ class TestGitlabGroup(GitlabModuleTestCase): def test_create_group(self): group = self.moduleUtil.createGroup({'name': "Foobar Group", 'path': "foo-bar", - 'description': "An interesting group"}) + 'description': "An interesting group", + 'project_creation_level': "developer", + 'subgroup_creation_level': "maintainer"}) self.assertEqual(type(group), Group) self.assertEqual(group.name, "Foobar Group") self.assertEqual(group.path, "foo-bar") self.assertEqual(group.description, "An interesting group") + self.assertEqual(group.project_creation_level, "developer") + self.assertEqual(group.subgroup_creation_level, "maintainer") self.assertEqual(group.id, 1) @with_httmock(resp_create_subgroup) def test_create_subgroup(self): - group = self.moduleUtil.createGroup({'name': "BarFoo Group", 'path': "bar-foo", "parent_id": 1}) + group = self.moduleUtil.createGroup({'name': "BarFoo Group", + 'path': "bar-foo", + 'parent_id': 1, + 'project_creation_level': "noone"}) self.assertEqual(type(group), Group) self.assertEqual(group.name, "BarFoo Group") self.assertEqual(group.full_path, "foo-bar/bar-foo") + self.assertEqual(group.project_creation_level, "noone") self.assertEqual(group.id, 2) self.assertEqual(group.parent_id, 1) @with_httmock(resp_get_group) def test_update_group(self): group = self.gitlab_instance.groups.get(1) - changed, newGroup = 
self.moduleUtil.updateGroup(group, {'name': "BarFoo Group", "visibility": "private"}) + changed, newGroup = self.moduleUtil.updateGroup(group, {'name': "BarFoo Group", + 'visibility': "private", + 'project_creation_level': "maintainer"}) self.assertEqual(changed, True) self.assertEqual(newGroup.name, "BarFoo Group") self.assertEqual(newGroup.visibility, "private") + self.assertEqual(newGroup.project_creation_level, "maintainer") changed, newGroup = self.moduleUtil.updateGroup(group, {'name': "BarFoo Group"}) From 612543919e13e5cf64da92c3342ce7cc7a7453e1 Mon Sep 17 00:00:00 2001 From: Roy Lenferink Date: Sun, 12 Sep 2021 13:46:17 +0200 Subject: [PATCH 0318/2828] Add ipaselinuxusermaporder option to the ipa_config module (#3178) --- ...linuxusermaporder-to-ipa-config-module.yml | 3 +++ plugins/modules/identity/ipa/ipa_config.py | 27 +++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml diff --git a/changelogs/fragments/3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml b/changelogs/fragments/3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml new file mode 100644 index 0000000000..9057be911c --- /dev/null +++ b/changelogs/fragments/3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml @@ -0,0 +1,3 @@ +minor_changes: + - ipa_config - add ``ipaselinuxusermaporder`` option to set the SELinux user map order + (https://github.com/ansible-collections/community.general/pull/3178). diff --git a/plugins/modules/identity/ipa/ipa_config.py b/plugins/modules/identity/ipa/ipa_config.py index e8ee073d6e..2b41dfb098 100644 --- a/plugins/modules/identity/ipa/ipa_config.py +++ b/plugins/modules/identity/ipa/ipa_config.py @@ -72,6 +72,12 @@ options: aliases: ["searchtimelimit"] type: int version_added: '2.5.0' + ipaselinuxusermaporder: + description: The SELinux user map order (order in increasing priority of SELinux users). 
+ aliases: ["selinuxusermaporder"] + type: list + elements: str + version_added: '3.7.0' ipauserauthtype: description: The authentication type to use by default. aliases: ["userauthtype"] @@ -181,6 +187,18 @@ EXAMPLES = r''' ipa_host: localhost ipa_user: admin ipa_pass: supersecret + +- name: Ensure the SELinux user map order is set + community.general.ipa_config: + ipaselinuxusermaporder: + - "guest_u:s0" + - "xguest_u:s0" + - "user_u:s0" + - "staff_u:s0-s0:c0.c1023" + - "unconfined_u:s0-s0:c0.c1023" + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret ''' RETURN = r''' @@ -213,8 +231,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, ipagroupsearchfields=None, ipahomesrootdir=None, ipakrbauthzdata=None, ipamaxusernamelength=None, ipapwdexpadvnotify=None, ipasearchrecordslimit=None, - ipasearchtimelimit=None, ipauserauthtype=None, - ipausersearchfields=None): + ipasearchtimelimit=None, ipaselinuxusermaporder=None, + ipauserauthtype=None, ipausersearchfields=None): config = {} if ipaconfigstring is not None: config['ipaconfigstring'] = ipaconfigstring @@ -238,6 +256,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, config['ipasearchrecordslimit'] = str(ipasearchrecordslimit) if ipasearchtimelimit is not None: config['ipasearchtimelimit'] = str(ipasearchtimelimit) + if ipaselinuxusermaporder is not None: + config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder) if ipauserauthtype is not None: config['ipauserauthtype'] = ipauserauthtype if ipausersearchfields is not None: @@ -263,6 +283,7 @@ def ensure(module, client): ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'), ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'), ipasearchtimelimit=module.params.get('ipasearchtimelimit'), + ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'), ipauserauthtype=module.params.get('ipauserauthtype'), ipausersearchfields=module.params.get('ipausersearchfields'), ) @@ -304,6 
+325,8 @@ def main(): ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']), ipasearchrecordslimit=dict(type='int', aliases=['searchrecordslimit']), ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']), + ipaselinuxusermaporder=dict(type='list', elements='str', + aliases=['selinuxusermaporder']), ipauserauthtype=dict(type='list', elements='str', aliases=['userauthtype'], choices=["password", "radius", "otp", "pkinit", From 29e4066944686f09c911778e8419027909c8802b Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sun, 12 Sep 2021 07:46:53 -0400 Subject: [PATCH 0319/2828] New filter plugin - unicode_normalization (#3359) * Initial commit * Adding maintainer in BOTMETA * Adding changelog fragment * Updating filter_guide * Applying initial review suggestions --- .github/BOTMETA.yml | 2 + .../3359-add-unicode_normalize-filter.yml | 4 ++ docs/docsite/rst/filter_guide.rst | 31 ++++++++++++++ plugins/filter/unicode_normalize.py | 40 +++++++++++++++++++ .../targets/filter_unicode_normalize/aliases | 2 + .../filter_unicode_normalize/tasks/main.yml | 39 ++++++++++++++++++ .../filter_unicode_normalize/vars/main.yml | 4 ++ 7 files changed, 122 insertions(+) create mode 100644 changelogs/fragments/3359-add-unicode_normalize-filter.yml create mode 100644 plugins/filter/unicode_normalize.py create mode 100644 tests/integration/targets/filter_unicode_normalize/aliases create mode 100644 tests/integration/targets/filter_unicode_normalize/tasks/main.yml create mode 100644 tests/integration/targets/filter_unicode_normalize/vars/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 3dfca22e73..df2520e263 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -131,6 +131,8 @@ files: $filters/random_mac.py: {} $filters/time.py: maintainers: resmo + $filters/unicode_normalize.py: + maintainers: Ajpantuso $filters/version_sort.py: maintainers: ericzolf $inventories/: diff --git a/changelogs/fragments/3359-add-unicode_normalize-filter.yml 
b/changelogs/fragments/3359-add-unicode_normalize-filter.yml new file mode 100644 index 0000000000..33aa06dc92 --- /dev/null +++ b/changelogs/fragments/3359-add-unicode_normalize-filter.yml @@ -0,0 +1,4 @@ +--- +add plugin.filter: + - name: unicode_normalize + description: Normalizes unicode strings to facilitate comparison of characters with normalized forms diff --git a/docs/docsite/rst/filter_guide.rst b/docs/docsite/rst/filter_guide.rst index 201b275aae..dab8464439 100644 --- a/docs/docsite/rst/filter_guide.rst +++ b/docs/docsite/rst/filter_guide.rst @@ -751,3 +751,34 @@ To extract ports from all clusters with name containing 'server1': server_name_query: "domain.server[?contains(name,'server1')].port" .. note:: while using ``starts_with`` and ``contains``, you have to use `` to_json | from_json `` filter for correct parsing of data structure. + +Working with Unicode +--------------------- + +`Unicode `_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this ``Unicode`` defines `normalization forms `_ which avoid these distinctions by choosing a unique character sequence for a given visual representation. + +You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks. + +.. code-block:: yaml+jinja + + - name: Compare Unicode representations + debug: + msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}" + vars: + with_combining_character: "{{ 'Mayagu\u0308ez' }}" + without_combining_character: Mayagüez + +This produces: + +.. code-block:: ansible-output + + TASK [Compare Unicode representations] ******************************************************** + ok: [localhost] => { + "msg": true + } + +The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string. 
+ +:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference `_ for more information. + +.. versionadded:: 3.7.0 diff --git a/plugins/filter/unicode_normalize.py b/plugins/filter/unicode_normalize.py new file mode 100644 index 0000000000..9afbf29e3f --- /dev/null +++ b/plugins/filter/unicode_normalize.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from unicodedata import normalize + +from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError +from ansible.module_utils.six import text_type + + +def unicode_normalize(data, form='NFC'): + """Applies normalization to 'unicode' strings. + + Args: + data: A unicode string piped into the Jinja filter + form: One of ('NFC', 'NFD', 'NFKC', 'NFKD'). + See https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize for more information. + + Returns: + A normalized unicode string of the specified 'form'. 
+ """ + + if not isinstance(data, text_type): + raise AnsibleFilterTypeError("%s is not a valid input type" % type(data)) + + if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'): + raise AnsibleFilterError("%s is not a valid form" % form) + + return normalize(form, data) + + +class FilterModule(object): + def filters(self): + return { + 'unicode_normalize': unicode_normalize, + } diff --git a/tests/integration/targets/filter_unicode_normalize/aliases b/tests/integration/targets/filter_unicode_normalize/aliases new file mode 100644 index 0000000000..f04737b845 --- /dev/null +++ b/tests/integration/targets/filter_unicode_normalize/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/filter_unicode_normalize/tasks/main.yml b/tests/integration/targets/filter_unicode_normalize/tasks/main.yml new file mode 100644 index 0000000000..948ca74b4b --- /dev/null +++ b/tests/integration/targets/filter_unicode_normalize/tasks/main.yml @@ -0,0 +1,39 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Test 'NFC' normalization + assert: + that: + - u_umlaut != u_umlaut_combining + - u_umlaut_combining != (u_umlaut_combining | community.general.unicode_normalize) + - u_umlaut == (u_umlaut_combining | community.general.unicode_normalize) + +- name: Test 'NFKC' normalization + assert: + that: + - latin_capital_i != roman_numeral_one + - latin_capital_i == (roman_numeral_one | community.general.unicode_normalize(form='NFKC')) + +- name: Register invalid input type + debug: + msg: "{{ 1 | community.general.unicode_normalize }}" + ignore_errors: true + register: invalid_input_type + +- name: Assert an invalid input type causes 
failure + assert: + that: + - invalid_input_type is failed + +- name: Register invalid form selection + debug: + msg: "{{ 'arbitrary text' | community.general.unicode_normalize(form='invalid') }}" + ignore_errors: true + register: invalid_form_selection + +- name: Assert invalid form selection causes failure + assert: + that: + - invalid_form_selection is failed diff --git a/tests/integration/targets/filter_unicode_normalize/vars/main.yml b/tests/integration/targets/filter_unicode_normalize/vars/main.yml new file mode 100644 index 0000000000..88d19b20db --- /dev/null +++ b/tests/integration/targets/filter_unicode_normalize/vars/main.yml @@ -0,0 +1,4 @@ +u_umlaut: "{{ '\u00fc' }}" +u_umlaut_combining: "{{ 'u' + '\u0308' }}" +roman_numeral_one: "{{ '\u2160' }}" +latin_capital_i: "{{ '\u0049' }}" From 0a5db85ad52115d535f9f80d330c81eb91b20b80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rapha=C3=ABl=20Droz?= Date: Mon, 13 Sep 2021 02:16:06 -0300 Subject: [PATCH 0320/2828] gitlab_runner: Support project-scoped runners registration (#2971) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * support project-scoped gitlab.com runners registration * rename glproject variable to gitlab_project * update version * Update plugins/modules/source_control/gitlab/gitlab_runner.py Co-authored-by: Raphaël Droz Co-authored-by: Felix Fontein --- .../fragments/634-gitlab_project_runners.yaml | 2 + .../source_control/gitlab/gitlab_runner.py | 41 +++++++++++++++---- 2 files changed, 36 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/634-gitlab_project_runners.yaml diff --git a/changelogs/fragments/634-gitlab_project_runners.yaml b/changelogs/fragments/634-gitlab_project_runners.yaml new file mode 100644 index 0000000000..0a3a733624 --- /dev/null +++ b/changelogs/fragments/634-gitlab_project_runners.yaml @@ -0,0 +1,2 @@ +minor_changes: +- gitlab_runner - support project-scoped gitlab.com runners registration 
(https://github.com/ansible-collections/community.general/pull/634). diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py index 25490b00dd..34471b01d4 100644 --- a/plugins/modules/source_control/gitlab/gitlab_runner.py +++ b/plugins/modules/source_control/gitlab/gitlab_runner.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright: (c) 2021, Raphaël Droz (raphael.droz@gmail.com) # Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # Copyright: (c) 2018, Samy Coenen # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -38,6 +39,11 @@ options: description: - Your private token to interact with the GitLab API. type: str + project: + description: + - ID or full path of the project in the form of group/name. + type: str + version_added: '3.7.0' description: description: - The unique name of the runner. @@ -131,6 +137,15 @@ EXAMPLES = ''' description: Docker Machine t1 owned: yes state: absent + +- name: Register runner for a specific project + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + registration_token: 4gfdsg345 + description: MyProject runner + state: present + project: mygroup/mysubgroup/myproject ''' RETURN = ''' @@ -181,9 +196,13 @@ except NameError: class GitLabRunner(object): - def __init__(self, module, gitlab_instance): + def __init__(self, module, gitlab_instance, project=None): self._module = module self._gitlab = gitlab_instance + # Whether to operate on GitLab-instance-wide or project-wide runners + # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774 + # for group runner token access + self._runners_endpoint = project.runners if project else gitlab_instance.runners self.runnerObject = None def createOrUpdateRunner(self, description, options): @@ -230,7 +249,7 @@ class GitLabRunner(object): return True try: - runner = 
self._gitlab.runners.create(arguments) + runner = self._runners_endpoint.create(arguments) except (gitlab.exceptions.GitlabCreateError) as e: self._module.fail_json(msg="Failed to create runner: %s " % to_native(e)) @@ -265,19 +284,19 @@ class GitLabRunner(object): ''' def findRunner(self, description, owned=False): if owned: - runners = self._gitlab.runners.list(as_list=False) + runners = self._runners_endpoint.list(as_list=False) else: - runners = self._gitlab.runners.all(as_list=False) + runners = self._runners_endpoint.all(as_list=False) for runner in runners: # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner # object, so we need to handle both if hasattr(runner, "description"): if (runner.description == description): - return self._gitlab.runners.get(runner.id) + return self._runners_endpoint.get(runner.id) else: if (runner['description'] == description): - return self._gitlab.runners.get(runner['id']) + return self._runners_endpoint.get(runner['id']) ''' @param description Description of the runner @@ -313,6 +332,7 @@ def main(): access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]), maximum_timeout=dict(type='int', default=3600), registration_token=dict(type='str', no_log=True), + project=dict(type='str'), state=dict(type='str', default="present", choices=["absent", "present"]), )) @@ -344,13 +364,20 @@ def main(): access_level = module.params['access_level'] maximum_timeout = module.params['maximum_timeout'] registration_token = module.params['registration_token'] + project = module.params['project'] if not HAS_GITLAB_PACKAGE: module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) gitlab_instance = gitlabAuthentication(module) + gitlab_project = None + if project: + try: + gitlab_project = gitlab_instance.projects.get(project) + except gitlab.exceptions.GitlabGetError as e: + module.fail_json(msg='No such a project %s' % project, 
exception=to_native(e)) - gitlab_runner = GitLabRunner(module, gitlab_instance) + gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_project) runner_exists = gitlab_runner.existsRunner(runner_description, owned) if state == 'absent': From 118c040879c742cf3d97c452653efd6f91a4f91b Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Mon, 13 Sep 2021 01:16:49 -0400 Subject: [PATCH 0321/2828] pids - refactor module to make version-based behavior consistent (#3315) * Initial commit * Adding changelog fragment * Further refactoring * Fixing bad copy/paste and adding task for psutil >= 5.7.0 install * Inverting psutil installation order to reduce duplication * Optimizing regex compilation --- changelogs/fragments/3315-pids-refactor.yml | 4 + plugins/modules/system/pids.py | 168 +++++++++++++----- tests/integration/targets/pids/tasks/main.yml | 12 +- 3 files changed, 137 insertions(+), 47 deletions(-) create mode 100644 changelogs/fragments/3315-pids-refactor.yml diff --git a/changelogs/fragments/3315-pids-refactor.yml b/changelogs/fragments/3315-pids-refactor.yml new file mode 100644 index 0000000000..53a36c2cad --- /dev/null +++ b/changelogs/fragments/3315-pids-refactor.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - pids - refactor to add support for older ``psutil`` versions to the ``pattern`` option + (https://github.com/ansible-collections/community.general/pull/3315). 
diff --git a/plugins/modules/system/pids.py b/plugins/modules/system/pids.py index 622bec2500..9745c31449 100644 --- a/plugins/modules/system/pids.py +++ b/plugins/modules/system/pids.py @@ -54,9 +54,12 @@ pids: sample: [100,200] ''' +import abc import re +from distutils.version import LooseVersion from os.path import basename +from ansible.module_utils import six from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_native @@ -68,6 +71,100 @@ except ImportError: HAS_PSUTIL = False +class PSAdapterError(Exception): + pass + + +@six.add_metaclass(abc.ABCMeta) +class PSAdapter(object): + NAME_ATTRS = ('name', 'cmdline') + PATTERN_ATTRS = ('name', 'exe', 'cmdline') + + def __init__(self, psutil): + self._psutil = psutil + + @staticmethod + def from_package(psutil): + version = LooseVersion(psutil.__version__) + if version < LooseVersion('2.0.0'): + return PSAdapter100(psutil) + elif version < LooseVersion('5.3.0'): + return PSAdapter200(psutil) + else: + return PSAdapter530(psutil) + + def get_pids_by_name(self, name): + return [p.pid for p in self._process_iter(*self.NAME_ATTRS) if self._has_name(p, name)] + + def _process_iter(self, *attrs): + return self._psutil.process_iter() + + def _has_name(self, proc, name): + attributes = self._get_proc_attributes(proc, *self.NAME_ATTRS) + return (compare_lower(attributes['name'], name) or + attributes['cmdline'] and compare_lower(attributes['cmdline'][0], name)) + + def _get_proc_attributes(self, proc, *attributes): + return dict((attribute, self._get_attribute_from_proc(proc, attribute)) for attribute in attributes) + + @staticmethod + @abc.abstractmethod + def _get_attribute_from_proc(proc, attribute): + pass + + def get_pids_by_pattern(self, pattern, ignore_case): + flags = 0 + if ignore_case: + flags |= re.I + + try: + regex = re.compile(pattern, flags) + except re.error as e: + raise PSAdapterError("'%s' is not a valid regular expression: 
%s" % (pattern, to_native(e))) + + return [p.pid for p in self._process_iter(*self.PATTERN_ATTRS) if self._matches_regex(p, regex)] + + def _matches_regex(self, proc, regex): + # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information + attributes = self._get_proc_attributes(proc, *self.PATTERN_ATTRS) + matches_name = regex.search(to_native(attributes['name'])) + matches_exe = attributes['exe'] and regex.search(basename(to_native(attributes['exe']))) + matches_cmd = attributes['cmdline'] and regex.search(to_native(' '.join(attributes['cmdline']))) + + return any([matches_name, matches_exe, matches_cmd]) + + +class PSAdapter100(PSAdapter): + def __init__(self, psutil): + super(PSAdapter100, self).__init__(psutil) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + return getattr(proc, attribute) + + +class PSAdapter200(PSAdapter): + def __init__(self, psutil): + super(PSAdapter200, self).__init__(psutil) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + method = getattr(proc, attribute) + return method() + + +class PSAdapter530(PSAdapter): + def __init__(self, psutil): + super(PSAdapter530, self).__init__(psutil) + + def _process_iter(self, *attrs): + return self._psutil.process_iter(attrs=attrs) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + return proc.info[attribute] + + def compare_lower(a, b): if a is None or b is None: # this could just be "return False" but would lead to surprising behavior if both a and b are None @@ -76,38 +173,36 @@ def compare_lower(a, b): return a.lower() == b.lower() -def get_pid(name): - pids = [] +class Pids(object): + def __init__(self, module): + if not HAS_PSUTIL: + module.fail_json(msg=missing_required_lib('psutil')) - try: - for proc in psutil.process_iter(attrs=['name', 'cmdline']): - if compare_lower(proc.info['name'], name) or \ - proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name): - pids.append(proc.pid) - except 
TypeError: # EL6, EL7: process_iter() takes no arguments (1 given) - for proc in psutil.process_iter(): - try: # EL7 - proc_name, proc_cmdline = proc.name(), proc.cmdline() - except TypeError: # EL6: 'str' object is not callable - proc_name, proc_cmdline = proc.name, proc.cmdline - if compare_lower(proc_name, name) or \ - proc_cmdline and compare_lower(proc_cmdline[0], name): - pids.append(proc.pid) - return pids + self._ps = PSAdapter.from_package(psutil) + self._module = module + self._name = module.params['name'] + self._pattern = module.params['pattern'] + self._ignore_case = module.params['ignore_case'] -def get_matching_command_pids(pattern, ignore_case): - flags = 0 - if ignore_case: - flags |= re.I + self._pids = [] - regex = re.compile(pattern, flags) - # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information - return [p.pid for p in psutil.process_iter(["name", "exe", "cmdline"]) - if regex.search(to_native(p.info["name"])) - or (p.info["exe"] and regex.search(basename(to_native(p.info["exe"])))) - or (p.info["cmdline"] and regex.search(to_native(' '.join(p.cmdline())))) - ] + def execute(self): + if self._name: + self._pids = self._ps.get_pids_by_name(self._name) + else: + try: + self._pids = self._ps.get_pids_by_pattern(self._pattern, self._ignore_case) + except PSAdapterError as e: + self._module.fail_json(msg=to_native(e)) + + return self._module.exit_json(**self.result) + + @property + def result(self): + return { + 'pids': self._pids, + } def main(): @@ -126,22 +221,7 @@ def main(): supports_check_mode=True, ) - if not HAS_PSUTIL: - module.fail_json(msg=missing_required_lib('psutil')) - - name = module.params["name"] - pattern = module.params["pattern"] - ignore_case = module.params["ignore_case"] - - if name: - response = dict(pids=get_pid(name)) - else: - try: - response = dict(pids=get_matching_command_pids(pattern, ignore_case)) - except re.error as e: - module.fail_json(msg="'%s' is not a valid regular 
expression: %s" % (pattern, to_native(e))) - - module.exit_json(**response) + Pids(module).execute() if __name__ == '__main__': diff --git a/tests/integration/targets/pids/tasks/main.yml b/tests/integration/targets/pids/tasks/main.yml index 823d588561..a43b923e25 100644 --- a/tests/integration/targets/pids/tasks/main.yml +++ b/tests/integration/targets/pids/tasks/main.yml @@ -6,12 +6,18 @@ # Test code for the pids module # Copyright: (c) 2019, Saranya Sridharan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -- name: "Installing the psutil module" +- name: Attempt installation of latest 'psutil' version + pip: + name: psutil + ignore_errors: true + register: psutil_latest_install + +- name: Install greatest 'psutil' version which will work with all pip versions pip: name: psutil < 5.7.0 - # Version 5.7.0 breaks on older pip versions. See https://github.com/ansible/ansible/pull/70667 + when: psutil_latest_install is failed -- name: "Checking the empty result" +- name: "Checking the empty result" pids: name: "blahblah" register: emptypids From 4e39a4b8251c6f3671a89e215bb25596f2e64da4 Mon Sep 17 00:00:00 2001 From: Ricky White Date: Mon, 13 Sep 2021 14:05:49 -0400 Subject: [PATCH 0322/2828] Added additional maintainer for the dsv and tss plugins (#3368) --- .github/BOTMETA.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index df2520e263..96c191db8b 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -175,7 +175,7 @@ files: $lookups/dnstxt.py: maintainers: jpmens $lookups/dsv.py: - maintainers: amigus + maintainers: amigus endlesstrax $lookups/etcd3.py: maintainers: eric-belhomme $lookups/etcd.py: @@ -211,7 +211,7 @@ files: maintainers: $team_ansible_core jpmens $lookups/shelvefile.py: {} $lookups/tss.py: - maintainers: amigus + maintainers: amigus endlesstrax $module_utils/: labels: module_utils $module_utils/gitlab.py: From 
bd63da680d2d12e6084cbff63ee007fc088c05a5 Mon Sep 17 00:00:00 2001 From: John Losito Date: Mon, 13 Sep 2021 15:52:30 -0400 Subject: [PATCH 0323/2828] Allow dependabot to check github actions (#1604) --- .github/dependabot.yml | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..1cd413055f --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,6 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" From dc8d076a251e213687f9ff1ff3d3ecc7010103f2 Mon Sep 17 00:00:00 2001 From: Martin Vician Date: Tue, 14 Sep 2021 12:34:59 +0100 Subject: [PATCH 0324/2828] tss: add option for token authorization (#3327) * Added token parameter for AccessTokenAuthorizer Parameters username and password are not required anymore because of this. * Added changelog fragments * Apply suggestions from code review Co-authored-by: Ajpantuso * token authorizer is prioritized token authorizer is prioritized when token parameter is set * Apply suggestions from code review Co-authored-by: Felix Fontein * domain optional if token not provided * Updated examples - `base_url` is required everywhere - examples for user, name + domain authorization included - token authorization included * Update 3327-tss-token-authorization.yml * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Ajpantuso Co-authored-by: Felix Fontein --- .../3327-tss-token-authorization.yml | 4 ++ plugins/lookup/tss.py | 70 ++++++++++++++----- 2 files changed, 57 insertions(+), 17 deletions(-) create mode 100644 changelogs/fragments/3327-tss-token-authorization.yml diff --git a/changelogs/fragments/3327-tss-token-authorization.yml b/changelogs/fragments/3327-tss-token-authorization.yml new file mode 100644 index 0000000000..5d9f56cb72 --- /dev/null +++ b/changelogs/fragments/3327-tss-token-authorization.yml @@ -0,0
+1,4 @@ +minor_changes: + - tss lookup plugin - added ``token`` parameter for token authorization; + ``username`` and ``password`` are optional when ``token`` is provided + (https://github.com/ansible-collections/community.general/pull/3327). diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index fe6042e130..3b561e94fc 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -36,19 +36,20 @@ options: ini: - section: tss_lookup key: username - required: true password: - description: The password associated with the supplied username. + description: + - The password associated with the supplied username. + - Required when I(token) is not provided. env: - name: TSS_PASSWORD ini: - section: tss_lookup key: password - required: true domain: default: "" description: - The domain with which to request the OAuth2 Access Grant. + - Optional when I(token) is not provided. - Requires C(python-tss-sdk) version 1.0.0 or greater. env: - name: TSS_DOMAIN @@ -57,6 +58,17 @@ options: key: domain required: false version_added: 3.6.0 + token: + description: + - Existing token for Thycotic authorizer. + - If provided, I(username) and I(password) are not needed. + - Requires C(python-tss-sdk) version 1.0.0 or greater. 
+ env: + - name: TSS_TOKEN + ini: + - section: tss_lookup + key: token + version_added: 3.7.0 api_path_uri: default: /api/v1 description: The path to append to the base URL to form a valid REST @@ -83,18 +95,6 @@ _list: """ EXAMPLES = r""" -- hosts: localhost - vars: - secret: "{{ lookup('community.general.tss', 1) }}" - tasks: - - ansible.builtin.debug: - msg: > - the password is {{ - (secret['items'] - | items2dict(key_name='slug', - value_name='itemValue'))['password'] - }} - - hosts: localhost vars: secret: >- @@ -116,10 +116,39 @@ EXAMPLES = r""" value_name='itemValue'))['password'] }} +- hosts: localhost + vars: + secret: >- + {{ + lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + username='user.name', + password='password', + domain='domain' + ) + }} + tasks: + - ansible.builtin.debug: + msg: > + the password is {{ + (secret['items'] + | items2dict(key_name='slug', + value_name='itemValue'))['password'] + }} + - hosts: localhost vars: secret_password: >- - {{ ((lookup('community.general.tss', 1) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] }}" + {{ + ((lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + token='thycotic_access_token', + ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] + }} tasks: - ansible.builtin.debug: msg: the password is {{ secret_password }} @@ -142,12 +171,13 @@ except ImportError: HAS_TSS_SDK = False try: - from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer + from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer HAS_TSS_AUTHORIZER = True except ImportError: PasswordGrantAuthorizer = None DomainPasswordGrantAuthorizer = None + AccessTokenAuthorizer = None HAS_TSS_AUTHORIZER = False @@ -209,6 +239,11 @@ class TSSClientV1(TSSClient): 
@staticmethod def _get_authorizer(**server_parameters): + if server_parameters.get("token"): + return AccessTokenAuthorizer( + server_parameters["token"], + ) + if server_parameters.get("domain"): return DomainPasswordGrantAuthorizer( server_parameters["base_url"], @@ -238,6 +273,7 @@ class LookupModule(LookupBase): username=self.get_option("username"), password=self.get_option("password"), domain=self.get_option("domain"), + token=self.get_option("token"), api_path_uri=self.get_option("api_path_uri"), token_path_uri=self.get_option("token_path_uri"), ) From 517570a64fcbbfdad5704f88c63eabb4cf74a84d Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Tue, 14 Sep 2021 20:05:02 +0100 Subject: [PATCH 0325/2828] Add opentelemetry callback plugin (#3091) * Add opentelemetry callback plugin * Apply suggestions from code review Co-authored-by: Felix Fontein * Formatting (text), booleans and renamed env variables * This should be done in a future release * Remove insecure in favour of the OTEL env variable. 
Add descriptions * Use OpenTelemetrySource * Move generate_distributed_traces * Move update_span_data and set_span_attribute * Move finish_task * Move start_task * Refactor to support UTs * Add first UT * Fix codestyle * opentelemetry callback entry in the botmeta * Fix linting * Fix signature * Mock methods * Use MagicMock * Mock the methods * UT for transform_to_boolean_or_default * Fix linting * Set test data * Mock _time_ns * Exclude tests for python <= 3.6 * Remove obsoleted setup task type configuration * Remove unused docs * Apply suggestions from code review Co-authored-by: Felix Fontein * Fix docs * unrequired logic that was originally took from https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/callback/junit.py\#L226 * Use raise_from for the required dependencies * Fix linting * Add requirements for the UTs * add missing dependency for the opentelemetry plugin in the UTs * Add ANSIBLE_ prefix for the ansible specific options * Add more context in the docs and remove duplicated docs * As suggested in the code review * Verify if the OTEL env variables for the endpoint were set * Fix docs typo * Fix linting * Revert "Fix linting" This reverts commit 3a54c827c5472553a6baf5598bc76a0f63f020c1. * Revert "Verify if the OTEL env variables for the endpoint were set" This reverts commit cab9d8648899c28c0345745690c4ec7a41f7e680. 
* Remove console_output as suggested * Apply suggestions from code review Co-authored-by: flowerysong * Delegate the definition of OTEL_EXPORTER_OTLP_INSECURE to the user * Move definitions above, close to the class that uses them Co-authored-by: Felix Fontein Co-authored-by: flowerysong --- .github/BOTMETA.yml | 3 + plugins/callback/opentelemetry.py | 401 ++++++++++++++++++ .../plugins/callback/test_opentelemetry.py | 93 ++++ tests/unit/requirements.txt | 5 + 4 files changed, 502 insertions(+) create mode 100644 plugins/callback/opentelemetry.py create mode 100644 tests/unit/plugins/callback/test_opentelemetry.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 96c191db8b..78cd46871f 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -62,6 +62,9 @@ files: $callbacks/nrdp.py: maintainers: rverchere $callbacks/null.py: {} + $callbacks/opentelemetry.py: + maintainers: v1v + keywords: opentelemetry observability $callbacks/say.py: notify: chris-short maintainers: $team_macos diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py new file mode 100644 index 0000000000..f256b7263d --- /dev/null +++ b/plugins/callback/opentelemetry.py @@ -0,0 +1,401 @@ +# (C) 2021, Victor Martinez +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + author: Victor Martinez (@v1v) + name: opentelemetry + type: notification + short_description: Create distributed traces with OpenTelemetry + version_added: 3.7.0 + description: + - This callback creates distributed traces for each Ansible task with OpenTelemetry. + - You can configure the OpenTelemetry exporter and SDK with environment variables. + - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html). 
+ - See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables). + options: + hide_task_arguments: + default: false + type: bool + description: + - Hide the arguments for a task. + env: + - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS + otel_service_name: + default: ansible + type: str + description: + - The service name resource attribute. + env: + - name: OTEL_SERVICE_NAME + requirements: + - opentelemetry-api (python lib) + - opentelemetry-exporter-otlp (python lib) + - opentelemetry-sdk (python lib) +''' + + +EXAMPLES = ''' +examples: | + Enable the plugin in ansible.cfg: + [defaults] + callbacks_enabled = community.general.opentelemetry + + Set the environment variable: + export OTEL_EXPORTER_OTLP_ENDPOINT= + export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token" + export OTEL_SERVICE_NAME=your_service_name +''' + +import getpass +import os +import socket +import sys +import time +import uuid + +from os.path import basename + +from ansible.errors import AnsibleError +from ansible.module_utils.six import raise_from +from ansible.plugins.callback import CallbackBase + +try: + from opentelemetry import trace + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter + from opentelemetry.sdk.resources import SERVICE_NAME, Resource + from opentelemetry.trace.status import Status, StatusCode + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.trace.export import ( + ConsoleSpanExporter, + SimpleSpanProcessor, + BatchSpanProcessor + ) + from opentelemetry.util._time import _time_ns +except ImportError as imp_exc: + OTEL_LIBRARY_IMPORT_ERROR = imp_exc +else: + OTEL_LIBRARY_IMPORT_ERROR = None + +try: + from collections import OrderedDict +except ImportError: + try: + from ordereddict import OrderedDict + except ImportError as imp_exc: + ORDER_LIBRARY_IMPORT_ERROR = imp_exc + else: + ORDER_LIBRARY_IMPORT_ERROR = None +else: + 
ORDER_LIBRARY_IMPORT_ERROR = None + + +class TaskData: + """ + Data about an individual task. + """ + + def __init__(self, uuid, name, path, play, action, args): + self.uuid = uuid + self.name = name + self.path = path + self.play = play + self.host_data = OrderedDict() + if sys.version_info >= (3, 7): + self.start = time.time_ns() + else: + self.start = _time_ns() + self.action = action + self.args = args + + def add_host(self, host): + if host.uuid in self.host_data: + if host.status == 'included': + # concatenate task include output from multiple items + host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result) + else: + return + + self.host_data[host.uuid] = host + + +class HostData: + """ + Data about an individual host. + """ + + def __init__(self, uuid, name, status, result): + self.uuid = uuid + self.name = name + self.status = status + self.result = result + if sys.version_info >= (3, 7): + self.finish = time.time_ns() + else: + self.finish = _time_ns() + + +class OpenTelemetrySource(object): + def __init__(self, display): + self.ansible_playbook = "" + self.ansible_version = None + self.session = str(uuid.uuid4()) + self.host = socket.gethostname() + try: + self.ip_address = socket.gethostbyname(socket.gethostname()) + except Exception as e: + self.ip_address = None + self.user = getpass.getuser() + + self._display = display + + def start_task(self, tasks_data, hide_task_arguments, play_name, task): + """ record the start of a task for one or more hosts """ + + uuid = task._uuid + + if uuid in tasks_data: + return + + name = task.get_name().strip() + path = task.get_path() + action = task.action + args = None + + if not task.no_log and not hide_task_arguments: + args = ', '.join(('%s=%s' % a for a in task.args.items())) + + tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args) + + def finish_task(self, tasks_data, status, result): + """ record the results of a task for a single host """ + + task_uuid = result._task._uuid + 
+ if hasattr(result, '_host') and result._host is not None: + host_uuid = result._host._uuid + host_name = result._host.name + else: + host_uuid = 'include' + host_name = 'include' + + task = tasks_data[task_uuid] + + if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'): + self.ansible_version = result._task_fields['args'].get('_ansible_version') + + task.add_host(HostData(host_uuid, host_name, status, result)) + + def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status): + """ generate distributed traces from the collected TaskData and HostData """ + + tasks = [] + parent_start_time = None + for task_uuid, task in tasks_data.items(): + if parent_start_time is None: + parent_start_time = task.start + tasks.append(task) + + trace.set_tracer_provider( + TracerProvider( + resource=Resource.create({SERVICE_NAME: otel_service_name}) + ) + ) + + processor = BatchSpanProcessor(OTLPSpanExporter()) + + trace.get_tracer_provider().add_span_processor(processor) + + tracer = trace.get_tracer(__name__) + + with tracer.start_as_current_span(ansible_playbook, start_time=parent_start_time) as parent: + parent.set_status(status) + # Populate trace metadata attributes + if self.ansible_version is not None: + parent.set_attribute("ansible.version", self.ansible_version) + parent.set_attribute("ansible.session", self.session) + parent.set_attribute("ansible.host.name", self.host) + if self.ip_address is not None: + parent.set_attribute("ansible.host.ip", self.ip_address) + parent.set_attribute("ansible.host.user", self.user) + for task in tasks: + for host_uuid, host_data in task.host_data.items(): + with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span: + self.update_span_data(task, host_data, span) + + def update_span_data(self, task_data, host_data, span): + """ update the span with the given TaskData and HostData """ + + name = '[%s] %s: %s' % (host_data.name, 
task_data.play, task_data.name) + + message = 'success' + status = Status(status_code=StatusCode.OK) + if host_data.status == 'included': + rc = 0 + else: + res = host_data.result._result + rc = res.get('rc', 0) + if host_data.status == 'failed': + if 'exception' in res: + message = res['exception'].strip().split('\n')[-1] + elif 'msg' in res: + message = res['msg'] + else: + message = 'failed' + status = Status(status_code=StatusCode.ERROR) + elif host_data.status == 'skipped': + if 'skip_reason' in res: + message = res['skip_reason'] + else: + message = 'skipped' + status = Status(status_code=StatusCode.UNSET) + + span.set_status(status) + self.set_span_attribute(span, "ansible.task.args", task_data.args) + self.set_span_attribute(span, "ansible.task.module", task_data.action) + self.set_span_attribute(span, "ansible.task.message", message) + self.set_span_attribute(span, "ansible.task.name", name) + self.set_span_attribute(span, "ansible.task.result", rc) + self.set_span_attribute(span, "ansible.task.host.name", host_data.name) + self.set_span_attribute(span, "ansible.task.host.status", host_data.status) + span.end(end_time=host_data.finish) + + def set_span_attribute(self, span, attributeName, attributeValue): + """ update the span attribute with the given attribute and value if not None """ + + if span is None and self._display is not None: + self._display.warning('span object is None. Please double check if that is expected.') + else: + if attributeValue is not None: + span.set_attribute(attributeName, attributeValue) + + +class CallbackModule(CallbackBase): + """ + This callback creates distributed traces. 
+ """ + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'community.general.opentelemetry' + CALLBACK_NEEDS_ENABLED = True + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display=display) + self.hide_task_arguments = None + self.otel_service_name = None + self.ansible_playbook = None + self.play_name = None + self.tasks_data = None + self.errors = 0 + self.disabled = False + + if OTEL_LIBRARY_IMPORT_ERROR: + raise_from( + AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'), + OTEL_LIBRARY_IMPORT_ERROR) + + if ORDER_LIBRARY_IMPORT_ERROR: + raise_from( + AnsibleError('The `ordereddict` must be installed to use this plugin'), + ORDER_LIBRARY_IMPORT_ERROR) + else: + self.tasks_data = OrderedDict() + + self.opentelemetry = OpenTelemetrySource(display=self._display) + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, + var_options=var_options, + direct=direct) + + self.hide_task_arguments = self.get_option('hide_task_arguments') + + self.otel_service_name = self.get_option('otel_service_name') + + if not self.otel_service_name: + self.otel_service_name = 'ansible' + + def v2_playbook_on_start(self, playbook): + self.ansible_playbook = basename(playbook._file_name) + + def v2_playbook_on_play_start(self, play): + self.play_name = play.get_name() + + def v2_runner_on_no_hosts(self, task): + self.opentelemetry.start_task( + self.tasks_data, + self.hide_task_arguments, + self.play_name, + task + ) + + def v2_playbook_on_task_start(self, task, is_conditional): + self.opentelemetry.start_task( + self.tasks_data, + self.hide_task_arguments, + self.play_name, + task + ) + + def v2_playbook_on_cleanup_task_start(self, task): + self.opentelemetry.start_task( + self.tasks_data, + self.hide_task_arguments, + self.play_name, + task + ) + + def 
v2_playbook_on_handler_task_start(self, task): + self.opentelemetry.start_task( + self.tasks_data, + self.hide_task_arguments, + self.play_name, + task + ) + + def v2_runner_on_failed(self, result, ignore_errors=False): + self.errors += 1 + self.opentelemetry.finish_task( + self.tasks_data, + 'failed', + result + ) + + def v2_runner_on_ok(self, result): + self.opentelemetry.finish_task( + self.tasks_data, + 'ok', + result + ) + + def v2_runner_on_skipped(self, result): + self.opentelemetry.finish_task( + self.tasks_data, + 'skipped', + result + ) + + def v2_playbook_on_include(self, included_file): + self.opentelemetry.finish_task( + self.tasks_data, + 'included', + included_file + ) + + def v2_playbook_on_stats(self, stats): + if self.errors == 0: + status = Status(status_code=StatusCode.OK) + else: + status = Status(status_code=StatusCode.ERROR) + self.opentelemetry.generate_distributed_traces( + self.otel_service_name, + self.ansible_playbook, + self.tasks_data, + status + ) + + def v2_runner_on_async_failed(self, result, **kwargs): + self.errors += 1 diff --git a/tests/unit/plugins/callback/test_opentelemetry.py b/tests/unit/plugins/callback/test_opentelemetry.py new file mode 100644 index 0000000000..7fcfc5cddb --- /dev/null +++ b/tests/unit/plugins/callback/test_opentelemetry.py @@ -0,0 +1,93 @@ +# (C) 2021, Victor Martinez +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.playbook.task import Task +from ansible.executor.task_result import TaskResult +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock +from ansible_collections.community.general.plugins.callback.opentelemetry import OpenTelemetrySource, TaskData, CallbackModule +from collections import OrderedDict +import sys + 
+OPENTELEMETRY_MINIMUM_PYTHON_VERSION = (3, 7) + + +class TestOpentelemetry(unittest.TestCase): + @patch('ansible_collections.community.general.plugins.callback.opentelemetry.socket') + def setUp(self, mock_socket): + # TODO: this python version validation won't be needed as long as the _time_ns call is mocked. + if sys.version_info < OPENTELEMETRY_MINIMUM_PYTHON_VERSION: + self.skipTest("Python %s+ is needed for OpenTelemetry" % + ",".join(map(str, OPENTELEMETRY_MINIMUM_PYTHON_VERSION))) + + mock_socket.gethostname.return_value = 'my-host' + mock_socket.gethostbyname.return_value = '1.2.3.4' + self.opentelemetry = OpenTelemetrySource(display=None) + self.task_fields = {'args': {}} + self.mock_host = Mock('MockHost') + self.mock_host.name = 'myhost' + self.mock_host._uuid = 'myhost_uuid' + self.mock_task = Task() + self.mock_task.action = 'myaction' + self.mock_task.no_log = False + self.mock_task._role = 'myrole' + self.mock_task._uuid = 'myuuid' + self.mock_task.args = {} + self.mock_task.get_name = MagicMock(return_value='mytask') + self.mock_task.get_path = MagicMock(return_value='/mypath') + self.my_task = TaskData('myuuid', 'mytask', '/mypath', 'myplay', 'myaction', '') + self.my_task_result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) + + def test_start_task(self): + tasks_data = OrderedDict() + + self.opentelemetry.start_task( + tasks_data, + False, + 'myplay', + self.mock_task + ) + + task_data = tasks_data['myuuid'] + self.assertEqual(task_data.uuid, 'myuuid') + self.assertEqual(task_data.name, 'mytask') + self.assertEqual(task_data.path, '/mypath') + self.assertEqual(task_data.play, 'myplay') + self.assertEqual(task_data.action, 'myaction') + self.assertEqual(task_data.args, '') + + def test_finish_task_with_a_host_match(self): + tasks_data = OrderedDict() + tasks_data['myuuid'] = self.my_task + + self.opentelemetry.finish_task( + tasks_data, + 'ok', + self.my_task_result + ) + + task_data = 
tasks_data['myuuid'] + host_data = task_data.host_data['myhost_uuid'] + self.assertEqual(host_data.uuid, 'myhost_uuid') + self.assertEqual(host_data.name, 'myhost') + self.assertEqual(host_data.status, 'ok') + + def test_finish_task_without_a_host_match(self): + result = TaskResult(host=None, task=self.mock_task, return_data={}, task_fields=self.task_fields) + tasks_data = OrderedDict() + tasks_data['myuuid'] = self.my_task + + self.opentelemetry.finish_task( + tasks_data, + 'ok', + result + ) + + task_data = tasks_data['myuuid'] + host_data = task_data.host_data['include'] + self.assertEqual(host_data.uuid, 'include') + self.assertEqual(host_data.name, 'include') + self.assertEqual(host_data.status, 'ok') diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt index c8294bd71a..3cf288fef9 100644 --- a/tests/unit/requirements.txt +++ b/tests/unit/requirements.txt @@ -26,3 +26,8 @@ datadog-api-client >= 1.0.0b3 ; python_version >= '3.6' # requirement for dnsimple module dnsimple >= 2 ; python_version >= '3.6' dataclasses ; python_version == '3.6' + +# requirement for the opentelemetry callback plugin +opentelemetry-api ; python_version >= '3.6' +opentelemetry-exporter-otlp ; python_version >= '3.6' +opentelemetry-sdk ; python_version >= '3.6' From b20fc7a7c32d30a4a7f094ea8e037385ba1d389d Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 15 Sep 2021 07:21:15 +0200 Subject: [PATCH 0326/2828] Install nios test requirements. 
(#3375) --- tests/integration/targets/prepare_nios_tests/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/targets/prepare_nios_tests/tasks/main.yml b/tests/integration/targets/prepare_nios_tests/tasks/main.yml index e69de29bb2..f8f55f38af 100644 --- a/tests/integration/targets/prepare_nios_tests/tasks/main.yml +++ b/tests/integration/targets/prepare_nios_tests/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- name: Install + pip: + name: infoblox-client From 06345839c6e333f9021d255af5e2707ecbba2c12 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 16 Sep 2021 19:22:44 +0100 Subject: [PATCH 0327/2828] opentelemetry callback: context propagation and error exception (#3378) * opentelemetry callback: context propagation and error exception * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- plugins/callback/opentelemetry.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index f256b7263d..b523603828 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -30,6 +30,13 @@ DOCUMENTATION = ''' - The service name resource attribute. env: - name: OTEL_SERVICE_NAME + traceparent: + default: None + type: str + description: + - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). 
+ env: + - name: TRACEPARENT requirements: - opentelemetry-api (python lib) - opentelemetry-exporter-otlp (python lib) @@ -64,9 +71,11 @@ from ansible.plugins.callback import CallbackBase try: from opentelemetry import trace + from opentelemetry.trace import SpanKind from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.trace.status import Status, StatusCode + from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( ConsoleSpanExporter, @@ -151,6 +160,11 @@ class OpenTelemetrySource(object): self._display = display + def traceparent_context(self, traceparent): + carrier = dict() + carrier['traceparent'] = traceparent + return TraceContextTextMapPropagator().extract(carrier=carrier) + def start_task(self, tasks_data, hide_task_arguments, play_name, task): """ record the start of a task for one or more hosts """ @@ -188,7 +202,7 @@ class OpenTelemetrySource(object): task.add_host(HostData(host_uuid, host_name, status, result)) - def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status): + def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent): """ generate distributed traces from the collected TaskData and HostData """ tasks = [] @@ -210,7 +224,8 @@ class OpenTelemetrySource(object): tracer = trace.get_tracer(__name__) - with tracer.start_as_current_span(ansible_playbook, start_time=parent_start_time) as parent: + with tracer.start_as_current_span(ansible_playbook, context=self.traceparent_context(traceparent), + start_time=parent_start_time, kind=SpanKind.SERVER) as parent: parent.set_status(status) # Populate trace metadata attributes if self.ansible_version is not None: @@ -244,7 +259,9 @@ class OpenTelemetrySource(object): message = 
res['msg'] else: message = 'failed' - status = Status(status_code=StatusCode.ERROR) + status = Status(status_code=StatusCode.ERROR, description=message) + # Record an exception with the task message + span.record_exception(BaseException(message)) elif host_data.status == 'skipped': if 'skip_reason' in res: message = res['skip_reason'] @@ -291,6 +308,7 @@ class CallbackModule(CallbackBase): self.tasks_data = None self.errors = 0 self.disabled = False + self.traceparent = False if OTEL_LIBRARY_IMPORT_ERROR: raise_from( @@ -318,6 +336,9 @@ class CallbackModule(CallbackBase): if not self.otel_service_name: self.otel_service_name = 'ansible' + # See https://github.com/open-telemetry/opentelemetry-specification/issues/740 + self.traceparent = self.get_option('traceparent') + def v2_playbook_on_start(self, playbook): self.ansible_playbook = basename(playbook._file_name) @@ -394,7 +415,8 @@ class CallbackModule(CallbackBase): self.otel_service_name, self.ansible_playbook, self.tasks_data, - status + status, + self.traceparent ) def v2_runner_on_async_failed(self, result, **kwargs): From 331f5bdf24bbd1d9e150c274843e83430167ac7f Mon Sep 17 00:00:00 2001 From: Patrick Pfurtscheller <57419021+PfurtschellerP@users.noreply.github.com> Date: Thu, 16 Sep 2021 22:20:49 +0200 Subject: [PATCH 0328/2828] redfish_utils: adding "Id" to the add user function (#3343) * Adding "Id" to the add user function Some implementations of Redfish (e.g. the one in Cisco's CIMC) seem to require the id of the new user for account creation. I'm not that firm with Python but lines 982 and 983 should fix it. 
* changed indention * created changelog fragment * Update changelogs/fragments/3343-redfish_utils-addUser-userId.yml Co-authored-by: Felix Fontein * Update change type * supplemented the description of the ID parameter * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/3343-redfish_utils-addUser-userId.yml | 2 ++ plugins/module_utils/redfish_utils.py | 2 ++ plugins/modules/remote_management/redfish/redfish_command.py | 3 ++- 3 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3343-redfish_utils-addUser-userId.yml diff --git a/changelogs/fragments/3343-redfish_utils-addUser-userId.yml b/changelogs/fragments/3343-redfish_utils-addUser-userId.yml new file mode 100644 index 0000000000..7b8aa0b700 --- /dev/null +++ b/changelogs/fragments/3343-redfish_utils-addUser-userId.yml @@ -0,0 +1,2 @@ +bugfixes: + - redfish_utils module utils - if given, add account ID of user that should be created to HTTP request (https://github.com/ansible-collections/community.general/pull/3343/). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index b4d0dba015..55686b2f50 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -979,6 +979,8 @@ class RedfishUtils(object): payload['Password'] = user.get('account_password') if user.get('account_roleid'): payload['RoleId'] = user.get('account_roleid') + if user.get('account_id'): + payload['Id'] = user.get('account_id') response = self.post_request(self.root_uri + self.accounts_uri, payload) if not response['ret']: diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index e79308f2d7..8702e468ca 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -56,7 +56,8 @@ options: required: false aliases: [ account_id ] description: - - ID of account to delete/modify + - ID of account to delete/modify. + - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the POST request. 
type: str new_username: required: false From 7a2efb4775af4296bf4dfe640cbc0ae52c87d5dd Mon Sep 17 00:00:00 2001 From: Max Bidlingmaier Date: Thu, 16 Sep 2021 22:26:31 +0200 Subject: [PATCH 0329/2828] Get behavior of gitlab_project_members to the one of gitlab_group_members (#3319) * Initial change to get behaviour of gitlab_project_members like the new gitlab_group_members * added changelog * linter: removed trainling whitespaces * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * requested changes * linter fixes * undoing formatting changes to existing code Co-authored-by: Max-Florian Bidlingmaier Co-authored-by: Felix Fontein Co-authored-by: Max Bidlingmaier --- ...319-gitlab_project_members_enhancement.yml | 3 + .../gitlab/gitlab_project_members.py | 332 ++++++++++++++---- .../gitlab_project_members/defaults/main.yml | 8 + .../gitlab_project_members/tasks/main.yml | 45 ++- 4 files changed, 312 insertions(+), 76 deletions(-) create mode 100644 changelogs/fragments/3319-gitlab_project_members_enhancement.yml diff --git a/changelogs/fragments/3319-gitlab_project_members_enhancement.yml b/changelogs/fragments/3319-gitlab_project_members_enhancement.yml new file mode 100644 index 0000000000..7795cd1f02 --- /dev/null +++ b/changelogs/fragments/3319-gitlab_project_members_enhancement.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_project_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3319). 
+ - gitlab_project_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3319). diff --git a/plugins/modules/source_control/gitlab/gitlab_project_members.py b/plugins/modules/source_control/gitlab/gitlab_project_members.py index 0ae8f4b25c..51f60d459f 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project_members.py +++ b/plugins/modules/source_control/gitlab/gitlab_project_members.py @@ -53,15 +53,37 @@ options: type: str gitlab_user: description: - - The username of the member to add to/remove from the GitLab project. - required: true - type: str + - A username or a list of usernames to add to/remove from the GitLab project. + - Mutually exclusive with I(gitlab_users_access). + type: list + elements: str access_level: description: - The access level for the user. - Required if I(state=present), user state is set to present. type: str choices: ['guest', 'reporter', 'developer', 'maintainer'] + gitlab_users_access: + description: + - Provide a list of user to access level mappings. + - Every dictionary in this list specifies a user (by username) and the access level the user should have. + - Mutually exclusive with I(gitlab_user) and I(access_level). + - Use together with I(purge_users) to remove all users not specified here from the project. + type: list + elements: dict + suboptions: + name: + description: A username or a list of usernames to add to/remove from the GitLab project. + type: str + required: true + access_level: + description: + - The access level for the user. + - Required if I(state=present), user state is set to present. + type: str + choices: ['guest', 'reporter', 'developer', 'maintainer'] + required: true + version_added: 3.7.0 state: description: - State of the member in the project. 
@@ -70,6 +92,15 @@ options: choices: ['present', 'absent'] default: 'present' type: str + purge_users: + description: + - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. + If omitted do not purge orphaned members. + - Is only used when I(state=present). + type: list + elements: str + choices: ['guest', 'reporter', 'developer', 'maintainer'] + version_added: 3.7.0 notes: - Supports C(check_mode). ''' @@ -93,6 +124,51 @@ EXAMPLES = r''' project: projectname gitlab_user: username state: absent + +- name: Add a list of Users to A GitLab project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + gitlab_project: projectname + gitlab_user: + - user1 + - user2 + access_level: developer + state: present + +- name: Add a list of Users with Dedicated Access Levels to A GitLab project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + project: projectname + gitlab_users_access: + - name: user1 + access_level: developer + - name: user2 + access_level: maintainer + state: present + +- name: Add a user, remove all others which might be on this access level + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + project: projectname + gitlab_user: username + access_level: developer + pruge_users: developer + state: present + +- name: Remove a list of Users with Dedicated Access Levels to A GitLab project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + project: projectname + gitlab_users_access: + - name: user1 + access_level: developer + - name: user2 + access_level: maintainer + state: absent ''' RETURN = r''' # ''' @@ -132,6 +208,17 @@ class GitLabProjectMembers(object): project = self._gitlab.projects.get(gitlab_project_id) return 
project.members.list(all=True) + # get single member in a project by user name + def get_member_in_a_project(self, gitlab_project_id, gitlab_user_id): + member = None + project = self._gitlab.projects.get(gitlab_project_id) + try: + member = project.members.get(gitlab_user_id) + if member: + return member + except gitlab.exceptions.GitlabGetError as e: + return None + # check if the user is a member of the project def is_user_a_member(self, members, gitlab_user_id): for member in members: @@ -141,27 +228,14 @@ class GitLabProjectMembers(object): # add user to a project def add_member_to_project(self, gitlab_user_id, gitlab_project_id, access_level): - try: - project = self._gitlab.projects.get(gitlab_project_id) - add_member = project.members.create( - {'user_id': gitlab_user_id, 'access_level': access_level}) - - if add_member: - return add_member.username - - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json( - msg="Failed to add member to the project, project ID %s: %s" % (gitlab_project_id, e)) + project = self._gitlab.projects.get(gitlab_project_id) + add_member = project.members.create( + {'user_id': gitlab_user_id, 'access_level': access_level}) # remove user from a project def remove_user_from_project(self, gitlab_user_id, gitlab_project_id): - try: - project = self._gitlab.projects.get(gitlab_project_id) - project.members.delete(gitlab_user_id) - - except (gitlab.exceptions.GitlabDeleteError) as e: - self._module.fail_json( - msg="Failed to remove member from GitLab project, ID %s: %s" % (gitlab_project_id, e)) + project = self._gitlab.projects.get(gitlab_project_id) + project.members.delete(gitlab_user_id) # get user's access level def get_user_access_level(self, members, gitlab_user_id): @@ -173,12 +247,8 @@ class GitLabProjectMembers(object): def update_user_access_level(self, members, gitlab_user_id, access_level): for member in members: if member.id == gitlab_user_id: - try: - member.access_level = access_level - 
member.save() - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json( - msg="Failed to update the access level for the member, %s: %s" % (gitlab_user_id, e)) + member.access_level = access_level + member.save() def main(): @@ -186,9 +256,20 @@ def main(): argument_spec.update(dict( api_token=dict(type='str', required=True, no_log=True), project=dict(type='str', required=True), - gitlab_user=dict(type='str', required=True), + gitlab_user=dict(type='list', elements='str'), state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', required=False, choices=['guest', 'reporter', 'developer', 'maintainer']) + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']), + purge_users=dict(type='list', elements='str', choices=[ + 'guest', 'reporter', 'developer', 'maintainer']), + gitlab_users_access=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str', required=True), + access_level=dict(type='str', choices=[ + 'guest', 'reporter', 'developer', 'maintainer'], required=True), + ) + ), )) module = AnsibleModule( @@ -196,15 +277,19 @@ def main(): mutually_exclusive=[ ['api_username', 'api_token'], ['api_password', 'api_token'], + ['gitlab_user', 'gitlab_users_access'], + ['access_level', 'gitlab_users_access'], ], required_together=[ ['api_username', 'api_password'], + ['gitlab_user', 'access_level'], ], required_one_of=[ ['api_username', 'api_token'], + ['gitlab_user', 'gitlab_users_access'], ], required_if=[ - ['state', 'present', ['access_level']], + ['state', 'present', ['access_level', 'gitlab_users_access'], True], ], supports_check_mode=True, ) @@ -212,71 +297,168 @@ def main(): if not HAS_PY_GITLAB: module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR) + access_level_int = { + 'guest': gitlab.GUEST_ACCESS, + 'reporter': gitlab.REPORTER_ACCESS, + 'developer': 
gitlab.DEVELOPER_ACCESS, + 'maintainer': gitlab.MAINTAINER_ACCESS, + } + gitlab_project = module.params['project'] - gitlab_user = module.params['gitlab_user'] state = module.params['state'] access_level = module.params['access_level'] + purge_users = module.params['purge_users'] - # convert access level string input to int - if access_level: - access_level_int = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS - } - - access_level = access_level_int[access_level] + if purge_users: + purge_users = [access_level_int[level] for level in purge_users] # connect to gitlab server gl = gitlabAuthentication(module) project = GitLabProjectMembers(module, gl) - gitlab_user_id = project.get_user_id(gitlab_user) gitlab_project_id = project.get_project(gitlab_project) # project doesn't exist if not gitlab_project_id: module.fail_json(msg="project '%s' not found." % gitlab_project) - # user doesn't exist - if not gitlab_user_id: - if state == 'absent': - module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the project" % gitlab_user) - else: - module.fail_json(msg="user '%s' not found." 
% gitlab_user) + members = [] + if module.params['gitlab_user'] is not None: + gitlab_users_access = [] + gitlab_users = module.params['gitlab_user'] + for gl_user in gitlab_users: + gitlab_users_access.append( + {'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None}) + elif module.params['gitlab_users_access'] is not None: + gitlab_users_access = module.params['gitlab_users_access'] + for user_level in gitlab_users_access: + user_level['access_level'] = access_level_int[user_level['access_level']] - members = project.get_members_in_a_project(gitlab_project_id) - is_user_a_member = project.is_user_a_member(members, gitlab_user_id) - - # check if the user is a member in the project - if not is_user_a_member: - if state == 'present': - # add user to the project - if not module.check_mode: - project.add_member_to_project(gitlab_user_id, gitlab_project_id, access_level) - module.exit_json(changed=True, result="Successfully added user '%s' to the project." % gitlab_user) - # state as absent - else: - module.exit_json(changed=False, result="User, '%s', is not a member in the project. No change to report" % gitlab_user) - # in case that a user is a member + if len(gitlab_users_access) == 1 and not purge_users: + # only single user given + members = [project.get_member_in_a_project( + gitlab_project_id, project.get_user_id(gitlab_users_access[0]['name']))] + if members[0] is None: + members = [] + elif len(gitlab_users_access) > 1 or purge_users: + # list of users given + members = project.get_members_in_a_project(gitlab_project_id) else: - if state == 'present': - # compare the access level - user_access_level = project.get_user_access_level(members, gitlab_user_id) - if user_access_level == access_level: - module.exit_json(changed=False, result="User, '%s', is already a member in the project. 
No change to report" % gitlab_user) + module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.", + result_data=[]) + + changed = False + error = False + changed_users = [] + changed_data = [] + + for gitlab_user in gitlab_users_access: + gitlab_user_id = project.get_user_id(gitlab_user['name']) + + # user doesn't exist + if not gitlab_user_id: + if state == 'absent': + changed_users.append("user '%s' not found, and thus also not part of the project" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "user '%s' not found, and thus also not part of the project" % gitlab_user['name']}) else: - # update the access level for the user - if not module.check_mode: - project.update_user_access_level(members, gitlab_user_id, access_level) - module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user) + error = True + changed_users.append("user '%s' not found." % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "user '%s' not found." 
% gitlab_user['name']}) + continue + + is_user_a_member = project.is_user_a_member(members, gitlab_user_id) + + # check if the user is a member in the project + if not is_user_a_member: + if state == 'present': + # add user to the project + try: + if not module.check_mode: + project.add_member_to_project(gitlab_user_id, gitlab_project_id, gitlab_user['access_level']) + changed = True + changed_users.append("Successfully added user '%s' to project" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully added user '%s' to project" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabCreateError) as e: + error = True + changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)}) + # state as absent + else: + changed_users.append("User, '%s', is not a member in the project. No change to report" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "User, '%s', is not a member in the project. No change to report" % gitlab_user['name']}) + # in case that a user is a member else: - # remove the user from the project - if not module.check_mode: - project.remove_user_from_project(gitlab_user_id, gitlab_project_id) - module.exit_json(changed=True, result="Successfully removed user, '%s', from the project" % gitlab_user) + if state == 'present': + # compare the access level + user_access_level = project.get_user_access_level(members, gitlab_user_id) + if user_access_level == gitlab_user['access_level']: + changed_users.append("User, '%s', is already a member in the project. 
No change to report" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "User, '%s', is already a member in the project. No change to report" % gitlab_user['name']}) + else: + # update the access level for the user + try: + if not module.check_mode: + project.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level']) + changed = True + changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabUpdateError) as e: + error = True + changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)}) + else: + # remove the user from the project + try: + if not module.check_mode: + project.remove_user_from_project(gitlab_user_id, gitlab_project_id) + changed = True + changed_users.append("Successfully removed user, '%s', from the project" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully removed user, '%s', from the project" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabDeleteError) as e: + error = True + changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)}) + + # if state = present and purge_users set delete users which are in members having give access level but not in gitlab_users + if state == 'present' and purge_users: + 
uppercase_names_in_gitlab_users_access = [] + for name in gitlab_users_access: + uppercase_names_in_gitlab_users_access.append(name['name'].upper()) + + for member in members: + if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access: + try: + if not module.check_mode: + project.remove_user_from_project(member.id, gitlab_project_id) + changed = True + changed_users.append("Successfully removed user '%s', from project. Was not in given list" % member.username) + changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED', + 'msg': "Successfully removed user '%s', from project. Was not in given list" % member.username}) + except (gitlab.exceptions.GitlabDeleteError) as e: + error = True + changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)}) + + if len(gitlab_users_access) == 1 and error: + # if single user given and an error occurred return error for list errors will be per user + module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data) + elif error: + module.fail_json( + msg='FAILED: At least one given user/permission could not be set', result_data=changed_data) + + module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data) if __name__ == '__main__': diff --git a/tests/integration/targets/gitlab_project_members/defaults/main.yml b/tests/integration/targets/gitlab_project_members/defaults/main.yml index a31fc0f2d6..1b3ac19a47 100644 --- a/tests/integration/targets/gitlab_project_members/defaults/main.yml +++ b/tests/integration/targets/gitlab_project_members/defaults/main.yml @@ -3,3 +3,11 @@ gitlab_api_access_token: "token" gitlab_project: some_project username: some_user gitlab_access_level: 
developer +userlist: + - username1 + - username2 +dedicated_access_users: + - name: username1 + access_level: "developer" + - name: username2 + access_level: "maintainer" diff --git a/tests/integration/targets/gitlab_project_members/tasks/main.yml b/tests/integration/targets/gitlab_project_members/tasks/main.yml index c3330bae41..ade06d7ca2 100644 --- a/tests/integration/targets/gitlab_project_members/tasks/main.yml +++ b/tests/integration/targets/gitlab_project_members/tasks/main.yml @@ -19,7 +19,7 @@ api_token: "{{ gitlab_api_access_token }}" project: "{{ gitlab_project }}" gitlab_user: "{{ username }}" - state: absent + state: absent - name: Add a User to A GitLab Project community.general.gitlab_project_members: @@ -78,3 +78,46 @@ assert: that: - remove_gitlab_project_members_state_again is not changed + +- name: Add a list of Users to A GitLab Project + community.general.gitlab_project_members: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_user: "{{ userlist }}" + access_level: "{{ gitlab_access_level }}" + state: present + +- name: Remove a list of Users to A GitLab Project + community.general.gitlab_project_members:: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_user: "{{ userlist }}" + state: absent + +- name: Add a list of Users with Dedicated Access Levels to A GitLab Project + community.general.gitlab_project_members:: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_users_access: "{{ dedicated_access_users }}" + state: present + +- name: Remove a list of Users with Dedicated Access Levels to A GitLab Project + community.general.gitlab_project_members:: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_users_access: "{{ dedicated_access_users }}" + 
state: absent
+
+- name: Add a user, remove all others which might be on this access level
+  community.general.gitlab_project_members:
+    api_url: "{{ gitlab_server_url }}"
+    api_token: "{{ gitlab_api_access_token }}"
+    project: "{{ gitlab_project }}"
+    gitlab_user: "{{ username }}"
+    access_level: "{{ gitlab_access_level }}"
+    purge_users: "{{ gitlab_access_level }}"
+    state: present

From 8ab96d95332d6e961a7e927538c6bf84baf31c58 Mon Sep 17 00:00:00 2001
From: Cliff Hults
Date: Sat, 18 Sep 2021 09:19:41 -0400
Subject: [PATCH 0330/2828] Icinga2 inventory plugin (#3202)

* Added Icinga2 inventory plugin

* Added Icinga2 inventory plugin

* Linting

* Added tests

* Linting

* Linting

* Added tests

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Added tests for icinga2 inventory

* Resolved reviews and added host filters

* Linting

* Fixing yaml for example and updating tests

* Updating test data

* Fixing pep8 indentations

* Missed copywriting

* Missed copywriting

* Updated documentation grammar

* Removing Cacheable class and cleanup

* Update plugins/inventory/icinga2.py

* Update plugins/inventory/icinga2.py

* Bump version number

Co-authored-by: Felix Fontein

* Update plugins/inventory/icinga2.py

Co-authored-by: Alexei Znamensky
<103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/inventory/icinga2.py | 222 +++++++++++++++++++ tests/unit/plugins/inventory/test_icinga2.py | 97 ++++++++ 3 files changed, 321 insertions(+) create mode 100644 plugins/inventory/icinga2.py create mode 100644 tests/unit/plugins/inventory/test_icinga2.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 78cd46871f..09cd8b8f3c 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -155,6 +155,8 @@ files: maintainers: sieben $inventories/proxmox.py: maintainers: $team_virt ilijamt + $inventories/icinga2.py: + maintainers: bongoeadgc6 $inventories/scaleway.py: maintainers: $team_scaleway labels: cloud scaleway diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py new file mode 100644 index 0000000000..8a50ecd178 --- /dev/null +++ b/plugins/inventory/icinga2.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Cliff Hults +# Copyright (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + name: icinga2 + short_description: Icinga2 inventory source + version_added: 3.7.0 + author: + - Cliff Hults (@BongoEADGC6) + description: + - Get inventory hosts from the Icinga2 API. + - "Uses a configuration file as an inventory source, it must end in + C(.icinga2.yml) or C(.icinga2.yaml)." + options: + plugin: + description: Name of the plugin. + required: true + type: string + choices: ['community.general.icinga2'] + url: + description: Root URL of Icinga2 API. + type: string + required: true + user: + description: Username to query the API. + type: string + required: true + password: + description: Password to query the API. + type: string + required: true + host_filter: + description: An Icinga2 API valid host filter. 
+ type: string + required: false + validate_certs: + description: Enables or disables SSL certificate verification. + type: boolean + default: true +''' + +EXAMPLES = r''' +# my.icinga2.yml +plugin: community.general.icinga2 +url: http://localhost:5665 +user: ansible +password: secure +host_filter: \"linux-servers\" in host.groups +validate_certs: false +''' + +import json + +from ansible.errors import AnsibleParserError +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible.module_utils.urls import open_url + + +class InventoryModule(BaseInventoryPlugin, Constructable): + ''' Host inventory parser for ansible using Icinga2 as source. ''' + + NAME = 'community.general.icinga2' + + def __init__(self): + + super(InventoryModule, self).__init__() + + # from config + self.icinga2_url = None + self.icinga2_user = None + self.icinga2_password = None + self.ssl_verify = None + self.host_filter = None + + self.cache_key = None + self.use_cache = None + + def verify_file(self, path): + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith(('icinga2.yaml', 'icinga2.yml')): + valid = True + else: + self.display.vvv('Skipping due to inventory source not ending in "icinga2.yaml" nor "icinga2.yml"') + return valid + + def _api_connect(self): + self.headers = { + 'User-Agent': "ansible-icinga2-inv", + 'Accept': "application/json", + } + api_status_url = self.icinga2_url + "/status" + request_args = { + 'headers': self.headers, + 'url_username': self.icinga2_user, + 'url_password': self.icinga2_password, + 'validate_certs': self.ssl_verify + } + open_url(api_status_url, **request_args) + + def _post_request(self, request_url, data=None): + self.display.vvv("Requested URL: %s" % request_url) + request_args = { + 'headers': self.headers, + 'url_username': self.icinga2_user, + 'url_password': self.icinga2_password, + 'validate_certs': self.ssl_verify + } + if data is not None: + request_args['data'] = json.dumps(data) + 
self.display.vvv("Request Args: %s" % request_args) + response = open_url(request_url, **request_args) + response_body = response.read() + json_data = json.loads(response_body.decode('utf-8')) + if 200 <= response.status <= 299: + return json_data + if response.status == 404 and json_data['status'] == "No objects found.": + raise AnsibleParserError( + "API returned no data -- Response: %s - %s" + % (response.status, json_data['status'])) + if response.status == 401: + raise AnsibleParserError( + "API was unable to complete query -- Response: %s - %s" + % (response.status, json_data['status'])) + if response.status == 500: + raise AnsibleParserError( + "API Response - %s - %s" + % (json_data['status'], json_data['errors'])) + raise AnsibleParserError( + "Unexpected data returned - %s - %s" + % (json_data['status'], json_data['errors'])) + + def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None): + query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url) + self.headers['X-HTTP-Method-Override'] = 'GET' + data_dict = dict() + if hosts: + data_dict['hosts'] = hosts + if attrs is not None: + data_dict['attrs'] = attrs + if joins is not None: + data_dict['joins'] = joins + if host_filter is not None: + data_dict['filter'] = host_filter.replace("\\\"", "\"") + self.display.vvv(host_filter) + host_dict = self._post_request(query_hosts_url, data_dict) + return host_dict['results'] + + def get_inventory_from_icinga(self): + """Query for all hosts """ + self.display.vvv("Querying Icinga2 for inventory") + query_args = { + "attrs": ["address", "state_type", "state", "groups"], + } + if self.host_filter is not None: + query_args['host_filter'] = self.host_filter + # Icinga2 API Call + results_json = self._query_hosts(**query_args) + # Manipulate returned API data to Ansible inventory spec + ansible_inv = self._convert_inv(results_json) + return ansible_inv + + def _populate(self): + groups = self._to_json(self.get_inventory_from_icinga()) + return 
groups + + def _to_json(self, in_dict): + """Convert dictionary to JSON""" + return json.dumps(in_dict, sort_keys=True, indent=2) + + def _convert_inv(self, json_data): + """Convert Icinga2 API data to JSON format for Ansible""" + groups_dict = {"_meta": {"hostvars": {}}} + for entry in json_data: + host_name = entry['name'] + host_attrs = entry['attrs'] + if host_attrs['state'] == 0: + host_attrs['state'] = 'on' + else: + host_attrs['state'] = 'off' + host_groups = host_attrs['groups'] + host_addr = host_attrs['address'] + self.inventory.add_host(host_addr) + for group in host_groups: + if group not in self.inventory.groups.keys(): + self.inventory.add_group(group) + self.inventory.add_child(group, host_addr) + self.inventory.set_variable(host_addr, 'address', host_addr) + self.inventory.set_variable(host_addr, 'hostname', host_name) + self.inventory.set_variable(host_addr, 'state', + host_attrs['state']) + self.inventory.set_variable(host_addr, 'state_type', + host_attrs['state_type']) + return groups_dict + + def parse(self, inventory, loader, path, cache=True): + + super(InventoryModule, self).parse(inventory, loader, path) + + # read config from file, this sets 'options' + self._read_config_data(path) + + # Store the options from the YAML file + self.icinga2_url = self.get_option('url').rstrip('/') + '/v1' + self.icinga2_user = self.get_option('user') + self.icinga2_password = self.get_option('password') + self.ssl_verify = self.get_option('validate_certs') + self.host_filter = self.get_option('host_filter') + # Not currently enabled + # self.cache_key = self.get_cache_key(path) + # self.use_cache = cache and self.get_option('cache') + + # Test connection to API + self._api_connect() + + # Call our internal helper to populate the dynamic inventory + self._populate() diff --git a/tests/unit/plugins/inventory/test_icinga2.py b/tests/unit/plugins/inventory/test_icinga2.py new file mode 100644 index 0000000000..266045f203 --- /dev/null +++ 
b/tests/unit/plugins/inventory/test_icinga2.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Cliff Hults +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# The API responses used in these tests were recorded from PVE version 6.2. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.inventory.data import InventoryData +from ansible_collections.community.general.plugins.inventory.icinga2 import InventoryModule + + +@pytest.fixture(scope="module") +def inventory(): + r = InventoryModule() + r.inventory = InventoryData() + return r + + +def test_verify_file_bad_config(inventory): + assert inventory.verify_file('foobar.icinga2.yml') is False + + +def check_api(): + return True + + +# NOTE: when updating/adding replies to this function, +# be sure to only add only the _contents_ of the 'data' dict in the API reply +def query_hosts(hosts=None, attrs=None, joins=None, host_filter=None): + # _get_hosts - list of dicts + json_host_data = [ + { + 'attrs': { + 'address': 'test-host1.home.local', + 'groups': ['home_servers', 'servers_dell'], + 'state': 0.0, + 'state_type': 1.0 + }, + 'joins': {}, + 'meta': {}, + 'name': 'test-host1', + 'type': 'Host' + }, + { + 'attrs': { + 'address': 'test-host2.home.local', + 'groups': ['home_servers', 'servers_hp'], + 'state': 1.0, + 'state_type': 1.0 + }, + 'joins': {}, + 'meta': {}, + 'name': 'test-host2', + 'type': 'Host' + } + ] + return json_host_data + + +def test_populate(inventory, mocker): + # module settings + inventory.icinga2_user = 'ansible' + inventory.icinga2_password = 'password' + inventory.icinga2_url = 'https://localhost:5665' + '/v1' + + # bypass authentication and API fetch calls + inventory._check_api = mocker.MagicMock(side_effect=check_api) + inventory._query_hosts = mocker.MagicMock(side_effect=query_hosts) + inventory._populate() + + # get different hosts + host1_info = 
inventory.inventory.get_host('test-host1.home.local')
+    print(host1_info)
+    host2_info = inventory.inventory.get_host('test-host2.home.local')
+    print(host2_info)
+
+    # check if host in the home_servers group
+    assert 'home_servers' in inventory.inventory.groups
+    group1_data = inventory.inventory.groups['home_servers']
+    group1_test_data = [host1_info, host2_info]
+    print(group1_data.hosts)
+    print(group1_test_data)
+    assert group1_data.hosts == group1_test_data
+    # Test servers_hp group
+    group2_data = inventory.inventory.groups['servers_hp']
+    group2_test_data = [host2_info]
+    print(group2_data.hosts)
+    print(group2_test_data)
+    assert group2_data.hosts == group2_test_data
+
+    # check if host state rules apply properly
+    assert host1_info.get_vars()['state'] == 'on'
+    assert host2_info.get_vars()['state'] == 'off'

From 7aae8d5386e0ddb944f171b1847e3e16e981d635 Mon Sep 17 00:00:00 2001
From: Alexei Znamensky <103110+russoz@users.noreply.github.com>
Date: Sun, 19 Sep 2021 23:44:37 +1200
Subject: [PATCH 0331/2828] Interfaces_file - improvements (#3328)

* pythonific!!
no camel cases, bitte * simplified iface attributes parsing * some improvements, passing tests * simplified set_interface_option() * further simplifications * remove unreachable stmt * pythonified a file open * added changelog fragment * adjustment per PR * PR: fixed the auto- case * PR: added testcase and chglog frag for the misleading change report * extra line removed * integration is not destructive --- .../3328-interfaces_file-improvements.yaml | 4 + plugins/modules/system/interfaces_file.py | 161 ++++++++---------- .../targets/interfaces_file/aliases | 1 + .../interfaces_file/files/interfaces_ff | 7 + .../targets/interfaces_file/tasks/main.yml | 33 ++++ .../interfaces_file/test_interfaces_file.py | 22 +-- 6 files changed, 123 insertions(+), 105 deletions(-) create mode 100644 changelogs/fragments/3328-interfaces_file-improvements.yaml create mode 100644 tests/integration/targets/interfaces_file/aliases create mode 100644 tests/integration/targets/interfaces_file/files/interfaces_ff create mode 100644 tests/integration/targets/interfaces_file/tasks/main.yml diff --git a/changelogs/fragments/3328-interfaces_file-improvements.yaml b/changelogs/fragments/3328-interfaces_file-improvements.yaml new file mode 100644 index 0000000000..10734af603 --- /dev/null +++ b/changelogs/fragments/3328-interfaces_file-improvements.yaml @@ -0,0 +1,4 @@ +bugfixes: + - interfaces_file - no longer reporting change when none happened (https://github.com/ansible-collections/community.general/pull/3328). +minor_changes: + - interfaces_file - minor refactor (https://github.com/ansible-collections/community.general/pull/3328). 
diff --git a/plugins/modules/system/interfaces_file.py b/plugins/modules/system/interfaces_file.py index c22c0ce29e..7666ba1cbc 100644 --- a/plugins/modules/system/interfaces_file.py +++ b/plugins/modules/system/interfaces_file.py @@ -148,57 +148,48 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_bytes -def lineDict(line): +def line_dict(line): return {'line': line, 'line_type': 'unknown'} -def optionDict(line, iface, option, value, address_family): +def make_option_dict(line, iface, option, value, address_family): return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family} -def getValueFromLine(s): - spaceRe = re.compile(r'\s+') - for m in spaceRe.finditer(s): - pass - valueEnd = m.start() - option = s.split()[0] - optionStart = s.find(option) - optionLen = len(option) - valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart - return s[valueStart:valueEnd] +def get_option_value(line): + patt = re.compile(r'^\s+(?P