From 83afe0f8687f3472efc7a948641ff786f76ba1fd Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 28 Jan 2021 13:40:13 +0100 Subject: [PATCH 0001/3093] Next expected release is 2.1.0. Next changelog ancestor is 2.0.0. --- changelogs/changelog.yaml | 2 +- galaxy.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 80b7c7c644..e78468a3ca 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,2 +1,2 @@ -ancestor: 1.0.0 +ancestor: 2.0.0 releases: {} diff --git a/galaxy.yml b/galaxy.yml index bc005c2c41..20b24b64e9 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 2.0.0 +version: 2.1.0 readme: README.md authors: - Ansible (https://github.com/ansible) From b6774971a6ed7fd1c6b14a9aa2ccd0692a9ebb4a Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 28 Jan 2021 13:40:56 +0100 Subject: [PATCH 0002/3093] Clear changelog for 3.0.0. --- .../1021-git_config-custom-file.yaml | 4 ---- .../1028-proxmox-kvm-linked-clone.yml | 3 --- .../1036-redis-cache-keyset-name.yaml | 3 --- ...-and-homebrew-cask-package-validation.yaml | 5 ---- .../1039-archive-fix-paramater-types.yaml | 3 --- ...040-ldap_search-changed-must-be-false.yaml | 2 -- .../fragments/1055-redis-cache-sentinel.yaml | 3 --- ...ck-if-the-value-matches-expected-form.yaml | 2 -- .../fragments/1081-solaris_zone-python3.yml | 3 --- .../1083-archive-remove-path-folder.yml | 3 --- changelogs/fragments/1101-slack-ts-fix.yaml | 2 -- changelogs/fragments/1105-beadm_bugfix.yaml | 3 --- .../fragments/1107-monit-fix-status-check.yml | 2 -- ...126-influxdb-conditional-path-argument.yml | 4 ---- .../1127-maven_artifact_client_cert.yml | 2 -- ...1140-iptables_state-fix-race-condition.yml | 4 ---- .../1144-consul-add-tcp-check-support.yml | 3 --- .../1149-filesystem-fix-355-state-absent.yml | 4 ---- .../fragments/1154-django_manage-docs.yml | 2 -- 
changelogs/fragments/1169-getbinpkgonly.yaml | 3 --- .../fragments/1175-zypper-absent-lang.yml | 2 -- ...79-composer_require_v2_idempotence_fix.yml | 4 ---- .../1185-proxmox-ignore-qemu-templates.yml | 3 --- ...e_description-in-gitlab-group-creation.yml | 2 -- .../1197_gitlab_project_variable.yml | 2 -- .../fragments/1206-proxmox-api-token.yml | 5 ---- .../1223-nios-remove-redundant-aliases.yml | 3 --- .../1243-pkgng-present-ignoreosver.yaml | 2 -- .../fragments/1244-renamed-parameter.yaml | 2 -- ...-flatpak-use-non-interactive-argument.yaml | 2 -- ...at-pkgin-add-full-version-package-name.yml | 2 -- ...258-proxmox_kvm-ignore-pool-on-update.yaml | 2 -- .../fragments/1260-nmcli-ib-routes.yaml | 4 ---- .../fragments/1264-dict_kv-new-filter.yaml | 2 -- .../1270-linode-v4-stackscript-support.yaml | 2 -- .../fragments/1305-added-xfconf-tests.yaml | 2 -- .../1307-macports-fix-status-check.yml | 3 --- .../1317-kubevirt-migration-removal.yml | 13 ----------- .../1319-google-migration-removal.yml | 16 ------------- .../1322-module_helper_and_xfconf.yaml | 5 ---- .../fragments/1331-proxmox-info-modules.yml | 3 --- ...tadog-mark-notification_message-no_log.yml | 3 --- .../fragments/1339-ip-no_log-nonsecret.yml | 3 --- ...-apache2-module-amend-shib-workaround.yaml | 2 -- .../1394-pamd-removing-comments.yaml | 2 -- .../1399-fixed-wrong-elements-type.yaml | 2 -- .../fragments/1404-proxmox-doc-fragments.yml | 4 ---- changelogs/fragments/1411_homebrew_tap.yml | 2 -- .../fragments/1413-proxmox-features.yml | 2 -- .../fragments/1415-valmod_req_mismatch.yml | 2 -- .../fragments/1419-xfconf-return-values.yaml | 2 -- .../fragments/1423-valmod_multiple_cases.yml | 6 ----- .../1425_bitbucket_pipeline_variable.yml | 2 -- .../1426-nmcli-add-zone-parameter.yml | 2 -- changelogs/fragments/1428-npm-no-optional.yml | 3 --- .../1436-mas-fix-no-app-installed.yml | 3 --- ...eyed_groups-to-linode-inventory-plugin.yml | 4 ---- .../fragments/1455-rhn-register-force.yml | 2 -- 
.../fragments/1462-splunk-millisecond.yaml | 2 -- ...tack-fix-put_file-to-preserve-checksum.yml | 2 -- .../1480-module-helper-improvements.yml | 2 -- .../1481-deprecated-brew-cask-command.yaml | 2 -- ...perty-name-in-redfish-memory-inventory.yml | 2 -- .../1491-gitlab-runner-owned-parameter.yml | 5 ---- ..._to_be_compatible_with_gopass_versions.yml | 2 -- changelogs/fragments/1504_jira.yml | 2 -- changelogs/fragments/1506_gitlab_project.yml | 2 -- .../1516-ldap_entry-improvements.yaml | 2 -- ...lave-from-list-of-ip-based-connections.yml | 2 -- .../fragments/1522-yaml-callback-unicode.yml | 2 -- .../1527-fix-nios-api-member-normalize.yaml | 3 --- .../1532-monit-support-all-services.yaml | 2 -- ...549-add-tag-filter-to-linode-inventory.yml | 4 ---- .../1550-add-jobs-parameter-to-make.yml | 2 -- changelogs/fragments/1552_launchd.yml | 2 -- changelogs/fragments/1553_sendgrid.yml | 2 -- .../1555-ipa-sudorule-add-commandgroup.yml | 2 -- changelogs/fragments/1574-make-question.yaml | 2 -- ...store.py-to-be-compatible-with-gopass.yaml | 5 ---- ...nfluxdb-shard-group-duration-parameter.yml | 2 -- .../1595-ldap-gssapi-sasl-authentication.yml | 2 -- ...1610-bugfix-onepassword-lookup-plugin.yaml | 2 -- changelogs/fragments/1614_npm.yml | 2 -- ..._search-switch-off-cheasing-referrals.yaml | 4 ---- .../1620-terraform_init_reconfigure_fix.yml | 2 -- .../1632-using_check_rc_in_terraform.yml | 2 -- .../1645-proxmox-env-passthrough.yml | 3 --- .../1654-dnsmadeeasy-http-400-fixes.yaml | 2 -- .../fragments/1679-homebrew_search_path.yml | 8 ------- .../1681-add_passwordstore_yaml_support.yaml | 2 -- .../fragments/1690-scaleway-regions.yaml | 2 -- ...-container-container_config-parameter.yaml | 2 -- .../fragments/229_lvol_percentage_fix.yml | 2 -- changelogs/fragments/296-ansible-2.9.yml | 2 -- ...-consul_kv-fix-env-variables-handling.yaml | 4 ---- .../fragments/311-jira-error-handling.yaml | 2 -- changelogs/fragments/320_unsafe_text.yml | 2 -- changelogs/fragments/331_keycloak.yml | 2 
-- .../335-icinga2_host-return-error-code.yaml | 2 -- changelogs/fragments/33979-xfs_growfs.yml | 2 -- ...9-pacman_improve_group_expansion_speed.yml | 2 -- .../360_syspatch_apply_patches_by_default.yml | 4 ---- .../409-datadog-monitor-include-tags.yaml | 2 -- ...use-stderr-and-environment-for-config.yaml | 2 -- ...lays-add_playbook_task_name_and_action.yml | 4 ---- ...-legacy-python-certificate-validation.yaml | 2 -- .../474-yarn_fix-outdated-fix-list.yml | 3 --- changelogs/fragments/47680_pam_limits.yml | 2 -- .../fragments/479-ini_file-empty-section.yaml | 4 ---- .../fragments/522-parted_change_label.yml | 2 -- changelogs/fragments/548_apk.yml | 2 -- .../560-pkgng-add-stdout-and-stderr.yaml | 2 -- .../fragments/562-nmcli-fix-idempotency.yaml | 2 -- .../563-update-terraform-status-test.yaml | 4 ---- changelogs/fragments/568_packaging.yml | 2 -- .../569-pkgng-add-upgrade-action.yaml | 6 ----- ...plunk-add-option-to-not-validate-cert.yaml | 2 -- .../604-lists_mergeby-new-filter.yml | 23 ------------------- .../610_logstash_callback_add_ini_config.yml | 2 -- ...nv-vars-intput-and-default-item-limit.yaml | 2 -- .../fragments/613-snmp_facts-EndOfMibView.yml | 2 -- .../615-digital-ocean-tag-info-bugfix.yml | 2 -- .../630-git_config-handling-invalid-dir.yaml | 2 -- changelogs/fragments/63767_selective.yml | 2 -- changelogs/fragments/638_cobbler_py3.yml | 2 -- .../641-update-ansible-logstash-callback.yml | 3 --- .../650_pacman_support_zst_package_files.yaml | 4 ---- changelogs/fragments/66813_gitlab_project.yml | 2 -- ...sx_defaults_fix_handling_negative_ints.yml | 2 -- .../fragments/677-jenkins_plugins_sha1.yaml | 5 ---- ...87-fix-redfish-payload-decode-python35.yml | 2 -- .../689-haproxy_agent_and_health.yml | 7 ------ ...g-revamp-on-xfconf-adding-array-values.yml | 2 -- ...anage-createcachetable-fix-idempotence.yml | 2 -- .../702-slack-support-for-blocks.yaml | 2 -- .../704-doas-set-correct-default-values.yml | 2 -- .../fragments/707-jira-error-handling.yaml | 2 -- 
.../708-set-correct-default-values.yml | 3 --- changelogs/fragments/711-lxd-target.yml | 2 -- .../713-maven-timestamp-snapshot.yml | 2 -- changelogs/fragments/722-plugins.yml | 4 ---- changelogs/fragments/738-ipa-python3.yml | 2 -- .../744-xfconf_make_locale-independent.yml | 5 ---- changelogs/fragments/750-jc-new-filter.yaml | 2 -- changelogs/fragments/768-facter.yml | 2 -- changelogs/fragments/773-resize-partition.yml | 2 -- .../777-interfaces_file-re-escape.yml | 3 --- .../783-fix-gem-installed-versions.yaml | 2 -- .../788-fix_omapi_host_on_python3.yaml | 2 -- .../789-pkg5-wrap-to-modify-package-list.yaml | 2 -- .../fragments/797-proxmox-kvm-cloud-init.yaml | 2 -- .../802-pushover-device-parameter.yml | 2 -- .../811-proxmox-kvm-state-absent.yml | 3 --- ...ios_added_acknowledge_and_servicecheck.yml | 3 --- .../823-terraform_init_reconfigure.yaml | 2 -- .../825-bootsource-override-option.yaml | 2 -- changelogs/fragments/830-pam-limits.yml | 3 --- changelogs/fragments/831-proxmox-kvm-wait.yml | 3 --- .../fragments/843-update-slack-messages.yml | 2 -- .../849-proxmox-kvm-state-absent-force.yml | 3 --- ...proxmox_kvm-remove_hard_coded_defaults.yml | 6 ----- .../891-packet_net-fix-not-subscriptable.yaml | 2 -- .../fragments/892-slack-token-validation.yml | 3 --- .../fragments/899_launchd_user_service.yml | 2 -- ...ike-migration-handle-unstable-cluster.yaml | 3 --- ...-enhance-redfish-manager-reset-actions.yml | 2 -- ...y_proper_failure_on_missing_python-xml.yml | 2 -- .../943-proxmox-kvm-code-cleanup.yml | 3 --- .../fragments/945-darwin-timezone-py3.yaml | 2 -- .../951-ipa_user-add-userauthtype-param.yaml | 3 --- changelogs/fragments/953_syslogger.yml | 2 -- ...en-calling-a-module-from-action-plugin.yml | 4 ---- .../968-gitlab_variables-pagination.yml | 4 ---- changelogs/fragments/992-nmcli-locale.yml | 2 -- .../fragments/993-file-capabilities.yml | 2 -- ..._forced_check_for_all_services_or_host.yml | 3 --- .../add_argument_check_for_rundeck.yaml | 2 -- 
.../airbrake_deployment_add_version.yml | 3 --- ...x_filesystem-module_util-routing-issue.yml | 3 --- changelogs/fragments/cloudflare_dns.yml | 2 -- .../cve_bitbucket_pipeline_variable.yml | 2 -- .../fragments/cyberarkconjur-removal.yml | 2 -- changelogs/fragments/dconf_refactor.yml | 2 -- changelogs/fragments/deprecation-removals.yml | 5 ---- changelogs/fragments/digital-ocean.yml | 2 -- .../fragments/docker-migration-removal.yml | 14 ----------- changelogs/fragments/dsv_fix.yml | 2 -- changelogs/fragments/firewalld_migration.yml | 3 --- changelogs/fragments/fix-plugin-imports.yml | 5 ---- ...x_parsing_array_values_in_osx_defaults.yml | 2 -- changelogs/fragments/galaxy-yml.yml | 2 -- changelogs/fragments/gluster-deprecation.yaml | 2 -- .../hashi_vault-migration-removal.yml | 14 ----------- .../fragments/hetzner-migration-removal.yml | 14 ----------- .../homebrew-cask-at-symbol-fix.yaml | 2 -- changelogs/fragments/infinidat-removal.yml | 2 -- changelogs/fragments/jira_improvements.yaml | 7 ------ ...path-to-locate-the-lldpctl-executable.yaml | 2 -- changelogs/fragments/logicmonitor-removal.yml | 3 --- .../fragments/lookup-passwordstore-umask.yml | 5 ---- changelogs/fragments/mysql.yml | 2 -- changelogs/fragments/nios-fix-ib_spec.yaml | 2 -- .../nios_host_record-fix-aliases-removal.yml | 3 --- changelogs/fragments/nmcli-refactor.yml | 8 ------- changelogs/fragments/oc-migration-removal.yml | 14 ----------- changelogs/fragments/odbc.yml | 2 -- changelogs/fragments/openbsd_pkg.yml | 3 --- .../fragments/parted_negative_numbers.yml | 2 -- .../fragments/pkgutil-check-mode-etc.yaml | 4 ---- changelogs/fragments/porting-guide-2.yml | 2 -- .../postgresql-migration-removal.yml | 14 ----------- .../proxmox_template-appliance-download.yml | 3 --- changelogs/fragments/proxysql.yml | 2 -- .../remove-ansible.netcommon-dependency.yml | 4 ---- .../remove-ansible.posix-dependency.yml | 2 -- .../fragments/remove-deprecated-modules-2.yml | 10 -------- 
.../fragments/remove-deprecated-modules.yml | 20 ---------------- .../fragments/remove-deprecated-redirects.yml | 7 ------ changelogs/fragments/snmp_facts.yml | 2 -- changelogs/fragments/telegram-api-update.yml | 5 ---- changelogs/fragments/xfconf_add_uint_type.yml | 2 -- changelogs/fragments/xml-remove-changed.yml | 2 -- changelogs/fragments/zfs-root-snapshot.yml | 2 -- 223 files changed, 732 deletions(-) delete mode 100644 changelogs/fragments/1021-git_config-custom-file.yaml delete mode 100644 changelogs/fragments/1028-proxmox-kvm-linked-clone.yml delete mode 100644 changelogs/fragments/1036-redis-cache-keyset-name.yaml delete mode 100644 changelogs/fragments/1038-fix-homebrew-and-homebrew-cask-package-validation.yaml delete mode 100644 changelogs/fragments/1039-archive-fix-paramater-types.yaml delete mode 100644 changelogs/fragments/1040-ldap_search-changed-must-be-false.yaml delete mode 100644 changelogs/fragments/1055-redis-cache-sentinel.yaml delete mode 100644 changelogs/fragments/1079-redis-use-regexp-to-check-if-the-value-matches-expected-form.yaml delete mode 100644 changelogs/fragments/1081-solaris_zone-python3.yml delete mode 100644 changelogs/fragments/1083-archive-remove-path-folder.yml delete mode 100644 changelogs/fragments/1101-slack-ts-fix.yaml delete mode 100644 changelogs/fragments/1105-beadm_bugfix.yaml delete mode 100644 changelogs/fragments/1107-monit-fix-status-check.yml delete mode 100644 changelogs/fragments/1126-influxdb-conditional-path-argument.yml delete mode 100644 changelogs/fragments/1127-maven_artifact_client_cert.yml delete mode 100644 changelogs/fragments/1140-iptables_state-fix-race-condition.yml delete mode 100644 changelogs/fragments/1144-consul-add-tcp-check-support.yml delete mode 100644 changelogs/fragments/1149-filesystem-fix-355-state-absent.yml delete mode 100644 changelogs/fragments/1154-django_manage-docs.yml delete mode 100644 changelogs/fragments/1169-getbinpkgonly.yaml delete mode 100644 
changelogs/fragments/1175-zypper-absent-lang.yml delete mode 100644 changelogs/fragments/1179-composer_require_v2_idempotence_fix.yml delete mode 100644 changelogs/fragments/1185-proxmox-ignore-qemu-templates.yml delete mode 100644 changelogs/fragments/1196-use_description-in-gitlab-group-creation.yml delete mode 100644 changelogs/fragments/1197_gitlab_project_variable.yml delete mode 100644 changelogs/fragments/1206-proxmox-api-token.yml delete mode 100644 changelogs/fragments/1223-nios-remove-redundant-aliases.yml delete mode 100644 changelogs/fragments/1243-pkgng-present-ignoreosver.yaml delete mode 100644 changelogs/fragments/1244-renamed-parameter.yaml delete mode 100644 changelogs/fragments/1246-flatpak-use-non-interactive-argument.yaml delete mode 100644 changelogs/fragments/1256-feat-pkgin-add-full-version-package-name.yml delete mode 100644 changelogs/fragments/1258-proxmox_kvm-ignore-pool-on-update.yaml delete mode 100644 changelogs/fragments/1260-nmcli-ib-routes.yaml delete mode 100644 changelogs/fragments/1264-dict_kv-new-filter.yaml delete mode 100644 changelogs/fragments/1270-linode-v4-stackscript-support.yaml delete mode 100644 changelogs/fragments/1305-added-xfconf-tests.yaml delete mode 100644 changelogs/fragments/1307-macports-fix-status-check.yml delete mode 100644 changelogs/fragments/1317-kubevirt-migration-removal.yml delete mode 100644 changelogs/fragments/1319-google-migration-removal.yml delete mode 100644 changelogs/fragments/1322-module_helper_and_xfconf.yaml delete mode 100644 changelogs/fragments/1331-proxmox-info-modules.yml delete mode 100644 changelogs/fragments/1338-datadog-mark-notification_message-no_log.yml delete mode 100644 changelogs/fragments/1339-ip-no_log-nonsecret.yml delete mode 100644 changelogs/fragments/1383-apache2-module-amend-shib-workaround.yaml delete mode 100644 changelogs/fragments/1394-pamd-removing-comments.yaml delete mode 100644 changelogs/fragments/1399-fixed-wrong-elements-type.yaml delete mode 100644 
changelogs/fragments/1404-proxmox-doc-fragments.yml delete mode 100644 changelogs/fragments/1411_homebrew_tap.yml delete mode 100644 changelogs/fragments/1413-proxmox-features.yml delete mode 100644 changelogs/fragments/1415-valmod_req_mismatch.yml delete mode 100644 changelogs/fragments/1419-xfconf-return-values.yaml delete mode 100644 changelogs/fragments/1423-valmod_multiple_cases.yml delete mode 100644 changelogs/fragments/1425_bitbucket_pipeline_variable.yml delete mode 100644 changelogs/fragments/1426-nmcli-add-zone-parameter.yml delete mode 100644 changelogs/fragments/1428-npm-no-optional.yml delete mode 100644 changelogs/fragments/1436-mas-fix-no-app-installed.yml delete mode 100644 changelogs/fragments/1453-add-support-for-keyed_groups-to-linode-inventory-plugin.yml delete mode 100644 changelogs/fragments/1455-rhn-register-force.yml delete mode 100644 changelogs/fragments/1462-splunk-millisecond.yaml delete mode 100644 changelogs/fragments/1472-saltstack-fix-put_file-to-preserve-checksum.yml delete mode 100644 changelogs/fragments/1480-module-helper-improvements.yml delete mode 100644 changelogs/fragments/1481-deprecated-brew-cask-command.yaml delete mode 100644 changelogs/fragments/1484-fix-property-name-in-redfish-memory-inventory.yml delete mode 100644 changelogs/fragments/1491-gitlab-runner-owned-parameter.yml delete mode 100644 changelogs/fragments/1493-fix_passwordstore.py_to_be_compatible_with_gopass_versions.yml delete mode 100644 changelogs/fragments/1504_jira.yml delete mode 100644 changelogs/fragments/1506_gitlab_project.yml delete mode 100644 changelogs/fragments/1516-ldap_entry-improvements.yaml delete mode 100644 changelogs/fragments/1517-bridge-slave-from-list-of-ip-based-connections.yml delete mode 100644 changelogs/fragments/1522-yaml-callback-unicode.yml delete mode 100644 changelogs/fragments/1527-fix-nios-api-member-normalize.yaml delete mode 100644 changelogs/fragments/1532-monit-support-all-services.yaml delete mode 100644 
changelogs/fragments/1549-add-tag-filter-to-linode-inventory.yml delete mode 100644 changelogs/fragments/1550-add-jobs-parameter-to-make.yml delete mode 100644 changelogs/fragments/1552_launchd.yml delete mode 100644 changelogs/fragments/1553_sendgrid.yml delete mode 100644 changelogs/fragments/1555-ipa-sudorule-add-commandgroup.yml delete mode 100644 changelogs/fragments/1574-make-question.yaml delete mode 100644 changelogs/fragments/1589-passwordstore-fix-passwordstore.py-to-be-compatible-with-gopass.yaml delete mode 100644 changelogs/fragments/1590-influxdb-shard-group-duration-parameter.yml delete mode 100644 changelogs/fragments/1595-ldap-gssapi-sasl-authentication.yml delete mode 100644 changelogs/fragments/1610-bugfix-onepassword-lookup-plugin.yaml delete mode 100644 changelogs/fragments/1614_npm.yml delete mode 100644 changelogs/fragments/1618-ldap_search-switch-off-cheasing-referrals.yaml delete mode 100644 changelogs/fragments/1620-terraform_init_reconfigure_fix.yml delete mode 100644 changelogs/fragments/1632-using_check_rc_in_terraform.yml delete mode 100644 changelogs/fragments/1645-proxmox-env-passthrough.yml delete mode 100644 changelogs/fragments/1654-dnsmadeeasy-http-400-fixes.yaml delete mode 100644 changelogs/fragments/1679-homebrew_search_path.yml delete mode 100644 changelogs/fragments/1681-add_passwordstore_yaml_support.yaml delete mode 100644 changelogs/fragments/1690-scaleway-regions.yaml delete mode 100644 changelogs/fragments/216-fix-lxc-container-container_config-parameter.yaml delete mode 100644 changelogs/fragments/229_lvol_percentage_fix.yml delete mode 100644 changelogs/fragments/296-ansible-2.9.yml delete mode 100644 changelogs/fragments/303-consul_kv-fix-env-variables-handling.yaml delete mode 100644 changelogs/fragments/311-jira-error-handling.yaml delete mode 100644 changelogs/fragments/320_unsafe_text.yml delete mode 100644 changelogs/fragments/331_keycloak.yml delete mode 100644 
changelogs/fragments/335-icinga2_host-return-error-code.yaml delete mode 100644 changelogs/fragments/33979-xfs_growfs.yml delete mode 100644 changelogs/fragments/349-pacman_improve_group_expansion_speed.yml delete mode 100644 changelogs/fragments/360_syspatch_apply_patches_by_default.yml delete mode 100644 changelogs/fragments/409-datadog-monitor-include-tags.yaml delete mode 100644 changelogs/fragments/436-infoblox-use-stderr-and-environment-for-config.yaml delete mode 100644 changelogs/fragments/442-log_plays-add_playbook_task_name_and_action.yml delete mode 100644 changelogs/fragments/470-spacewalk-legacy-python-certificate-validation.yaml delete mode 100644 changelogs/fragments/474-yarn_fix-outdated-fix-list.yml delete mode 100644 changelogs/fragments/47680_pam_limits.yml delete mode 100644 changelogs/fragments/479-ini_file-empty-section.yaml delete mode 100644 changelogs/fragments/522-parted_change_label.yml delete mode 100644 changelogs/fragments/548_apk.yml delete mode 100644 changelogs/fragments/560-pkgng-add-stdout-and-stderr.yaml delete mode 100644 changelogs/fragments/562-nmcli-fix-idempotency.yaml delete mode 100644 changelogs/fragments/563-update-terraform-status-test.yaml delete mode 100644 changelogs/fragments/568_packaging.yml delete mode 100644 changelogs/fragments/569-pkgng-add-upgrade-action.yaml delete mode 100644 changelogs/fragments/596-splunk-add-option-to-not-validate-cert.yaml delete mode 100644 changelogs/fragments/604-lists_mergeby-new-filter.yml delete mode 100644 changelogs/fragments/610_logstash_callback_add_ini_config.yml delete mode 100644 changelogs/fragments/611-gitlab-runners-env-vars-intput-and-default-item-limit.yaml delete mode 100644 changelogs/fragments/613-snmp_facts-EndOfMibView.yml delete mode 100644 changelogs/fragments/615-digital-ocean-tag-info-bugfix.yml delete mode 100644 changelogs/fragments/630-git_config-handling-invalid-dir.yaml delete mode 100644 changelogs/fragments/63767_selective.yml delete mode 100644 
changelogs/fragments/638_cobbler_py3.yml delete mode 100644 changelogs/fragments/641-update-ansible-logstash-callback.yml delete mode 100644 changelogs/fragments/650_pacman_support_zst_package_files.yaml delete mode 100644 changelogs/fragments/66813_gitlab_project.yml delete mode 100644 changelogs/fragments/676-osx_defaults_fix_handling_negative_ints.yml delete mode 100644 changelogs/fragments/677-jenkins_plugins_sha1.yaml delete mode 100644 changelogs/fragments/687-fix-redfish-payload-decode-python35.yml delete mode 100644 changelogs/fragments/689-haproxy_agent_and_health.yml delete mode 100644 changelogs/fragments/693-big-revamp-on-xfconf-adding-array-values.yml delete mode 100644 changelogs/fragments/699-django_manage-createcachetable-fix-idempotence.yml delete mode 100644 changelogs/fragments/702-slack-support-for-blocks.yaml delete mode 100644 changelogs/fragments/704-doas-set-correct-default-values.yml delete mode 100644 changelogs/fragments/707-jira-error-handling.yaml delete mode 100644 changelogs/fragments/708-set-correct-default-values.yml delete mode 100644 changelogs/fragments/711-lxd-target.yml delete mode 100644 changelogs/fragments/713-maven-timestamp-snapshot.yml delete mode 100644 changelogs/fragments/722-plugins.yml delete mode 100644 changelogs/fragments/738-ipa-python3.yml delete mode 100644 changelogs/fragments/744-xfconf_make_locale-independent.yml delete mode 100644 changelogs/fragments/750-jc-new-filter.yaml delete mode 100644 changelogs/fragments/768-facter.yml delete mode 100644 changelogs/fragments/773-resize-partition.yml delete mode 100644 changelogs/fragments/777-interfaces_file-re-escape.yml delete mode 100644 changelogs/fragments/783-fix-gem-installed-versions.yaml delete mode 100644 changelogs/fragments/788-fix_omapi_host_on_python3.yaml delete mode 100644 changelogs/fragments/789-pkg5-wrap-to-modify-package-list.yaml delete mode 100644 changelogs/fragments/797-proxmox-kvm-cloud-init.yaml delete mode 100644 
changelogs/fragments/802-pushover-device-parameter.yml delete mode 100644 changelogs/fragments/811-proxmox-kvm-state-absent.yml delete mode 100644 changelogs/fragments/820_nagios_added_acknowledge_and_servicecheck.yml delete mode 100644 changelogs/fragments/823-terraform_init_reconfigure.yaml delete mode 100644 changelogs/fragments/825-bootsource-override-option.yaml delete mode 100644 changelogs/fragments/830-pam-limits.yml delete mode 100644 changelogs/fragments/831-proxmox-kvm-wait.yml delete mode 100644 changelogs/fragments/843-update-slack-messages.yml delete mode 100644 changelogs/fragments/849-proxmox-kvm-state-absent-force.yml delete mode 100644 changelogs/fragments/850-proxmox_kvm-remove_hard_coded_defaults.yml delete mode 100644 changelogs/fragments/891-packet_net-fix-not-subscriptable.yaml delete mode 100644 changelogs/fragments/892-slack-token-validation.yml delete mode 100644 changelogs/fragments/899_launchd_user_service.yml delete mode 100644 changelogs/fragments/900-aerospike-migration-handle-unstable-cluster.yaml delete mode 100644 changelogs/fragments/903-enhance-redfish-manager-reset-actions.yml delete mode 100644 changelogs/fragments/939-zypper_repository_proper_failure_on_missing_python-xml.yml delete mode 100644 changelogs/fragments/943-proxmox-kvm-code-cleanup.yml delete mode 100644 changelogs/fragments/945-darwin-timezone-py3.yaml delete mode 100644 changelogs/fragments/951-ipa_user-add-userauthtype-param.yaml delete mode 100644 changelogs/fragments/953_syslogger.yml delete mode 100644 changelogs/fragments/967-use-fqcn-when-calling-a-module-from-action-plugin.yml delete mode 100644 changelogs/fragments/968-gitlab_variables-pagination.yml delete mode 100644 changelogs/fragments/992-nmcli-locale.yml delete mode 100644 changelogs/fragments/993-file-capabilities.yml delete mode 100644 changelogs/fragments/998-nagios-added_forced_check_for_all_services_or_host.yml delete mode 100644 changelogs/fragments/add_argument_check_for_rundeck.yaml delete 
mode 100644 changelogs/fragments/airbrake_deployment_add_version.yml delete mode 100644 changelogs/fragments/aix_filesystem-module_util-routing-issue.yml delete mode 100644 changelogs/fragments/cloudflare_dns.yml delete mode 100644 changelogs/fragments/cve_bitbucket_pipeline_variable.yml delete mode 100644 changelogs/fragments/cyberarkconjur-removal.yml delete mode 100644 changelogs/fragments/dconf_refactor.yml delete mode 100644 changelogs/fragments/deprecation-removals.yml delete mode 100644 changelogs/fragments/digital-ocean.yml delete mode 100644 changelogs/fragments/docker-migration-removal.yml delete mode 100644 changelogs/fragments/dsv_fix.yml delete mode 100644 changelogs/fragments/firewalld_migration.yml delete mode 100644 changelogs/fragments/fix-plugin-imports.yml delete mode 100644 changelogs/fragments/fix_parsing_array_values_in_osx_defaults.yml delete mode 100644 changelogs/fragments/galaxy-yml.yml delete mode 100644 changelogs/fragments/gluster-deprecation.yaml delete mode 100644 changelogs/fragments/hashi_vault-migration-removal.yml delete mode 100644 changelogs/fragments/hetzner-migration-removal.yml delete mode 100644 changelogs/fragments/homebrew-cask-at-symbol-fix.yaml delete mode 100644 changelogs/fragments/infinidat-removal.yml delete mode 100644 changelogs/fragments/jira_improvements.yaml delete mode 100644 changelogs/fragments/lldp-use-get_bin_path-to-locate-the-lldpctl-executable.yaml delete mode 100644 changelogs/fragments/logicmonitor-removal.yml delete mode 100644 changelogs/fragments/lookup-passwordstore-umask.yml delete mode 100644 changelogs/fragments/mysql.yml delete mode 100644 changelogs/fragments/nios-fix-ib_spec.yaml delete mode 100644 changelogs/fragments/nios_host_record-fix-aliases-removal.yml delete mode 100644 changelogs/fragments/nmcli-refactor.yml delete mode 100644 changelogs/fragments/oc-migration-removal.yml delete mode 100644 changelogs/fragments/odbc.yml delete mode 100644 changelogs/fragments/openbsd_pkg.yml delete 
mode 100644 changelogs/fragments/parted_negative_numbers.yml delete mode 100644 changelogs/fragments/pkgutil-check-mode-etc.yaml delete mode 100644 changelogs/fragments/porting-guide-2.yml delete mode 100644 changelogs/fragments/postgresql-migration-removal.yml delete mode 100644 changelogs/fragments/proxmox_template-appliance-download.yml delete mode 100644 changelogs/fragments/proxysql.yml delete mode 100644 changelogs/fragments/remove-ansible.netcommon-dependency.yml delete mode 100644 changelogs/fragments/remove-ansible.posix-dependency.yml delete mode 100644 changelogs/fragments/remove-deprecated-modules-2.yml delete mode 100644 changelogs/fragments/remove-deprecated-modules.yml delete mode 100644 changelogs/fragments/remove-deprecated-redirects.yml delete mode 100644 changelogs/fragments/snmp_facts.yml delete mode 100644 changelogs/fragments/telegram-api-update.yml delete mode 100644 changelogs/fragments/xfconf_add_uint_type.yml delete mode 100644 changelogs/fragments/xml-remove-changed.yml delete mode 100644 changelogs/fragments/zfs-root-snapshot.yml diff --git a/changelogs/fragments/1021-git_config-custom-file.yaml b/changelogs/fragments/1021-git_config-custom-file.yaml deleted file mode 100644 index f83a43fef2..0000000000 --- a/changelogs/fragments/1021-git_config-custom-file.yaml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - git_config - added parameter and scope ``file`` allowing user to change parameters in a custom file (https://github.com/ansible-collections/community.general/issues/1021). -bugfixes: - - git_config - using list instead of string as first parameter in the ``run_command()`` call (https://github.com/ansible-collections/community.general/issues/1021). 
diff --git a/changelogs/fragments/1028-proxmox-kvm-linked-clone.yml b/changelogs/fragments/1028-proxmox-kvm-linked-clone.yml deleted file mode 100644 index f85b5f5f87..0000000000 --- a/changelogs/fragments/1028-proxmox-kvm-linked-clone.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox_kvm - fix issue causing linked clones not being create by allowing ``format=unspecified`` (https://github.com/ansible-collections/community.general/issues/1027). diff --git a/changelogs/fragments/1036-redis-cache-keyset-name.yaml b/changelogs/fragments/1036-redis-cache-keyset-name.yaml deleted file mode 100644 index bbe34ab6b3..0000000000 --- a/changelogs/fragments/1036-redis-cache-keyset-name.yaml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - redis cache plugin - make the redis cache keyset name configurable - (https://github.com/ansible-collections/community.general/pull/1036). diff --git a/changelogs/fragments/1038-fix-homebrew-and-homebrew-cask-package-validation.yaml b/changelogs/fragments/1038-fix-homebrew-and-homebrew-cask-package-validation.yaml deleted file mode 100644 index f3ca271d1e..0000000000 --- a/changelogs/fragments/1038-fix-homebrew-and-homebrew-cask-package-validation.yaml +++ /dev/null @@ -1,5 +0,0 @@ -bugfixes: -- homebrew - fix package name validation for packages containing hypen ``-`` - (https://github.com/ansible-collections/community.general/issues/1037). -- homebrew_cask - fix package name validation for casks containing hypen ``-`` - (https://github.com/ansible-collections/community.general/issues/1037). diff --git a/changelogs/fragments/1039-archive-fix-paramater-types.yaml b/changelogs/fragments/1039-archive-fix-paramater-types.yaml deleted file mode 100644 index b02315c98f..0000000000 --- a/changelogs/fragments/1039-archive-fix-paramater-types.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - archive - fix paramater types (https://github.com/ansible-collections/community.general/pull/1039). 
diff --git a/changelogs/fragments/1040-ldap_search-changed-must-be-false.yaml b/changelogs/fragments/1040-ldap_search-changed-must-be-false.yaml deleted file mode 100644 index 6fc23f705b..0000000000 --- a/changelogs/fragments/1040-ldap_search-changed-must-be-false.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ldap_search - the module no longer incorrectly reports a change (https://github.com/ansible-collections/community.general/issues/1040). diff --git a/changelogs/fragments/1055-redis-cache-sentinel.yaml b/changelogs/fragments/1055-redis-cache-sentinel.yaml deleted file mode 100644 index 9e9bbf1e86..0000000000 --- a/changelogs/fragments/1055-redis-cache-sentinel.yaml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - redis cache plugin - add redis sentinel functionality to cache plugin - (https://github.com/ansible-collections/community.general/pull/1055). diff --git a/changelogs/fragments/1079-redis-use-regexp-to-check-if-the-value-matches-expected-form.yaml b/changelogs/fragments/1079-redis-use-regexp-to-check-if-the-value-matches-expected-form.yaml deleted file mode 100644 index 27d01de91d..0000000000 --- a/changelogs/fragments/1079-redis-use-regexp-to-check-if-the-value-matches-expected-form.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redis - fixes parsing of config values which should not be converted to bytes (https://github.com/ansible-collections/community.general/pull/1079). diff --git a/changelogs/fragments/1081-solaris_zone-python3.yml b/changelogs/fragments/1081-solaris_zone-python3.yml deleted file mode 100644 index 40cd448f5e..0000000000 --- a/changelogs/fragments/1081-solaris_zone-python3.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - solaris_zone - fixed issue trying to configure zone in Python 3 (https://github.com/ansible-collections/community.general/issues/1081). 
diff --git a/changelogs/fragments/1083-archive-remove-path-folder.yml b/changelogs/fragments/1083-archive-remove-path-folder.yml deleted file mode 100644 index 506467dc8b..0000000000 --- a/changelogs/fragments/1083-archive-remove-path-folder.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -breaking_changes: - - archive - remove path folder itself when ``remove`` paramater is true (https://github.com/ansible-collections/community.general/issues/1041). diff --git a/changelogs/fragments/1101-slack-ts-fix.yaml b/changelogs/fragments/1101-slack-ts-fix.yaml deleted file mode 100644 index e9c04cbce1..0000000000 --- a/changelogs/fragments/1101-slack-ts-fix.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- slack - avoid trying to update existing message when sending messages that contain the string "ts" (https://github.com/ansible-collections/community.general/issues/1097). diff --git a/changelogs/fragments/1105-beadm_bugfix.yaml b/changelogs/fragments/1105-beadm_bugfix.yaml deleted file mode 100644 index 0ff37156c3..0000000000 --- a/changelogs/fragments/1105-beadm_bugfix.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - beadm - fixed issue "list object has no attribute split" (https://github.com/ansible-collections/community.general/issues/791). diff --git a/changelogs/fragments/1107-monit-fix-status-check.yml b/changelogs/fragments/1107-monit-fix-status-check.yml deleted file mode 100644 index 400b9715d5..0000000000 --- a/changelogs/fragments/1107-monit-fix-status-check.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - monit - fix modules ability to determine the current state of the monitored process (https://github.com/ansible-collections/community.general/pull/1107). 
diff --git a/changelogs/fragments/1126-influxdb-conditional-path-argument.yml b/changelogs/fragments/1126-influxdb-conditional-path-argument.yml deleted file mode 100644 index ec5cb1f63e..0000000000 --- a/changelogs/fragments/1126-influxdb-conditional-path-argument.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - influxdb - fix usage of path for older version of - python-influxdb (https://github.com/ansible-collections/community.general/issues/997). diff --git a/changelogs/fragments/1127-maven_artifact_client_cert.yml b/changelogs/fragments/1127-maven_artifact_client_cert.yml deleted file mode 100644 index 612ea04921..0000000000 --- a/changelogs/fragments/1127-maven_artifact_client_cert.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - maven_artifact - added ``client_cert`` and ``client_key`` parameters to the maven_artifact module (https://github.com/ansible-collections/community.general/issues/1123). diff --git a/changelogs/fragments/1140-iptables_state-fix-race-condition.yml b/changelogs/fragments/1140-iptables_state-fix-race-condition.yml deleted file mode 100644 index 00cd6d2d1b..0000000000 --- a/changelogs/fragments/1140-iptables_state-fix-race-condition.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - iptables_state - fix race condition between module and its action plugin - (https://github.com/ansible-collections/community.general/issues/1136). diff --git a/changelogs/fragments/1144-consul-add-tcp-check-support.yml b/changelogs/fragments/1144-consul-add-tcp-check-support.yml deleted file mode 100644 index b3a5e54a83..0000000000 --- a/changelogs/fragments/1144-consul-add-tcp-check-support.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - consul - added support for tcp checks (https://github.com/ansible-collections/community.general/issues/1128). 
diff --git a/changelogs/fragments/1149-filesystem-fix-355-state-absent.yml b/changelogs/fragments/1149-filesystem-fix-355-state-absent.yml deleted file mode 100644 index e969c9bead..0000000000 --- a/changelogs/fragments/1149-filesystem-fix-355-state-absent.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - filesystem - add option ``state`` with default ``present``. When set to ``absent``, filesystem signatures are removed - (https://github.com/ansible-collections/community.general/issues/355). diff --git a/changelogs/fragments/1154-django_manage-docs.yml b/changelogs/fragments/1154-django_manage-docs.yml deleted file mode 100644 index ea9af785dd..0000000000 --- a/changelogs/fragments/1154-django_manage-docs.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: - - django_manage - the parameter ``liveserver`` relates to a no longer maintained third-party module for django. It is now deprecated, and will be remove in community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/1154). diff --git a/changelogs/fragments/1169-getbinpkgonly.yaml b/changelogs/fragments/1169-getbinpkgonly.yaml deleted file mode 100644 index b94dc6f224..0000000000 --- a/changelogs/fragments/1169-getbinpkgonly.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - portage - add ``getbinpkgonly`` option, remove unnecessary note on internal portage behaviour (getbinpkg=yes), and remove the undocumented exclusiveness of the pkg options as portage makes no such restriction (https://github.com/ansible-collections/community.general/pull/1169). 
diff --git a/changelogs/fragments/1175-zypper-absent-lang.yml b/changelogs/fragments/1175-zypper-absent-lang.yml deleted file mode 100644 index 05d0a3d947..0000000000 --- a/changelogs/fragments/1175-zypper-absent-lang.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- zypper - force ``LANG=C`` to as zypper is looking in XML output where attribute could be translated (https://github.com/ansible-collections/community.general/issues/1175). diff --git a/changelogs/fragments/1179-composer_require_v2_idempotence_fix.yml b/changelogs/fragments/1179-composer_require_v2_idempotence_fix.yml deleted file mode 100644 index 03874b6775..0000000000 --- a/changelogs/fragments/1179-composer_require_v2_idempotence_fix.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - composer - fix bug in command idempotence with composer v2 - (https://github.com/ansible-collections/community.general/issues/1179). diff --git a/changelogs/fragments/1185-proxmox-ignore-qemu-templates.yml b/changelogs/fragments/1185-proxmox-ignore-qemu-templates.yml deleted file mode 100644 index 34fbdbb0b1..0000000000 --- a/changelogs/fragments/1185-proxmox-ignore-qemu-templates.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - proxmox inventory plugin - ignore QEMU templates altogether instead of skipping the creation of the host in the inventory (https://github.com/ansible-collections/community.general/pull/1185). diff --git a/changelogs/fragments/1196-use_description-in-gitlab-group-creation.yml b/changelogs/fragments/1196-use_description-in-gitlab-group-creation.yml deleted file mode 100644 index d8242b0ddf..0000000000 --- a/changelogs/fragments/1196-use_description-in-gitlab-group-creation.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - gitlab_group - added description parameter to ``createGroup()`` call (https://github.com/ansible-collections/community.general/issues/138). 
diff --git a/changelogs/fragments/1197_gitlab_project_variable.yml b/changelogs/fragments/1197_gitlab_project_variable.yml deleted file mode 100644 index e95d6b95da..0000000000 --- a/changelogs/fragments/1197_gitlab_project_variable.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- gitlab_project_variable - add support for ``environment_scope`` on projects variables (https://github.com/ansible-collections/community.general/pull/1197). diff --git a/changelogs/fragments/1206-proxmox-api-token.yml b/changelogs/fragments/1206-proxmox-api-token.yml deleted file mode 100644 index bec5b17f1e..0000000000 --- a/changelogs/fragments/1206-proxmox-api-token.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -minor_changes: - - proxmox - add support for API tokens (https://github.com/ansible-collections/community.general/pull/1206). - - proxmox_kvm - add support for API tokens (https://github.com/ansible-collections/community.general/pull/1206). - - proxmox_template - add support for API tokens (https://github.com/ansible-collections/community.general/pull/1206). diff --git a/changelogs/fragments/1223-nios-remove-redundant-aliases.yml b/changelogs/fragments/1223-nios-remove-redundant-aliases.yml deleted file mode 100644 index 9982979de2..0000000000 --- a/changelogs/fragments/1223-nios-remove-redundant-aliases.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - nios_fixed_address, nios_host_record, nios_zone - removed redundant parameter aliases causing warning messages to incorrectly appear in task output (https://github.com/ansible-collections/community.general/issues/852). diff --git a/changelogs/fragments/1243-pkgng-present-ignoreosver.yaml b/changelogs/fragments/1243-pkgng-present-ignoreosver.yaml deleted file mode 100644 index 5f06075f8b..0000000000 --- a/changelogs/fragments/1243-pkgng-present-ignoreosver.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - pkgng - present the ``ignore_osver`` option to pkg (https://github.com/ansible-collections/community.general/pull/1243). 
diff --git a/changelogs/fragments/1244-renamed-parameter.yaml b/changelogs/fragments/1244-renamed-parameter.yaml deleted file mode 100644 index a07b97cbf0..0000000000 --- a/changelogs/fragments/1244-renamed-parameter.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - django_manage - renamed parameter ``app_path`` to ``project_path``, adding ``app_path`` and ``chdir`` as aliases (https://github.com/ansible-collections/community.general/issues/1044). diff --git a/changelogs/fragments/1246-flatpak-use-non-interactive-argument.yaml b/changelogs/fragments/1246-flatpak-use-non-interactive-argument.yaml deleted file mode 100644 index e62e552c7d..0000000000 --- a/changelogs/fragments/1246-flatpak-use-non-interactive-argument.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- flatpak - use of the ``--non-interactive`` argument instead of ``-y`` when possible (https://github.com/ansible-collections/community.general/pull/1246). diff --git a/changelogs/fragments/1256-feat-pkgin-add-full-version-package-name.yml b/changelogs/fragments/1256-feat-pkgin-add-full-version-package-name.yml deleted file mode 100644 index b0d7ffb4e9..0000000000 --- a/changelogs/fragments/1256-feat-pkgin-add-full-version-package-name.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - pkgin - add support for installation of full versioned package names (https://github.com/ansible-collections/community.general/pull/1256). diff --git a/changelogs/fragments/1258-proxmox_kvm-ignore-pool-on-update.yaml b/changelogs/fragments/1258-proxmox_kvm-ignore-pool-on-update.yaml deleted file mode 100644 index 8f9e9157fe..0000000000 --- a/changelogs/fragments/1258-proxmox_kvm-ignore-pool-on-update.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox_kvm - ignore unsupported ``pool`` parameter on update (https://github.com/ansible-collections/community.general/pull/1258). 
diff --git a/changelogs/fragments/1260-nmcli-ib-routes.yaml b/changelogs/fragments/1260-nmcli-ib-routes.yaml deleted file mode 100644 index 3fcf4fe4c7..0000000000 --- a/changelogs/fragments/1260-nmcli-ib-routes.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -minor_changes: - - nmcli - add infiniband type support (https://github.com/ansible-collections/community.general/pull/1260). - - nmcli - add ``ipv4.routes``, ``ipv4.route-metric`` and ``ipv4.never-default`` support (https://github.com/ansible-collections/community.general/pull/1260). diff --git a/changelogs/fragments/1264-dict_kv-new-filter.yaml b/changelogs/fragments/1264-dict_kv-new-filter.yaml deleted file mode 100644 index 0981113124..0000000000 --- a/changelogs/fragments/1264-dict_kv-new-filter.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- "Add new filter plugin ``dict_kv`` which returns a single key-value pair from two arguments. Useful for generating complex dictionaries without using loops. For example ``'value' | community.general.dict_kv('key'))`` evaluates to ``{'key': 'value'}`` (https://github.com/ansible-collections/community.general/pull/1264)." diff --git a/changelogs/fragments/1270-linode-v4-stackscript-support.yaml b/changelogs/fragments/1270-linode-v4-stackscript-support.yaml deleted file mode 100644 index 7b91e3f640..0000000000 --- a/changelogs/fragments/1270-linode-v4-stackscript-support.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - linode_v4 - added support for Linode StackScript usage when creating instances (https://github.com/ansible-collections/community.general/issues/723). 
diff --git a/changelogs/fragments/1305-added-xfconf-tests.yaml b/changelogs/fragments/1305-added-xfconf-tests.yaml deleted file mode 100644 index f90ab5f70b..0000000000 --- a/changelogs/fragments/1305-added-xfconf-tests.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - xfconf - removed unnecessary second execution of ``xfconf-query`` (https://github.com/ansible-collections/community.general/pull/1305). diff --git a/changelogs/fragments/1307-macports-fix-status-check.yml b/changelogs/fragments/1307-macports-fix-status-check.yml deleted file mode 100644 index 878e66ca39..0000000000 --- a/changelogs/fragments/1307-macports-fix-status-check.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - macports - fix failure to install a package whose name is contained within an already installed package's name or variant (https://github.com/ansible-collections/community.general/issues/1307). diff --git a/changelogs/fragments/1317-kubevirt-migration-removal.yml b/changelogs/fragments/1317-kubevirt-migration-removal.yml deleted file mode 100644 index 0d0a114e2a..0000000000 --- a/changelogs/fragments/1317-kubevirt-migration-removal.yml +++ /dev/null @@ -1,13 +0,0 @@ -removed_features: - - | - All Kubevirt modules and plugins have now been migrated from community.general to the `community.kubevirt `_ Ansible collection. - If you use ansible-base 2.10 or newer, redirections have been provided. - - If you use Ansible 2.9 and installed this collection, you need to adjust the FQCNs (``community.general.kubevirt_vm`` → ``community.kubevirt.kubevirt_vm``) and make sure to install the community.kubevirt collection. -breaking_changes: - - | - If you use Ansible 2.9 and the Kubevirt plugins or modules from this collection, community.general 2.0.0 results in errors when trying to use the Kubevirt content by FQCN, like ``community.general.kubevirt_vm``. 
- Since Ansible 2.9 is not able to use redirections, you will have to adjust your playbooks and roles manually to use the new FQCNs (``community.kubevirt.kubevirt_vm`` for the previous example) and to make sure that you have ``community.kubevirt`` installed. - - If you use ansible-base 2.10 or newer and did not install Ansible 3.0.0, but installed (and/or upgraded) community.general manually, you need to make sure to also install the ``community.kubevirt`` collection if you are using any of the Kubevirt plugins or modules. - While ansible-base 2.10 or newer can use the redirects that community.general 2.0.0 adds, the collection they point to (such as community.google) must be installed for them to work. diff --git a/changelogs/fragments/1319-google-migration-removal.yml b/changelogs/fragments/1319-google-migration-removal.yml deleted file mode 100644 index 922c2672d5..0000000000 --- a/changelogs/fragments/1319-google-migration-removal.yml +++ /dev/null @@ -1,16 +0,0 @@ -removed_features: - - | - All Google cloud modules and plugins have now been migrated away from this collection. - They can be found in either the `community.google `_ or `google.cloud `_ collections. - If you use ansible-base 2.10 or newer, redirections have been provided. - - If you use Ansible 2.9 and installed this collection, you need to adjust the FQCNs (``community.general.gce_img`` → ``community.google.gce_img``) and make sure to install the community.google or google.cloud collections as appropriate. - - The Google cloud inventory script ``gce.py`` has been migrated to the ``community.google`` collection. Install the ``community.google`` collection in order to continue using it. -breaking_changes: - - | - If you use Ansible 2.9 and the Google cloud plugins or modules from this collection, community.general 2.0.0 results in errors when trying to use the Google cloud content by FQCN, like ``community.general.gce_img``. 
- Since Ansible 2.9 is not able to use redirections, you will have to adjust your playbooks and roles manually to use the new FQCNs (``community.google.gce_img`` for the previous example) and to make sure that you have ``community.google`` installed. - - If you use ansible-base 2.10 or newer and did not install Ansible 3.0.0, but installed (and/or upgraded) community.general manually, you need to make sure to also install the ``community.google`` or ``google.cloud`` collections if you are using any of the Google cloud plugins or modules. - While ansible-base 2.10 or newer can use the redirects that community.general 2.0.0 adds, the collection they point to (such as community.google) must be installed for them to work. - - The Google cloud inventory script ``gce.py`` has been migrated to the ``community.google`` collection. Install the ``community.google`` collection in order to continue using it. diff --git a/changelogs/fragments/1322-module_helper_and_xfconf.yaml b/changelogs/fragments/1322-module_helper_and_xfconf.yaml deleted file mode 100644 index d62d76e33a..0000000000 --- a/changelogs/fragments/1322-module_helper_and_xfconf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - module_helper - added ModuleHelper class and a couple of convenience tools for module developers (https://github.com/ansible-collections/community.general/pull/1322). -bugfixes: - - xfconf - xfconf no longer passing the command args as a string, but rather as a list (https://github.com/ansible-collections/community.general/issues/1328). - - xfconf - parameter ``value`` no longer required for state ``absent`` (https://github.com/ansible-collections/community.general/issues/1329). 
diff --git a/changelogs/fragments/1331-proxmox-info-modules.yml b/changelogs/fragments/1331-proxmox-info-modules.yml deleted file mode 100644 index c7510d30cc..0000000000 --- a/changelogs/fragments/1331-proxmox-info-modules.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - proxmox - extract common code and documentation (https://github.com/ansible-collections/community.general/pull/1331). diff --git a/changelogs/fragments/1338-datadog-mark-notification_message-no_log.yml b/changelogs/fragments/1338-datadog-mark-notification_message-no_log.yml deleted file mode 100644 index 49c036c3ac..0000000000 --- a/changelogs/fragments/1338-datadog-mark-notification_message-no_log.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - datadog - mark ``notification_message`` as ``no_log`` (https://github.com/ansible-collections/community.general/pull/1338). diff --git a/changelogs/fragments/1339-ip-no_log-nonsecret.yml b/changelogs/fragments/1339-ip-no_log-nonsecret.yml deleted file mode 100644 index b3619ad672..0000000000 --- a/changelogs/fragments/1339-ip-no_log-nonsecret.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: -- ipa_user - silence warning about non-secret ``krbpasswordexpiration`` and ``update_password`` options not having ``no_log`` set (https://github.com/ansible-collections/community.general/pull/1339). -- ipa_host - silence warning about non-secret ``random_password`` option not having ``no_log`` set (https://github.com/ansible-collections/community.general/pull/1339). 
diff --git a/changelogs/fragments/1383-apache2-module-amend-shib-workaround.yaml b/changelogs/fragments/1383-apache2-module-amend-shib-workaround.yaml deleted file mode 100644 index ccd89b0121..0000000000 --- a/changelogs/fragments/1383-apache2-module-amend-shib-workaround.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- apache2_module - amend existing module identifier workaround to also apply to updated Shibboleth modules (https://github.com/ansible-collections/community.general/issues/1379). diff --git a/changelogs/fragments/1394-pamd-removing-comments.yaml b/changelogs/fragments/1394-pamd-removing-comments.yaml deleted file mode 100644 index b539e632ef..0000000000 --- a/changelogs/fragments/1394-pamd-removing-comments.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - pamd - added logic to retain the comment line (https://github.com/ansible-collections/community.general/issues/1394). diff --git a/changelogs/fragments/1399-fixed-wrong-elements-type.yaml b/changelogs/fragments/1399-fixed-wrong-elements-type.yaml deleted file mode 100644 index 29b09aff3b..0000000000 --- a/changelogs/fragments/1399-fixed-wrong-elements-type.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- utm_proxy_exception - four parameters had elements types set as 'string' (invalid), changed to 'str' (https://github.com/ansible-collections/community.general/pull/1399). diff --git a/changelogs/fragments/1404-proxmox-doc-fragments.yml b/changelogs/fragments/1404-proxmox-doc-fragments.yml deleted file mode 100644 index 3682d11421..0000000000 --- a/changelogs/fragments/1404-proxmox-doc-fragments.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - proxmox - improve and extract more common documentation (https://github.com/ansible-collections/community.general/pull/1404). - - proxmox_kvm - improve and extract more common documentation (https://github.com/ansible-collections/community.general/pull/1404). 
- - proxmox_template - improve documentation (https://github.com/ansible-collections/community.general/pull/1404). diff --git a/changelogs/fragments/1411_homebrew_tap.yml b/changelogs/fragments/1411_homebrew_tap.yml deleted file mode 100644 index acbd4d0c9d..0000000000 --- a/changelogs/fragments/1411_homebrew_tap.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- homebrew_tap - provide error message to user when module fails (https://github.com/ansible-collections/community.general/issues/1411). diff --git a/changelogs/fragments/1413-proxmox-features.yml b/changelogs/fragments/1413-proxmox-features.yml deleted file mode 100644 index f9762b5fc9..0000000000 --- a/changelogs/fragments/1413-proxmox-features.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox - add ``features`` option to LXC (https://github.com/ansible-collections/community.general/issues/816). diff --git a/changelogs/fragments/1415-valmod_req_mismatch.yml b/changelogs/fragments/1415-valmod_req_mismatch.yml deleted file mode 100644 index 66e3026710..0000000000 --- a/changelogs/fragments/1415-valmod_req_mismatch.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - vmadm - simplification of code (https://github.com/ansible-collections/community.general/pull/1415). diff --git a/changelogs/fragments/1419-xfconf-return-values.yaml b/changelogs/fragments/1419-xfconf-return-values.yaml deleted file mode 100644 index 6d3b4eb67b..0000000000 --- a/changelogs/fragments/1419-xfconf-return-values.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- xfconf - add in missing return values that are specified in the documentation (https://github.com/ansible-collections/community.general/issues/1418). 
diff --git a/changelogs/fragments/1423-valmod_multiple_cases.yml b/changelogs/fragments/1423-valmod_multiple_cases.yml deleted file mode 100644 index 97276a7cb1..0000000000 --- a/changelogs/fragments/1423-valmod_multiple_cases.yml +++ /dev/null @@ -1,6 +0,0 @@ -bugfixes: - - utm_network_interface_address - changed param type from invalid 'boolean' to valid 'bool' (https://github.com/ansible-collections/community.general/pull/1423). - - bigpanda - removed the dynamic default for ``host`` param (https://github.com/ansible-collections/community.general/pull/1423). - - udm_user - removed the dynamic default for ``userexpiry`` param (https://github.com/ansible-collections/community.general/pull/1423). - - profitbricks_nic - removed the dynamic default for ``name`` param (https://github.com/ansible-collections/community.general/pull/1423). - - profitbricks_nic - replaced code with ``required`` and ``required_if`` (https://github.com/ansible-collections/community.general/pull/1423). diff --git a/changelogs/fragments/1425_bitbucket_pipeline_variable.yml b/changelogs/fragments/1425_bitbucket_pipeline_variable.yml deleted file mode 100644 index b284ca54d3..0000000000 --- a/changelogs/fragments/1425_bitbucket_pipeline_variable.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- bitbucket_pipeline_variable - change pagination logic for pipeline variable get API (https://github.com/ansible-collections/community.general/issues/1425). diff --git a/changelogs/fragments/1426-nmcli-add-zone-parameter.yml b/changelogs/fragments/1426-nmcli-add-zone-parameter.yml deleted file mode 100644 index 1d8a619f14..0000000000 --- a/changelogs/fragments/1426-nmcli-add-zone-parameter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nmcli - add ``zone`` parameter (https://github.com/ansible-collections/community.general/issues/949, https://github.com/ansible-collections/community.general/pull/1426). 
diff --git a/changelogs/fragments/1428-npm-no-optional.yml b/changelogs/fragments/1428-npm-no-optional.yml deleted file mode 100644 index 8c91fd7dfb..0000000000 --- a/changelogs/fragments/1428-npm-no-optional.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - npm - add ``no-optional`` option (https://github.com/ansible-collections/community.general/issues/1421). diff --git a/changelogs/fragments/1436-mas-fix-no-app-installed.yml b/changelogs/fragments/1436-mas-fix-no-app-installed.yml deleted file mode 100644 index 4db32a1a91..0000000000 --- a/changelogs/fragments/1436-mas-fix-no-app-installed.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - "mas - fix ``invalid literal`` when no app can be found (https://github.com/ansible-collections/community.general/pull/1436)." diff --git a/changelogs/fragments/1453-add-support-for-keyed_groups-to-linode-inventory-plugin.yml b/changelogs/fragments/1453-add-support-for-keyed_groups-to-linode-inventory-plugin.yml deleted file mode 100644 index d951992345..0000000000 --- a/changelogs/fragments/1453-add-support-for-keyed_groups-to-linode-inventory-plugin.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - linode inventory plugin - add support for ``keyed_groups``, ``groups``, - and ``compose`` options - (https://github.com/ansible-collections/community.general/issues/1326). diff --git a/changelogs/fragments/1455-rhn-register-force.yml b/changelogs/fragments/1455-rhn-register-force.yml deleted file mode 100644 index 9fe46d45b0..0000000000 --- a/changelogs/fragments/1455-rhn-register-force.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - rhn_register - added ``force`` parameter to allow forced registering (https://github.com/ansible-collections/community.general/issues/1454). 
diff --git a/changelogs/fragments/1462-splunk-millisecond.yaml b/changelogs/fragments/1462-splunk-millisecond.yaml deleted file mode 100644 index aa73f5efef..0000000000 --- a/changelogs/fragments/1462-splunk-millisecond.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - splunk callback - new parameter ``include_milliseconds`` to add milliseconds to existing timestamp field (https://github.com/ansible-collections/community.general/pull/1462). diff --git a/changelogs/fragments/1472-saltstack-fix-put_file-to-preserve-checksum.yml b/changelogs/fragments/1472-saltstack-fix-put_file-to-preserve-checksum.yml deleted file mode 100644 index 7db884a7f2..0000000000 --- a/changelogs/fragments/1472-saltstack-fix-put_file-to-preserve-checksum.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - saltstack connection plugin - use ``hashutil.base64_decodefile`` to ensure that the file checksum is preserved (https://github.com/ansible-collections/community.general/pull/1472). diff --git a/changelogs/fragments/1480-module-helper-improvements.yml b/changelogs/fragments/1480-module-helper-improvements.yml deleted file mode 100644 index 655cc34069..0000000000 --- a/changelogs/fragments/1480-module-helper-improvements.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - module_helper module utils - multiple convenience features added (https://github.com/ansible-collections/community.general/pull/1480). diff --git a/changelogs/fragments/1481-deprecated-brew-cask-command.yaml b/changelogs/fragments/1481-deprecated-brew-cask-command.yaml deleted file mode 100644 index 4a04acd10b..0000000000 --- a/changelogs/fragments/1481-deprecated-brew-cask-command.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - homebrew_cask - Homebrew will be deprecating use of ``brew cask`` commands as of version 2.6.0, see https://brew.sh/2020/12/01/homebrew-2.6.0/. Added logic to stop using ``brew cask`` for brew version >= 2.6.0 (https://github.com/ansible-collections/community.general/pull/1481). 
diff --git a/changelogs/fragments/1484-fix-property-name-in-redfish-memory-inventory.yml b/changelogs/fragments/1484-fix-property-name-in-redfish-memory-inventory.yml deleted file mode 100644 index 445cae58c0..0000000000 --- a/changelogs/fragments/1484-fix-property-name-in-redfish-memory-inventory.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_info module, redfish_utils module utils - correct ``PartNumber`` property name in Redfish ``GetMemoryInventory`` command (https://github.com/ansible-collections/community.general/issues/1483). diff --git a/changelogs/fragments/1491-gitlab-runner-owned-parameter.yml b/changelogs/fragments/1491-gitlab-runner-owned-parameter.yml deleted file mode 100644 index 2d9eb403f6..0000000000 --- a/changelogs/fragments/1491-gitlab-runner-owned-parameter.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -minor_changes: - - gitlab_runner - add ``owned`` option to allow non-admin use (https://github.com/ansible-collections/community.general/pull/1491). -bugfixes: - - gitlab_runner - fix compatiblity with some versions of python-gitlab (https://github.com/ansible-collections/community.general/pull/1491). diff --git a/changelogs/fragments/1493-fix_passwordstore.py_to_be_compatible_with_gopass_versions.yml b/changelogs/fragments/1493-fix_passwordstore.py_to_be_compatible_with_gopass_versions.yml deleted file mode 100644 index b46fc07a81..0000000000 --- a/changelogs/fragments/1493-fix_passwordstore.py_to_be_compatible_with_gopass_versions.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - passwordstore lookup plugin - always use explicit ``show`` command to retrieve password. This ensures compatibility with ``gopass`` and avoids problems when password names equal ``pass`` commands (https://github.com/ansible-collections/community.general/pull/1493). 
diff --git a/changelogs/fragments/1504_jira.yml b/changelogs/fragments/1504_jira.yml deleted file mode 100644 index 2db8aad3ea..0000000000 --- a/changelogs/fragments/1504_jira.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- jira - provide error message raised from exception (https://github.com/ansible-collections/community.general/issues/1504). diff --git a/changelogs/fragments/1506_gitlab_project.yml b/changelogs/fragments/1506_gitlab_project.yml deleted file mode 100644 index 2949a9b3f3..0000000000 --- a/changelogs/fragments/1506_gitlab_project.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- gitlab_project - add parameter ``lfs_enabled`` to specify Git LFS (https://github.com/ansible-collections/community.general/issues/1506). diff --git a/changelogs/fragments/1516-ldap_entry-improvements.yaml b/changelogs/fragments/1516-ldap_entry-improvements.yaml deleted file mode 100644 index 3ea7e6c3ef..0000000000 --- a/changelogs/fragments/1516-ldap_entry-improvements.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ldap_entry - improvements in documentation, simplifications and replaced code with better ``AnsibleModule`` arguments (https://github.com/ansible-collections/community.general/pull/1516). diff --git a/changelogs/fragments/1517-bridge-slave-from-list-of-ip-based-connections.yml b/changelogs/fragments/1517-bridge-slave-from-list-of-ip-based-connections.yml deleted file mode 100644 index 7239e5f9b5..0000000000 --- a/changelogs/fragments/1517-bridge-slave-from-list-of-ip-based-connections.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmcli - remove ``bridge-slave`` from list of IP based connections ((https://github.com/ansible-collections/community.general/issues/1500). 
diff --git a/changelogs/fragments/1522-yaml-callback-unicode.yml b/changelogs/fragments/1522-yaml-callback-unicode.yml deleted file mode 100644 index ed735abfea..0000000000 --- a/changelogs/fragments/1522-yaml-callback-unicode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "yaml callback plugin - do not remove non-ASCII Unicode characters from multiline string output (https://github.com/ansible-collections/community.general/issues/1519)." diff --git a/changelogs/fragments/1527-fix-nios-api-member-normalize.yaml b/changelogs/fragments/1527-fix-nios-api-member-normalize.yaml deleted file mode 100644 index 4e4720487c..0000000000 --- a/changelogs/fragments/1527-fix-nios-api-member-normalize.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - nios_member - fix Python 3 compatibility with nios api ``member_normalize`` function (https://github.com/ansible-collections/community.general/issues/1526). diff --git a/changelogs/fragments/1532-monit-support-all-services.yaml b/changelogs/fragments/1532-monit-support-all-services.yaml deleted file mode 100644 index e2b0121c80..0000000000 --- a/changelogs/fragments/1532-monit-support-all-services.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - monit - add support for all monit service checks (https://github.com/ansible-collections/community.general/pull/1532). diff --git a/changelogs/fragments/1549-add-tag-filter-to-linode-inventory.yml b/changelogs/fragments/1549-add-tag-filter-to-linode-inventory.yml deleted file mode 100644 index 4e11bf2463..0000000000 --- a/changelogs/fragments/1549-add-tag-filter-to-linode-inventory.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - linode inventory plugin - add support for ``tags`` option to filter - instances by tag - (https://github.com/ansible-collections/community.general/issues/1549). 
diff --git a/changelogs/fragments/1550-add-jobs-parameter-to-make.yml b/changelogs/fragments/1550-add-jobs-parameter-to-make.yml deleted file mode 100644 index 26526596ca..0000000000 --- a/changelogs/fragments/1550-add-jobs-parameter-to-make.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- make - add ``jobs`` parameter to allow specification of number of simultaneous jobs for make to run (https://github.com/ansible-collections/community.general/pull/1550). diff --git a/changelogs/fragments/1552_launchd.yml b/changelogs/fragments/1552_launchd.yml deleted file mode 100644 index 2e8df5471f..0000000000 --- a/changelogs/fragments/1552_launchd.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- launchd - handle deprecated APIs like ``readPlist`` and ``writePlist`` in ``plistlib`` (https://github.com/ansible-collections/community.general/issues/1552). diff --git a/changelogs/fragments/1553_sendgrid.yml b/changelogs/fragments/1553_sendgrid.yml deleted file mode 100644 index 9be912d2c5..0000000000 --- a/changelogs/fragments/1553_sendgrid.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- sendgrid - update documentation and warn user about sendgrid Python library version (https://github.com/ansible-collections/community.general/issues/1553). diff --git a/changelogs/fragments/1555-ipa-sudorule-add-commandgroup.yml b/changelogs/fragments/1555-ipa-sudorule-add-commandgroup.yml deleted file mode 100644 index e1b48b4fbf..0000000000 --- a/changelogs/fragments/1555-ipa-sudorule-add-commandgroup.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_sudorule - added option to use command groups inside sudo rules (https://github.com/ansible-collections/community.general/issues/1555). 
diff --git a/changelogs/fragments/1574-make-question.yaml b/changelogs/fragments/1574-make-question.yaml deleted file mode 100644 index b2590f3e5b..0000000000 --- a/changelogs/fragments/1574-make-question.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - make - fixed ``make`` parameter used for check mode when running a non-GNU ``make`` (https://github.com/ansible-collections/community.general/pull/1574). diff --git a/changelogs/fragments/1589-passwordstore-fix-passwordstore.py-to-be-compatible-with-gopass.yaml b/changelogs/fragments/1589-passwordstore-fix-passwordstore.py-to-be-compatible-with-gopass.yaml deleted file mode 100644 index 8a781f4a90..0000000000 --- a/changelogs/fragments/1589-passwordstore-fix-passwordstore.py-to-be-compatible-with-gopass.yaml +++ /dev/null @@ -1,5 +0,0 @@ -bugfixes: - - passwordstore lookup plugin - fix compatibility with gopass when used with - ``create=true``. While pass returns 1 on a non-existent password, gopass - returns 10, or 11, depending on whether a similar named password was stored. - We now just check standard output and that the return code is not zero (https://github.com/ansible-collections/community.general/pull/1589). diff --git a/changelogs/fragments/1590-influxdb-shard-group-duration-parameter.yml b/changelogs/fragments/1590-influxdb-shard-group-duration-parameter.yml deleted file mode 100644 index 6dcb785707..0000000000 --- a/changelogs/fragments/1590-influxdb-shard-group-duration-parameter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - influxdb_retention_policy - add shard group duration parameter ``shard_group_duration`` (https://github.com/ansible-collections/community.general/pull/1590). 
diff --git a/changelogs/fragments/1595-ldap-gssapi-sasl-authentication.yml b/changelogs/fragments/1595-ldap-gssapi-sasl-authentication.yml deleted file mode 100644 index 117d084e40..0000000000 --- a/changelogs/fragments/1595-ldap-gssapi-sasl-authentication.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "ldap modules - add ``sasl_class`` parameter to support passwordless SASL authentication via GSSAPI (kerberos), next to external (https://github.com/ansible-collections/community.general/issues/1523)." diff --git a/changelogs/fragments/1610-bugfix-onepassword-lookup-plugin.yaml b/changelogs/fragments/1610-bugfix-onepassword-lookup-plugin.yaml deleted file mode 100644 index d3220a2c71..0000000000 --- a/changelogs/fragments/1610-bugfix-onepassword-lookup-plugin.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "onepassword lookup plugin - updated to support password items, which place the password field directly in the payload's ``details`` attribute (https://github.com/ansible-collections/community.general/pull/1610)." diff --git a/changelogs/fragments/1614_npm.yml b/changelogs/fragments/1614_npm.yml deleted file mode 100644 index 0d39b1b6fb..0000000000 --- a/changelogs/fragments/1614_npm.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- npm - handle json decode exception while parsing command line output (https://github.com/ansible-collections/community.general/issues/1614). diff --git a/changelogs/fragments/1618-ldap_search-switch-off-cheasing-referrals.yaml b/changelogs/fragments/1618-ldap_search-switch-off-cheasing-referrals.yaml deleted file mode 100644 index eecf792963..0000000000 --- a/changelogs/fragments/1618-ldap_search-switch-off-cheasing-referrals.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - ldap_search - ignore returned referrals (https://github.com/ansible-collections/community.general/issues/1067). -minor_changes: - - ldap modules - allow to configure referral chasing (https://github.com/ansible-collections/community.general/pull/1618). 
diff --git a/changelogs/fragments/1620-terraform_init_reconfigure_fix.yml b/changelogs/fragments/1620-terraform_init_reconfigure_fix.yml deleted file mode 100644 index 9b8bab8f49..0000000000 --- a/changelogs/fragments/1620-terraform_init_reconfigure_fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- terraform - fix ``init_reconfigure`` option for proper CLI args (https://github.com/ansible-collections/community.general/pull/1620). diff --git a/changelogs/fragments/1632-using_check_rc_in_terraform.yml b/changelogs/fragments/1632-using_check_rc_in_terraform.yml deleted file mode 100644 index 481becb763..0000000000 --- a/changelogs/fragments/1632-using_check_rc_in_terraform.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- terraform - improve result code checking when executing terraform commands (https://github.com/ansible-collections/community.general/pull/1632). diff --git a/changelogs/fragments/1645-proxmox-env-passthrough.yml b/changelogs/fragments/1645-proxmox-env-passthrough.yml deleted file mode 100644 index ce5083707b..0000000000 --- a/changelogs/fragments/1645-proxmox-env-passthrough.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - proxmox inventory plugin - add environment variable passthrough (https://github.com/ansible-collections/community.general/pull/1645). diff --git a/changelogs/fragments/1654-dnsmadeeasy-http-400-fixes.yaml b/changelogs/fragments/1654-dnsmadeeasy-http-400-fixes.yaml deleted file mode 100644 index 1934228644..0000000000 --- a/changelogs/fragments/1654-dnsmadeeasy-http-400-fixes.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - dnsmadeeasy - fix HTTP 400 errors when creating a TXT record (https://github.com/ansible-collections/community.general/issues/1237). 
diff --git a/changelogs/fragments/1679-homebrew_search_path.yml b/changelogs/fragments/1679-homebrew_search_path.yml deleted file mode 100644 index fa2d995891..0000000000 --- a/changelogs/fragments/1679-homebrew_search_path.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -bugfixes: - - homebrew - add default search path for ``brew`` on Apple silicon hardware - (https://github.com/ansible-collections/community.general/pull/1679). - - homebrew_cask - add default search path for ``brew`` on Apple silicon hardware - (https://github.com/ansible-collections/community.general/pull/1679). - - homebrew_tap - add default search path for ``brew`` on Apple silicon hardware - (https://github.com/ansible-collections/community.general/pull/1679). diff --git a/changelogs/fragments/1681-add_passwordstore_yaml_support.yaml b/changelogs/fragments/1681-add_passwordstore_yaml_support.yaml deleted file mode 100644 index ebfac65a14..0000000000 --- a/changelogs/fragments/1681-add_passwordstore_yaml_support.yaml +++ /dev/null @@ -1,2 +0,0 @@ -breaking_changes: - - "passwordstore lookup plugin - now parsing a password store entry as YAML if possible, skipping the first line (which by convention only contains the password and nothing else). If it cannot be parsed as YAML, the old ``key: value`` parser will be used to process the entry. Can break backwards compatibility if YAML formatted code was parsed in a non-YAML interpreted way, e.g. ``foo: [bar, baz]`` will become a list with two elements in the new version, but a string ``'[bar, baz]'`` in the old (https://github.com/ansible-collections/community.general/issues/1673)." 
diff --git a/changelogs/fragments/1690-scaleway-regions.yaml b/changelogs/fragments/1690-scaleway-regions.yaml deleted file mode 100644 index 6788246f7d..0000000000 --- a/changelogs/fragments/1690-scaleway-regions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - scaleway modules and inventory plugin - update regions and zones to add the new ones (https://github.com/ansible-collections/community.general/pull/1690). diff --git a/changelogs/fragments/216-fix-lxc-container-container_config-parameter.yaml b/changelogs/fragments/216-fix-lxc-container-container_config-parameter.yaml deleted file mode 100644 index ce3c175884..0000000000 --- a/changelogs/fragments/216-fix-lxc-container-container_config-parameter.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - lxc_container - fix the type of the ``container_config`` parameter. It is now processed as a list and not a string (https://github.com/ansible-collections/community.general/pull/216). diff --git a/changelogs/fragments/229_lvol_percentage_fix.yml b/changelogs/fragments/229_lvol_percentage_fix.yml deleted file mode 100644 index aaf98454ed..0000000000 --- a/changelogs/fragments/229_lvol_percentage_fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - lvol - fix idempotency issue when using lvol with ``%VG`` or ``%PVS`` size options and VG is fully allocated (https://github.com/ansible-collections/community.general/pull/229). diff --git a/changelogs/fragments/296-ansible-2.9.yml b/changelogs/fragments/296-ansible-2.9.yml deleted file mode 100644 index 7ad2a4e51e..0000000000 --- a/changelogs/fragments/296-ansible-2.9.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- "The collection is now actively tested in CI with the latest Ansible 2.9 release." 
diff --git a/changelogs/fragments/303-consul_kv-fix-env-variables-handling.yaml b/changelogs/fragments/303-consul_kv-fix-env-variables-handling.yaml deleted file mode 100644 index 1053842808..0000000000 --- a/changelogs/fragments/303-consul_kv-fix-env-variables-handling.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - consul_kv lookup - fix ``ANSIBLE_CONSUL_URL`` environment variable handling (https://github.com/ansible/ansible/issues/51960). - - consul_kv lookup - fix arguments handling (https://github.com/ansible-collections/community.general/pull/303). diff --git a/changelogs/fragments/311-jira-error-handling.yaml b/changelogs/fragments/311-jira-error-handling.yaml deleted file mode 100644 index 11d73455fe..0000000000 --- a/changelogs/fragments/311-jira-error-handling.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- jira - improve error message handling (https://github.com/ansible-collections/community.general/pull/311). diff --git a/changelogs/fragments/320_unsafe_text.yml b/changelogs/fragments/320_unsafe_text.yml deleted file mode 100644 index aa0621d085..0000000000 --- a/changelogs/fragments/320_unsafe_text.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- json_query - handle ``AnsibleUnicode`` and ``AnsibleUnsafeText`` (https://github.com/ansible-collections/community.general/issues/320). diff --git a/changelogs/fragments/331_keycloak.yml b/changelogs/fragments/331_keycloak.yml deleted file mode 100644 index 99c6474212..0000000000 --- a/changelogs/fragments/331_keycloak.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- keycloak module_utils - provide meaningful error message to user when auth URL does not start with http or https (https://github.com/ansible-collections/community.general/issues/331). 
diff --git a/changelogs/fragments/335-icinga2_host-return-error-code.yaml b/changelogs/fragments/335-icinga2_host-return-error-code.yaml deleted file mode 100644 index 26e2d2f5c9..0000000000 --- a/changelogs/fragments/335-icinga2_host-return-error-code.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - icinga2_host - fix returning error codes (https://github.com/ansible-collections/community.general/pull/335). diff --git a/changelogs/fragments/33979-xfs_growfs.yml b/changelogs/fragments/33979-xfs_growfs.yml deleted file mode 100644 index 2976695a24..0000000000 --- a/changelogs/fragments/33979-xfs_growfs.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "filesystem - resizefs of xfs filesystems is fixed. Filesystem needs to be mounted." diff --git a/changelogs/fragments/349-pacman_improve_group_expansion_speed.yml b/changelogs/fragments/349-pacman_improve_group_expansion_speed.yml deleted file mode 100644 index 5f5412a5d4..0000000000 --- a/changelogs/fragments/349-pacman_improve_group_expansion_speed.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- "pacman - improve group expansion speed: query list of pacman groups once (https://github.com/ansible-collections/community.general/pull/349)." diff --git a/changelogs/fragments/360_syspatch_apply_patches_by_default.yml b/changelogs/fragments/360_syspatch_apply_patches_by_default.yml deleted file mode 100644 index 2bc4262c94..0000000000 --- a/changelogs/fragments/360_syspatch_apply_patches_by_default.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - syspatch - fix bug where not setting ``apply=true`` would result in error (https://github.com/ansible-collections/community.general/pull/360). -deprecated_features: - - syspatch - deprecate the redundant ``apply`` argument (https://github.com/ansible-collections/community.general/pull/360). 
diff --git a/changelogs/fragments/409-datadog-monitor-include-tags.yaml b/changelogs/fragments/409-datadog-monitor-include-tags.yaml deleted file mode 100644 index 1c1ece6454..0000000000 --- a/changelogs/fragments/409-datadog-monitor-include-tags.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - datadog_monitor - add ``include_tags`` option (https://github.com/ansible/ansible/issues/57441). diff --git a/changelogs/fragments/436-infoblox-use-stderr-and-environment-for-config.yaml b/changelogs/fragments/436-infoblox-use-stderr-and-environment-for-config.yaml deleted file mode 100644 index 7a7a0445cb..0000000000 --- a/changelogs/fragments/436-infoblox-use-stderr-and-environment-for-config.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - infoblox inventory script - use stderr for reporting errors, and allow use of environment for configuration (https://github.com/ansible-collections/community.general/pull/436). diff --git a/changelogs/fragments/442-log_plays-add_playbook_task_name_and_action.yml b/changelogs/fragments/442-log_plays-add_playbook_task_name_and_action.yml deleted file mode 100644 index 924c48796e..0000000000 --- a/changelogs/fragments/442-log_plays-add_playbook_task_name_and_action.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: -- log_plays callback - use v2 methods (https://github.com/ansible-collections/community.general/pull/442). -breaking_changes: -- log_plays callback - add missing information to the logs generated by the callback plugin. This changes the log message format (https://github.com/ansible-collections/community.general/pull/442). 
diff --git a/changelogs/fragments/470-spacewalk-legacy-python-certificate-validation.yaml b/changelogs/fragments/470-spacewalk-legacy-python-certificate-validation.yaml deleted file mode 100644 index 35bfa33fe6..0000000000 --- a/changelogs/fragments/470-spacewalk-legacy-python-certificate-validation.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - rhn_channel - Python 2.7.5 fails if the certificate should not be validated. Fixed this by creating the correct ``ssl_context`` (https://github.com/ansible-collections/community.general/pull/470). diff --git a/changelogs/fragments/474-yarn_fix-outdated-fix-list.yml b/changelogs/fragments/474-yarn_fix-outdated-fix-list.yml deleted file mode 100644 index baa19b7070..0000000000 --- a/changelogs/fragments/474-yarn_fix-outdated-fix-list.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - yarn - fixed an index out of range error when no outdated packages where returned by yarn executable (see https://github.com/ansible-collections/community.general/pull/474). - - yarn - fixed an too many values to unpack error when scoped packages are installed (see https://github.com/ansible-collections/community.general/pull/474). diff --git a/changelogs/fragments/47680_pam_limits.yml b/changelogs/fragments/47680_pam_limits.yml deleted file mode 100644 index 7bfcb27559..0000000000 --- a/changelogs/fragments/47680_pam_limits.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- pam_limits - add support for nice and priority limits (https://github.com/ansible/ansible/pull/47680). diff --git a/changelogs/fragments/479-ini_file-empty-section.yaml b/changelogs/fragments/479-ini_file-empty-section.yaml deleted file mode 100644 index 70b2eb4dc4..0000000000 --- a/changelogs/fragments/479-ini_file-empty-section.yaml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - ini_file - module now can create an empty section (https://github.com/ansible-collections/community.general/issues/479). 
-bugfixes: - - ini_file - check for parameter ``value`` if ``state`` is ``present`` and ``allow_no_value`` is ``false`` (https://github.com/ansible-collections/community.general/issues/479). diff --git a/changelogs/fragments/522-parted_change_label.yml b/changelogs/fragments/522-parted_change_label.yml deleted file mode 100644 index 4d30dec1d8..0000000000 --- a/changelogs/fragments/522-parted_change_label.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "parted - fix creating partition when label is changed (https://github.com/ansible-collections/community.general/issues/522)." diff --git a/changelogs/fragments/548_apk.yml b/changelogs/fragments/548_apk.yml deleted file mode 100644 index 825dc830c4..0000000000 --- a/changelogs/fragments/548_apk.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- apk - added ``no_cache`` option (https://github.com/ansible-collections/community.general/pull/548). diff --git a/changelogs/fragments/560-pkgng-add-stdout-and-stderr.yaml b/changelogs/fragments/560-pkgng-add-stdout-and-stderr.yaml deleted file mode 100644 index f8dc4683ce..0000000000 --- a/changelogs/fragments/560-pkgng-add-stdout-and-stderr.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - pkgng - added ``stdout`` and ``stderr`` attributes to the result (https://github.com/ansible-collections/community.general/pull/560). diff --git a/changelogs/fragments/562-nmcli-fix-idempotency.yaml b/changelogs/fragments/562-nmcli-fix-idempotency.yaml deleted file mode 100644 index ad112a18dd..0000000000 --- a/changelogs/fragments/562-nmcli-fix-idempotency.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmcli - fix idempotetency when modifying an existing connection (https://github.com/ansible-collections/community.general/issues/481). 
\ No newline at end of file diff --git a/changelogs/fragments/563-update-terraform-status-test.yaml b/changelogs/fragments/563-update-terraform-status-test.yaml deleted file mode 100644 index 40b0477bff..0000000000 --- a/changelogs/fragments/563-update-terraform-status-test.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - terraform - fix incorrectly reporting a status of unchanged when - number of resources added or destroyed are multiples of 10 - (https://github.com/ansible-collections/community.general/issues/561). diff --git a/changelogs/fragments/568_packaging.yml b/changelogs/fragments/568_packaging.yml deleted file mode 100644 index ab0fa9778e..0000000000 --- a/changelogs/fragments/568_packaging.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: -- xbps - the ``force`` option never had any effect. It is now deprecated, and will be removed in 3.0.0 (https://github.com/ansible-collections/community.general/pull/568). diff --git a/changelogs/fragments/569-pkgng-add-upgrade-action.yaml b/changelogs/fragments/569-pkgng-add-upgrade-action.yaml deleted file mode 100644 index 0078c18865..0000000000 --- a/changelogs/fragments/569-pkgng-add-upgrade-action.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -minor_changes: - - "pkgng - added support for upgrading all packages using ``name: *, state: latest``, similar to other package providers (https://github.com/ansible-collections/community.general/pull/569)." -breaking_changes: - - "pkgng - passing ``name: *`` with ``state: latest`` or ``state: present`` will no longer install every package from the configured package repositories. Instead, ``name: *, state: latest`` will upgrade all already-installed packages, and ``name: *, state: present`` is a noop. (https://github.com/ansible-collections/community.general/pull/569)." - - "pkgng - passing ``name: *`` with ``state: absent`` will no longer remove every installed package from the system. It is now a noop. 
(https://github.com/ansible-collections/community.general/pull/569)." diff --git a/changelogs/fragments/596-splunk-add-option-to-not-validate-cert.yaml b/changelogs/fragments/596-splunk-add-option-to-not-validate-cert.yaml deleted file mode 100644 index ce2a0398c0..0000000000 --- a/changelogs/fragments/596-splunk-add-option-to-not-validate-cert.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - splunk callback - add an option to allow not to validate certificate from HEC (https://github.com/ansible-collections/community.general/pull/596). diff --git a/changelogs/fragments/604-lists_mergeby-new-filter.yml b/changelogs/fragments/604-lists_mergeby-new-filter.yml deleted file mode 100644 index 9f2d19c99f..0000000000 --- a/changelogs/fragments/604-lists_mergeby-new-filter.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -minor_changes: - - | - A new filter ``lists_mergeby`` to merge two lists of dictionaries by an attribute. - For example: - - .. code-block:: yaml - - [{'n': 'n1', 'p1': 'A', 'p2': 'F'}, - {'n': 'n2', 'p2': 'B'}] | community.general.lists_mergeby( - [{'n': 'n1', 'p1': 'C'}, - {'n': 'n2', 'p2': 'D'}, - {'n': 'n3', 'p3': 'E'}], 'n') | list - - evaluates to - - .. code-block:: yaml - - [{'n': 'n1', 'p1': 'C', 'p2': 'F'}, - {'n': 'n2', 'p2': 'D'}, - {'n': 'n3', 'p3': 'E'}] - - (https://github.com/ansible-collections/community.general/pull/604). diff --git a/changelogs/fragments/610_logstash_callback_add_ini_config.yml b/changelogs/fragments/610_logstash_callback_add_ini_config.yml deleted file mode 100644 index ad2bbad27b..0000000000 --- a/changelogs/fragments/610_logstash_callback_add_ini_config.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- "logstash callback - add ini config (https://github.com/ansible-collections/community.general/pull/610)." 
diff --git a/changelogs/fragments/611-gitlab-runners-env-vars-intput-and-default-item-limit.yaml b/changelogs/fragments/611-gitlab-runners-env-vars-intput-and-default-item-limit.yaml deleted file mode 100644 index de40435454..0000000000 --- a/changelogs/fragments/611-gitlab-runners-env-vars-intput-and-default-item-limit.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - gitlab_runners inventory plugin - permit environment variable input for ``server_url``, ``api_token`` and ``filter`` options (https://github.com/ansible-collections/community.general/pull/611). diff --git a/changelogs/fragments/613-snmp_facts-EndOfMibView.yml b/changelogs/fragments/613-snmp_facts-EndOfMibView.yml deleted file mode 100644 index acf4c8bd40..0000000000 --- a/changelogs/fragments/613-snmp_facts-EndOfMibView.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - snmp_facts - skip ``EndOfMibView`` values (https://github.com/ansible/ansible/issues/49044). diff --git a/changelogs/fragments/615-digital-ocean-tag-info-bugfix.yml b/changelogs/fragments/615-digital-ocean-tag-info-bugfix.yml deleted file mode 100644 index 384666161b..0000000000 --- a/changelogs/fragments/615-digital-ocean-tag-info-bugfix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - digital_ocean_tag_info - fix crash when querying for an individual tag (https://github.com/ansible-collections/community.general/pull/615). diff --git a/changelogs/fragments/630-git_config-handling-invalid-dir.yaml b/changelogs/fragments/630-git_config-handling-invalid-dir.yaml deleted file mode 100644 index 538a463160..0000000000 --- a/changelogs/fragments/630-git_config-handling-invalid-dir.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- git_config - now raises an error for non-existent repository paths (https://github.com/ansible-collections/community.general/issues/630). 
diff --git a/changelogs/fragments/63767_selective.yml b/changelogs/fragments/63767_selective.yml deleted file mode 100644 index ce5d7fba7e..0000000000 --- a/changelogs/fragments/63767_selective.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- selective - mark task failed correctly (https://github.com/ansible/ansible/issues/63767). diff --git a/changelogs/fragments/638_cobbler_py3.yml b/changelogs/fragments/638_cobbler_py3.yml deleted file mode 100644 index 9c18c87ba7..0000000000 --- a/changelogs/fragments/638_cobbler_py3.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- cobbler inventory script - add Python 3 support (https://github.com/ansible-collections/community.general/issues/638). diff --git a/changelogs/fragments/641-update-ansible-logstash-callback.yml b/changelogs/fragments/641-update-ansible-logstash-callback.yml deleted file mode 100644 index d72a8e9bd1..0000000000 --- a/changelogs/fragments/641-update-ansible-logstash-callback.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: -- logstash callback - migrate to python3-logstash (https://github.com/ansible-collections/community.general/pull/641). -- logstash callback - improve logstash message structure, needs to be enabled with the ``format_version`` option (https://github.com/ansible-collections/community.general/pull/641). diff --git a/changelogs/fragments/650_pacman_support_zst_package_files.yaml b/changelogs/fragments/650_pacman_support_zst_package_files.yaml deleted file mode 100644 index b1e4041271..0000000000 --- a/changelogs/fragments/650_pacman_support_zst_package_files.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - pacman - treat package names containing .zst as package files during installation - (https://www.archlinux.org/news/now-using-zstandard-instead-of-xz-for-package-compression/, - https://github.com/ansible-collections/community.general/pull/650). 
diff --git a/changelogs/fragments/66813_gitlab_project.yml b/changelogs/fragments/66813_gitlab_project.yml deleted file mode 100644 index 97c51708c2..0000000000 --- a/changelogs/fragments/66813_gitlab_project.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- gitlab_project - add support for merge_method on projects (https://github.com/ansible/ansible/pull/66813). diff --git a/changelogs/fragments/676-osx_defaults_fix_handling_negative_ints.yml b/changelogs/fragments/676-osx_defaults_fix_handling_negative_ints.yml deleted file mode 100644 index d8fa6b0057..0000000000 --- a/changelogs/fragments/676-osx_defaults_fix_handling_negative_ints.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- osx_defaults - fix handling negative integers (https://github.com/ansible-collections/community.general/issues/134). diff --git a/changelogs/fragments/677-jenkins_plugins_sha1.yaml b/changelogs/fragments/677-jenkins_plugins_sha1.yaml deleted file mode 100644 index 2a39a1cd6d..0000000000 --- a/changelogs/fragments/677-jenkins_plugins_sha1.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: - - jenkins_plugin - replace MD5 checksum verification with SHA1 due to MD5 - being disabled on systems with FIPS-only algorithms enabled - (https://github.com/ansible/ansible/issues/34304). 
diff --git a/changelogs/fragments/687-fix-redfish-payload-decode-python35.yml b/changelogs/fragments/687-fix-redfish-payload-decode-python35.yml deleted file mode 100644 index 93b3ed2512..0000000000 --- a/changelogs/fragments/687-fix-redfish-payload-decode-python35.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_info, redfish_config, redfish_command - Fix Redfish response payload decode on Python 3.5 (https://github.com/ansible-collections/community.general/issues/686) diff --git a/changelogs/fragments/689-haproxy_agent_and_health.yml b/changelogs/fragments/689-haproxy_agent_and_health.yml deleted file mode 100644 index 0e86d75e65..0000000000 --- a/changelogs/fragments/689-haproxy_agent_and_health.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -minor_changes: - - haproxy - add options to dis/enable health and agent checks. When health - and agent checks are enabled for a service, a disabled service will - re-enable itself automatically. These options also change the state of - the agent checks to match the requested state for the backend - (https://github.com/ansible-collections/community.general/issues/684). diff --git a/changelogs/fragments/693-big-revamp-on-xfconf-adding-array-values.yml b/changelogs/fragments/693-big-revamp-on-xfconf-adding-array-values.yml deleted file mode 100644 index fb2b92b33a..0000000000 --- a/changelogs/fragments/693-big-revamp-on-xfconf-adding-array-values.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- xfconf - add arrays support (https://github.com/ansible/ansible/issues/46308). 
diff --git a/changelogs/fragments/699-django_manage-createcachetable-fix-idempotence.yml b/changelogs/fragments/699-django_manage-createcachetable-fix-idempotence.yml deleted file mode 100644 index 1f6e4054fb..0000000000 --- a/changelogs/fragments/699-django_manage-createcachetable-fix-idempotence.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - django_manage - fix idempotence for ``createcachetable`` (https://github.com/ansible-collections/community.general/pull/699). diff --git a/changelogs/fragments/702-slack-support-for-blocks.yaml b/changelogs/fragments/702-slack-support-for-blocks.yaml deleted file mode 100644 index 40c2a3452e..0000000000 --- a/changelogs/fragments/702-slack-support-for-blocks.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - slack - add support for sending messages built with block kit (https://github.com/ansible-collections/community.general/issues/380). diff --git a/changelogs/fragments/704-doas-set-correct-default-values.yml b/changelogs/fragments/704-doas-set-correct-default-values.yml deleted file mode 100644 index 911a1146ef..0000000000 --- a/changelogs/fragments/704-doas-set-correct-default-values.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- doas become plugin - address a bug with the parameters handling that was breaking the plugin in community.general when ``become_flags`` and ``become_user`` were not explicitly specified (https://github.com/ansible-collections/community.general/pull/704). diff --git a/changelogs/fragments/707-jira-error-handling.yaml b/changelogs/fragments/707-jira-error-handling.yaml deleted file mode 100644 index 23f4ac2629..0000000000 --- a/changelogs/fragments/707-jira-error-handling.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- jira - improve error message handling with multiple errors (https://github.com/ansible-collections/community.general/pull/707). 
diff --git a/changelogs/fragments/708-set-correct-default-values.yml b/changelogs/fragments/708-set-correct-default-values.yml deleted file mode 100644 index 27630da19e..0000000000 --- a/changelogs/fragments/708-set-correct-default-values.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: -- dzdo become plugin - address a bug with the parameters handling that was breaking the plugin in community.general when ``become_user`` was not explicitly specified (https://github.com/ansible-collections/community.general/pull/708). -- pbrun become plugin - address a bug with the parameters handling that was breaking the plugin in community.general when ``become_user`` was not explicitly specified (https://github.com/ansible-collections/community.general/pull/708). diff --git a/changelogs/fragments/711-lxd-target.yml b/changelogs/fragments/711-lxd-target.yml deleted file mode 100644 index 9f417ad742..0000000000 --- a/changelogs/fragments/711-lxd-target.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - lxd_container - added support of ``--target`` flag for cluster deployments (https://github.com/ansible-collections/community.general/issues/637). diff --git a/changelogs/fragments/713-maven-timestamp-snapshot.yml b/changelogs/fragments/713-maven-timestamp-snapshot.yml deleted file mode 100644 index 1b308bbcfa..0000000000 --- a/changelogs/fragments/713-maven-timestamp-snapshot.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "maven_artifact - handle timestamped snapshot version strings properly (https://github.com/ansible-collections/community.general/issues/709)." diff --git a/changelogs/fragments/722-plugins.yml b/changelogs/fragments/722-plugins.yml deleted file mode 100644 index efbc4e0750..0000000000 --- a/changelogs/fragments/722-plugins.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: -- cobbler inventory plugin - ``name`` needed FQCN (https://github.com/ansible-collections/community.general/pull/722). 
-- oc connection plugin - ``transport`` needed FQCN (https://github.com/ansible-collections/community.general/pull/722). -- inventory plugins - allow FQCN in ``plugin`` option (https://github.com/ansible-collections/community.general/pull/722). diff --git a/changelogs/fragments/738-ipa-python3.yml b/changelogs/fragments/738-ipa-python3.yml deleted file mode 100644 index e732837d35..0000000000 --- a/changelogs/fragments/738-ipa-python3.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ipa_hostgroup - fix an issue with load-balanced ipa and cookie handling with Python 3 (https://github.com/ansible-collections/community.general/issues/737). diff --git a/changelogs/fragments/744-xfconf_make_locale-independent.yml b/changelogs/fragments/744-xfconf_make_locale-independent.yml deleted file mode 100644 index 05f060adb8..0000000000 --- a/changelogs/fragments/744-xfconf_make_locale-independent.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - xfconf - add support for ``double`` type (https://github.com/ansible-collections/community.general/pull/744). -bugfixes: - - xfconf - make it work in non-english locales (https://github.com/ansible-collections/community.general/pull/744). - \ No newline at end of file diff --git a/changelogs/fragments/750-jc-new-filter.yaml b/changelogs/fragments/750-jc-new-filter.yaml deleted file mode 100644 index d4ff7845f7..0000000000 --- a/changelogs/fragments/750-jc-new-filter.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- jc - new filter to convert the output of many shell commands and file-types to JSON. Uses the jc library at https://github.com/kellyjonbrazil/jc. For example, filtering the STDOUT output of ``uname -a`` via ``{{ result.stdout | community.general.jc('uname') }}``. Requires Python 3.6+ (https://github.com/ansible-collections/community.general/pull/750). 
diff --git a/changelogs/fragments/768-facter.yml b/changelogs/fragments/768-facter.yml deleted file mode 100644 index d169427a81..0000000000 --- a/changelogs/fragments/768-facter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - facter - added option for ``arguments`` (https://github.com/ansible-collections/community.general/pull/768). diff --git a/changelogs/fragments/773-resize-partition.yml b/changelogs/fragments/773-resize-partition.yml deleted file mode 100644 index 6763b6ec45..0000000000 --- a/changelogs/fragments/773-resize-partition.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - parted - add ``resize`` option to resize existing partitions (https://github.com/ansible-collections/community.general/pull/773). diff --git a/changelogs/fragments/777-interfaces_file-re-escape.yml b/changelogs/fragments/777-interfaces_file-re-escape.yml deleted file mode 100644 index a7daee5670..0000000000 --- a/changelogs/fragments/777-interfaces_file-re-escape.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: -- interfaces_file - escape regular expression characters in old value (https://github.com/ansible-collections/community.general/issues/777). diff --git a/changelogs/fragments/783-fix-gem-installed-versions.yaml b/changelogs/fragments/783-fix-gem-installed-versions.yaml deleted file mode 100644 index 5ad70f8295..0000000000 --- a/changelogs/fragments/783-fix-gem-installed-versions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "gem - fix get_installed_versions: correctly parse ``default`` version (https://github.com/ansible-collections/community.general/pull/783)." diff --git a/changelogs/fragments/788-fix_omapi_host_on_python3.yaml b/changelogs/fragments/788-fix_omapi_host_on_python3.yaml deleted file mode 100644 index 08db3620df..0000000000 --- a/changelogs/fragments/788-fix_omapi_host_on_python3.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - omapi_host - fix compatibility with Python 3 (https://github.com/ansible-collections/community.general/issues/787). 
diff --git a/changelogs/fragments/789-pkg5-wrap-to-modify-package-list.yaml b/changelogs/fragments/789-pkg5-wrap-to-modify-package-list.yaml deleted file mode 100644 index 8d766485e5..0000000000 --- a/changelogs/fragments/789-pkg5-wrap-to-modify-package-list.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "pkg5 - now works when Python 3 is used on the target (https://github.com/ansible-collections/community.general/pull/789)." diff --git a/changelogs/fragments/797-proxmox-kvm-cloud-init.yaml b/changelogs/fragments/797-proxmox-kvm-cloud-init.yaml deleted file mode 100644 index 759746d455..0000000000 --- a/changelogs/fragments/797-proxmox-kvm-cloud-init.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "proxmox_kvm - add cloud-init support (new options: ``cicustom``, ``cipassword``, ``citype``, ``ciuser``, ``ipconfig``, ``nameservers``, ``searchdomains``, ``sshkeys``) (https://github.com/ansible-collections/community.general/pull/797)." diff --git a/changelogs/fragments/802-pushover-device-parameter.yml b/changelogs/fragments/802-pushover-device-parameter.yml deleted file mode 100644 index a9e86c0627..0000000000 --- a/changelogs/fragments/802-pushover-device-parameter.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - pushover - add device parameter (https://github.com/ansible-collections/community.general/pull/802). diff --git a/changelogs/fragments/811-proxmox-kvm-state-absent.yml b/changelogs/fragments/811-proxmox-kvm-state-absent.yml deleted file mode 100644 index 7f4f55d4aa..0000000000 --- a/changelogs/fragments/811-proxmox-kvm-state-absent.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox_kvm - defer error-checking for non-existent VMs in order to fix idempotency of tasks using ``state=absent`` and properly recognize a success (https://github.com/ansible-collections/community.general/pull/811). 
diff --git a/changelogs/fragments/820_nagios_added_acknowledge_and_servicecheck.yml b/changelogs/fragments/820_nagios_added_acknowledge_and_servicecheck.yml deleted file mode 100644 index 33d513ea06..0000000000 --- a/changelogs/fragments/820_nagios_added_acknowledge_and_servicecheck.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: -- nagios - add the ``acknowledge`` action (https://github.com/ansible-collections/community.general/pull/820). -- nagios - add the ``service_check`` action (https://github.com/ansible-collections/community.general/pull/820). diff --git a/changelogs/fragments/823-terraform_init_reconfigure.yaml b/changelogs/fragments/823-terraform_init_reconfigure.yaml deleted file mode 100644 index 408fe3a5a4..0000000000 --- a/changelogs/fragments/823-terraform_init_reconfigure.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- terraform - add ``init_reconfigure`` option, which controls the ``-reconfigure`` flag (backend reconfiguration) (https://github.com/ansible-collections/community.general/pull/823). diff --git a/changelogs/fragments/825-bootsource-override-option.yaml b/changelogs/fragments/825-bootsource-override-option.yaml deleted file mode 100644 index b7efc22d58..0000000000 --- a/changelogs/fragments/825-bootsource-override-option.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_command - add sub-command for ``EnableContinuousBootOverride`` and ``DisableBootOverride`` to allow setting BootSourceOverrideEnabled Redfish property (https://github.com/ansible-collections/community.general/issues/824). diff --git a/changelogs/fragments/830-pam-limits.yml b/changelogs/fragments/830-pam-limits.yml deleted file mode 100644 index 28b5ee3dc0..0000000000 --- a/changelogs/fragments/830-pam-limits.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - pam_limits - adds check mode (https://github.com/ansible-collections/community.general/issues/827). 
- - pam_limits - adds diff mode (https://github.com/ansible-collections/community.general/issues/828). diff --git a/changelogs/fragments/831-proxmox-kvm-wait.yml b/changelogs/fragments/831-proxmox-kvm-wait.yml deleted file mode 100644 index c1975bbe7d..0000000000 --- a/changelogs/fragments/831-proxmox-kvm-wait.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox_kvm - improve handling of long-running tasks by creating a dedicated function (https://github.com/ansible-collections/community.general/pull/831). diff --git a/changelogs/fragments/843-update-slack-messages.yml b/changelogs/fragments/843-update-slack-messages.yml deleted file mode 100644 index 3270df109d..0000000000 --- a/changelogs/fragments/843-update-slack-messages.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - slack - add support for updating messages (https://github.com/ansible-collections/community.general/issues/304). diff --git a/changelogs/fragments/849-proxmox-kvm-state-absent-force.yml b/changelogs/fragments/849-proxmox-kvm-state-absent-force.yml deleted file mode 100644 index cc8f672b00..0000000000 --- a/changelogs/fragments/849-proxmox-kvm-state-absent-force.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -breaking_changes: - - proxmox_kvm - recognize ``force=yes`` in conjunction with ``state=absent`` to forcibly remove a running VM (https://github.com/ansible-collections/community.general/pull/849). diff --git a/changelogs/fragments/850-proxmox_kvm-remove_hard_coded_defaults.yml b/changelogs/fragments/850-proxmox_kvm-remove_hard_coded_defaults.yml deleted file mode 100644 index 423419db56..0000000000 --- a/changelogs/fragments/850-proxmox_kvm-remove_hard_coded_defaults.yml +++ /dev/null @@ -1,6 +0,0 @@ -minor_changes: - - proxmox - add new ``proxmox_default_behavior`` option (https://github.com/ansible-collections/community.general/pull/850). - - proxmox_kvm - add new ``proxmox_default_behavior`` option (https://github.com/ansible-collections/community.general/pull/850). 
-deprecated_features: - - proxmox - the default of the new ``proxmox_default_behavior`` option will change from ``compatibility`` to ``no_defaults`` in community.general 4.0.0. Set the option to an explicit value to avoid a deprecation warning (https://github.com/ansible-collections/community.general/pull/850). - - proxmox_kvm - the default of the new ``proxmox_default_behavior`` option will change from ``compatibility`` to ``no_defaults`` in community.general 4.0.0. Set the option to an explicit value to avoid a deprecation warning (https://github.com/ansible-collections/community.general/pull/850). diff --git a/changelogs/fragments/891-packet_net-fix-not-subscriptable.yaml b/changelogs/fragments/891-packet_net-fix-not-subscriptable.yaml deleted file mode 100644 index 844a16eef0..0000000000 --- a/changelogs/fragments/891-packet_net-fix-not-subscriptable.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- packet_net.py inventory script - fixed failure w.r.t. operating system retrieval by changing array subscription back to attribute access (https://github.com/ansible-collections/community.general/pull/891). diff --git a/changelogs/fragments/892-slack-token-validation.yml b/changelogs/fragments/892-slack-token-validation.yml deleted file mode 100644 index 40eef1fdcc..0000000000 --- a/changelogs/fragments/892-slack-token-validation.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - slack - fix ``xox[abp]`` token identification to capture everything after ``xox[abp]``, as the token is the only thing that should be in this argument (https://github.com/ansible-collections/community.general/issues/862). 
diff --git a/changelogs/fragments/899_launchd_user_service.yml b/changelogs/fragments/899_launchd_user_service.yml deleted file mode 100644 index 5abca2a6a7..0000000000 --- a/changelogs/fragments/899_launchd_user_service.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- launchd - fix for user-level services (https://github.com/ansible-collections/community.general/issues/896). diff --git a/changelogs/fragments/900-aerospike-migration-handle-unstable-cluster.yaml b/changelogs/fragments/900-aerospike-migration-handle-unstable-cluster.yaml deleted file mode 100644 index ac8c0d0869..0000000000 --- a/changelogs/fragments/900-aerospike-migration-handle-unstable-cluster.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - aerospike_migrations - handle exception when unstable-cluster is returned (https://github.com/ansible-collections/community.general/pull/900). diff --git a/changelogs/fragments/903-enhance-redfish-manager-reset-actions.yml b/changelogs/fragments/903-enhance-redfish-manager-reset-actions.yml deleted file mode 100644 index 14109dbe8f..0000000000 --- a/changelogs/fragments/903-enhance-redfish-manager-reset-actions.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_command - support same reset actions on Managers as on Systems (https://github.com/ansible-collections/community.general/issues/901). diff --git a/changelogs/fragments/939-zypper_repository_proper_failure_on_missing_python-xml.yml b/changelogs/fragments/939-zypper_repository_proper_failure_on_missing_python-xml.yml deleted file mode 100644 index 7917972bb0..0000000000 --- a/changelogs/fragments/939-zypper_repository_proper_failure_on_missing_python-xml.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - zypper_repository - proper failure when python-xml is missing (https://github.com/ansible-collections/community.general/pull/939). 
diff --git a/changelogs/fragments/943-proxmox-kvm-code-cleanup.yml b/changelogs/fragments/943-proxmox-kvm-code-cleanup.yml deleted file mode 100644 index eab6f3a3e9..0000000000 --- a/changelogs/fragments/943-proxmox-kvm-code-cleanup.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - proxmox_kvm - improve code readability (https://github.com/ansible-collections/community.general/pull/934). diff --git a/changelogs/fragments/945-darwin-timezone-py3.yaml b/changelogs/fragments/945-darwin-timezone-py3.yaml deleted file mode 100644 index 98725b9454..0000000000 --- a/changelogs/fragments/945-darwin-timezone-py3.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- timezone - support Python3 on macos/darwin (https://github.com/ansible-collections/community.general/pull/945). diff --git a/changelogs/fragments/951-ipa_user-add-userauthtype-param.yaml b/changelogs/fragments/951-ipa_user-add-userauthtype-param.yaml deleted file mode 100644 index c13a83d3c9..0000000000 --- a/changelogs/fragments/951-ipa_user-add-userauthtype-param.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - "ipa_user - add ``userauthtype`` option (https://github.com/ansible-collections/community.general/pull/951)." diff --git a/changelogs/fragments/953_syslogger.yml b/changelogs/fragments/953_syslogger.yml deleted file mode 100644 index 1c1065321c..0000000000 --- a/changelogs/fragments/953_syslogger.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- syslogger - update ``syslog.openlog`` API call for older Python versions, and improve error handling (https://github.com/ansible-collections/community.general/issues/953). 
diff --git a/changelogs/fragments/967-use-fqcn-when-calling-a-module-from-action-plugin.yml b/changelogs/fragments/967-use-fqcn-when-calling-a-module-from-action-plugin.yml deleted file mode 100644 index 052e412404..0000000000 --- a/changelogs/fragments/967-use-fqcn-when-calling-a-module-from-action-plugin.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -minor_changes: - - iptables_state - use FQCN when calling a module from action plugin - (https://github.com/ansible-collections/community.general/pull/967). diff --git a/changelogs/fragments/968-gitlab_variables-pagination.yml b/changelogs/fragments/968-gitlab_variables-pagination.yml deleted file mode 100644 index 9f8e930ed6..0000000000 --- a/changelogs/fragments/968-gitlab_variables-pagination.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - gitlab_project_variable - support for GitLab pagination limitation by iterating over GitLab variable pages (https://github.com/ansible-collections/community.general/pull/968). - - gitlab_group_variable - support for GitLab pagination limitation by iterating over GitLab variable pages (https://github.com/ansible-collections/community.general/pull/968). diff --git a/changelogs/fragments/992-nmcli-locale.yml b/changelogs/fragments/992-nmcli-locale.yml deleted file mode 100644 index 599d795b12..0000000000 --- a/changelogs/fragments/992-nmcli-locale.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "nmcli - set ``C`` locale when executing ``nmcli`` (https://github.com/ansible-collections/community.general/issues/989)." diff --git a/changelogs/fragments/993-file-capabilities.yml b/changelogs/fragments/993-file-capabilities.yml deleted file mode 100644 index a9e781c400..0000000000 --- a/changelogs/fragments/993-file-capabilities.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - capabilities - fix for a newer version of libcap release (https://github.com/ansible-collections/community.general/pull/1061). 
diff --git a/changelogs/fragments/998-nagios-added_forced_check_for_all_services_or_host.yml b/changelogs/fragments/998-nagios-added_forced_check_for_all_services_or_host.yml deleted file mode 100644 index bfa1397e08..0000000000 --- a/changelogs/fragments/998-nagios-added_forced_check_for_all_services_or_host.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: -- nagios - rename the ``service_check`` action to ``forced_check`` since we now are able to check both a particular service, all services of a particular host and the host itself (https://github.com/ansible-collections/community.general/pull/998). -- nagios - add the ``host`` and ``all`` values for the ``forced_check`` action (https://github.com/ansible-collections/community.general/pull/998). diff --git a/changelogs/fragments/add_argument_check_for_rundeck.yaml b/changelogs/fragments/add_argument_check_for_rundeck.yaml deleted file mode 100644 index 7d918d0048..0000000000 --- a/changelogs/fragments/add_argument_check_for_rundeck.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- rundeck_acl_policy - add check for rundeck_acl_policy name parameter (https://github.com/ansible-collections/community.general/pull/612). diff --git a/changelogs/fragments/airbrake_deployment_add_version.yml b/changelogs/fragments/airbrake_deployment_add_version.yml deleted file mode 100644 index 4e95b91769..0000000000 --- a/changelogs/fragments/airbrake_deployment_add_version.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - "airbrake_deployment - add ``version`` param; clarified docs on ``revision`` param (https://github.com/ansible-collections/community.general/pull/583)." 
diff --git a/changelogs/fragments/aix_filesystem-module_util-routing-issue.yml b/changelogs/fragments/aix_filesystem-module_util-routing-issue.yml deleted file mode 100644 index 1cb803e1c2..0000000000 --- a/changelogs/fragments/aix_filesystem-module_util-routing-issue.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - aix_filesystem - fix issues with ismount module_util pathing for Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/567). diff --git a/changelogs/fragments/cloudflare_dns.yml b/changelogs/fragments/cloudflare_dns.yml deleted file mode 100644 index 1e873fa51c..0000000000 --- a/changelogs/fragments/cloudflare_dns.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- cloudflare_dns - add support for environment variable ``CLOUDFLARE_TOKEN`` (https://github.com/ansible-collections/community.general/pull/1238). diff --git a/changelogs/fragments/cve_bitbucket_pipeline_variable.yml b/changelogs/fragments/cve_bitbucket_pipeline_variable.yml deleted file mode 100644 index 1315755bb0..0000000000 --- a/changelogs/fragments/cve_bitbucket_pipeline_variable.yml +++ /dev/null @@ -1,2 +0,0 @@ -security_fixes: -- 'bitbucket_pipeline_variable - **CVE-2021-20180** - hide user sensitive information which are marked as ``secured`` from logging into the console (https://github.com/ansible-collections/community.general/pull/1635).' diff --git a/changelogs/fragments/cyberarkconjur-removal.yml b/changelogs/fragments/cyberarkconjur-removal.yml deleted file mode 100644 index 1c3cc6dadf..0000000000 --- a/changelogs/fragments/cyberarkconjur-removal.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - "conjur_variable lookup - has been moved to the ``cyberark.conjur`` collection. A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/570)." 
diff --git a/changelogs/fragments/dconf_refactor.yml b/changelogs/fragments/dconf_refactor.yml deleted file mode 100644 index 215f1dd329..0000000000 --- a/changelogs/fragments/dconf_refactor.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- dconf - update documentation and logic code refactor (https://github.com/ansible-collections/community.general/pull/1585). diff --git a/changelogs/fragments/deprecation-removals.yml b/changelogs/fragments/deprecation-removals.yml deleted file mode 100644 index 4e8546dfcb..0000000000 --- a/changelogs/fragments/deprecation-removals.yml +++ /dev/null @@ -1,5 +0,0 @@ -removed_features: -- "iptables_state - the ``ANSIBLE_ASYNC_DIR`` environment is no longer supported, use the ``async_dir`` shell option instead (https://github.com/ansible-collections/community.general/pull/1371)." -- "memcached cache plugin - do not import ``CacheModule``s directly. Use ``ansible.plugins.loader.cache_loader`` instead (https://github.com/ansible-collections/community.general/pull/1371)." -- "redis cache plugin - do not import ``CacheModule``s directly. Use ``ansible.plugins.loader.cache_loader`` instead (https://github.com/ansible-collections/community.general/pull/1371)." -- "xml - when ``content=attribute``, the ``attribute`` option is ignored (https://github.com/ansible-collections/community.general/pull/1371)." diff --git a/changelogs/fragments/digital-ocean.yml b/changelogs/fragments/digital-ocean.yml deleted file mode 100644 index a8870cbc1a..0000000000 --- a/changelogs/fragments/digital-ocean.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: -- "digital_ocean_* - all DigitalOcean modules have been moved to the ``community.digitalocean`` collection. A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/622)." 
\ No newline at end of file diff --git a/changelogs/fragments/docker-migration-removal.yml b/changelogs/fragments/docker-migration-removal.yml deleted file mode 100644 index 5740aeea44..0000000000 --- a/changelogs/fragments/docker-migration-removal.yml +++ /dev/null @@ -1,14 +0,0 @@ -removed_features: -- | - All ``docker`` modules and plugins have been removed from this collection. - They have been migrated to the `community.docker `_ collection. - If you use ansible-base 2.10 or newer, redirections have been provided. - - If you use Ansible 2.9 and installed this collection, you need to adjust the FQCNs (``community.general.docker_container`` → ``community.docker.docker_container``) and make sure to install the community.docker collection. -breaking_changes: -- | - If you use Ansible 2.9 and the ``docker`` plugins or modules from this collections, community.general 2.0.0 results in errors when trying to use the docker content by FQCN, like ``community.general.docker_container``. - Since Ansible 2.9 is not able to use redirections, you will have to adjust your playbooks and roles manually to use the new FQCNs (``community.docker.docker_container`` for the previous example) and to make sure that you have ``community.docker`` installed. - - If you use ansible-base 2.10 or newer and did not install Ansible 3.0.0, but installed (and/or upgraded) community.general manually, you need to make sure to also install ``community.docker`` if you are using any of the ``docker`` plugins or modules. - While ansible-base 2.10 or newer can use the redirects that community.general 2.0.0 adds, the collection they point to (community.docker) must be installed for them to work. 
diff --git a/changelogs/fragments/dsv_fix.yml b/changelogs/fragments/dsv_fix.yml deleted file mode 100644 index fedc90ba5b..0000000000 --- a/changelogs/fragments/dsv_fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- dsv lookup - use correct dict usage (https://github.com/ansible-collections/community.general/pull/743). diff --git a/changelogs/fragments/firewalld_migration.yml b/changelogs/fragments/firewalld_migration.yml deleted file mode 100644 index 57e7dbf1e4..0000000000 --- a/changelogs/fragments/firewalld_migration.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - firewalld - the module has been moved to the ``ansible.posix`` collection. A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/623). diff --git a/changelogs/fragments/fix-plugin-imports.yml b/changelogs/fragments/fix-plugin-imports.yml deleted file mode 100644 index aa3cbecfd5..0000000000 --- a/changelogs/fragments/fix-plugin-imports.yml +++ /dev/null @@ -1,5 +0,0 @@ -bugfixes: -- "linode inventory plugin - make sure that plugin errors out on initialization if the required library is not found, and not on load-time (https://github.com/ansible-collections/community.general/pull/1297)." -- "redis cache plugin - make sure that plugin errors out on initialization if the required library is not found, and not on load-time (https://github.com/ansible-collections/community.general/pull/1297)." -- "memcached cache plugin - make sure that plugin errors out on initialization if the required library is not found, and not on load-time (https://github.com/ansible-collections/community.general/pull/1297)." -- "gcp_storage_files lookup plugin - make sure that plugin errors out on initialization if the required library is not found, and not on load-time (https://github.com/ansible-collections/community.general/pull/1297)." 
diff --git a/changelogs/fragments/fix_parsing_array_values_in_osx_defaults.yml b/changelogs/fragments/fix_parsing_array_values_in_osx_defaults.yml deleted file mode 100644 index aa6788df85..0000000000 --- a/changelogs/fragments/fix_parsing_array_values_in_osx_defaults.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - osx_defaults - unquote values and unescape double quotes when reading array values (https://github.com/ansible-collections/community.general/pull/358). diff --git a/changelogs/fragments/galaxy-yml.yml b/changelogs/fragments/galaxy-yml.yml deleted file mode 100644 index 4ba047e95f..0000000000 --- a/changelogs/fragments/galaxy-yml.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- "The collection dependencies were adjusted so that ``community.kubernetes`` is required to be of version 1.0.0 or newer (https://github.com/ansible-collections/community.general/pull/774)." diff --git a/changelogs/fragments/gluster-deprecation.yaml b/changelogs/fragments/gluster-deprecation.yaml deleted file mode 100644 index b8f54e0e00..0000000000 --- a/changelogs/fragments/gluster-deprecation.yaml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: -- The ``gluster_heal_info``, ``gluster_peer`` and ``gluster_volume`` modules have migrated to the `gluster.gluster `_ collection. Ansible-base 2.10.1 adjusted the routing target to point to the modules in that collection, so we will remove these modules in community.general 3.0.0. If you use Ansible 2.9, or use FQCNs ``community.general.gluster_*`` in your playbooks and/or roles, please update them to use the modules from ``gluster.gluster`` instead. diff --git a/changelogs/fragments/hashi_vault-migration-removal.yml b/changelogs/fragments/hashi_vault-migration-removal.yml deleted file mode 100644 index f0ccd52ee1..0000000000 --- a/changelogs/fragments/hashi_vault-migration-removal.yml +++ /dev/null @@ -1,14 +0,0 @@ -removed_features: -- | - The ``hashi_vault`` lookup plugin has been removed from this collection. 
- It has been migrated to the `community.hashi_vault `_ collection. - If you use ansible-base 2.10 or newer, redirections have been provided. - - If you use Ansible 2.9 and installed this collection, you need to adjust the FQCNs (``community.general.hashi_vault`` → ``community.hashi_vault.hashi_vault``) and make sure to install the community.hashi_vault collection. -breaking_changes: -- | - If you use Ansible 2.9 and the ``hashi_vault`` lookup plugin from this collections, community.general 2.0.0 results in errors when trying to use the Hashi Vault content by FQCN, like ``community.general.hashi_vault``. - Since Ansible 2.9 is not able to use redirections, you will have to adjust your inventories, variable files, playbooks and roles manually to use the new FQCN (``community.hashi_vault.hashi_vault``) and to make sure that you have ``community.hashi_vault`` installed. - - If you use ansible-base 2.10 or newer and did not install Ansible 3.0.0, but installed (and/or upgraded) community.general manually, you need to make sure to also install ``community.hashi_vault`` if you are using the ``hashi_vault`` plugin. - While ansible-base 2.10 or newer can use the redirects that community.general 2.0.0 adds, the collection they point to (community.hashi_vault) must be installed for them to work. diff --git a/changelogs/fragments/hetzner-migration-removal.yml b/changelogs/fragments/hetzner-migration-removal.yml deleted file mode 100644 index 23518ffabb..0000000000 --- a/changelogs/fragments/hetzner-migration-removal.yml +++ /dev/null @@ -1,14 +0,0 @@ -removed_features: -- | - All ``hetzner`` modules have been removed from this collection. - They have been migrated to the `community.hrobot `_ collection. - If you use ansible-base 2.10 or newer, redirections have been provided. 
- - If you use Ansible 2.9 and installed this collection, you need to adjust the FQCNs (``community.general.hetzner_firewall`` → ``community.hrobot.firewall``) and make sure to install the community.hrobot collection. -breaking_changes: -- | - If you use Ansible 2.9 and the ``hetzner`` modules from this collections, community.general 2.0.0 results in errors when trying to use the hetzner content by FQCN, like ``community.general.hetzner_firewall``. - Since Ansible 2.9 is not able to use redirections, you will have to adjust your playbooks and roles manually to use the new FQCNs (``community.hrobot.firewall`` for the previous example) and to make sure that you have ``community.hrobot`` installed. - - If you use ansible-base 2.10 or newer and did not install Ansible 3.0.0, but installed (and/or upgraded) community.general manually, you need to make sure to also install ``community.hrobot`` if you are using any of the ``hetzner`` modules. - While ansible-base 2.10 or newer can use the redirects that community.general 2.0.0 adds, the collection they point to (community.hrobot) must be installed for them to work. diff --git a/changelogs/fragments/homebrew-cask-at-symbol-fix.yaml b/changelogs/fragments/homebrew-cask-at-symbol-fix.yaml deleted file mode 100644 index 2b7d51bc52..0000000000 --- a/changelogs/fragments/homebrew-cask-at-symbol-fix.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - homebrew_cask - fixed issue where a cask with ``@`` in the name is incorrectly reported as invalid (https://github.com/ansible-collections/community.general/issues/733). diff --git a/changelogs/fragments/infinidat-removal.yml b/changelogs/fragments/infinidat-removal.yml deleted file mode 100644 index eaafd3f00a..0000000000 --- a/changelogs/fragments/infinidat-removal.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - "infini_* - all infinidat modules have been moved to the ``infinidat.infinibox`` collection. 
A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/607)." diff --git a/changelogs/fragments/jira_improvements.yaml b/changelogs/fragments/jira_improvements.yaml deleted file mode 100644 index 1ad843ab96..0000000000 --- a/changelogs/fragments/jira_improvements.yaml +++ /dev/null @@ -1,7 +0,0 @@ -bugfixes: - - jira - ``fetch`` and ``search`` no longer indicate that something changed (https://github.com/ansible-collections/community.general/pull/1536). - - jira - module no longer incorrectly reports change for information gathering operations (https://github.com/ansible-collections/community.general/pull/1536). - - jira - replaced custom parameter validation with ``required_if`` (https://github.com/ansible-collections/community.general/pull/1536). - - jira - ensured parameter ``issue`` is mandatory for operation ``transition`` (https://github.com/ansible-collections/community.general/pull/1536). -minor_changes: - - jira - added the traceback output to ``fail_json()`` calls deriving from exceptions (https://github.com/ansible-collections/community.general/pull/1536). diff --git a/changelogs/fragments/lldp-use-get_bin_path-to-locate-the-lldpctl-executable.yaml b/changelogs/fragments/lldp-use-get_bin_path-to-locate-the-lldpctl-executable.yaml deleted file mode 100644 index 4fefd3dd87..0000000000 --- a/changelogs/fragments/lldp-use-get_bin_path-to-locate-the-lldpctl-executable.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - lldp - use ``get_bin_path`` to locate the ``lldpctl`` executable (https://github.com/ansible-collections/community.general/pull/1643). 
diff --git a/changelogs/fragments/logicmonitor-removal.yml b/changelogs/fragments/logicmonitor-removal.yml deleted file mode 100644 index d068b5ac5f..0000000000 --- a/changelogs/fragments/logicmonitor-removal.yml +++ /dev/null @@ -1,3 +0,0 @@ -removed_features: - - "logicmonitor - the module has been removed in 1.0.0 since it is unmaintained and the API used by the module has been turned off in 2017 (https://github.com/ansible-collections/community.general/issues/539, https://github.com/ansible-collections/community.general/pull/541)." - - "logicmonitor_facts - the module has been removed in 1.0.0 since it is unmaintained and the API used by the module has been turned off in 2017 (https://github.com/ansible-collections/community.general/issues/539, https://github.com/ansible-collections/community.general/pull/541)." diff --git a/changelogs/fragments/lookup-passwordstore-umask.yml b/changelogs/fragments/lookup-passwordstore-umask.yml deleted file mode 100644 index 1d9f3b711a..0000000000 --- a/changelogs/fragments/lookup-passwordstore-umask.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -minor_changes: - - passwordstore lookup plugin - added ``umask`` option to set the desired file permisions on creation. This is - done via the ``PASSWORD_STORE_UMASK`` environment variable - (https://github.com/ansible-collections/community.general/pull/1156). diff --git a/changelogs/fragments/mysql.yml b/changelogs/fragments/mysql.yml deleted file mode 100644 index acfc578125..0000000000 --- a/changelogs/fragments/mysql.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: -- "mysql_* - all MySQL modules have been moved to the ``community.mysql`` collection. A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/633)." 
diff --git a/changelogs/fragments/nios-fix-ib_spec.yaml b/changelogs/fragments/nios-fix-ib_spec.yaml deleted file mode 100644 index 06993d68b9..0000000000 --- a/changelogs/fragments/nios-fix-ib_spec.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - nios modules - clean up module argument spec processing (https://github.com/ansible-collections/community.general/pull/1598). diff --git a/changelogs/fragments/nios_host_record-fix-aliases-removal.yml b/changelogs/fragments/nios_host_record-fix-aliases-removal.yml deleted file mode 100644 index 3003c78429..0000000000 --- a/changelogs/fragments/nios_host_record-fix-aliases-removal.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - nios_host_record - fix to remove ``aliases`` (CNAMES) for configuration comparison (https://github.com/ansible-collections/community.general/issues/1335). diff --git a/changelogs/fragments/nmcli-refactor.yml b/changelogs/fragments/nmcli-refactor.yml deleted file mode 100644 index 86a504ac90..0000000000 --- a/changelogs/fragments/nmcli-refactor.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -bugfixes: - - nmcli - use consistent autoconnect parameters (https://github.com/ansible-collections/community.general/issues/459). - - nmcli - cannot modify ``ifname`` after connection creation (https://github.com/ansible-collections/community.general/issues/1089). -minor_changes: - - nmcli - refactor internal methods for simplicity and enhance reuse to support existing and future connection types (https://github.com/ansible-collections/community.general/pull/1113). - - nmcli - the ``dns4``, ``dns4_search``, ``dns6``, and ``dns6_search`` arguments are retained internally as lists (https://github.com/ansible-collections/community.general/pull/1113). - - nmcli - remove Python DBus and GTK Object library dependencies (https://github.com/ansible-collections/community.general/issues/1112). 
diff --git a/changelogs/fragments/oc-migration-removal.yml b/changelogs/fragments/oc-migration-removal.yml deleted file mode 100644 index ddbd5f058a..0000000000 --- a/changelogs/fragments/oc-migration-removal.yml +++ /dev/null @@ -1,14 +0,0 @@ -removed_features: -- | - The ``oc`` connection plugin has been removed from this collection. - It has been migrated to the `community.okd `_ collection. - If you use ansible-base 2.10 or newer, redirections have been provided. - - If you use Ansible 2.9 and installed this collection, you need to adjust the FQCNs (``community.general.oc`` → ``community.okd.oc``) and make sure to install the community.okd collection. -breaking_changes: -- | - If you use Ansible 2.9 and the ``oc`` connection plugin from this collections, community.general 2.0.0 results in errors when trying to use the oc content by FQCN, like ``community.general.oc``. - Since Ansible 2.9 is not able to use redirections, you will have to adjust your inventories, variable files, playbooks and roles manually to use the new FQCN (``community.okd.oc``) and to make sure that you have ``community.okd`` installed. - - If you use ansible-base 2.10 or newer and did not install Ansible 3.0.0, but installed (and/or upgraded) community.general manually, you need to make sure to also install ``community.okd`` if you are using the ``oc`` plugin. - While ansible-base 2.10 or newer can use the redirects that community.general 2.0.0 adds, the collection they point to (community.okd) must be installed for them to work. diff --git a/changelogs/fragments/odbc.yml b/changelogs/fragments/odbc.yml deleted file mode 100644 index 606a26f421..0000000000 --- a/changelogs/fragments/odbc.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- "odbc - added a parameter ``commit`` which allows users to disable the explicit commit after the execute call (https://github.com/ansible-collections/community.general/pull/1139)." 
diff --git a/changelogs/fragments/openbsd_pkg.yml b/changelogs/fragments/openbsd_pkg.yml deleted file mode 100644 index 2daeb3d2ec..0000000000 --- a/changelogs/fragments/openbsd_pkg.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - openbsd_pkg - added ``snapshot`` option (https://github.com/ansible-collections/community.general/pull/965). diff --git a/changelogs/fragments/parted_negative_numbers.yml b/changelogs/fragments/parted_negative_numbers.yml deleted file mode 100644 index 9a54a2c173..0000000000 --- a/changelogs/fragments/parted_negative_numbers.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "parted - accept negative numbers in ``part_start`` and ``part_end``" diff --git a/changelogs/fragments/pkgutil-check-mode-etc.yaml b/changelogs/fragments/pkgutil-check-mode-etc.yaml deleted file mode 100644 index 9c659ba110..0000000000 --- a/changelogs/fragments/pkgutil-check-mode-etc.yaml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - pkgutil - module now supports check mode (https://github.com/ansible-collections/community.general/pull/799). - - pkgutil - module can now accept a list of packages (https://github.com/ansible-collections/community.general/pull/799). - - pkgutil - module has a new option, ``force``, equivalent to the ``-f`` option to the `pkgutil `_ command (https://github.com/ansible-collections/community.general/pull/799). diff --git a/changelogs/fragments/porting-guide-2.yml b/changelogs/fragments/porting-guide-2.yml deleted file mode 100644 index 71fea312ee..0000000000 --- a/changelogs/fragments/porting-guide-2.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: - - The ldap_attr module has been deprecated and will be removed in a later release; use ldap_attrs instead. 
diff --git a/changelogs/fragments/postgresql-migration-removal.yml b/changelogs/fragments/postgresql-migration-removal.yml deleted file mode 100644 index 7a6f0b5c07..0000000000 --- a/changelogs/fragments/postgresql-migration-removal.yml +++ /dev/null @@ -1,14 +0,0 @@ -removed_features: -- | - All ``postgresql`` modules have been removed from this collection. - They have been migrated to the `community.postgresql `_ collection. - - If you use ansible-base 2.10 or newer, redirections have been provided. - If you use Ansible 2.9 and installed this collection, you need to adjust the FQCNs (``community.general.postgresql_info`` → ``community.postgresql.postgresql_info``) and make sure to install the community.postgresql collection. -breaking_changes: -- | - If you use Ansible 2.9 and the ``postgresql`` modules from this collections, community.general 2.0.0 results in errors when trying to use the postgresql content by FQCN, like ``community.general.postgresql_info``. - Since Ansible 2.9 is not able to use redirections, you will have to adjust your playbooks and roles manually to use the new FQCNs (``community.postgresql.postgresql_info`` for the previous example) and to make sure that you have ``community.postgresql`` installed. - - If you use ansible-base 2.10 or newer and did not install Ansible 3.0.0, but installed (and/or upgraded) community.general manually, you need to make sure to also install ``community.postgresql`` if you are using any of the ``postgresql`` modules. - While ansible-base 2.10 or newer can use the redirects that community.general 2.0.0 adds, the collection they point to (community.postgresql) must be installed for them to work. 
diff --git a/changelogs/fragments/proxmox_template-appliance-download.yml b/changelogs/fragments/proxmox_template-appliance-download.yml deleted file mode 100644 index cbeadad0e4..0000000000 --- a/changelogs/fragments/proxmox_template-appliance-download.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - proxmox_template - download proxmox applicance templates (pveam) (https://github.com/ansible-collections/community.general/pull/1046). diff --git a/changelogs/fragments/proxysql.yml b/changelogs/fragments/proxysql.yml deleted file mode 100644 index c1192b05a1..0000000000 --- a/changelogs/fragments/proxysql.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: -- "proxysql_* - all ProxySQL modules have been moved to the ``community.proxysql`` collection. A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/624)." diff --git a/changelogs/fragments/remove-ansible.netcommon-dependency.yml b/changelogs/fragments/remove-ansible.netcommon-dependency.yml deleted file mode 100644 index b91cfa54fd..0000000000 --- a/changelogs/fragments/remove-ansible.netcommon-dependency.yml +++ /dev/null @@ -1,4 +0,0 @@ -major_changes: -- "The community.general collection no longer depends on the ansible.netcommon collection (https://github.com/ansible-collections/community.general/pull/1561)." -minor_changes: -- "nios_network - no longer requires the ansible.netcommon collection (https://github.com/ansible-collections/community.general/pull/1561)." diff --git a/changelogs/fragments/remove-ansible.posix-dependency.yml b/changelogs/fragments/remove-ansible.posix-dependency.yml deleted file mode 100644 index 089432c019..0000000000 --- a/changelogs/fragments/remove-ansible.posix-dependency.yml +++ /dev/null @@ -1,2 +0,0 @@ -major_changes: -- "The community.general collection no longer depends on the ansible.posix collection (https://github.com/ansible-collections/community.general/pull/1157)." 
diff --git a/changelogs/fragments/remove-deprecated-modules-2.yml b/changelogs/fragments/remove-deprecated-modules-2.yml deleted file mode 100644 index 488c2b35e3..0000000000 --- a/changelogs/fragments/remove-deprecated-modules-2.yml +++ /dev/null @@ -1,10 +0,0 @@ -removed_features: -- The deprecated ``gcdns_record`` module has been removed. Use ``google.cloud.gcp_dns_resource_record_set`` instead (https://github.com/ansible-collections/community.general/pull/1370). -- The deprecated ``gcdns_zone`` module has been removed. Use ``google.cloud.gcp_dns_managed_zone`` instead (https://github.com/ansible-collections/community.general/pull/1370). -- The deprecated ``gce`` module has been removed. Use ``google.cloud.gcp_compute_instance`` instead (https://github.com/ansible-collections/community.general/pull/1370). -- The deprecated ``gcp_backend_service`` module has been removed. Use ``google.cloud.gcp_compute_backend_service`` instead (https://github.com/ansible-collections/community.general/pull/1370). -- The deprecated ``gcp_forwarding_rule`` module has been removed. Use ``google.cloud.gcp_compute_forwarding_rule`` or ``google.cloud.gcp_compute_global_forwarding_rule`` instead (https://github.com/ansible-collections/community.general/pull/1370). -- The deprecated ``gcp_healthcheck`` module has been removed. Use ``google.cloud.gcp_compute_health_check``, ``google.cloud.gcp_compute_http_health_check`` or ``google.cloud.gcp_compute_https_health_check`` instead (https://github.com/ansible-collections/community.general/pull/1370). -- The deprecated ``gcp_target_proxy`` module has been removed. Use ``google.cloud.gcp_compute_target_http_proxy`` instead (https://github.com/ansible-collections/community.general/pull/1370). -- The deprecated ``gcp_url_map`` module has been removed. Use ``google.cloud.gcp_compute_url_map`` instead (https://github.com/ansible-collections/community.general/pull/1370). -- The deprecated ``gcspanner`` module has been removed. 
Use ``google.cloud.gcp_spanner_database`` and/or ``google.cloud.gcp_spanner_instance`` instead (https://github.com/ansible-collections/community.general/pull/1370). diff --git a/changelogs/fragments/remove-deprecated-modules.yml b/changelogs/fragments/remove-deprecated-modules.yml deleted file mode 100644 index ff62051735..0000000000 --- a/changelogs/fragments/remove-deprecated-modules.yml +++ /dev/null @@ -1,20 +0,0 @@ -removed_features: -- The deprecated ``foreman`` module has been removed. Use the modules from the theforeman.foreman collection instead (https://github.com/ansible-collections/community.general/pull/1347) (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``katello`` module has been removed. Use the modules from the theforeman.foreman collection instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``github_hooks`` module has been removed. Use ``community.general.github_webhook`` and ``community.general.github_webhook_info`` instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``na_cdot_aggregate`` module has been removed. Use netapp.ontap.na_ontap_aggregate instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``na_cdot_license`` module has been removed. Use netapp.ontap.na_ontap_license instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``na_cdot_lun`` module has been removed. Use netapp.ontap.na_ontap_lun instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``na_cdot_qtree`` module has been removed. Use netapp.ontap.na_ontap_qtree instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``na_cdot_svm`` module has been removed. Use netapp.ontap.na_ontap_svm instead (https://github.com/ansible-collections/community.general/pull/1347). 
-- The deprecated ``na_cdot_user`` module has been removed. Use netapp.ontap.na_ontap_user instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``na_cdot_user_role`` module has been removed. Use netapp.ontap.na_ontap_user_role instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``na_cdot_volume`` module has been removed. Use netapp.ontap.na_ontap_volume instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``sf_account_manager`` module has been removed. Use netapp.elementsw.na_elementsw_account instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``sf_check_connections`` module has been removed. Use netapp.elementsw.na_elementsw_check_connections instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``sf_snapshot_schedule_manager`` module has been removed. Use netapp.elementsw.na_elementsw_snapshot_schedule instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``sf_volume_access_group_manager`` module has been removed. Use netapp.elementsw.na_elementsw_access_group instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``sf_volume_manager`` module has been removed. Use netapp.elementsw.na_elementsw_volume instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``actionable`` callback plugin has been removed. Use the ``ansible.builtin.default`` callback plugin with ``display_skipped_hosts = no`` and ``display_ok_hosts = no`` options instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``full_skip`` callback plugin has been removed. 
Use the ``ansible.builtin.default`` callback plugin with ``display_skipped_hosts = no`` option instead (https://github.com/ansible-collections/community.general/pull/1347). -- The deprecated ``stderr`` callback plugin has been removed. Use the ``ansible.builtin.default`` callback plugin with ``display_failed_stderr = yes`` option instead (https://github.com/ansible-collections/community.general/pull/1347). diff --git a/changelogs/fragments/remove-deprecated-redirects.yml b/changelogs/fragments/remove-deprecated-redirects.yml deleted file mode 100644 index f33c09201c..0000000000 --- a/changelogs/fragments/remove-deprecated-redirects.yml +++ /dev/null @@ -1,7 +0,0 @@ -removed_features: -- The redirect of the ``conjur_variable`` lookup plugin to ``cyberark.conjur.conjur_variable`` collection was removed (https://github.com/ansible-collections/community.general/pull/1346). -- The redirect of the ``firewalld`` module and the ``firewalld`` module_utils to the ``ansible.posix`` collection was removed (https://github.com/ansible-collections/community.general/pull/1346). -- "The redirect to the ``community.proxysql`` collection was removed for: the ``proxysql`` doc fragment, and the following modules: ``proxysql_backend_servers``, ``proxysql_global_variables``, ``proxysql_manage_config``, ``proxysql_mysql_users``, ``proxysql_query_rules``, ``proxysql_replication_hostgroups``, ``proxysql_scheduler`` (https://github.com/ansible-collections/community.general/pull/1346)." -- "The redirect to the ``community.mysql`` collection was removed for: the ``mysql`` doc fragment, the ``mysql`` module_utils, and the following modules: ``mysql_db``, ``mysql_info``, ``mysql_query``, ``mysql_replication``, ``mysql_user``, ``mysql_variables`` (https://github.com/ansible-collections/community.general/pull/1346)." 
-- "The redirect to the ``infinidat.infinibox`` collection was removed for: the ``infinibox`` doc fragment, the ``infinibox`` module_utils, and the following modules: ``infini_export``, ``infini_export_client``, ``infini_fs``, ``infini_host``, ``infini_pool``, ``infini_vol`` (https://github.com/ansible-collections/community.general/pull/1346)." -- "The redirect to the ``community.digitalocean`` collection was removed for: the ``digital_ocean`` doc fragment, the ``digital_ocean`` module_utils, and the following modules: ``digital_ocean``, ``digital_ocean_account_facts``, ``digital_ocean_account_info``, ``digital_ocean_block_storage``, ``digital_ocean_certificate``, ``digital_ocean_certificate_facts``, ``digital_ocean_certificate_info``, ``digital_ocean_domain``, ``digital_ocean_domain_facts``, ``digital_ocean_domain_info``, ``digital_ocean_droplet``, ``digital_ocean_firewall_facts``, ``digital_ocean_firewall_info``, ``digital_ocean_floating_ip``, ``digital_ocean_floating_ip_facts``, ``digital_ocean_floating_ip_info``, ``digital_ocean_image_facts``, ``digital_ocean_image_info``, ``digital_ocean_load_balancer_facts``, ``digital_ocean_load_balancer_info``, ``digital_ocean_region_facts``, ``digital_ocean_region_info``, ``digital_ocean_size_facts``, ``digital_ocean_size_info``, ``digital_ocean_snapshot_facts``, ``digital_ocean_snapshot_info``, ``digital_ocean_sshkey``, ``digital_ocean_sshkey_facts``, ``digital_ocean_sshkey_info``, ``digital_ocean_tag``, ``digital_ocean_tag_facts``, ``digital_ocean_tag_info``, ``digital_ocean_volume_facts``, ``digital_ocean_volume_info`` (https://github.com/ansible-collections/community.general/pull/1346)." 
diff --git a/changelogs/fragments/snmp_facts.yml b/changelogs/fragments/snmp_facts.yml deleted file mode 100644 index 09c5164b5e..0000000000 --- a/changelogs/fragments/snmp_facts.yml +++ /dev/null @@ -1,2 +0,0 @@ -security_fixes: -- 'snmp_facts - **CVE-2021-20178** - hide user sensitive information such as ``privkey`` and ``authkey`` from logging into the console (https://github.com/ansible-collections/community.general/pull/1621).' diff --git a/changelogs/fragments/telegram-api-update.yml b/changelogs/fragments/telegram-api-update.yml deleted file mode 100644 index c6422cf5f5..0000000000 --- a/changelogs/fragments/telegram-api-update.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - telegram - now can call any methods in Telegram bot API. - Previously this module was hardcoded to use "SendMessage" only. - Usage of "SendMessage" API method was also librated, - and now you can specify any arguments you need, for example, "disable_notificaton" (https://github.com/ansible-collections/community.general/pull/1642). diff --git a/changelogs/fragments/xfconf_add_uint_type.yml b/changelogs/fragments/xfconf_add_uint_type.yml deleted file mode 100644 index 7022a3404b..0000000000 --- a/changelogs/fragments/xfconf_add_uint_type.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - xfconf - add support for ``uint`` type (https://github.com/ansible-collections/community.general/pull/696). diff --git a/changelogs/fragments/xml-remove-changed.yml b/changelogs/fragments/xml-remove-changed.yml deleted file mode 100644 index f1fc50b32c..0000000000 --- a/changelogs/fragments/xml-remove-changed.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - xml - fixed issue were changed was returned when removing non-existent xpath (https://github.com/ansible-collections/community.general/pull/1007). 
diff --git a/changelogs/fragments/zfs-root-snapshot.yml b/changelogs/fragments/zfs-root-snapshot.yml deleted file mode 100644 index 13b5e8ce0c..0000000000 --- a/changelogs/fragments/zfs-root-snapshot.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - zfs - fixed ``invalid character '@' in pool name"`` error when working with snapshots on a root zvol (https://github.com/ansible-collections/community.general/issues/932). From 00f5f7dfe73b75d781b4b26e8493749bc64612e8 Mon Sep 17 00:00:00 2001 From: Bill Dodd Date: Fri, 29 Jan 2021 00:17:57 -0600 Subject: [PATCH 0003/3093] Add Name and/or Id properties to resource inventory output (#1691) * add Name and/or Id properties to resource inventory output * add changelog fragment --- ...-and-id-props-to-redfish-inventory-output.yml | 2 ++ plugins/module_utils/redfish_utils.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml diff --git a/changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml b/changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml new file mode 100644 index 0000000000..1cf8897018 --- /dev/null +++ b/changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml @@ -0,0 +1,2 @@ +bugfixes: + - redfish_info module, redfish_utils module utils - add ``Name`` and ``Id`` properties to output of Redfish inventory commands (https://github.com/ansible-collections/community.general/issues/1650). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 79ef7213b9..8f14dbad78 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -469,7 +469,7 @@ class RedfishUtils(object): controller_results = [] # Get these entries, but does not fail if not found properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers', - 'Location', 'Manufacturer', 'Model', 'Name', + 'Location', 'Manufacturer', 'Model', 'Name', 'Id', 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status'] key = "StorageControllers" @@ -1700,7 +1700,7 @@ class RedfishUtils(object): chassis_results = [] # Get these entries, but does not fail if not found - properties = ['ChassisType', 'PartNumber', 'AssetTag', + properties = ['Name', 'Id', 'ChassisType', 'PartNumber', 'AssetTag', 'Manufacturer', 'IndicatorLED', 'SerialNumber', 'Model'] # Go through list @@ -1724,7 +1724,7 @@ class RedfishUtils(object): fan_results = [] key = "Thermal" # Get these entries, but does not fail if not found - properties = ['FanName', 'Reading', 'ReadingUnits', 'Status'] + properties = ['Name', 'FanName', 'Reading', 'ReadingUnits', 'Status'] # Go through list for chassis_uri in self.chassis_uris: @@ -1836,8 +1836,8 @@ class RedfishUtils(object): cpu_results = [] key = "Processors" # Get these entries, but does not fail if not found - properties = ['Id', 'Manufacturer', 'Model', 'MaxSpeedMHz', 'TotalCores', - 'TotalThreads', 'Status'] + properties = ['Id', 'Name', 'Manufacturer', 'Model', 'MaxSpeedMHz', + 'TotalCores', 'TotalThreads', 'Status'] # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + systems_uri) @@ -1886,7 +1886,7 @@ class RedfishUtils(object): memory_results = [] key = "Memory" # Get these entries, but does not fail if not found - properties = ['SerialNumber', 'MemoryDeviceType', 'PartNumber', + properties = ['Id', 'SerialNumber', 'MemoryDeviceType', 'PartNumber', 'MemoryLocation', 
'RankCount', 'CapacityMiB', 'OperatingMemoryModes', 'Status', 'Manufacturer', 'Name'] # Search for 'key' entry and extract URI from it @@ -1943,7 +1943,7 @@ class RedfishUtils(object): nic_results = [] key = "EthernetInterfaces" # Get these entries, but does not fail if not found - properties = ['Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses', + properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses', 'NameServers', 'MACAddress', 'PermanentMACAddress', 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status'] @@ -2368,7 +2368,7 @@ class RedfishUtils(object): properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer', 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag', 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary', - 'ProcessorSummary', 'TrustedModules'] + 'ProcessorSummary', 'TrustedModules', 'Name', 'Id'] response = self.get_request(self.root_uri + systems_uri) if response['ret'] is False: From 9c648c8e3acedf0881c9fbeb08b353230457c945 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Fri, 29 Jan 2021 12:45:06 +0300 Subject: [PATCH 0004/3093] BOTMETA.yml: add logstash plugin maintainer (#1700) Co-authored-by: Andrew Klychkov --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 263f462c84..f5bad2bbb6 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -15,6 +15,8 @@ files: labels: become $callbacks/: labels: callbacks + $callbacks/logstash.py: + maintainers: ujenmr $callbacks/say.py: notify: chris-short maintainers: $team_macos From d0f097c87191232106ce588f0ef73c5076c31095 Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Mon, 1 Feb 2021 13:12:32 +0530 Subject: [PATCH 0005/3093] homebrew_tap: Add support for brew search path (#1708) * homebrew_tap: Add support for brew search path User can specify search path for brew executable. 
Fixes: #1702 Signed-off-by: Abhijeet Kasurde * Change version Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/1702_homebrew_tap.yml | 2 ++ plugins/modules/packaging/os/homebrew_tap.py | 21 +++++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/1702_homebrew_tap.yml diff --git a/changelogs/fragments/1702_homebrew_tap.yml b/changelogs/fragments/1702_homebrew_tap.yml new file mode 100644 index 0000000000..7eabc45a9b --- /dev/null +++ b/changelogs/fragments/1702_homebrew_tap.yml @@ -0,0 +1,2 @@ +minor_changes: +- homebrew_tap - add support to specify search path for ``brew`` executable (https://github.com/ansible-collections/community.general/issues/1702). diff --git a/plugins/modules/packaging/os/homebrew_tap.py b/plugins/modules/packaging/os/homebrew_tap.py index 99cff69b00..6b30fdb68f 100644 --- a/plugins/modules/packaging/os/homebrew_tap.py +++ b/plugins/modules/packaging/os/homebrew_tap.py @@ -45,6 +45,12 @@ options: required: false default: 'present' type: str + path: + description: + - "A ':' separated list of paths to search for C(brew) executable." 
+ default: '/usr/local/bin:/opt/homebrew/bin' + type: path + version_added: '2.1.0' requirements: [ homebrew ] ''' @@ -127,7 +133,7 @@ def add_tap(module, brew_path, tap, url=None): def add_taps(module, brew_path, taps): '''Adds one or more taps.''' - failed, unchanged, added, msg = False, 0, 0, '' + failed, changed, unchanged, added, msg = False, False, 0, 0, '' for tap in taps: (failed, changed, msg) = add_tap(module, brew_path, tap) @@ -182,7 +188,7 @@ def remove_tap(module, brew_path, tap): def remove_taps(module, brew_path, taps): '''Removes one or more taps.''' - failed, unchanged, removed, msg = False, 0, 0, '' + failed, changed, unchanged, removed, msg = False, False, 0, 0, '' for tap in taps: (failed, changed, msg) = remove_tap(module, brew_path, tap) @@ -211,14 +217,23 @@ def main(): name=dict(aliases=['tap'], type='list', required=True, elements='str'), url=dict(default=None, required=False), state=dict(default='present', choices=['present', 'absent']), + path=dict( + default="/usr/local/bin:/opt/homebrew/bin", + required=False, + type='path', + ), ), supports_check_mode=True, ) + path = module.params['path'] + if path: + path = path.split(':') + brew_path = module.get_bin_path( 'brew', required=True, - opt_dirs=['/usr/local/bin', '/opt/homebrew/bin'] + opt_dirs=path, ) taps = module.params['name'] From 6af3c96d8e8c5c710d23072e83b3dfb3fdaeef89 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 1 Feb 2021 16:30:32 +0100 Subject: [PATCH 0006/3093] Fedora 30 and 31 are EOL and will eventually be removed from devel. 
(#1705) --- .azure-pipelines/azure-pipelines.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 432d1afa04..055b57ca7e 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -206,10 +206,10 @@ stages: test: centos7 - name: CentOS 8 test: centos8 - - name: Fedora 31 - test: fedora31 - name: Fedora 32 test: fedora32 + - name: Fedora 33 + test: fedora33 - name: openSUSE 15 py2 test: opensuse15py2 - name: openSUSE 15 py3 From 8afdd23be4266d511e4247bdc28a761e0253dc29 Mon Sep 17 00:00:00 2001 From: Rajeev Arakkal <36444805+rajeevarakkal@users.noreply.github.com> Date: Mon, 1 Feb 2021 09:06:58 -0800 Subject: [PATCH 0007/3093] Migrating DellEMC collections from community.general collections (#1699) * migration modification for dellemc collections * removing dellemc collections * Update changelogs/fragments/948-dellemc-migration-removal.yml Co-authored-by: Felix Fontein * Update 948-dellemc-migration-removal.yml * Update 948-dellemc-migration-removal.yml * Update runtime.yml * Update meta/runtime.yml Co-authored-by: Felix Fontein * Update runtime.yml * deleted symlink entries * Update 948-dellemc-migration-removal.yml * Update changelogs/fragments/948-dellemc-migration-removal.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/948-dellemc-migration-removal.yml Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 7 - .../948-dellemc-migration-removal.yml | 13 + meta/runtime.yml | 10 + .../remote_management/dellemc/__init__.py | 0 .../dellemc/dellemc_idrac.py | 56 --- .../remote_management/dellemc/ome.py | 163 ------- plugins/modules/idrac_firmware.py | 1 - .../modules/idrac_server_config_profile.py | 1 - plugins/modules/ome_device_info.py | 1 - .../dellemc/idrac_firmware.py | 207 --------- .../dellemc/idrac_server_config_profile.py | 301 ------------- .../dellemc/ome_device_info.py | 413 ------------------ 
tests/sanity/ignore-2.10.txt | 3 - tests/sanity/ignore-2.11.txt | 3 - tests/sanity/ignore-2.9.txt | 2 - .../remote_management/dellemc/__init__.py | 0 .../remote_management/dellemc/test_ome.py | 79 ---- .../remote_management/dellemc/__init__.py | 0 .../dellemc/test_ome_device_info.py | 196 --------- 19 files changed, 23 insertions(+), 1433 deletions(-) create mode 100644 changelogs/fragments/948-dellemc-migration-removal.yml delete mode 100644 plugins/module_utils/remote_management/dellemc/__init__.py delete mode 100644 plugins/module_utils/remote_management/dellemc/dellemc_idrac.py delete mode 100644 plugins/module_utils/remote_management/dellemc/ome.py delete mode 120000 plugins/modules/idrac_firmware.py delete mode 120000 plugins/modules/idrac_server_config_profile.py delete mode 120000 plugins/modules/ome_device_info.py delete mode 100644 plugins/modules/remote_management/dellemc/idrac_firmware.py delete mode 100644 plugins/modules/remote_management/dellemc/idrac_server_config_profile.py delete mode 100644 plugins/modules/remote_management/dellemc/ome_device_info.py delete mode 100644 tests/unit/plugins/module_utils/remote_management/dellemc/__init__.py delete mode 100644 tests/unit/plugins/module_utils/remote_management/dellemc/test_ome.py delete mode 100644 tests/unit/plugins/modules/remote_management/dellemc/__init__.py delete mode 100644 tests/unit/plugins/modules/remote_management/dellemc/test_ome_device_info.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index f5bad2bbb6..00a27cd837 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -139,7 +139,6 @@ files: $module_utils/redfish_utils.py: maintainers: $team_redfish labels: redfish_utils - $module_utils/remote_management/dellemc/: rajeevarakkal $module_utils/remote_management/lxca/common.py: navalkp prabhosa $module_utils/scaleway.py: maintainers: $team_scaleway @@ -691,12 +690,6 @@ files: maintainers: matze $modules/remote_management/cobbler/: maintainers: dagwieers - 
$modules/remote_management/dellemc/: - maintainers: rajeevarakkal - $modules/remote_management/dellemc/idrac_server_config_profile.py: - maintainers: jagadeeshnv - $modules/remote_management/dellemc/ome_device_info.py: - maintainers: Sajna-Shetty $modules/remote_management/hpilo/: maintainers: haad ignore: dagwieers diff --git a/changelogs/fragments/948-dellemc-migration-removal.yml b/changelogs/fragments/948-dellemc-migration-removal.yml new file mode 100644 index 0000000000..c4f64a815f --- /dev/null +++ b/changelogs/fragments/948-dellemc-migration-removal.yml @@ -0,0 +1,13 @@ +removed_features: + - | + The ``ome_device_info``, ``idrac_firmware`` and ``idrac_server_config_profile`` modules have now been migrated from community.general to the `dellemc.openmanage `_ Ansible collection. + If you use ansible-base 2.10 or newer, redirections have been provided. + + If you use Ansible 2.9 and installed this collection, you need to adjust the FQCNs (``community.general.idrac_firmware`` → ``dellemc.openmanage.idrac_firmware``) and make sure to install the dellemc.openmanage collection. +breaking_changes: + - | + If you use Ansible 2.9 and these plugins or modules from this collection, community.general 3.0.0 results in errors when trying to use the DellEMC content by FQCN, like ``community.general.idrac_firmware``. + Since Ansible 2.9 is not able to use redirections, you will have to adjust your playbooks and roles manually to use the new FQCNs (``dellemc.openmanage.idrac_firmware`` for the previous example) and to make sure that you have ``dellemc.openmanage`` installed. + + If you use ansible-base 2.10 or newer and did not install Ansible 4.0.0, but installed (and/or upgraded) community.general manually, you need to make sure to also install the ``dellemc.openmanage`` collection if you are using any of these plugins or modules. 
+ While ansible-base 2.10 or newer can use the redirects that community.general 3.0.0 adds, the collection they point to (such as dellemc.openmanage) must be installed for them to work. diff --git a/meta/runtime.yml b/meta/runtime.yml index 91b5869f69..1d599d7728 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -199,10 +199,14 @@ plugin_routing: deprecation: removal_version: 3.0.0 warning_text: see plugin documentation for details + idrac_firmware: + redirect: dellemc.openmanage.idrac_firmware idrac_redfish_facts: deprecation: removal_version: 3.0.0 warning_text: see plugin documentation for details + idrac_server_config_profile: + redirect: dellemc.openmanage.idrac_server_config_profile jenkins_job_facts: deprecation: removal_version: 3.0.0 @@ -283,6 +287,8 @@ plugin_routing: deprecation: removal_version: 3.0.0 warning_text: see plugin documentation for details + ome_device_info: + redirect: dellemc.openmanage.ome_device_info one_image_facts: deprecation: removal_version: 3.0.0 @@ -565,6 +571,8 @@ plugin_routing: postgresql: redirect: community.postgresql.postgresql module_utils: + remote_management.dellemc.dellemc_idrac: + redirect: dellemc.openmanage.dellemc_idrac docker.common: redirect: community.docker.common docker.swarm: @@ -579,6 +587,8 @@ plugin_routing: redirect: community.hrobot.robot kubevirt: redirect: community.kubevirt.kubevirt + remote_management.dellemc.ome: + redirect: dellemc.openmanage.ome postgresql: redirect: community.postgresql.postgresql callback: diff --git a/plugins/module_utils/remote_management/dellemc/__init__.py b/plugins/module_utils/remote_management/dellemc/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py b/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py deleted file mode 100644 index 93d3bfcb74..0000000000 --- a/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- 
coding: utf-8 -*- - -# -# Dell EMC OpenManage Ansible Modules -# Version 1.0 -# Copyright (C) 2018 Dell Inc. - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries. -# Other trademarks may be trademarks of their respective owners. -# - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -try: - from omsdk.sdkinfra import sdkinfra - from omsdk.sdkcreds import UserCredentials - from omsdk.sdkfile import FileOnShare, file_share_manager - from omsdk.sdkprotopref import ProtoPreference, ProtocolEnum - from omsdk.http.sdkwsmanbase import WsManOptions - HAS_OMSDK = True -except ImportError: - HAS_OMSDK = False - - -class iDRACConnection: - - def __init__(self, module_params): - if not HAS_OMSDK: - raise ImportError("Dell EMC OMSDK library is required for this module") - self.idrac_ip = module_params['idrac_ip'] - self.idrac_user = module_params['idrac_user'] - self.idrac_pwd = module_params['idrac_password'] - self.idrac_port = module_params['idrac_port'] - if not all((self.idrac_ip, self.idrac_user, self.idrac_pwd)): - raise ValueError("hostname, username and password required") - self.handle = None - self.creds = UserCredentials(self.idrac_user, self.idrac_pwd) - self.pOp = WsManOptions(port=self.idrac_port) - self.sdk = sdkinfra() - if self.sdk is None: - msg = "Could not initialize iDRAC drivers." 
- raise RuntimeError(msg) - - def __enter__(self): - self.sdk.importPath() - self.handle = self.sdk.get_driver(self.sdk.driver_enum.iDRAC, self.idrac_ip, self.creds, pOptions=self.pOp) - if self.handle is None: - msg = "Could not find device driver for iDRAC with IP Address: {0}".format(self.idrac_ip) - raise RuntimeError(msg) - return self.handle - - def __exit__(self, exc_type, exc_val, exc_tb): - self.handle.disconnect() - return False diff --git a/plugins/module_utils/remote_management/dellemc/ome.py b/plugins/module_utils/remote_management/dellemc/ome.py deleted file mode 100644 index 9d02e55004..0000000000 --- a/plugins/module_utils/remote_management/dellemc/ome.py +++ /dev/null @@ -1,163 +0,0 @@ -# -*- coding: utf-8 -*- - -# Dell EMC OpenManage Ansible Modules -# Version 1.3 -# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved. -# -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError -from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError -from ansible.module_utils.six.moves.urllib.parse import urlencode - -SESSION_RESOURCE_COLLECTION = { - "SESSION": "SessionService/Sessions", - "SESSION_ID": "SessionService/Sessions('{Id}')", -} - - -class OpenURLResponse(object): - """Handles HTTPResponse""" - - def __init__(self, resp): - self.body = None - self.resp = resp - if self.resp: - self.body = self.resp.read() - - @property - def json_data(self): - try: - return json.loads(self.body) - except ValueError: - raise ValueError("Unable to parse json") - - @property - def status_code(self): - return self.resp.getcode() - - @property - def success(self): - return self.status_code in (200, 201, 202, 204) - - @property - def token_header(self): - return 
self.resp.headers.get('X-Auth-Token') - - -class RestOME(object): - """Handles OME API requests""" - - def __init__(self, module_params=None, req_session=False): - self.module_params = module_params - self.hostname = self.module_params["hostname"] - self.username = self.module_params["username"] - self.password = self.module_params["password"] - self.port = self.module_params["port"] - self.req_session = req_session - self.session_id = None - self.protocol = 'https' - self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} - - def _get_base_url(self): - """builds base url""" - return '{0}://{1}:{2}/api'.format(self.protocol, self.hostname, self.port) - - def _build_url(self, path, query_param=None): - """builds complete url""" - url = path - base_uri = self._get_base_url() - if path: - url = '{0}/{1}'.format(base_uri, path) - if query_param: - url += "?{0}".format(urlencode(query_param)) - return url - - def _url_common_args_spec(self, method, api_timeout, headers=None): - """Creates an argument common spec""" - req_header = self._headers - if headers: - req_header.update(headers) - url_kwargs = { - "method": method, - "validate_certs": False, - "use_proxy": True, - "headers": req_header, - "timeout": api_timeout, - "follow_redirects": 'all', - } - return url_kwargs - - def _args_without_session(self, method, api_timeout=30, headers=None): - """Creates an argument spec in case of basic authentication""" - req_header = self._headers - if headers: - req_header.update(headers) - url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers) - url_kwargs["url_username"] = self.username - url_kwargs["url_password"] = self.password - url_kwargs["force_basic_auth"] = True - return url_kwargs - - def _args_with_session(self, method, api_timeout=30, headers=None): - """Creates an argument spec, in case of authentication with session""" - url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers) - 
url_kwargs["force_basic_auth"] = False - return url_kwargs - - def invoke_request(self, method, path, data=None, query_param=None, headers=None, - api_timeout=30, dump=True): - """ - Sends a request via open_url - Returns :class:`OpenURLResponse` object. - :arg method: HTTP verb to use for the request - :arg path: path to request without query parameter - :arg data: (optional) Payload to send with the request - :arg query_param: (optional) Dictionary of query parameter to send with request - :arg headers: (optional) Dictionary of HTTP Headers to send with the - request - :arg api_timeout: (optional) How long to wait for the server to send - data before giving up - :arg dump: (Optional) boolean value for dumping payload data. - :returns: OpenURLResponse - """ - try: - if 'X-Auth-Token' in self._headers: - url_kwargs = self._args_with_session(method, api_timeout, headers=headers) - else: - url_kwargs = self._args_without_session(method, api_timeout, headers=headers) - if data and dump: - data = json.dumps(data) - url = self._build_url(path, query_param=query_param) - resp = open_url(url, data=data, **url_kwargs) - resp_data = OpenURLResponse(resp) - except (HTTPError, URLError, SSLValidationError, ConnectionError) as err: - raise err - return resp_data - - def __enter__(self): - """Creates sessions by passing it to header""" - if self.req_session: - payload = {'UserName': self.username, - 'Password': self.password, - 'SessionType': 'API', } - path = SESSION_RESOURCE_COLLECTION["SESSION"] - resp = self.invoke_request('POST', path, data=payload) - if resp and resp.success: - self.session_id = resp.json_data.get("Id") - self._headers["X-Auth-Token"] = resp.token_header - else: - msg = "Could not create the session" - raise ConnectionError(msg) - return self - - def __exit__(self, exc_type, exc_value, traceback): - """Deletes a session id, which is in use for request""" - if self.session_id: - path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id) - 
self.invoke_request('DELETE', path) - return False diff --git a/plugins/modules/idrac_firmware.py b/plugins/modules/idrac_firmware.py deleted file mode 120000 index cb7e8da471..0000000000 --- a/plugins/modules/idrac_firmware.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/dellemc/idrac_firmware.py \ No newline at end of file diff --git a/plugins/modules/idrac_server_config_profile.py b/plugins/modules/idrac_server_config_profile.py deleted file mode 120000 index ff98a9d1aa..0000000000 --- a/plugins/modules/idrac_server_config_profile.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/dellemc/idrac_server_config_profile.py \ No newline at end of file diff --git a/plugins/modules/ome_device_info.py b/plugins/modules/ome_device_info.py deleted file mode 120000 index ccca666861..0000000000 --- a/plugins/modules/ome_device_info.py +++ /dev/null @@ -1 +0,0 @@ -./remote_management/dellemc/ome_device_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/dellemc/idrac_firmware.py b/plugins/modules/remote_management/dellemc/idrac_firmware.py deleted file mode 100644 index fa8ac66ce5..0000000000 --- a/plugins/modules/remote_management/dellemc/idrac_firmware.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# -# Dell EMC OpenManage Ansible Modules -# Version 2.0 -# Copyright (C) 2018-2019 Dell Inc. or its subsidiaries. All Rights Reserved. - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# - - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: idrac_firmware -short_description: Firmware update from a repository on a network share (CIFS, NFS). -description: - - Update the Firmware by connecting to a network share (either CIFS or NFS) that contains a catalog of - available updates. 
- - Network share should contain a valid repository of Update Packages (DUPs) and a catalog file describing the DUPs. - - All applicable updates contained in the repository are applied to the system. - - This feature is available only with iDRAC Enterprise License. -options: - idrac_ip: - description: iDRAC IP Address. - type: str - required: True - idrac_user: - description: iDRAC username. - type: str - required: True - idrac_password: - description: iDRAC user password. - type: str - required: True - aliases: ['idrac_pwd'] - idrac_port: - description: iDRAC port. - type: int - default: 443 - share_name: - description: CIFS or NFS Network share. - type: str - required: True - share_user: - description: Network share user in the format 'user@domain' or 'domain\\user' if user is - part of a domain else 'user'. This option is mandatory for CIFS Network Share. - type: str - share_password: - description: Network share user password. This option is mandatory for CIFS Network Share. - type: str - aliases: ['share_pwd'] - share_mnt: - description: Local mount path of the network share with read-write permission for ansible user. - This option is mandatory for Network Share. - type: str - required: True - reboot: - description: Whether to reboots after applying the updates or not. - type: bool - default: false - job_wait: - description: Whether to wait for job completion or not. - type: bool - default: true - catalog_file_name: - required: False - description: Catalog file name relative to the I(share_name). 
- type: str - default: 'Catalog.xml' - -requirements: - - "omsdk" - - "python >= 2.7.5" -author: "Rajeev Arakkal (@rajeevarakkal)" -''' - -EXAMPLES = """ ---- -- name: Update firmware from repository on a Network Share - community.general.idrac_firmware: - idrac_ip: "192.168.0.1" - idrac_user: "user_name" - idrac_password: "user_password" - share_name: "192.168.0.0:/share" - share_user: "share_user_name" - share_password: "share_user_pwd" - share_mnt: "/mnt/share" - reboot: True - job_wait: True - catalog_file_name: "Catalog.xml" -""" - -RETURN = """ ---- -msg: - type: str - description: Over all firmware update status. - returned: always - sample: "Successfully updated the firmware." -update_status: - type: dict - description: Firmware Update job and progress details from the iDRAC. - returned: success - sample: { - 'InstanceID': 'JID_XXXXXXXXXXXX', - 'JobState': 'Completed', - 'Message': 'Job completed successfully.', - 'MessageId': 'REDXXX', - 'Name': 'Repository Update', - 'JobStartTime': 'NA', - 'Status': 'Success', - } -""" - - -from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection -from ansible.module_utils.basic import AnsibleModule -try: - from omsdk.sdkcreds import UserCredentials - from omsdk.sdkfile import FileOnShare - HAS_OMSDK = True -except ImportError: - HAS_OMSDK = False - - -def _validate_catalog_file(catalog_file_name): - normilized_file_name = catalog_file_name.lower() - if not normilized_file_name: - raise ValueError('catalog_file_name should be a non-empty string.') - elif not normilized_file_name.endswith("xml"): - raise ValueError('catalog_file_name should be an XML file.') - - -def update_firmware(idrac, module): - """Update firmware from a network share and return the job details.""" - msg = {} - msg['changed'] = False - msg['update_status'] = {} - - try: - upd_share = FileOnShare(remote=module.params['share_name'] + "/" + module.params['catalog_file_name'], - 
mount_point=module.params['share_mnt'], - isFolder=False, - creds=UserCredentials( - module.params['share_user'], - module.params['share_password']) - ) - - idrac.use_redfish = True - if '12' in idrac.ServerGeneration or '13' in idrac.ServerGeneration: - idrac.use_redfish = False - - apply_update = True - msg['update_status'] = idrac.update_mgr.update_from_repo(upd_share, - apply_update, - module.params['reboot'], - module.params['job_wait']) - except RuntimeError as e: - module.fail_json(msg=str(e)) - - if "Status" in msg['update_status']: - if msg['update_status']['Status'] == "Success": - if module.params['job_wait']: - msg['changed'] = True - else: - module.fail_json(msg='Failed to update firmware.', update_status=msg['update_status']) - return msg - - -def main(): - module = AnsibleModule( - argument_spec={ - "idrac_ip": {"required": True, "type": 'str'}, - "idrac_user": {"required": True, "type": 'str'}, - "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True}, - "idrac_port": {"required": False, "default": 443, "type": 'int'}, - - "share_name": {"required": True, "type": 'str'}, - "share_user": {"required": False, "type": 'str'}, - "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True}, - "share_mnt": {"required": True, "type": 'str'}, - - "catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"}, - "reboot": {"required": False, "type": 'bool', "default": False}, - "job_wait": {"required": False, "type": 'bool', "default": True}, - }, - - supports_check_mode=False) - - try: - # Validate the catalog file - _validate_catalog_file(module.params['catalog_file_name']) - # Connect to iDRAC and update firmware - with iDRACConnection(module.params) as idrac: - update_status = update_firmware(idrac, module) - except (ImportError, ValueError, RuntimeError) as e: - module.fail_json(msg=str(e)) - - module.exit_json(msg='Successfully updated the firmware.', 
update_status=update_status) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py b/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py deleted file mode 100644 index 39857fd30a..0000000000 --- a/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py +++ /dev/null @@ -1,301 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# -# Dell EMC OpenManage Ansible Modules -# Version 2.0 -# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved. - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# - - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: idrac_server_config_profile -short_description: Export or Import iDRAC Server Configuration Profile (SCP). -description: - - Export the Server Configuration Profile (SCP) from the iDRAC or Import from a network share or a local file. -options: - idrac_ip: - description: iDRAC IP Address. - type: str - required: True - idrac_user: - description: iDRAC username. - type: str - required: True - idrac_password: - description: iDRAC user password. - type: str - required: True - aliases: ['idrac_pwd'] - idrac_port: - description: iDRAC port. - type: int - default: 443 - command: - description: - - If C(import), will perform SCP import operations. - - If C(export), will perform SCP export operations. - choices: ['import', 'export'] - default: 'export' - job_wait: - description: Whether to wait for job completion or not. - type: bool - required: True - share_name: - description: CIFS or NFS Network Share or a local path. - type: str - required: True - share_user: - description: Network share user in the format 'user@domain' or 'domain\\user' if user is - part of a domain else 'user'. This option is mandatory for CIFS Network Share. 
- type: str - share_password: - description: Network share user password. This option is mandatory for CIFS Network Share. - type: str - aliases: ['share_pwd'] - scp_file: - description: Server Configuration Profile file name. This option is mandatory for C(import) command. - type: str - scp_components: - description: - - If C(ALL), this module will import all components configurations from SCP file. - - If C(IDRAC), this module will import iDRAC configuration from SCP file. - - If C(BIOS), this module will import BIOS configuration from SCP file. - - If C(NIC), this module will import NIC configuration from SCP file. - - If C(RAID), this module will import RAID configuration from SCP file. - choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'] - default: 'ALL' - shutdown_type: - description: - - This option is applicable for C(import) command. - - If C(Graceful), it gracefully shuts down the server. - - If C(Forced), it forcefully shuts down the server. - - If C(NoReboot), it does not reboot the server. - choices: ['Graceful', 'Forced', 'NoReboot'] - default: 'Graceful' - end_host_power_state: - description: - - This option is applicable for C(import) command. - - If C(On), End host power state is on. - - If C(Off), End host power state is off. - choices: ['On' ,'Off'] - default: 'On' - export_format: - description: Specify the output file format. This option is applicable for C(export) command. - choices: ['JSON', 'XML'] - default: 'XML' - export_use: - description: Specify the type of server configuration profile (SCP) to be exported. - This option is applicable for C(export) command. 
- choices: ['Default', 'Clone', 'Replace'] - default: 'Default' - -requirements: - - "omsdk" - - "python >= 2.7.5" -author: "Jagadeesh N V(@jagadeeshnv)" - -''' - -EXAMPLES = r''' ---- -- name: Import Server Configuration Profile from a network share - community.general.idrac_server_config_profile: - idrac_ip: "192.168.0.1" - idrac_user: "user_name" - idrac_password: "user_password" - command: "import" - share_name: "192.168.0.2:/share" - share_user: "share_user_name" - share_password: "share_user_password" - scp_file: "scp_filename.xml" - scp_components: "ALL" - job_wait: True - -- name: Import Server Configuration Profile from a local path - community.general.idrac_server_config_profile: - idrac_ip: "192.168.0.1" - idrac_user: "user_name" - idrac_password: "user_password" - command: "import" - share_name: "/scp_folder" - share_user: "share_user_name" - share_password: "share_user_password" - scp_file: "scp_filename.xml" - scp_components: "ALL" - job_wait: True - -- name: Export Server Configuration Profile to a network share - community.general.idrac_server_config_profile: - idrac_ip: "192.168.0.1" - idrac_user: "user_name" - idrac_password: "user_password" - share_name: "192.168.0.2:/share" - share_user: "share_user_name" - share_password: "share_user_password" - job_wait: False - -- name: Export Server Configuration Profile to a local path - community.general.idrac_server_config_profile: - idrac_ip: "192.168.0.1" - idrac_user: "user_name" - idrac_password: "user_password" - share_name: "/scp_folder" - share_user: "share_user_name" - share_password: "share_user_password" - job_wait: False -''' - -RETURN = r''' ---- -msg: - type: str - description: Status of the import or export SCP job. - returned: always - sample: "Successfully imported the Server Configuration Profile" -scp_status: - type: dict - description: SCP operation job and progress details from the iDRAC. 
- returned: success - sample: - { - "Id": "JID_XXXXXXXXX", - "JobState": "Completed", - "JobType": "ImportConfiguration", - "Message": "Successfully imported and applied Server Configuration Profile.", - "MessageArgs": [], - "MessageId": "XXX123", - "Name": "Import Configuration", - "PercentComplete": 100, - "StartTime": "TIME_NOW", - "Status": "Success", - "TargetSettingsURI": null, - "retval": true - } -''' - -import os -from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection -from ansible.module_utils.basic import AnsibleModule -try: - from omsdk.sdkfile import file_share_manager - from omsdk.sdkcreds import UserCredentials - from omdrivers.enums.iDRAC.iDRACEnums import (SCPTargetEnum, EndHostPowerStateEnum, - ShutdownTypeEnum, ExportFormatEnum, ExportUseEnum) -except ImportError: - pass - - -def run_import_server_config_profile(idrac, module): - """Import Server Configuration Profile from a network share.""" - target = SCPTargetEnum[module.params['scp_components']] - job_wait = module.params['job_wait'] - end_host_power_state = EndHostPowerStateEnum[module.params['end_host_power_state']] - shutdown_type = ShutdownTypeEnum[module.params['shutdown_type']] - idrac.use_redfish = True - - try: - myshare = file_share_manager.create_share_obj( - share_path="{0}{1}{2}".format(module.params['share_name'], os.sep, module.params['scp_file']), - creds=UserCredentials(module.params['share_user'], - module.params['share_password']), isFolder=False) - import_status = idrac.config_mgr.scp_import(myshare, - target=target, shutdown_type=shutdown_type, - end_host_power_state=end_host_power_state, - job_wait=job_wait) - if not import_status or import_status.get('Status') != "Success": - module.fail_json(msg='Failed to import scp.', scp_status=import_status) - except RuntimeError as e: - module.fail_json(msg=str(e)) - return import_status - - -def run_export_server_config_profile(idrac, module): - """Export 
Server Configuration Profile to a network share.""" - export_format = ExportFormatEnum[module.params['export_format']] - scp_file_name_format = "%ip_%Y%m%d_%H%M%S_scp.{0}".format(module.params['export_format'].lower()) - target = SCPTargetEnum[module.params['scp_components']] - export_use = ExportUseEnum[module.params['export_use']] - idrac.use_redfish = True - - try: - myshare = file_share_manager.create_share_obj(share_path=module.params['share_name'], - creds=UserCredentials(module.params['share_user'], - module.params['share_password']), - isFolder=True) - scp_file_name = myshare.new_file(scp_file_name_format) - export_status = idrac.config_mgr.scp_export(scp_file_name, - target=target, - export_format=export_format, - export_use=export_use, - job_wait=module.params['job_wait']) - if not export_status or export_status.get('Status') != "Success": - module.fail_json(msg='Failed to export scp.', scp_status=export_status) - except RuntimeError as e: - module.fail_json(msg=str(e)) - return export_status - - -def main(): - module = AnsibleModule( - argument_spec={ - "idrac_ip": {"required": True, "type": 'str'}, - "idrac_user": {"required": True, "type": 'str'}, - "idrac_password": {"required": True, "type": 'str', - "aliases": ['idrac_pwd'], "no_log": True}, - "idrac_port": {"required": False, "default": 443, "type": 'int'}, - - "command": {"required": False, "type": 'str', - "choices": ['export', 'import'], "default": 'export'}, - "job_wait": {"required": True, "type": 'bool'}, - - "share_name": {"required": True, "type": 'str'}, - "share_user": {"required": False, "type": 'str'}, - "share_password": {"required": False, "type": 'str', - "aliases": ['share_pwd'], "no_log": True}, - "scp_components": {"required": False, - "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'], - "default": 'ALL'}, - - "scp_file": {"required": False, "type": 'str'}, - "shutdown_type": {"required": False, - "choices": ['Graceful', 'Forced', 'NoReboot'], - "default": 'Graceful'}, - 
"end_host_power_state": {"required": False, - "choices": ['On', 'Off'], - "default": 'On'}, - - "export_format": {"required": False, "type": 'str', - "choices": ['JSON', 'XML'], "default": 'XML'}, - "export_use": {"required": False, "type": 'str', - "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'} - }, - required_if=[ - ["command", "import", ["scp_file"]] - ], - supports_check_mode=False) - - try: - changed = False - with iDRACConnection(module.params) as idrac: - command = module.params['command'] - if command == 'import': - scp_status = run_import_server_config_profile(idrac, module) - if "No changes were applied" not in scp_status.get('Message', ""): - changed = True - else: - scp_status = run_export_server_config_profile(idrac, module) - module.exit_json(changed=changed, msg="Successfully {0}ed the Server Configuration Profile.".format(command), - scp_status=scp_status) - except (ImportError, ValueError, RuntimeError) as e: - module.fail_json(msg=str(e)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/remote_management/dellemc/ome_device_info.py b/plugins/modules/remote_management/dellemc/ome_device_info.py deleted file mode 100644 index 68fbb1e680..0000000000 --- a/plugins/modules/remote_management/dellemc/ome_device_info.py +++ /dev/null @@ -1,413 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# -# Dell EMC OpenManage Ansible Modules -# Version 1.2 -# Copyright (C) 2019 Dell Inc. - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries. -# Other trademarks may be trademarks of their respective owners. -# - - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ome_device_info -short_description: Retrieves the information about Device. 
-description: - - This module retrieves the list of all devices information with the exhaustive inventory of each - device. -options: - hostname: - description: - - Target IP Address or hostname. - type: str - required: True - username: - description: - - Target username. - type: str - required: True - password: - description: - - Target user password. - type: str - required: True - port: - description: - - Target HTTPS port. - type: int - default: 443 - fact_subset: - description: - - C(basic_inventory) returns the list of the devices. - - C(detailed_inventory) returns the inventory details of specified devices. - - C(subsystem_health) returns the health status of specified devices. - type: str - choices: [basic_inventory, detailed_inventory, subsystem_health ] - default: basic_inventory - system_query_options: - description: - - I(system_query_options) applicable for the choices of the fact_subset. Either I(device_id) or I(device_service_tag) - is mandatory for C(detailed_inventory) and C(subsystem_health) or both can be applicable. - type: dict - suboptions: - device_id: - description: - - A list of unique identifier is applicable - for C(detailed_inventory) and C(subsystem_health). - type: list - device_service_tag: - description: - - A list of service tags are applicable for C(detailed_inventory) - and C(subsystem_health). - type: list - inventory_type: - description: - - For C(detailed_inventory), it returns details of the specified inventory type. - type: str - filter: - description: - - For C(basic_inventory), it filters the collection of devices. - I(filter) query format should be aligned with OData standards. - type: str - -requirements: - - "python >= 2.7.5" -author: "Sajna Shetty(@Sajna-Shetty)" -''' - -EXAMPLES = """ ---- -- name: Retrieve basic inventory of all devices. 
- community.general.ome_device_info: - hostname: "192.168.0.1" - username: "username" - password: "password" - -- name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering. - community.general.ome_device_info: - hostname: "192.168.0.1" - username: "username" - password: "password" - fact_subset: "basic_inventory" - system_query_options: - filter: "Id eq 33333 or Id eq 11111" - -- name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222. - community.general.ome_device_info: - hostname: "192.168.0.1" - username: "username" - password: "password" - fact_subset: "detailed_inventory" - system_query_options: - device_id: - - 11111 - - 22222 - -- name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567. - community.general.ome_device_info: - hostname: "192.168.0.1" - username: "username" - password: "password" - fact_subset: "detailed_inventory" - system_query_options: - device_service_tag: - - MXL1234 - - MXL4567 - -- name: Retrieve details of specified inventory type of specified devices identified by ID and service tags. - community.general.ome_device_info: - hostname: "192.168.0.1" - username: "username" - password: "password" - fact_subset: "detailed_inventory" - system_query_options: - device_id: - - 11111 - device_service_tag: - - MXL1234 - - MXL4567 - inventory_type: "serverDeviceCards" - -- name: Retrieve subsystem health of specified devices identified by service tags. - community.general.ome_device_info: - hostname: "192.168.0.1" - username: "username" - password: "password" - fact_subset: "subsystem_health" - system_query_options: - device_service_tag: - - MXL1234 - - MXL4567 - -""" - -RETURN = ''' ---- -msg: - type: str - description: Over all device information status. - returned: on error - sample: "Failed to fetch the device information" -device_info: - type: dict - description: Returns the information collected from the Device. 
- returned: success - sample: { - "value": [ - { - "Actions": null, - "AssetTag": null, - "ChassisServiceTag": null, - "ConnectionState": true, - "DeviceManagement": [ - { - "DnsName": "dnsname.host.com", - "InstrumentationName": "MX-12345", - "MacAddress": "11:10:11:10:11:10", - "ManagementId": 12345, - "ManagementProfile": [ - { - "HasCreds": 0, - "ManagementId": 12345, - "ManagementProfileId": 12345, - "ManagementURL": "https://192.168.0.1:443", - "Status": 1000, - "StatusDateTime": "2019-01-21 06:30:08.501" - } - ], - "ManagementType": 2, - "NetworkAddress": "192.168.0.1" - } - ], - "DeviceName": "MX-0003I", - "DeviceServiceTag": "MXL1234", - "DeviceSubscription": null, - "LastInventoryTime": "2019-01-21 06:30:08.501", - "LastStatusTime": "2019-01-21 06:30:02.492", - "ManagedState": 3000, - "Model": "PowerEdge MX7000", - "PowerState": 17, - "SlotConfiguration": {}, - "Status": 4000, - "SystemId": 2031, - "Type": 2000 - } - ] - } -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome import RestOME -from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError -from ansible.module_utils.urls import ConnectionError, SSLValidationError - -DEVICES_INVENTORY_DETAILS = "detailed_inventory" -DEVICES_SUBSYSTEM_HEALTH = "subsystem_health" -DEVICES_INVENTORY_TYPE = "inventory_type" -DEVICE_LIST = "basic_inventory" -DESC_HTTP_ERROR = "HTTP Error 404: Not Found" -device_fact_error_report = {} - -DEVICE_RESOURCE_COLLECTION = { - DEVICE_LIST: {"resource": "DeviceService/Devices"}, - DEVICES_INVENTORY_DETAILS: {"resource": "DeviceService/Devices({Id})/InventoryDetails"}, - DEVICES_INVENTORY_TYPE: {"resource": "DeviceService/Devices({Id})/InventoryDetails('{InventoryType}')"}, - DEVICES_SUBSYSTEM_HEALTH: {"resource": "DeviceService/Devices({Id})/SubSystemHealth"}, -} - - -def _get_device_id_from_service_tags(service_tags, rest_obj): - """ - Get device ids from 
device service tag - Returns :dict : device_id to service_tag map - :arg service_tags: service tag - :arg rest_obj: RestOME class object in case of request with session. - :returns: dict eg: {1345:"MXL1245"} - """ - try: - path = DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"] - resp = rest_obj.invoke_request('GET', path) - if resp.success: - devices_list = resp.json_data["value"] - service_tag_dict = {} - for item in devices_list: - if item["DeviceServiceTag"] in service_tags: - service_tag_dict.update({item["Id"]: item["DeviceServiceTag"]}) - available_service_tags = service_tag_dict.values() - not_available_service_tag = list(set(service_tags) - set(available_service_tags)) - device_fact_error_report.update(dict((tag, DESC_HTTP_ERROR) for tag in not_available_service_tag)) - else: - raise ValueError(resp.json_data) - except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err: - raise err - return service_tag_dict - - -def is_int(val): - """check when device_id numeric represented value is int""" - try: - int(val) - return True - except ValueError: - return False - - -def _check_duplicate_device_id(device_id_list, service_tag_dict): - """If service_tag is duplicate of device_id, then updates the message as Duplicate report - :arg1: device_id_list : list of device_id - :arg2: service_tag_id_dict: dictionary of device_id to service tag map""" - if device_id_list: - device_id_represents_int = [int(device_id) for device_id in device_id_list if device_id and is_int(device_id)] - common_val = list(set(device_id_represents_int) & set(service_tag_dict.keys())) - for device_id in common_val: - device_fact_error_report.update( - {service_tag_dict[device_id]: "Duplicate report of device_id: {0}".format(device_id)}) - del service_tag_dict[device_id] - - -def _get_device_identifier_map(module_params, rest_obj): - """ - Builds the identifiers mapping - :returns: the dict of device_id to server_tag map - eg: {"device_id":{1234: 
None},"device_service_tag":{1345:"MXL1234"}}""" - system_query_options_param = module_params.get("system_query_options") - device_id_service_tag_dict = {} - if system_query_options_param is not None: - device_id_list = system_query_options_param.get("device_id") - device_service_tag_list = system_query_options_param.get("device_service_tag") - if device_id_list: - device_id_dict = dict((device_id, None) for device_id in list(set(device_id_list))) - device_id_service_tag_dict["device_id"] = device_id_dict - if device_service_tag_list: - service_tag_dict = _get_device_id_from_service_tags(device_service_tag_list, - rest_obj) - - _check_duplicate_device_id(device_id_list, service_tag_dict) - device_id_service_tag_dict["device_service_tag"] = service_tag_dict - return device_id_service_tag_dict - - -def _get_query_parameters(module_params): - """ - Builds query parameter - :returns: dictionary, which is applicable builds the query format - eg : {"$filter":"Type eq 2000"} - """ - system_query_options_param = module_params.get("system_query_options") - query_parameter = None - if system_query_options_param: - filter_by_val = system_query_options_param.get("filter") - if filter_by_val: - query_parameter = {"$filter": filter_by_val} - return query_parameter - - -def _get_resource_parameters(module_params, rest_obj): - """ - Identifies the resource path by different states - :returns: dictionary containing identifier with respective resource path - eg:{"device_id":{1234:""DeviceService/Devices(1234)/InventoryDetails"}, - "device_service_tag":{"MXL1234":"DeviceService/Devices(1345)/InventoryDetails"}} - """ - fact_subset = module_params["fact_subset"] - path_dict = {} - if fact_subset != DEVICE_LIST: - inventory_type = None - device_id_service_tag_dict = _get_device_identifier_map(module_params, rest_obj) - if fact_subset == DEVICES_INVENTORY_DETAILS: - system_query_options = module_params.get("system_query_options") - inventory_type = 
system_query_options.get(DEVICES_INVENTORY_TYPE) - path_identifier = DEVICES_INVENTORY_TYPE if inventory_type else fact_subset - for identifier_type, identifier_dict in device_id_service_tag_dict.items(): - path_dict[identifier_type] = {} - for device_id, service_tag in identifier_dict.items(): - key_identifier = service_tag if identifier_type == "device_service_tag" else device_id - path = DEVICE_RESOURCE_COLLECTION[path_identifier]["resource"].format(Id=device_id, - InventoryType=inventory_type) - path_dict[identifier_type].update({key_identifier: path}) - else: - path_dict.update({DEVICE_LIST: DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]}) - return path_dict - - -def _check_mutually_inclusive_arguments(val, module_params, required_args): - """" - Throws error if arguments detailed_inventory, subsystem_health - not exists with qualifier device_id or device_service_tag""" - system_query_options_param = module_params.get("system_query_options") - if system_query_options_param is None or (system_query_options_param is not None and not any( - system_query_options_param.get(qualifier) for qualifier in required_args)): - raise ValueError("One of the following {0} is required for {1}".format(required_args, val)) - - -def _validate_inputs(module_params): - """validates input parameters""" - fact_subset = module_params["fact_subset"] - if fact_subset != "basic_inventory": - _check_mutually_inclusive_arguments(fact_subset, module_params, ["device_id", "device_service_tag"]) - - -def main(): - system_query_options = {"type": 'dict', "required": False, "options": { - "device_id": {"type": 'list'}, - "device_service_tag": {"type": 'list'}, - "inventory_type": {"type": 'str'}, - "filter": {"type": 'str', "required": False}, - }} - - module = AnsibleModule( - argument_spec={ - "hostname": {"required": True, "type": 'str'}, - "username": {"required": True, "type": 'str'}, - "password": {"required": True, "type": 'str', "no_log": True}, - "port": {"required": False, 
"default": 443, "type": 'int'}, - "fact_subset": {"required": False, "default": "basic_inventory", - "choices": ['basic_inventory', 'detailed_inventory', 'subsystem_health']}, - "system_query_options": system_query_options, - }, - required_if=[['fact_subset', 'detailed_inventory', ['system_query_options']], - ['fact_subset', 'subsystem_health', ['system_query_options']], ], - supports_check_mode=False) - - try: - _validate_inputs(module.params) - with RestOME(module.params, req_session=True) as rest_obj: - device_facts = _get_resource_parameters(module.params, rest_obj) - resp_status = [] - if device_facts.get("basic_inventory"): - query_param = _get_query_parameters(module.params) - resp = rest_obj.invoke_request('GET', device_facts["basic_inventory"], query_param=query_param) - device_facts = resp.json_data - resp_status.append(resp.status_code) - else: - for identifier_type, path_dict_map in device_facts.items(): - for identifier, path in path_dict_map.items(): - try: - resp = rest_obj.invoke_request('GET', path) - data = resp.json_data - resp_status.append(resp.status_code) - except HTTPError as err: - data = str(err) - path_dict_map[identifier] = data - if any(device_fact_error_report): - if "device_service_tag" in device_facts: - device_facts["device_service_tag"].update(device_fact_error_report) - else: - device_facts["device_service_tag"] = device_fact_error_report - if 200 in resp_status: - module.exit_json(device_info=device_facts) - else: - module.fail_json(msg="Failed to fetch the device information") - except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err: - module.fail_json(msg=str(err)) - - -if __name__ == '__main__': - main() diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index e1241f89d1..46142bf001 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -237,9 +237,6 @@ plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-synt 
plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid -plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:doc-missing-type -plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/dellemc/ome_device_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index e1241f89d1..46142bf001 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -237,9 +237,6 @@ plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-synt plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid -plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:doc-missing-type -plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/dellemc/ome_device_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc diff --git 
a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 2a3c828361..604672ab19 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -202,8 +202,6 @@ plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-synt plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid -plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:doc-missing-type -plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc diff --git a/tests/unit/plugins/module_utils/remote_management/dellemc/__init__.py b/tests/unit/plugins/module_utils/remote_management/dellemc/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/module_utils/remote_management/dellemc/test_ome.py b/tests/unit/plugins/module_utils/remote_management/dellemc/test_ome.py deleted file mode 100644 index cc698d0b21..0000000000 --- a/tests/unit/plugins/module_utils/remote_management/dellemc/test_ome.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- coding: utf-8 -*- - -# -# Dell EMC OpenManage Ansible Modules -# Version 2.0 -# Copyright (C) 2019 Dell Inc. - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries. -# Other trademarks may be trademarks of their respective owners. 
-# - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import pytest -from ansible.module_utils.urls import ConnectionError, SSLValidationError -from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError -from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome import RestOME -from ansible_collections.community.general.tests.unit.compat.mock import MagicMock -import json - - -class TestRestOME(object): - @pytest.fixture - def mock_response(self): - mock_response = MagicMock() - mock_response.getcode.return_value = 200 - mock_response.headers = mock_response.getheaders.return_value = {'X-Auth-Token': 'token_id'} - mock_response.read.return_value = json.dumps({"value": "data"}) - return mock_response - - def test_invoke_request_with_session(self, mock_response, mocker): - mocker.patch('ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome.open_url', - return_value=mock_response) - module_params = {'hostname': '192.168.0.1', 'username': 'username', - 'password': 'password', "port": 443} - req_session = True - with RestOME(module_params, req_session) as obj: - response = obj.invoke_request("/testpath", "GET") - assert response.status_code == 200 - assert response.json_data == {"value": "data"} - assert response.success is True - - def test_invoke_request_without_session(self, mock_response, mocker): - mocker.patch('ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome.open_url', - return_value=mock_response) - module_params = {'hostname': '192.168.0.1', 'username': 'username', - 'password': 'password', "port": 443} - req_session = False - with RestOME(module_params, req_session) as obj: - response = obj.invoke_request("/testpath", "GET") - assert response.status_code == 200 - assert response.json_data == {"value": "data"} - assert response.success is True - - @pytest.mark.parametrize("exc", [URLError, 
SSLValidationError, ConnectionError]) - def test_invoke_request_error_case_handling(self, exc, mock_response, mocker): - open_url_mock = mocker.patch('ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome.open_url', - return_value=mock_response) - open_url_mock.side_effect = exc("test") - module_params = {'hostname': '192.168.0.1', 'username': 'username', - 'password': 'password', "port": 443} - req_session = False - with pytest.raises(exc) as e: - with RestOME(module_params, req_session) as obj: - obj.invoke_request("/testpath", "GET") - - def test_invoke_request_http_error_handling(self, mock_response, mocker): - open_url_mock = mocker.patch('ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome.open_url', - return_value=mock_response) - open_url_mock.side_effect = HTTPError('http://testhost.com/', 400, - 'Bad Request Error', {}, None) - module_params = {'hostname': '192.168.0.1', 'username': 'username', - 'password': 'password', "port": 443} - req_session = False - with pytest.raises(HTTPError) as e: - with RestOME(module_params, req_session) as obj: - obj.invoke_request("/testpath", "GET") diff --git a/tests/unit/plugins/modules/remote_management/dellemc/__init__.py b/tests/unit/plugins/modules/remote_management/dellemc/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/modules/remote_management/dellemc/test_ome_device_info.py b/tests/unit/plugins/modules/remote_management/dellemc/test_ome_device_info.py deleted file mode 100644 index 5e825c4222..0000000000 --- a/tests/unit/plugins/modules/remote_management/dellemc/test_ome_device_info.py +++ /dev/null @@ -1,196 +0,0 @@ -# -*- coding: utf-8 -*- - -# -# Dell EMC OpenManage Ansible Modules -# Version 2.0 -# Copyright (C) 2019 Dell Inc. - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# All rights reserved. 
Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries. -# Other trademarks may be trademarks of their respective owners. -# - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import pytest -from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, \ - fail_json, AnsibleFailJson, AnsibleExitJson -from ansible.module_utils import basic -from ansible_collections.community.general.plugins.modules.remote_management.dellemc import ome_device_info -from ansible.module_utils.six.moves.urllib.error import HTTPError - -default_args = {'hostname': '192.168.0.1', 'username': 'username', 'password': 'password'} -resource_basic_inventory = {"basic_inventory": "DeviceService/Devices"} -resource_detailed_inventory = {"detailed_inventory:": {"device_id": {1234: None}, - "device_service_tag": {1345: "MXL1234"}}} - - -class TestOmeDeviceInfo(object): - module = ome_device_info - - @pytest.fixture(autouse=True) - def module_mock(self, mocker): - return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) - - @pytest.fixture - def connection_mock(self, mocker): - connection_class_mock = mocker.patch('ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info.RestOME') - return connection_class_mock.return_value - - @pytest.fixture - def response_mock(self, mocker): - response_class_mock = mocker.patch('ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome.OpenURLResponse') - return response_class_mock - - @pytest.fixture - def validate_inputs_mock(self, mocker): - response_class_mock = mocker.patch('ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info._validate_inputs') - response_class_mock.return_value = None - - @pytest.fixture - def get_device_identifier_map_mock(self, mocker): - response_class_mock = mocker.patch( - 
'ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info._get_device_identifier_map' - ) - response_class_mock.return_value = resource_detailed_inventory - return response_class_mock.return_value - - @pytest.fixture - def get_resource_parameters_mock(self, mocker): - response_class_mock = mocker.patch( - 'ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info._get_resource_parameters' - ) - return response_class_mock - - def test_main_basic_inventory_success_case(self, module_mock, validate_inputs_mock, connection_mock, get_resource_parameters_mock, response_mock): - get_resource_parameters_mock.return_value = resource_basic_inventory - connection_mock.__enter__.return_value = connection_mock - connection_mock.invoke_request.return_value = response_mock - response_mock.json_data = {"value": [{"device_id1": "details", "device_id2": "details"}]} - response_mock.status_code = 200 - result = self._run_module(default_args) - assert result['changed'] is False - assert 'device_info' in result - - def test_main_basic_inventory_failure_case(self, module_mock, validate_inputs_mock, connection_mock, get_resource_parameters_mock, response_mock): - get_resource_parameters_mock.return_value = resource_basic_inventory - connection_mock.__enter__.return_value = connection_mock - connection_mock.invoke_request.return_value = response_mock - response_mock.status_code = 500 - result = self._run_module_with_fail_json(default_args) - assert result['msg'] == 'Failed to fetch the device information' - - def test_main_detailed_inventory_success_case(self, module_mock, validate_inputs_mock, connection_mock, get_resource_parameters_mock, response_mock): - default_args.update({"fact_subset": "detailed_inventory", "system_query_options": {"device_id": [1234], "device_service_tag": ["MXL1234"]}}) - detailed_inventory = {"detailed_inventory:": {"device_id": {1234: "DeviceService/Devices(1234)/InventoryDetails"}, - 
"device_service_tag": {"MXL1234": "DeviceService/Devices(4321)/InventoryDetails"}}} - get_resource_parameters_mock.return_value = detailed_inventory - connection_mock.__enter__.return_value = connection_mock - connection_mock.invoke_request.return_value = response_mock - response_mock.json_data = {"value": [{"device_id": {"1234": "details"}}, {"device_service_tag": {"MXL1234": "details"}}]} - response_mock.status_code = 200 - result = self._run_module(default_args) - assert result['changed'] is False - assert 'device_info' in result - - def test_main_HTTPError_error_case(self, module_mock, validate_inputs_mock, connection_mock, get_resource_parameters_mock, response_mock): - get_resource_parameters_mock.return_value = resource_basic_inventory - connection_mock.__enter__.return_value = connection_mock - connection_mock.invoke_request.side_effect = HTTPError('http://testhost.com', 400, '', {}, None) - response_mock.json_data = {"value": [{"device_id1": "details", "device_id2": "details"}]} - response_mock.status_code = 400 - result = self._run_module_with_fail_json(default_args) - assert 'device_info' not in result - assert result['failed'] is True - - @pytest.mark.parametrize("fact_subset, mutually_exclusive_call", [("basic_inventory", False), ("detailed_inventory", True)]) - def test_validate_inputs(self, fact_subset, mutually_exclusive_call, mocker): - module_params = {"fact_subset": fact_subset} - check_mutually_inclusive_arguments_mock = mocker.patch( - 'ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info._check_mutually_inclusive_arguments') - check_mutually_inclusive_arguments_mock.return_value = None - self.module._validate_inputs(module_params) - if mutually_exclusive_call: - check_mutually_inclusive_arguments_mock.assert_called() - else: - check_mutually_inclusive_arguments_mock.assert_not_called() - check_mutually_inclusive_arguments_mock.reset_mock() - - system_query_options_params = [{"system_query_options": 
None}, {"system_query_options": {"device_id": None}}, - {"system_query_options": {"device_service_tag": None}}] - - @pytest.mark.parametrize("system_query_options_params", system_query_options_params) - def test_check_mutually_inclusive_arguments(self, system_query_options_params): - module_params = {"fact_subset": "subsystem_health"} - required_args = ["device_id", "device_service_tag"] - module_params.update(system_query_options_params) - with pytest.raises(ValueError) as ex: - self.module._check_mutually_inclusive_arguments(module_params["fact_subset"], module_params, ["device_id", "device_service_tag"]) - assert "One of the following {0} is required for {1}".format(required_args, module_params["fact_subset"]) == str(ex.value) - - params = [{"fact_subset": "basic_inventory", "system_query_options": {"device_id": [1234]}}, - {"fact_subset": "subsystem_health", "system_query_options": {"device_service_tag": ["MXL1234"]}}, - {"fact_subset": "detailed_inventory", "system_query_options": {"device_id": [1234], "inventory_type": "serverDeviceCards"}}] - - @pytest.mark.parametrize("module_params", params) - def test_get_resource_parameters(self, module_params, connection_mock): - self.module._get_resource_parameters(module_params, connection_mock) - - @pytest.mark.parametrize("module_params,data", [({"system_query_options": None}, None), ({"system_query_options": {"fileter": None}}, None), - ({"system_query_options": {"filter": "abc"}}, "$filter")]) - def test_get_query_parameters(self, module_params, data): - res = self.module._get_query_parameters(module_params) - if data is not None: - assert data in res - else: - assert res is None - - @pytest.mark.parametrize("module_params", params) - def test_get_device_identifier_map(self, module_params, connection_mock, mocker): - get_device_id_from_service_tags_mock = mocker.patch( - 'ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info._get_device_id_from_service_tags' - ) - 
get_device_id_from_service_tags_mock.return_value = None - res = self.module._get_device_identifier_map(module_params, connection_mock) - assert isinstance(res, dict) - - def test_check_duplicate_device_id(self): - self.module._check_duplicate_device_id([1234], {1234: "MX1234"}) - assert self.module.device_fact_error_report["MX1234"] == "Duplicate report of device_id: 1234" - - @pytest.mark.parametrize("val,expected_res", [(123, True), ("abc", False)]) - def test_is_int(self, val, expected_res): - actual_res = self.module.is_int(val) - assert actual_res == expected_res - - def test_get_device_id_from_service_tags(self, connection_mock, response_mock): - connection_mock.__enter__.return_value = connection_mock - connection_mock.invoke_request.return_value = response_mock - response_mock.json_data = {"value": [{"DeviceServiceTag": "MX1234", "Id": 1234}]} - response_mock.status_code = 200 - response_mock.success = True - self.module._get_device_id_from_service_tags(["MX1234", "INVALID"], connection_mock) - - def test_get_device_id_from_service_tags_error_case(self, connection_mock, response_mock): - connection_mock.__enter__.return_value = connection_mock - connection_mock.invoke_request.side_effect = HTTPError('http://testhost.com', - 400, '', {}, None) - response_mock.json_data = {"value": [{"DeviceServiceTag": "MX1234", "Id": 1234}]} - response_mock.status_code = 200 - response_mock.success = True - with pytest.raises(HTTPError) as ex: - self.module._get_device_id_from_service_tags(["INVALID"], connection_mock) - - def _run_module(self, module_args): - set_module_args(module_args) - with pytest.raises(AnsibleExitJson) as ex: - self.module.main() - return ex.value.args[0] - - def _run_module_with_fail_json(self, module_args): - set_module_args(module_args) - with pytest.raises(AnsibleFailJson) as exc: - self.module.main() - result = exc.value.args[0] - return result From 1591d52b78f994fc62f47515e43b22dc30786500 Mon Sep 17 00:00:00 2001 From: John R Barker Date: Mon, 
1 Feb 2021 18:27:15 +0000 Subject: [PATCH 0008/3093] Create bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 116 ++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000000..f56a2b069e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,116 @@ +name: Bug Report +about: Create a report to help us improve + +body: + - id: help + type: TextBlock + isSubtle: true # Not implemented, just wanted a way to make it stand out + color: good + text: | + Verify first that your issue is not already reported on [GitHub](https://github.com/ansible-collections/community.general/labels/bug) + Also test if the latest release and devel branch are affected too + Complete *all* sections as described, this form is processed automatically + + - id: summary + type: Input.Text + isMultiline: true + isRequired: true + size: small + label: SUMMARY + placeholder: 'Explain the problem briefly below' + + - id: issue type + type: Input.ChoiceSet + isMultiSelect: false + isRequired: true + style: compact + size: large + label: ISSUE TYPE + value: Bug Report + choices: + - title: Bug Report + value: Bug Report + + - id: component name + type: Input.ChoiceSet + isMultiSelect: true + isRequired: true + style: compact + size: large + label: COMPONENT NAME + #value: Bug Report + choices: + # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins + # Select from list, filter as you type (`mysql` would only show the 3 mysql components) + # OR freeform - doesn't seem to be supported in adaptivecards + # BUG: Multiselect only adds last item to issue + - title: mysql_user + value: mysql_user + - title: mysql_database + value: mysql_database + - title: mysql_connection + value: mysql_connection + - title: 
postgres_user + value: postgres_user + + - id: ansible version + type: Input.Text + isMultiline: true + size: large + label: ANSIBLE VERSION + value: | + + ```paste below + + ``` + - id: configuration + type: Input.Text + isMultiline: true + size: large + label: CONFIGURATION + value: | + + ```paste below + + ``` + + - id: configuration + type: Input.Text + isMultiline: true + size: large + label: OS / ENVIRONMENT + placeholder: Provide all relevant information below, e.g. target OS versions, network device firmware, etc + + - id: steps to repo + type: Input.Text + isMultiline: true + size: large + label: STEPS TO REPRODUCE + value: | + + + + ```yaml + + ``` + + - id: expected + type: Input.Text + isMultiline: true + size: large + label: EXPECTED RESULTS + placeholder: Describe what you expected to happen when running the steps above + + + + - id: actual results + type: Input.Text + isMultiline: true + size: large + label: ACTUAL RESULTS + value: | + + + ``` + + ``` From 165719d084994dfc3b1847258b8b83266184cc07 Mon Sep 17 00:00:00 2001 From: John R Barker Date: Mon, 1 Feb 2021 18:41:44 +0000 Subject: [PATCH 0009/3093] Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 198 ++++++++++++-------------- 1 file changed, 89 insertions(+), 109 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index f56a2b069e..bc1ce215b1 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,116 +1,96 @@ name: Bug Report about: Create a report to help us improve -body: - - id: help - type: TextBlock - isSubtle: true # Not implemented, just wanted a way to make it stand out - color: good - text: | - Verify first that your issue is not already reported on [GitHub](https://github.com/ansible-collections/community.general/labels/bug) - Also test if the latest release and devel branch are affected too - Complete *all* sections as described, this form is processed automatically - - 
- id: summary - type: Input.Text - isMultiline: true - isRequired: true - size: small - label: SUMMARY - placeholder: 'Explain the problem briefly below' +inputs: + - type: description + + attributes: + preview_only: true + text: | + Verify first that your issue is not already reported on [GitHub](https://github.com/ansible-collections/community.general/labels/bug) + Also test if the latest release and devel branch are affected too + Complete *all* sections as described, this form is processed automatically + - type: textarea + attributes: + label: SUMMARY + description: 'Explain the problem briefly below' + required: true + - type: textarea + attributes: + label: SUMMARY + description: 'Explain the problem briefly below' + required: true + + - type: dropdown + attributes: + choices: + - Bug Report + required: true - - id: issue type - type: Input.ChoiceSet - isMultiSelect: false - isRequired: true - style: compact - size: large - label: ISSUE TYPE - value: Bug Report - choices: - - title: Bug Report - value: Bug Report - - - id: component name - type: Input.ChoiceSet - isMultiSelect: true - isRequired: true - style: compact - size: large - label: COMPONENT NAME - #value: Bug Report - choices: - # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins - # Select from list, filter as you type (`mysql` would only show the 3 mysql components) - # OR freeform - doesn't seem to be supported in adaptivecards - # BUG: Multiselect only adds last item to issue - - title: mysql_user - value: mysql_user - - title: mysql_database - value: mysql_database - - title: mysql_connection - value: mysql_connection - - title: postgres_user - value: postgres_user - - - id: ansible version - type: Input.Text - isMultiline: true - size: large - label: ANSIBLE VERSION - value: | - - ```paste below - - ``` - - id: configuration - type: Input.Text - isMultiline: true - size: large - label: CONFIGURATION - 
value: | - - ```paste below - - ``` - - - id: configuration - type: Input.Text - isMultiline: true - size: large - label: OS / ENVIRONMENT - placeholder: Provide all relevant information below, e.g. target OS versions, network device firmware, etc +- type: textarea + attributes: + # For smaller collections we could use a multi-select and hardcode the list + #May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins + # Select from list, filter as you type (`mysql` would only show the 3 mysql components) + # OR freeform - doesn't seem to be supported in adaptivecards - - id: steps to repo - type: Input.Text - isMultiline: true - size: large - label: STEPS TO REPRODUCE - value: | - - + label: COMPONENT NAME + description: 'List the component, ie `template`, `mysql_users` + required: true + - type: textarea + attributes: + label: ANSIBLE VERSION + required: false + description: | + If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. + This can be a piece of YAML from, e.g., an automation, script, scene or configuration. + value: | + + ```paste below + + ``` + - type: textarea + attributes: + label: CONFIGURATION + required: false + description: | + If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. + This can be a piece of YAML from, e.g., an automation, script, scene or configuration. + value: | + + ```paste below + + ``` + - type: textarea + attributes: + label: OS / ENVIRONMENT + description: 'Provide all relevant information below, e.g. 
target OS versions, network device firmware, etc' + required: false - ```yaml - - ``` + - type: textarea + attributes: + label: STEPS TO REPRO + description: 'Describe exactly how to reproduce the problem, using a minimal test-case' + required: false + value: | + + ```paste below + + ``` +- type: textarea + attributes: + label: EXPECTED RESULTS + description: 'Describe what you expected to happen when running the steps above' + required: false - - id: expected - type: Input.Text - isMultiline: true - size: large - label: EXPECTED RESULTS - placeholder: Describe what you expected to happen when running the steps above - - - - - id: actual results - type: Input.Text - isMultiline: true - size: large - label: ACTUAL RESULTS - value: | - - - ``` - - ``` +- type: textarea + attributes: + label: ACTUAL RESULTS + description: 'Describe what actually happened. If possible run with extra verbosity (-vvvv)' + required: false + value: | + + ```paste below + + ``` + From 6d865643085453db83c3ec580107d3844c9e8203 Mon Sep 17 00:00:00 2001 From: John R Barker Date: Mon, 1 Feb 2021 18:46:32 +0000 Subject: [PATCH 0010/3093] Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 41 +++++++++++++-------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index bc1ce215b1..aee16cf74b 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -3,7 +3,7 @@ about: Create a report to help us improve inputs: - type: description - + attributes: preview_only: true text: | @@ -20,22 +20,22 @@ inputs: label: SUMMARY description: 'Explain the problem briefly below' required: true - + - type: dropdown - attributes: - choices: - - Bug Report + attributes: + choices: + - Bug Report required: true - -- type: textarea + + - type: textarea attributes: # For smaller collections we could use a multi-select and hardcode the list - #May generate this list via 
GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins + # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins # Select from list, filter as you type (`mysql` would only show the 3 mysql components) # OR freeform - doesn't seem to be supported in adaptivecards - + label: COMPONENT NAME - description: 'List the component, ie `template`, `mysql_users` + description: 'List the component, ie `template`, `mysql_users`' required: true - type: textarea attributes: @@ -47,9 +47,9 @@ inputs: value: | ```paste below - + ``` - - type: textarea + - type: textarea attributes: label: CONFIGURATION required: false @@ -59,8 +59,8 @@ inputs: value: | ```paste below - - ``` + + ``` - type: textarea attributes: label: OS / ENVIRONMENT @@ -75,15 +75,15 @@ inputs: value: | ```paste below - - ``` -- type: textarea + + ``` + - type: textarea attributes: label: EXPECTED RESULTS description: 'Describe what you expected to happen when running the steps above' required: false -- type: textarea + - type: textarea attributes: label: ACTUAL RESULTS description: 'Describe what actually happened. 
If possible run with extra verbosity (-vvvv)' @@ -91,6 +91,5 @@ inputs: value: | ```paste below - - ``` - + + ``` From 8dbb13edd402a028206d0979eeda377c1d4fc006 Mon Sep 17 00:00:00 2001 From: John R Barker Date: Mon, 1 Feb 2021 18:51:10 +0000 Subject: [PATCH 0011/3093] Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index aee16cf74b..44b4ca6b39 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -6,13 +6,13 @@ inputs: attributes: preview_only: true - text: | + value: | Verify first that your issue is not already reported on [GitHub](https://github.com/ansible-collections/community.general/labels/bug) Also test if the latest release and devel branch are affected too Complete *all* sections as described, this form is processed automatically - type: textarea attributes: - label: SUMMARY + label: OVERVIEW description: 'Explain the problem briefly below' required: true - type: textarea @@ -23,6 +23,7 @@ inputs: - type: dropdown attributes: + label: ISSUE TYPE choices: - Bug Report required: true From e8886fa711fddce7094cd45795021a0a5fc2c4b2 Mon Sep 17 00:00:00 2001 From: John R Barker Date: Mon, 1 Feb 2021 18:53:06 +0000 Subject: [PATCH 0012/3093] Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 44b4ca6b39..c227118d75 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -2,19 +2,20 @@ name: Bug Report about: Create a report to help us improve inputs: - - type: description + - type: description attributes: preview_only: true value: | Verify first that your issue is not already reported on 
[GitHub](https://github.com/ansible-collections/community.general/labels/bug) Also test if the latest release and devel branch are affected too - Complete *all* sections as described, this form is processed automatically + - type: textarea attributes: label: OVERVIEW description: 'Explain the problem briefly below' required: true + - type: textarea attributes: label: SUMMARY From 4e70c0c55aef20698fce5297f21c70f272fb60f5 Mon Sep 17 00:00:00 2001 From: John R Barker Date: Mon, 1 Feb 2021 18:55:36 +0000 Subject: [PATCH 0013/3093] Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index c227118d75..ac2caac3ae 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -5,7 +5,6 @@ inputs: - type: description attributes: - preview_only: true value: | Verify first that your issue is not already reported on [GitHub](https://github.com/ansible-collections/community.general/labels/bug) Also test if the latest release and devel branch are affected too From b40a5ef09af043677b4b6e9d91d419cf29159b48 Mon Sep 17 00:00:00 2001 From: Stefan Walluhn Date: Tue, 2 Feb 2021 17:45:14 +0100 Subject: [PATCH 0014/3093] sensu-silence: fix json parsing of sensu API response (#1703) * sensu-silence: fix json parsing of sensu API response * use ansible helper function to decode bytestream * add changelog fragment * Update changelogs, link to PR Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/1703-sensu_silence-fix_json_parsing.yml | 2 ++ plugins/modules/monitoring/sensu/sensu_silence.py | 7 ++++--- 2 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml diff --git a/changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml b/changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml new file mode 100644 index 
0000000000..18d39b5674 --- /dev/null +++ b/changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml @@ -0,0 +1,2 @@ +bugfixes: + - sensu-silence module - fix json parsing of sensu API responses on Python 3.5 (https://github.com/ansible-collections/community.general/pull/1703). diff --git a/plugins/modules/monitoring/sensu/sensu_silence.py b/plugins/modules/monitoring/sensu/sensu_silence.py index acd64f975c..12dc5d2068 100644 --- a/plugins/modules/monitoring/sensu/sensu_silence.py +++ b/plugins/modules/monitoring/sensu/sensu_silence.py @@ -97,6 +97,7 @@ RETURN = ''' import json +from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url @@ -129,7 +130,7 @@ def query(module, url, check, subscription): ) try: - json_out = json.loads(response.read()) + json_out = json.loads(to_native(response.read())) except Exception: json_out = "" @@ -181,7 +182,7 @@ def clear(module, url, check, subscription): ) try: - json_out = json.loads(response.read()) + json_out = json.loads(to_native(response.read())) except Exception: json_out = "" @@ -246,7 +247,7 @@ def create( ) try: - json_out = json.loads(response.read()) + json_out = json.loads(to_native(response.read())) except Exception: json_out = "" From aa95d8a5b7e368e69a31ed3dd163c0f13dcf92ee Mon Sep 17 00:00:00 2001 From: quidame Date: Wed, 3 Feb 2021 07:54:04 +0100 Subject: [PATCH 0015/3093] module filesystem: partially fix idempotency issue #1457 (resizefs) (#1478) * Use 'xfs_info' to query fs size, that doesn't always require the device be mounted. Although still query mountpoint first for backward compatibility. * Do not fail whith fstype=xfs and resizefs=yes if filesystem already fills its underlying device. 
* Include xfs in the tasks that test idempotency of resizefs option * Add changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml --- ...lesystem-fix-1457-resizefs-idempotency.yml | 5 ++ plugins/modules/system/filesystem.py | 27 +++++--- .../targets/filesystem/defaults/main.yml | 1 + .../targets/filesystem/meta/main.yml | 1 + .../filesystem/tasks/create_device.yml | 1 + .../targets/filesystem/tasks/create_fs.yml | 65 ++++++++++--------- .../targets/filesystem/tasks/main.yml | 1 + .../filesystem/tasks/overwrite_another_fs.yml | 1 + .../targets/filesystem/tasks/setup.yml | 1 + 9 files changed, 64 insertions(+), 39 deletions(-) create mode 100644 changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml diff --git a/changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml b/changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml new file mode 100644 index 0000000000..a90444308e --- /dev/null +++ b/changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml @@ -0,0 +1,5 @@ +--- +bugfixes: + - filesystem - do not fail when ``resizefs=yes`` and ``fstype=xfs`` if there is nothing to do, even if + the filesystem is not mounted. This only covers systems supporting access to unmounted XFS filesystems. + Others will still fail (https://github.com/ansible-collections/community.general/issues/1457, https://github.com/ansible-collections/community.general/pull/1478). 
diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/system/filesystem.py index fca7f2c56d..e78eec4e86 100644 --- a/plugins/modules/system/filesystem.py +++ b/plugins/modules/system/filesystem.py @@ -240,26 +240,35 @@ class XFS(Filesystem): GROW = 'xfs_growfs' def get_fs_size(self, dev): - cmd = self.module.get_bin_path('xfs_growfs', required=True) + cmd = self.module.get_bin_path('xfs_info', required=True) + mountpoint = dev.get_mountpoint() + if mountpoint: + rc, out, err = self.module.run_command([cmd, str(mountpoint)], environ_update=self.LANG_ENV) + else: + # Recent GNU/Linux distros support access to unmounted XFS filesystems + rc, out, err = self.module.run_command([cmd, str(dev)], environ_update=self.LANG_ENV) + if rc != 0: + self.module.fail_json(msg="Error while attempting to query size of XFS filesystem: %s" % err) - if not mountpoint: - # xfs filesystem needs to be mounted - self.module.fail_json(msg="%s needs to be mounted for xfs operations" % dev) - - _, size, _ = self.module.run_command([cmd, '-n', str(mountpoint)], check_rc=True, environ_update=self.LANG_ENV) - for line in size.splitlines(): + for line in out.splitlines(): col = line.split('=') if col[0].strip() == 'data': if col[1].strip() != 'bsize': - self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "bsize")') + self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "bsize")') if col[2].split()[1] != 'blocks': - self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "blocks")') + self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "blocks")') block_size = int(col[2].split()[0]) block_count = int(col[3].split(',')[0]) return block_size * block_count def grow_cmd(self, dev): + # Check first if growing is needed, and then if it is doable or not. 
+ devsize_in_bytes = dev.size() + fssize_in_bytes = self.get_fs_size(dev) + if not fssize_in_bytes < devsize_in_bytes: + self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev)) + mountpoint = dev.get_mountpoint() if not mountpoint: # xfs filesystem needs to be mounted diff --git a/tests/integration/targets/filesystem/defaults/main.yml b/tests/integration/targets/filesystem/defaults/main.yml index 721c056c1d..764b98b6ba 100644 --- a/tests/integration/targets/filesystem/defaults/main.yml +++ b/tests/integration/targets/filesystem/defaults/main.yml @@ -1,3 +1,4 @@ +--- tested_filesystems: # key: fstype # fssize: size (Mo) diff --git a/tests/integration/targets/filesystem/meta/main.yml b/tests/integration/targets/filesystem/meta/main.yml index 56bc554611..7853656a5b 100644 --- a/tests/integration/targets/filesystem/meta/main.yml +++ b/tests/integration/targets/filesystem/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - setup_pkg_mgr - setup_remote_tmp_dir diff --git a/tests/integration/targets/filesystem/tasks/create_device.yml b/tests/integration/targets/filesystem/tasks/create_device.yml index 052934cc54..e49861e7ca 100644 --- a/tests/integration/targets/filesystem/tasks/create_device.yml +++ b/tests/integration/targets/filesystem/tasks/create_device.yml @@ -1,3 +1,4 @@ +--- - name: 'Create a "disk" file' command: 'dd if=/dev/zero of={{ image_file }} bs=1M count={{ fssize }}' diff --git a/tests/integration/targets/filesystem/tasks/create_fs.yml b/tests/integration/targets/filesystem/tasks/create_fs.yml index b42f886ef0..688a4462db 100644 --- a/tests/integration/targets/filesystem/tasks/create_fs.yml +++ b/tests/integration/targets/filesystem/tasks/create_fs.yml @@ -43,40 +43,45 @@ - 'fs3_result is success' - 'uuid.stdout != uuid3.stdout' -- name: increase fake device - shell: 'dd if=/dev/zero bs=1M count=1 >> {{ image_file }}' - -- when: fstype == 'lvm' - block: - - name: Resize loop device for LVM - command: 
losetup -c {{ dev }} - when: 'grow|bool and (fstype != "vfat" or resize_vfat)' block: - - name: Expand filesystem - filesystem: - dev: '{{ dev }}' - fstype: '{{ fstype }}' - resizefs: yes - register: fs4_result + - name: increase fake device + shell: 'dd if=/dev/zero bs=1M count=1 >> {{ image_file }}' - - command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' - register: uuid4 + - name: Resize loop device for LVM + command: losetup -c {{ dev }} + when: fstype == 'lvm' - - assert: - that: - - 'fs4_result is changed' - - 'fs4_result is success' - - 'uuid3.stdout == uuid4.stdout' # unchanged + - name: Expand filesystem + filesystem: + dev: '{{ dev }}' + fstype: '{{ fstype }}' + resizefs: yes + register: fs4_result - - name: Try to expand filesystem again - filesystem: - dev: '{{ dev }}' - fstype: '{{ fstype }}' - resizefs: yes - register: fs5_result + - command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + register: uuid4 - - assert: - that: - - 'not (fs5_result is changed)' - - 'fs5_result is successful' + - assert: + that: + - 'fs4_result is changed' + - 'fs4_result is success' + - 'uuid3.stdout == uuid4.stdout' # unchanged + +- when: + - (grow | bool and (fstype != "vfat" or resize_vfat)) or + (fstype == "xfs" and ansible_system == "Linux" and + ansible_distribution not in ["CentOS", "Ubuntu"]) + block: + - name: Check that resizefs does nothing if device size is not changed + filesystem: + dev: '{{ dev }}' + fstype: '{{ fstype }}' + resizefs: yes + register: fs5_result + + - assert: + that: + - 'fs5_result is not changed' + - 'fs5_result is succeeded' diff --git a/tests/integration/targets/filesystem/tasks/main.yml b/tests/integration/targets/filesystem/tasks/main.yml index 81e5a6b380..44e8c49f61 100644 --- a/tests/integration/targets/filesystem/tasks/main.yml +++ b/tests/integration/targets/filesystem/tasks/main.yml @@ -1,3 +1,4 @@ +--- #################################################################### # WARNING: These are designed specifically for 
Ansible tests # # and should not be used as examples of how to write Ansible roles # diff --git a/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml b/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml index bdd238fba7..671d9b0bea 100644 --- a/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml +++ b/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml @@ -1,3 +1,4 @@ +--- - name: 'Recreate "disk" file' command: 'dd if=/dev/zero of={{ image_file }} bs=1M count={{ fssize }}' diff --git a/tests/integration/targets/filesystem/tasks/setup.yml b/tests/integration/targets/filesystem/tasks/setup.yml index 6069cbedd8..82fe7c54e6 100644 --- a/tests/integration/targets/filesystem/tasks/setup.yml +++ b/tests/integration/targets/filesystem/tasks/setup.yml @@ -1,3 +1,4 @@ +--- - name: install filesystem tools package: name: '{{ item }}' From 2297f2f802f286abdc76bbae0c9d3aae44255554 Mon Sep 17 00:00:00 2001 From: John R Barker Date: Wed, 3 Feb 2021 12:06:03 +0000 Subject: [PATCH 0016/3093] AZP nightly at 0800 --- .azure-pipelines/azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 055b57ca7e..cd12a86079 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -13,7 +13,7 @@ pr: - stable-* schedules: - - cron: 0 9 * * * + - cron: 0 8 * * * displayName: Nightly always: true branches: From ae8edc02e1943ef9232312471de389e72bef869c Mon Sep 17 00:00:00 2001 From: David Moreau Simard Date: Thu, 4 Feb 2021 15:02:21 -0500 Subject: [PATCH 0017/3093] Add no_log to some module arguments (#1725) * Add no_log to some module arguments This will prevent potentially sensitive information from being printed to the console. 
See: CVE-2021-20191 * Update changelogs/fragments/CVE-2021-20191_no_log.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/CVE-2021-20191_no_log.yml | 4 ++++ plugins/module_utils/_netapp.py | 4 ++-- plugins/module_utils/identity/keycloak/keycloak.py | 2 +- .../web_infrastructure/sophos_utm/utm_proxy_auth_profile.py | 2 +- 4 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/CVE-2021-20191_no_log.yml diff --git a/changelogs/fragments/CVE-2021-20191_no_log.yml b/changelogs/fragments/CVE-2021-20191_no_log.yml new file mode 100644 index 0000000000..a2c8740598 --- /dev/null +++ b/changelogs/fragments/CVE-2021-20191_no_log.yml @@ -0,0 +1,4 @@ +security_fixes: + - module_utils/_netapp, na_ontap_gather_facts - enabled ``no_log`` for the options ``api_key`` and ``secret_key`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725). + - module_utils/identity/keycloak, keycloak_client, keycloak_clienttemplate, keycloak_group - enabled ``no_log`` for the option ``auth_client_secret`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725). + - utm_proxy_auth_profile - enabled ``no_log`` for the option ``frontend_cookie_secret`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725). 
diff --git a/plugins/module_utils/_netapp.py b/plugins/module_utils/_netapp.py index d6d0903f16..d80506bb9a 100644 --- a/plugins/module_utils/_netapp.py +++ b/plugins/module_utils/_netapp.py @@ -142,8 +142,8 @@ def aws_cvs_host_argument_spec(): return dict( api_url=dict(required=True, type='str'), validate_certs=dict(required=False, type='bool', default=True), - api_key=dict(required=True, type='str'), - secret_key=dict(required=True, type='str') + api_key=dict(required=True, type='str', no_log=True), + secret_key=dict(required=True, type='str', no_log=True) ) diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 1859d37d0e..5c57e755da 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -58,7 +58,7 @@ def keycloak_argument_spec(): auth_keycloak_url=dict(type='str', aliases=['url'], required=True), auth_client_id=dict(type='str', default='admin-cli'), auth_realm=dict(type='str', required=True), - auth_client_secret=dict(type='str', default=None), + auth_client_secret=dict(type='str', default=None, no_log=True), auth_username=dict(type='str', aliases=['username'], required=True), auth_password=dict(type='str', aliases=['password'], required=True, no_log=True), validate_certs=dict(type='bool', default=True) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py index 489a6c5602..ebd76e3d9c 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py @@ -336,7 +336,7 @@ def main(): backend_user_suffix=dict(type='str', required=False, default=""), comment=dict(type='str', required=False, default=""), frontend_cookie=dict(type='str', required=False), - frontend_cookie_secret=dict(type='str', required=False), + 
frontend_cookie_secret=dict(type='str', required=False, no_log=True), frontend_form=dict(type='str', required=False), frontend_form_template=dict(type='str', required=False, default=""), frontend_login=dict(type='str', required=False), From 43da5b88db3299bcf19c3f5b36d387cd7a3f8fb3 Mon Sep 17 00:00:00 2001 From: Anatoly Pugachev Date: Fri, 5 Feb 2021 09:21:57 +0300 Subject: [PATCH 0018/3093] parted: fix regex for version match and partition size output (#1695) * Fix 2 regex in parted related to parted version string and to parsing partition size output. * Added changelog fragment. * Updated changelog as per recommendation. * Fix the regex matching the parted version. The space character at the end of the string may or may not be always present * provided sample version output and corrected regex to match * add/correct changelog fragment * split parted_version function to allow creating a test unit * test unit for parted version info * ansible-test sanity fixes * review fix * Update changelogs/fragments/1695-parted-updatedregex.yaml Co-authored-by: Felix Fontein * comment fixes * better function name * Update plugins/modules/system/parted.py Co-authored-by: Felix Fontein * comment fixes Co-authored-by: Claude Robitaille Co-authored-by: Felix Fontein --- .../fragments/1695-parted-updatedregex.yaml | 4 ++ plugins/modules/system/parted.py | 44 +++++++++++++------ .../plugins/modules/system/test_parted.py | 32 ++++++++++++++ 3 files changed, 66 insertions(+), 14 deletions(-) create mode 100644 changelogs/fragments/1695-parted-updatedregex.yaml diff --git a/changelogs/fragments/1695-parted-updatedregex.yaml b/changelogs/fragments/1695-parted-updatedregex.yaml new file mode 100644 index 0000000000..fb3a5a5eaa --- /dev/null +++ b/changelogs/fragments/1695-parted-updatedregex.yaml @@ -0,0 +1,4 @@ +bugfixes: + - parted - change the regex that decodes the partition size to better support different formats that parted uses. 
+ Change the regex that validates parted's version string + (https://github.com/ansible-collections/community.general/pull/1695). diff --git a/plugins/modules/system/parted.py b/plugins/modules/system/parted.py index 12c4e67d5c..daf68c298a 100644 --- a/plugins/modules/system/parted.py +++ b/plugins/modules/system/parted.py @@ -241,7 +241,7 @@ def parse_unit(size_str, unit=''): """ Parses a string containing a size or boundary information """ - matches = re.search(r'^(-?[\d.]+)([\w%]+)?$', size_str) + matches = re.search(r'^(-?[\d.]+) *([\w%]+)?$', size_str) if matches is None: # ",," format matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str) @@ -500,6 +500,33 @@ def check_parted_label(device): return False +def parse_parted_version(out): + """ + Returns version tuple from the output of "parted --version" command + """ + lines = [x for x in out.split('\n') if x.strip() != ''] + if len(lines) == 0: + return None, None, None + + # Sample parted versions (see as well test unit): + # parted (GNU parted) 3.3 + # parted (GNU parted) 3.4.5 + # parted (GNU parted) 3.3.14-dfc61 + matches = re.search(r'^parted.+\s(\d+)\.(\d+)(?:\.(\d+))?', lines[0].strip()) + + if matches is None: + return None, None, None + + # Convert version to numbers + major = int(matches.group(1)) + minor = int(matches.group(2)) + rev = 0 + if matches.group(3) is not None: + rev = int(matches.group(3)) + + return major, minor, rev + + def parted_version(): """ Returns the major and minor version of parted installed on the system. 
@@ -512,21 +539,10 @@ def parted_version(): msg="Failed to get parted version.", rc=rc, out=out, err=err ) - lines = [x for x in out.split('\n') if x.strip() != ''] - if len(lines) == 0: + (major, minor, rev) = parse_parted_version(out) + if major is None: module.fail_json(msg="Failed to get parted version.", rc=0, out=out) - matches = re.search(r'^parted.+(\d+)\.(\d+)(?:\.(\d+))?$', lines[0]) - if matches is None: - module.fail_json(msg="Failed to get parted version.", rc=0, out=out) - - # Convert version to numbers - major = int(matches.group(1)) - minor = int(matches.group(2)) - rev = 0 - if matches.group(3) is not None: - rev = int(matches.group(3)) - return major, minor, rev diff --git a/tests/unit/plugins/modules/system/test_parted.py b/tests/unit/plugins/modules/system/test_parted.py index 5ed7514646..18faf6a6ab 100644 --- a/tests/unit/plugins/modules/system/test_parted.py +++ b/tests/unit/plugins/modules/system/test_parted.py @@ -6,6 +6,7 @@ __metaclass__ = type from ansible_collections.community.general.tests.unit.compat.mock import patch, call from ansible_collections.community.general.plugins.modules.system import parted as parted_module +from ansible_collections.community.general.plugins.modules.system.parted import parse_parted_version from ansible_collections.community.general.plugins.modules.system.parted import parse_partition_info from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args @@ -17,6 +18,32 @@ BYT; 2:106MB:368MB:262MB:ext2::; 3:368MB:256061MB:255692MB:::;""" +parted_version_info = {""" + parted (GNU parted) 3.3 + Copyright (C) 2019 Free Software Foundation, Inc. + License GPLv3+: GNU GPL version 3 or later . + This is free software: you are free to change and redistribute it. + There is NO WARRANTY, to the extent permitted by law. + + Written by . 
+ """: (3, 3, 0), """ + parted (GNU parted) 3.4.5 + Copyright (C) 2019 Free Software Foundation, Inc. + License GPLv3+: GNU GPL version 3 or later . + This is free software: you are free to change and redistribute it. + There is NO WARRANTY, to the extent permitted by law. + + Written by . + """: (3, 4, 5), """ + parted (GNU parted) 3.3.14-dfc61 + Copyright (C) 2019 Free Software Foundation, Inc. + License GPLv3+: GNU GPL version 3 or later . + This is free software: you are free to change and redistribute it. + There is NO WARRANTY, to the extent permitted by law. + + Written by . + """: (3, 3, 14)} + # corresponding dictionary after parsing by parse_partition_info parted_dict1 = { "generic": { @@ -311,3 +338,8 @@ class TestParted(ModuleTestCase): }) with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict3): self.execute_module(changed=True) + + def test_version_info(self): + """Test that the parse_parted_version returns the expected tuple""" + for key, value in parted_version_info.items(): + self.assertEqual(parse_parted_version(key), value) From f509f2c896a03546f2f5c1b72e5d44f1cbbc0a89 Mon Sep 17 00:00:00 2001 From: Rick Sherman Date: Fri, 5 Feb 2021 00:22:49 -0600 Subject: [PATCH 0019/3093] datadog_monitor: Add missing monitor types query alert, trace-analytics alert, rum alert (#1723) * Add missing Datadog monitor types This commit adds the following monitor types: query alert, trace-analytics alert, rum alert * changelog PR1723 datadog_monitor types * datadog_monitor 1723 Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../1723-datadog_monitor-add-missing-monitor-types.yml | 2 ++ plugins/modules/monitoring/datadog/datadog_monitor.py | 9 ++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml diff --git 
a/changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml b/changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml new file mode 100644 index 0000000000..8b01717897 --- /dev/null +++ b/changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml @@ -0,0 +1,2 @@ +minor_changes: + - datadog_monitor - add missing monitor types ``query alert``, ``trace-analytics alert``, ``rum alert`` (https://github.com/ansible-collections/community.general/pull/1723). diff --git a/plugins/modules/monitoring/datadog/datadog_monitor.py b/plugins/modules/monitoring/datadog/datadog_monitor.py index f6020c2bed..f63c66a57d 100644 --- a/plugins/modules/monitoring/datadog/datadog_monitor.py +++ b/plugins/modules/monitoring/datadog/datadog_monitor.py @@ -49,7 +49,8 @@ options: type: description: - The type of the monitor. - choices: ['metric alert', 'service check', 'event alert', 'process alert', 'log alert'] + - The types C(query alert), C(trace-analytics alert) and C(rum alert) were added in community.general 2.1.0. 
+ choices: ['metric alert', 'service check', 'event alert', 'process alert', 'log alert', 'query alert', 'trace-analytics alert', 'rum alert'] type: str query: description: @@ -208,7 +209,9 @@ def main(): api_host=dict(required=False), app_key=dict(required=True, no_log=True), state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']), - type=dict(required=False, choices=['metric alert', 'service check', 'event alert', 'process alert', 'log alert']), + type=dict(required=False, choices=['metric alert', 'service check', 'event alert', + 'process alert', 'log alert', 'query alert', + 'trace-analytics alert', 'rum alert']), name=dict(required=True), query=dict(required=False), notification_message=dict(required=False, no_log=True, default=None, aliases=['message'], @@ -348,7 +351,7 @@ def install_monitor(module): if module.params['type'] == "service check": options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1} - if module.params['type'] in ["metric alert", "log alert"] and module.params['thresholds'] is not None: + if module.params['type'] in ["metric alert", "log alert", "query alert", "trace-analytics alert", "rum alert"] and module.params['thresholds'] is not None: options["thresholds"] = module.params['thresholds'] monitor = _get_monitor(module) From dd0b54b9b5161bb464f5c2c29ad396bd6310e980 Mon Sep 17 00:00:00 2001 From: morco Date: Fri, 5 Feb 2021 07:30:05 +0100 Subject: [PATCH 0020/3093] Feature/gitlab deploykey updkey (#1661) * feat(gitlab-deploy-key): automatically update ... ... 
the public key * add integrity test * fix sanity issues * added changelog fragment Co-authored-by: Mirko Wilhelmi --- .../1661-gitlab-deploy-key-update-pubkey.yml | 5 +++ .../gitlab/gitlab_deploy_key.py | 7 ++++ .../gitlab_deploy_key/defaults/main.yml | 1 + .../targets/gitlab_deploy_key/tasks/main.yml | 32 +++++++++++++++++++ 4 files changed, 45 insertions(+) create mode 100644 changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml diff --git a/changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml b/changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml new file mode 100644 index 0000000000..f6edfc6f53 --- /dev/null +++ b/changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml @@ -0,0 +1,5 @@ +--- +minor_changes: + - gitlab_deploy_key - when the given key title already exists but has a different public key, the public key will now be updated to given value (https://github.com/ansible-collections/community.general/pull/1661). +breaking_changes: + - gitlab_deploy_key - if for an already existing key title a different public key was given as parameter nothing happened, now this changed so that the public key is updated to the new value (https://github.com/ansible-collections/community.general/pull/1661). 
diff --git a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py b/plugins/modules/source_control/gitlab/gitlab_deploy_key.py index c66a6f9da8..20caf4292b 100644 --- a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py +++ b/plugins/modules/source_control/gitlab/gitlab_deploy_key.py @@ -145,6 +145,13 @@ class GitLabDeployKey(object): def createOrUpdateDeployKey(self, project, key_title, key_key, options): changed = False + # note: unfortunately public key cannot be updated directly by + # GitLab REST API, so for that case we need to delete and + # than recreate the key + if self.deployKeyObject and self.deployKeyObject.key != key_key: + self.deployKeyObject.delete() + self.deployKeyObject = None + # Because we have already call existsDeployKey in main() if self.deployKeyObject is None: deployKey = self.createDeployKey(project, { diff --git a/tests/integration/targets/gitlab_deploy_key/defaults/main.yml b/tests/integration/targets/gitlab_deploy_key/defaults/main.yml index 4e47591941..04d5b6ca83 100644 --- a/tests/integration/targets/gitlab_deploy_key/defaults/main.yml +++ b/tests/integration/targets/gitlab_deploy_key/defaults/main.yml @@ -1,2 +1,3 @@ gitlab_project_name: ansible_test_project gitlab_deploy_key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnTYY7CYk1F/wBklpdRxudxN6KeXgfhutkiCigSfPhe ansible_test" +gitlab_deploy_key_new: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDL1TDkIY2uu6NYRD0G5qGeHTd/AoqQpCw1XENXDnTLDN5DNZVCO1+7xfA5DR5V2tcR691Q005BKxoCo+uUBd1aAM7JWyuXl050rZCXBj4oaUF7urjDANQ7FzYuvqp9h8NGkvzfBYz5YBfu4vh43ajnF0daSyZy4RlxeG9G44vnHElXTQ0igaOCSta/23FdERIYzKxuX4Ul42AwtSmCRwbkN4fC86o0UwW2q0zkgFOUoojtS/Avh0aX8UQyeagaPJFXCc/ldG1mMK020GQAEa8aQcUpysnEzZdq6no5Zyn/WQSobpnJ9CraHhdb1QQytg/+c+CgjSN0cERhTvLn0WsQ043jo5g1kSHNu+OiYXmVwTxe95nXCsoYmCNF/DmezjYVxe9BGlKRAEuHsNi87Il84nBnzKVHGlkq8eJNTR8ASjNkjI7pGS0zxCDB55c3LHh4Aa1xU+nwINRurn/TEDpDZc43/XOnt+aqbxkeWbMtOD/r2gfMj8lNZJ/IyamWy7HcFgGpTZJln4WxVLF+Cz56qa8Hf9WzJL+8Lq7eE3sJKOagn/zPgqeybXbTIPSr3fshq3yE8FYHpFKS4aLvQC/XSLCywrhr25DKBn9UHIZmgC9hxMnVJCKux+ltwGJOKIaoj+5n3+DvM+E3fK3fkADo5+Frzay6/rLTwKWUrzfjQQ== ansible_test_new" diff --git a/tests/integration/targets/gitlab_deploy_key/tasks/main.yml b/tests/integration/targets/gitlab_deploy_key/tasks/main.yml index ba82e378a5..430d46f4ab 100644 --- a/tests/integration/targets/gitlab_deploy_key/tasks/main.yml +++ b/tests/integration/targets/gitlab_deploy_key/tasks/main.yml @@ -39,3 +39,35 @@ - assert: that: - deploy_key_status is changed + - deploy_key_status.deploy_key.key == gitlab_deploy_key + + +- name: Update public key {{ gitlab_project_name }} (change expected) + gitlab_deploy_key: + login_token: "{{ gitlab_login_token }}" + project: "root/{{ gitlab_project_name }}" + server_url: "{{ gitlab_host }}" + title: "{{ gitlab_project_name }}" + key: "{{ gitlab_deploy_key_new }}" + state: present + register: deploy_key_status + +- assert: + that: + - deploy_key_status is changed + - deploy_key_status.deploy_key.key == gitlab_deploy_key_new + +- name: Update public key {{ gitlab_project_name }} (no change expected) + gitlab_deploy_key: + login_token: "{{ gitlab_login_token }}" + project: "root/{{ gitlab_project_name }}" + server_url: "{{ gitlab_host }}" + title: "{{ gitlab_project_name }}" + key: "{{ gitlab_deploy_key_new }}" + state: present + register: deploy_key_status + +- assert: + that: + - not 
deploy_key_status.changed + - deploy_key_status.deploy_key.key == gitlab_deploy_key_new From 701a89eb1ce88e45b30b79cffc909e5ad0c8af37 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 5 Feb 2021 08:08:06 +0100 Subject: [PATCH 0021/3093] Make sure mercurial is also installed on OpenSuSE. (#1734) --- tests/integration/targets/hg/tasks/install.yml | 5 +++++ tests/integration/targets/hg/tasks/uninstall.yml | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/tests/integration/targets/hg/tasks/install.yml b/tests/integration/targets/hg/tasks/install.yml index c4a1aba90e..40aba5e2fc 100644 --- a/tests/integration/targets/hg/tasks/install.yml +++ b/tests/integration/targets/hg/tasks/install.yml @@ -36,6 +36,11 @@ name: mercurial when: ansible_facts.pkg_mgr in ['pkgng', 'community.general.pkgng'] +- name: install mercurial (zypper) + package: + name: mercurial + when: ansible_facts.pkg_mgr in ['zypper', 'community.general.zypper'] + - name: preserve the updated python command: cp -av "{{ which_python.stdout }}" "{{ which_python.stdout }}.updated" diff --git a/tests/integration/targets/hg/tasks/uninstall.yml b/tests/integration/targets/hg/tasks/uninstall.yml index 55c5efcb8c..305a2ffd33 100644 --- a/tests/integration/targets/hg/tasks/uninstall.yml +++ b/tests/integration/targets/hg/tasks/uninstall.yml @@ -35,6 +35,12 @@ autoremove: yes when: ansible_facts.pkg_mgr in ['pkgng', 'community.general.pkgng'] +- name: uninstall packages which were not originally installed (zypper) + package: + name: mercurial + state: absent + when: ansible_facts.pkg_mgr in ['zypper', 'community.general.zypper'] + - name: restore the default python raw: mv "{{ which_python.stdout }}.default" "{{ which_python.stdout }}" From d4f3a47d48aa763913e172ac76d6e01a24736030 Mon Sep 17 00:00:00 2001 From: John R Barker Date: Fri, 5 Feb 2021 14:07:30 +0000 Subject: [PATCH 0022/3093] Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 28 +++++++++++---------------- 1 file changed, 11 
insertions(+), 17 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index ac2caac3ae..bd9267017f 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -6,24 +6,18 @@ inputs: - type: description attributes: value: | - Verify first that your issue is not already reported on [GitHub](https://github.com/ansible-collections/community.general/labels/bug) - Also test if the latest release and devel branch are affected too + Verify first that your issue is not already reported on [GitHub](https://github.com/ansible-collections/community.general/issues) + Also test if the latest release and main branch are affected too - type: textarea attributes: - label: OVERVIEW - description: 'Explain the problem briefly below' - required: true - - - type: textarea - attributes: - label: SUMMARY + label: Summary description: 'Explain the problem briefly below' required: true - type: dropdown attributes: - label: ISSUE TYPE + label: Issue Type choices: - Bug Report required: true @@ -35,12 +29,12 @@ inputs: # Select from list, filter as you type (`mysql` would only show the 3 mysql components) # OR freeform - doesn't seem to be supported in adaptivecards - label: COMPONENT NAME + label: Component Name description: 'List the component, ie `template`, `mysql_users`' required: true - type: textarea attributes: - label: ANSIBLE VERSION + label: Ansible Version required: false description: | If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. @@ -52,7 +46,7 @@ inputs: ``` - type: textarea attributes: - label: CONFIGURATION + label: Configuration required: false description: | If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. @@ -64,13 +58,13 @@ inputs: ``` - type: textarea attributes: - label: OS / ENVIRONMENT + label: OS / Environment description: 'Provide all relevant information below, e.g. 
target OS versions, network device firmware, etc' required: false - type: textarea attributes: - label: STEPS TO REPRO + label: Steps To Reproduce description: 'Describe exactly how to reproduce the problem, using a minimal test-case' required: false value: | @@ -80,13 +74,13 @@ inputs: ``` - type: textarea attributes: - label: EXPECTED RESULTS + label: Expected Results description: 'Describe what you expected to happen when running the steps above' required: false - type: textarea attributes: - label: ACTUAL RESULTS + label: Actual Results description: 'Describe what actually happened. If possible run with extra verbosity (-vvvv)' required: false value: | From f4e60e09ac08c75e83d6e20484668c5e4fb6e194 Mon Sep 17 00:00:00 2001 From: John R Barker Date: Fri, 5 Feb 2021 14:36:35 +0000 Subject: [PATCH 0023/3093] bug_report.yml: inline comments to docs --- .github/ISSUE_TEMPLATE/bug_report.yml | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index bd9267017f..0e17d5b1d9 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -17,6 +17,7 @@ inputs: - type: dropdown attributes: + # FIXME: Once GitHub allows defining the default choice, update this label: Issue Type choices: - Bug Report @@ -32,15 +33,14 @@ inputs: label: Component Name description: 'List the component, ie `template`, `mysql_users`' required: true + - type: textarea attributes: label: Ansible Version required: false description: | - If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. - This can be a piece of YAML from, e.g., an automation, script, scene or configuration. + Paste verbatim output from `ansible --version` between quotes value: | - ```paste below ``` @@ -51,8 +51,8 @@ inputs: description: | If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. 
This can be a piece of YAML from, e.g., an automation, script, scene or configuration. + Paste verbatim output from `ansible-config dump --only-changed` between quotes value: | - ```paste below ``` @@ -68,23 +68,22 @@ inputs: description: 'Describe exactly how to reproduce the problem, using a minimal test-case' required: false value: | - ```paste below ``` - type: textarea attributes: label: Expected Results - description: 'Describe what you expected to happen when running the steps above' + description: | + Describe what you expected to happen when running the steps above required: false - type: textarea attributes: label: Actual Results - description: 'Describe what actually happened. If possible run with extra verbosity (-vvvv)' + description: 'Describe what actually happened. If possible run with extra verbosity (`ansible-playbook -vvvv`)' required: false value: | - ```paste below ``` From 29bd5a94862f2e12f1fce2c4a9e801c6f5b38405 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 8 Feb 2021 16:33:18 +0100 Subject: [PATCH 0024/3093] Fix a bunch of potential security issues (secret leaking) (#1736) * Fix a bunch of potential security issues (secret leaking). * oneandone_server was already ok. * Add more parameters for pagerduty_alert. * Add more no_log=True. 
--- changelogs/fragments/no_log-fixes.yml | 25 +++++++++++++++++++ .../modules/cloud/huawei/hwc_ecs_instance.py | 2 +- plugins/modules/cloud/misc/ovirt.py | 4 +-- .../oneandone/oneandone_firewall_policy.py | 2 +- .../oneandone/oneandone_load_balancer.py | 2 +- .../oneandone/oneandone_monitoring_policy.py | 2 +- .../oneandone/oneandone_private_network.py | 2 +- .../cloud/oneandone/oneandone_public_ip.py | 2 +- .../modules/cloud/rackspace/rax_clb_ssl.py | 2 +- .../spotinst/spotinst_aws_elastigroup.py | 4 +-- .../identity/keycloak/keycloak_client.py | 2 +- .../modules/monitoring/librato_annotation.py | 2 +- plugins/modules/monitoring/pagerduty_alert.py | 6 ++--- .../modules/monitoring/pagerduty_change.py | 2 +- plugins/modules/monitoring/pingdom.py | 2 +- .../modules/monitoring/rollbar_deployment.py | 2 +- plugins/modules/monitoring/stackdriver.py | 2 +- plugins/modules/net_tools/dnsmadeeasy.py | 2 +- .../modules/net_tools/nios/nios_nsgroup.py | 2 +- .../modules/notification/logentries_msg.py | 2 +- plugins/modules/packaging/os/pulp_repo.py | 2 +- .../redfish/redfish_command.py | 2 +- .../source_control/gitlab/gitlab_runner.py | 2 +- plugins/modules/storage/ibm/ibm_sa_host.py | 2 +- .../sophos_utm/utm_proxy_auth_profile.py | 3 --- 25 files changed, 52 insertions(+), 30 deletions(-) create mode 100644 changelogs/fragments/no_log-fixes.yml diff --git a/changelogs/fragments/no_log-fixes.yml b/changelogs/fragments/no_log-fixes.yml new file mode 100644 index 0000000000..70afd3229d --- /dev/null +++ b/changelogs/fragments/no_log-fixes.yml @@ -0,0 +1,25 @@ +security_fixes: + - "ovirt - mark the ``instance_rootpw`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." 
+ - "oneandone_firewall_policy, oneandone_load_balancer, oneandone_monitoring_policy, oneandone_private_network, oneandone_public_ip - mark the ``auth_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "rax_clb_ssl - mark the ``private_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "spotinst_aws_elastigroup - mark the ``multai_token`` and ``token`` parameters as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "keycloak_client - mark the ``registration_access_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "librato_annotation - mark the ``api_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "pagerduty_alert - mark the ``api_key``, ``service_key`` and ``integration_key`` parameters as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "nios_nsgroup - mark the ``tsig_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "pulp_repo - mark the ``feed_client_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "gitlab_runner - mark the ``registration_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "ibm_sa_host - mark the ``iscsi_chap_secret`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." 
+ - "keycloak_* modules - mark the ``auth_client_secret`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "hwc_ecs_instance - mark the ``admin_pass`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "ovirt - mark the ``instance_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "pagerduty_change - mark the ``integration_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "pingdom - mark the ``key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "rollbar_deployment - mark the ``token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "stackdriver - mark the ``key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "dnsmadeeasy - mark the ``account_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "logentries_msg - mark the ``token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "redfish_command - mark the ``update_creds.password`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." + - "utm_proxy_auth_profile - mark the ``frontend_cookie_secret`` parameter as ``no_log`` to avoid leakage of secrets. This causes the ``utm_proxy_auth_profile`` return value to no longer containing the correct value, but a placeholder (https://github.com/ansible-collections/community.general/pull/1736)." 
+breaking_changes: + - "utm_proxy_auth_profile - the ``frontend_cookie_secret`` return value now contains a placeholder string instead of the module's ``frontend_cookie_secret`` parameter (https://github.com/ansible-collections/community.general/pull/1736)." diff --git a/plugins/modules/cloud/huawei/hwc_ecs_instance.py b/plugins/modules/cloud/huawei/hwc_ecs_instance.py index cbd5f79454..3d4ba84b64 100644 --- a/plugins/modules/cloud/huawei/hwc_ecs_instance.py +++ b/plugins/modules/cloud/huawei/hwc_ecs_instance.py @@ -543,7 +543,7 @@ def build_module(): snapshot_id=dict(type='str') )), vpc_id=dict(type='str', required=True), - admin_pass=dict(type='str'), + admin_pass=dict(type='str', no_log=True), data_volumes=dict(type='list', elements='dict', options=dict( volume_id=dict(type='str', required=True), device=dict(type='str') diff --git a/plugins/modules/cloud/misc/ovirt.py b/plugins/modules/cloud/misc/ovirt.py index e037261b16..25e3081c8f 100644 --- a/plugins/modules/cloud/misc/ovirt.py +++ b/plugins/modules/cloud/misc/ovirt.py @@ -405,8 +405,8 @@ def main(): instance_gateway=dict(type='str', aliases=['gateway']), instance_domain=dict(type='str', aliases=['domain']), instance_dns=dict(type='str', aliases=['dns']), - instance_rootpw=dict(type='str', aliases=['rootpw']), - instance_key=dict(type='str', aliases=['key']), + instance_rootpw=dict(type='str', aliases=['rootpw'], no_log=True), + instance_key=dict(type='str', aliases=['key'], no_log=True), sdomain=dict(type='str'), region=dict(type='str'), ), diff --git a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py b/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py index 3f545e6eb7..90694861a7 100644 --- a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py +++ b/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py @@ -500,7 +500,7 @@ def main(): module = AnsibleModule( argument_spec=dict( auth_token=dict( - type='str', + type='str', no_log=True, 
default=os.environ.get('ONEANDONE_AUTH_TOKEN')), api_url=dict( type='str', diff --git a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py b/plugins/modules/cloud/oneandone/oneandone_load_balancer.py index 104302b9fa..62551560c2 100644 --- a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py +++ b/plugins/modules/cloud/oneandone/oneandone_load_balancer.py @@ -594,7 +594,7 @@ def main(): module = AnsibleModule( argument_spec=dict( auth_token=dict( - type='str', + type='str', no_log=True, default=os.environ.get('ONEANDONE_AUTH_TOKEN')), api_url=dict( type='str', diff --git a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py index 7488b7aeab..79fed9a66a 100644 --- a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py +++ b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py @@ -947,7 +947,7 @@ def main(): module = AnsibleModule( argument_spec=dict( auth_token=dict( - type='str', + type='str', no_log=True, default=os.environ.get('ONEANDONE_AUTH_TOKEN')), api_url=dict( type='str', diff --git a/plugins/modules/cloud/oneandone/oneandone_private_network.py b/plugins/modules/cloud/oneandone/oneandone_private_network.py index ebd44130d5..7eae6ea3dc 100644 --- a/plugins/modules/cloud/oneandone/oneandone_private_network.py +++ b/plugins/modules/cloud/oneandone/oneandone_private_network.py @@ -384,7 +384,7 @@ def main(): module = AnsibleModule( argument_spec=dict( auth_token=dict( - type='str', + type='str', no_log=True, default=os.environ.get('ONEANDONE_AUTH_TOKEN')), api_url=dict( type='str', diff --git a/plugins/modules/cloud/oneandone/oneandone_public_ip.py b/plugins/modules/cloud/oneandone/oneandone_public_ip.py index 62cb62da63..edefbc938f 100644 --- a/plugins/modules/cloud/oneandone/oneandone_public_ip.py +++ b/plugins/modules/cloud/oneandone/oneandone_public_ip.py @@ -274,7 +274,7 @@ def main(): module = AnsibleModule( argument_spec=dict( auth_token=dict( 
- type='str', + type='str', no_log=True, default=os.environ.get('ONEANDONE_AUTH_TOKEN')), api_url=dict( type='str', diff --git a/plugins/modules/cloud/rackspace/rax_clb_ssl.py b/plugins/modules/cloud/rackspace/rax_clb_ssl.py index ae886b2959..114128e8b1 100644 --- a/plugins/modules/cloud/rackspace/rax_clb_ssl.py +++ b/plugins/modules/cloud/rackspace/rax_clb_ssl.py @@ -238,7 +238,7 @@ def main(): loadbalancer=dict(required=True), state=dict(default='present', choices=['present', 'absent']), enabled=dict(type='bool', default=True), - private_key=dict(), + private_key=dict(no_log=True), certificate=dict(), intermediate_certificate=dict(), secure_port=dict(type='int', default=443), diff --git a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py index 02da2c7b6f..8f05da7b09 100644 --- a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py +++ b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py @@ -1459,7 +1459,7 @@ def main(): min_size=dict(type='int', required=True), monitoring=dict(type='str'), multai_load_balancers=dict(type='list'), - multai_token=dict(type='str'), + multai_token=dict(type='str', no_log=True), name=dict(type='str', required=True), network_interfaces=dict(type='list'), on_demand_count=dict(type='int'), @@ -1483,7 +1483,7 @@ def main(): target_group_arns=dict(type='list'), tenancy=dict(type='str'), terminate_at_end_of_billing_hour=dict(type='bool'), - token=dict(type='str'), + token=dict(type='str', no_log=True), unit=dict(type='str'), user_data=dict(type='str'), utilize_reserved_instances=dict(type='bool'), diff --git a/plugins/modules/identity/keycloak/keycloak_client.py b/plugins/modules/identity/keycloak/keycloak_client.py index e1fa4aed13..b27155ba4a 100644 --- a/plugins/modules/identity/keycloak/keycloak_client.py +++ b/plugins/modules/identity/keycloak/keycloak_client.py @@ -707,7 +707,7 @@ def main(): enabled=dict(type='bool'), 
client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']), secret=dict(type='str', no_log=True), - registration_access_token=dict(type='str', aliases=['registrationAccessToken']), + registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True), default_roles=dict(type='list', aliases=['defaultRoles']), redirect_uris=dict(type='list', aliases=['redirectUris']), web_origins=dict(type='list', aliases=['webOrigins']), diff --git a/plugins/modules/monitoring/librato_annotation.py b/plugins/modules/monitoring/librato_annotation.py index ebdf371ce7..d0fd406d4f 100644 --- a/plugins/modules/monitoring/librato_annotation.py +++ b/plugins/modules/monitoring/librato_annotation.py @@ -148,7 +148,7 @@ def main(): module = AnsibleModule( argument_spec=dict( user=dict(required=True), - api_key=dict(required=True), + api_key=dict(required=True, no_log=True), name=dict(required=False), title=dict(required=True), source=dict(required=False), diff --git a/plugins/modules/monitoring/pagerduty_alert.py b/plugins/modules/monitoring/pagerduty_alert.py index dd17c174e8..736ada5e4a 100644 --- a/plugins/modules/monitoring/pagerduty_alert.py +++ b/plugins/modules/monitoring/pagerduty_alert.py @@ -197,9 +197,9 @@ def main(): argument_spec=dict( name=dict(required=False), service_id=dict(required=True), - service_key=dict(required=False), - integration_key=dict(required=False), - api_key=dict(required=True), + service_key=dict(required=False, no_log=True), + integration_key=dict(required=False, no_log=True), + api_key=dict(required=True, no_log=True), state=dict(required=True, choices=['triggered', 'acknowledged', 'resolved']), client=dict(required=False, default=None), diff --git a/plugins/modules/monitoring/pagerduty_change.py b/plugins/modules/monitoring/pagerduty_change.py index 3fecdba59f..358a69612e 100644 --- a/plugins/modules/monitoring/pagerduty_change.py +++ 
b/plugins/modules/monitoring/pagerduty_change.py @@ -108,7 +108,7 @@ from datetime import datetime def main(): module = AnsibleModule( argument_spec=dict( - integration_key=dict(required=True, type='str'), + integration_key=dict(required=True, type='str', no_log=True), summary=dict(required=True, type='str'), source=dict(required=False, default='Ansible', type='str'), user=dict(required=False, type='str'), diff --git a/plugins/modules/monitoring/pingdom.py b/plugins/modules/monitoring/pingdom.py index baf99eda58..23ed254543 100644 --- a/plugins/modules/monitoring/pingdom.py +++ b/plugins/modules/monitoring/pingdom.py @@ -112,7 +112,7 @@ def main(): checkid=dict(required=True), uid=dict(required=True), passwd=dict(required=True, no_log=True), - key=dict(required=True) + key=dict(required=True, no_log=True), ) ) diff --git a/plugins/modules/monitoring/rollbar_deployment.py b/plugins/modules/monitoring/rollbar_deployment.py index 0affd7c78c..161361b774 100644 --- a/plugins/modules/monitoring/rollbar_deployment.py +++ b/plugins/modules/monitoring/rollbar_deployment.py @@ -92,7 +92,7 @@ def main(): module = AnsibleModule( argument_spec=dict( - token=dict(required=True), + token=dict(required=True, no_log=True), environment=dict(required=True), revision=dict(required=True), user=dict(required=False), diff --git a/plugins/modules/monitoring/stackdriver.py b/plugins/modules/monitoring/stackdriver.py index bd1fc14514..8e2d19a9ab 100644 --- a/plugins/modules/monitoring/stackdriver.py +++ b/plugins/modules/monitoring/stackdriver.py @@ -152,7 +152,7 @@ def main(): module = AnsibleModule( argument_spec=dict( # @TODO add types - key=dict(required=True), + key=dict(required=True, no_log=True), event=dict(required=True, choices=['deploy', 'annotation']), msg=dict(), revision_id=dict(), diff --git a/plugins/modules/net_tools/dnsmadeeasy.py b/plugins/modules/net_tools/dnsmadeeasy.py index c6bc70324c..75135c8277 100644 --- a/plugins/modules/net_tools/dnsmadeeasy.py +++ 
b/plugins/modules/net_tools/dnsmadeeasy.py @@ -546,7 +546,7 @@ def main(): module = AnsibleModule( argument_spec=dict( - account_key=dict(required=True), + account_key=dict(required=True, no_log=True), account_secret=dict(required=True, no_log=True), domain=dict(required=True), sandbox=dict(default=False, type='bool'), diff --git a/plugins/modules/net_tools/nios/nios_nsgroup.py b/plugins/modules/net_tools/nios/nios_nsgroup.py index f94c379407..b56c3f0b8d 100644 --- a/plugins/modules/net_tools/nios/nios_nsgroup.py +++ b/plugins/modules/net_tools/nios/nios_nsgroup.py @@ -398,7 +398,7 @@ def main(): address=dict(required=True), name=dict(required=True), stealth=dict(type='bool', default=False), - tsig_key=dict(), + tsig_key=dict(no_log=True), tsig_key_alg=dict(choices=['HMAC-MD5', 'HMAC-SHA256'], default='HMAC-MD5'), tsig_key_name=dict(required=True) ) diff --git a/plugins/modules/notification/logentries_msg.py b/plugins/modules/notification/logentries_msg.py index 974e35f9a1..59e0f32565 100644 --- a/plugins/modules/notification/logentries_msg.py +++ b/plugins/modules/notification/logentries_msg.py @@ -73,7 +73,7 @@ def send_msg(module, token, msg, api, port): def main(): module = AnsibleModule( argument_spec=dict( - token=dict(type='str', required=True), + token=dict(type='str', required=True, no_log=True), msg=dict(type='str', required=True), api=dict(type='str', default="data.logentries.com"), port=dict(type='int', default=80)), diff --git a/plugins/modules/packaging/os/pulp_repo.py b/plugins/modules/packaging/os/pulp_repo.py index 37344233d8..8dbc6b9ac4 100644 --- a/plugins/modules/packaging/os/pulp_repo.py +++ b/plugins/modules/packaging/os/pulp_repo.py @@ -545,7 +545,7 @@ def main(): deprecated_aliases=[dict(name='ca_cert', version='3.0.0', collection_name='community.general')]), # was Ansible 2.14 feed_client_cert=dict(aliases=['importer_ssl_client_cert']), - feed_client_key=dict(aliases=['importer_ssl_client_key']), + 
feed_client_key=dict(aliases=['importer_ssl_client_key'], no_log=True), name=dict(required=True, aliases=['repo']), proxy_host=dict(), proxy_port=dict(), diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index 9646f9a3b7..78007f1de9 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -572,7 +572,7 @@ def main(): type='dict', options=dict( username=dict(), - password=dict() + password=dict(no_log=True) ) ), virtual_media=dict( diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py index 8ebd1a3851..52354645df 100644 --- a/plugins/modules/source_control/gitlab/gitlab_runner.py +++ b/plugins/modules/source_control/gitlab/gitlab_runner.py @@ -309,7 +309,7 @@ def main(): locked=dict(type='bool', default=False), access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]), maximum_timeout=dict(type='int', default=3600), - registration_token=dict(type='str', required=True), + registration_token=dict(type='str', required=True, no_log=True), state=dict(type='str', default="present", choices=["absent", "present"]), )) diff --git a/plugins/modules/storage/ibm/ibm_sa_host.py b/plugins/modules/storage/ibm/ibm_sa_host.py index 3853a5e04b..5ce12992bc 100644 --- a/plugins/modules/storage/ibm/ibm_sa_host.py +++ b/plugins/modules/storage/ibm/ibm_sa_host.py @@ -90,7 +90,7 @@ def main(): cluster=dict(), domain=dict(), iscsi_chap_name=dict(), - iscsi_chap_secret=dict() + iscsi_chap_secret=dict(no_log=True), ) ) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py index ebd76e3d9c..caa0085c25 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py +++ 
b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py @@ -256,9 +256,6 @@ result: frontend_cookie: description: Frontend cookie name type: str - frontend_cookie_secret: - description: Frontend cookie secret - type: str frontend_form: description: Frontend authentication form name type: str From 909ac92fe274b3e7dfcc61ee8206010d0a8c3f6d Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 9 Feb 2021 07:47:08 +0100 Subject: [PATCH 0025/3093] Fix CI (#1752) * Limit cryptography to < 3.4 for Python < 3.6. * Work around old pip versions. * Use constraints file when installing stormssh. * Work around old pip in RHEL8.2, CentOS 8, Ubuntu 18.04, and OpenSuSE 15 --- tests/integration/targets/ssh_config/meta/main.yml | 2 ++ tests/integration/targets/ssh_config/tasks/main.yml | 1 + tests/utils/constraints.txt | 2 ++ 3 files changed, 5 insertions(+) create mode 100644 tests/integration/targets/ssh_config/meta/main.yml diff --git a/tests/integration/targets/ssh_config/meta/main.yml b/tests/integration/targets/ssh_config/meta/main.yml new file mode 100644 index 0000000000..91a63627f6 --- /dev/null +++ b/tests/integration/targets/ssh_config/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_constraints diff --git a/tests/integration/targets/ssh_config/tasks/main.yml b/tests/integration/targets/ssh_config/tasks/main.yml index abb2b1b16c..12f277b455 100644 --- a/tests/integration/targets/ssh_config/tasks/main.yml +++ b/tests/integration/targets/ssh_config/tasks/main.yml @@ -6,6 +6,7 @@ pip: name: stormssh state: present + extra_args: "-c {{ remote_constraints }}" - set_fact: output_dir_test: '{{ output_dir }}/test_ssh_config' diff --git a/tests/utils/constraints.txt b/tests/utils/constraints.txt index ae6000ae18..c5db3156ab 100644 --- a/tests/utils/constraints.txt +++ b/tests/utils/constraints.txt @@ -1,6 +1,8 @@ coverage >= 4.2, < 5.0.0, != 4.3.2 ; python_version <= '3.7' # features in 4.2+ required, avoid known bug in 4.3.2 on python 2.6, coverage 
5.0+ incompatible coverage >= 4.5.4, < 5.0.0 ; python_version > '3.7' # coverage had a bug in < 4.5.4 that would cause unit tests to hang in Python 3.8, coverage 5.0+ incompatible cryptography < 2.2 ; python_version < '2.7' # cryptography 2.2 drops support for python 2.6 +cryptography >= 3.0, < 3.4 ; python_version < '3.6' # cryptography 3.4 drops support for python 2.7 +cryptography >= 3.3, < 3.4 # FIXME: the upper limit is needed for RHEL8.2, CentOS 8, Ubuntu 18.04, and OpenSuSE 15 deepdiff < 4.0.0 ; python_version < '3' # deepdiff 4.0.0 and later require python 3 jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later From c03ae754d2afdca98f48cc3b0da3e1847258469b Mon Sep 17 00:00:00 2001 From: morco Date: Tue, 9 Feb 2021 10:29:13 +0100 Subject: [PATCH 0026/3093] Various fixes for updating existing gitlab users (#1724) * fixes various issues related to updating an ... ... 
existing gitlab user, in detail: - fixes updating admin status not working - fixes user passwords not updated - fixes confirmation skipping param ignored for user updates - added tests for code changes * fixing sanity issues * fixing sanity issues 02 * fixing sanity issues 03 * fixing sanity issues 04 * fixing unit test failures * fixing unit test failures 02 * add changelog fragment * fixing unit test failures 03 * forgot to add changelog fragment * fix changelog sanity issues * fix changelog sanity issues 02 * incorporate review suggestions Co-authored-by: Mirko Wilhelmi --- ...ixes-for-updating-existing-gitlab-user.yml | 2 + .../source_control/gitlab/gitlab_user.py | 61 ++++-- .../targets/gitlab_user/tasks/main.yml | 207 +++++++++++++++++- .../source_control/gitlab/test_gitlab_user.py | 23 +- 4 files changed, 271 insertions(+), 22 deletions(-) create mode 100644 changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml diff --git a/changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml b/changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml new file mode 100644 index 0000000000..eab67e0f47 --- /dev/null +++ b/changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml @@ -0,0 +1,2 @@ +bugfixes: + - gitlab_user - make updates to the ``isadmin``, ``password`` and ``confirm`` options of an already existing GitLab user work (https://github.com/ansible-collections/community.general/pull/1724). 
diff --git a/plugins/modules/source_control/gitlab/gitlab_user.py b/plugins/modules/source_control/gitlab/gitlab_user.py index 458a4734b2..1e8ee65a67 100644 --- a/plugins/modules/source_control/gitlab/gitlab_user.py +++ b/plugins/modules/source_control/gitlab/gitlab_user.py @@ -205,6 +205,7 @@ class GitLabUser(object): ''' def createOrUpdateUser(self, username, options): changed = False + potentionally_changed = False # Because we have already call userExists in main() if self.userObject is None: @@ -218,11 +219,36 @@ class GitLabUser(object): 'external': options['external']}) changed = True else: - changed, user = self.updateUser(self.userObject, { - 'name': options['name'], - 'email': options['email'], - 'is_admin': options['isadmin'], - 'external': options['external']}) + changed, user = self.updateUser( + self.userObject, { + # add "normal" parameters here, put uncheckable + # params in the dict below + 'name': {'value': options['name']}, + 'email': {'value': options['email']}, + + # note: for some attributes like this one the key + # from reading back from server is unfortunately + # different to the one needed for pushing/writing, + # in that case use the optional setter key + 'is_admin': { + 'value': options['isadmin'], 'setter': 'admin' + }, + 'external': {'value': options['external']}, + }, + { + # put "uncheckable" params here, this means params + # which the gitlab does accept for setting but does + # not return any information about it + 'skip_reconfirmation': {'value': not options['confirm']}, + 'password': {'value': options['password']}, + } + ) + + # note: as we unfortunately have some uncheckable parameters + # where it is not possible to determine if the update + # changed something or not, we must assume here that a + # changed happend and that an user object update is needed + potentionally_changed = True # Assign ssh keys if options['sshkey_name'] and options['sshkey_file']: @@ -237,14 +263,15 @@ class GitLabUser(object): changed = changed or 
group_changed self.userObject = user - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username) - + if (changed or potentionally_changed) and not self._module.check_mode: try: user.save() except Exception as e: self._module.fail_json(msg="Failed to update user: %s " % to_native(e)) + + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username) return True else: return False @@ -348,15 +375,23 @@ class GitLabUser(object): @param user User object @param arguments User attributes ''' - def updateUser(self, user, arguments): + def updateUser(self, user, arguments, uncheckable_args): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(user, arg_key) != arguments[arg_key]: - setattr(user, arg_key, arguments[arg_key]) + av = arg_value['value'] + + if av is not None: + if getattr(user, arg_key) != av: + setattr(user, arg_value.get('setter', arg_key), av) changed = True + for arg_key, arg_value in uncheckable_args.items(): + av = arg_value['value'] + + if av is not None: + setattr(user, arg_value.get('setter', arg_key), av) + return (changed, user) ''' diff --git a/tests/integration/targets/gitlab_user/tasks/main.yml b/tests/integration/targets/gitlab_user/tasks/main.yml index 6408a27f37..6cbcd14c34 100644 --- a/tests/integration/targets/gitlab_user/tasks/main.yml +++ b/tests/integration/targets/gitlab_user/tasks/main.yml @@ -10,25 +10,25 @@ - name: Clean up gitlab user gitlab_user: - server_url: "{{ gitlab_host }}" + api_url: "{{ gitlab_host }}" name: ansible_test_user username: ansible_test_user password: Secr3tPassw00rd email: root@localhost validate_certs: false - login_token: "{{ gitlab_login_token }}" + api_token: "{{ gitlab_login_token }}" state: absent - name: Create gitlab user gitlab_user: - server_url: "{{ gitlab_host }}" + api_url: "{{ 
gitlab_host }}" email: "{{ gitlab_user_email }}" name: "{{ gitlab_user }}" username: "{{ gitlab_user }}" password: "{{ gitlab_user_pass }}" validate_certs: False - login_token: "{{ gitlab_login_token }}" + api_token: "{{ gitlab_login_token }}" state: present register: gitlab_user_state @@ -39,13 +39,13 @@ - name: Create gitlab user again gitlab_user: - server_url: "{{ gitlab_host }}" + api_url: "{{ gitlab_host }}" email: root@localhost name: ansible_test_user username: ansible_test_user password: Secr3tPassw00rd validate_certs: False - login_token: "{{ gitlab_login_token }}" + api_token: "{{ gitlab_login_token }}" state: present register: gitlab_user_state_again @@ -53,3 +53,198 @@ assert: that: - gitlab_user_state_again is not changed + - gitlab_user_state_again.user.is_admin == False + + +- name: Update User Test => Make User Admin + gitlab_user: + api_url: "{{ gitlab_host }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + isadmin: true + validate_certs: False + api_token: "{{ gitlab_login_token }}" + state: present + register: gitlab_user_state + +- name: Check if user is admin now + assert: + that: + - gitlab_user_state is changed + - gitlab_user_state.user.is_admin == True + +- name: Update User Test => Make User Admin (Again) + gitlab_user: + api_url: "{{ gitlab_host }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + isadmin: true + validate_certs: False + api_token: "{{ gitlab_login_token }}" + state: present + register: gitlab_user_state + +- name: Check state is not changed + assert: + that: + - gitlab_user_state is not changed + - gitlab_user_state.user.is_admin == True + +- name: Update User Test => Remove Admin Rights + gitlab_user: + api_url: "{{ gitlab_host }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + isadmin: false + validate_certs: False + api_token: "{{ gitlab_login_token }}" + state: present + 
register: gitlab_user_state + +- name: Check if user is not admin anymore + assert: + that: + - gitlab_user_state is changed + - gitlab_user_state.user.is_admin == False + + +- name: Update User Test => Try Changing Mail without Confirmation Skipping + gitlab_user: + api_url: "{{ gitlab_host }}" + email: foo@bar.baz + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + confirm: True + validate_certs: False + api_token: "{{ gitlab_login_token }}" + state: present + register: gitlab_user_state + +- name: Check that eMail is unchanged (Only works with confirmation skipping) + assert: + that: + - gitlab_user_state is changed + - gitlab_user_state.user.email == gitlab_user_email + +- name: Update User Test => Change Mail with Confirmation Skip + gitlab_user: + api_url: "{{ gitlab_host }}" + email: foo@bar.baz + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + confirm: false + validate_certs: False + api_token: "{{ gitlab_login_token }}" + state: present + register: gitlab_user_state + +- name: Check that mail has changed now + assert: + that: + - gitlab_user_state is changed + - gitlab_user_state.user.email == 'foo@bar.baz' + +- name: Update User Test => Change Mail with Confirmation Skip (Again) + gitlab_user: + api_url: "{{ gitlab_host }}" + email: foo@bar.baz + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + confirm: false + validate_certs: False + api_token: "{{ gitlab_login_token }}" + state: present + register: gitlab_user_state + +- name: Check state is not changed + assert: + that: + - gitlab_user_state is not changed + - gitlab_user_state.user.email == 'foo@bar.baz' + +- name: Update User Test => Revert to original Mail Address + gitlab_user: + api_url: "{{ gitlab_host }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + confirm: false + validate_certs: False + api_token: "{{ gitlab_login_token }}" + state: present + register: gitlab_user_state + +- name: Check that reverting mail 
back to original has worked + assert: + that: + - gitlab_user_state is changed + - gitlab_user_state.user.email == gitlab_user_email + + +- name: Update User Test => Change User Password + gitlab_user: + api_url: "{{ gitlab_host }}" + validate_certs: False + + # note: the only way to check if a password really is what it is expected + # to be is to use it for login, so we use it here instead of the + # default token assuming that a user can always change its own password + api_username: "{{ gitlab_user }}" + api_password: "{{ gitlab_user_pass }}" + + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + password: new-super-password + state: present + register: gitlab_user_state + +- name: Check PW setting return state + assert: + that: + # note: there is no way to determine if a password has changed or + # not, so it can only be always yellow or always green, we + # decided for always green for now + - gitlab_user_state is not changed + +- name: Update User Test => Reset User Password + gitlab_user: + api_url: "{{ gitlab_host }}" + validate_certs: False + + api_username: "{{ gitlab_user }}" + api_password: new-super-password + + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + password: "{{ gitlab_user_pass }}" + state: present + register: gitlab_user_state + +- name: Check PW setting return state (Again) + assert: + that: + - gitlab_user_state is not changed + +- name: Update User Test => Check that password was reset + gitlab_user: + api_url: "{{ gitlab_host }}" + validate_certs: False + + api_username: "{{ gitlab_user }}" + api_password: "{{ gitlab_user_pass }}" + + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + state: present + register: gitlab_user_state + +- name: Check PW setting return state (Reset) + assert: + that: + - gitlab_user_state is not changed diff --git 
a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py index f78f0efb71..4a47654a8c 100644 --- a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py +++ b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py @@ -88,16 +88,33 @@ class TestGitlabUser(GitlabModuleTestCase): @with_httmock(resp_get_user) def test_update_user(self): user = self.gitlab_instance.users.get(1) - changed, newUser = self.moduleUtil.updateUser(user, {'name': "Jack Smith", "is_admin": "true"}) + + changed, newUser = self.moduleUtil.updateUser( + user, + {'name': {'value': "Jack Smith"}, "is_admin": {'value': "true", 'setter': 'admin'}}, {} + ) self.assertEqual(changed, True) self.assertEqual(newUser.name, "Jack Smith") - self.assertEqual(newUser.is_admin, "true") + self.assertEqual(newUser.admin, "true") - changed, newUser = self.moduleUtil.updateUser(user, {'name': "Jack Smith"}) + changed, newUser = self.moduleUtil.updateUser(user, {'name': {'value': "Jack Smith"}}, {}) self.assertEqual(changed, False) + changed, newUser = self.moduleUtil.updateUser( + user, + {}, { + 'skip_reconfirmation': {'value': True}, + 'password': {'value': 'super_secret-super_secret'}, + } + ) + + # note: uncheckable parameters dont set changed state + self.assertEqual(changed, False) + self.assertEqual(newUser.skip_reconfirmation, True) + self.assertEqual(newUser.password, 'super_secret-super_secret') + @with_httmock(resp_find_user) @with_httmock(resp_delete_user) def test_delete_user(self): From 89ffb04dfff8995f94e9139e7792a116f6467e14 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 10 Feb 2021 19:19:32 +1300 Subject: [PATCH 0027/3093] Tidy up validate-modules ignores for remote_management/ipmi modules (#1767) * fixed validation-modules for plugins/modules/remote_management/ipmi/ipmi_boot.py * fixed validation-modules for 
plugins/modules/remote_management/ipmi/ipmi_power.py * Tidy up validate-modules ignores for remote_management/ipmi modules --- plugins/modules/remote_management/ipmi/ipmi_boot.py | 6 ++++++ plugins/modules/remote_management/ipmi/ipmi_power.py | 6 ++++++ tests/sanity/ignore-2.10.txt | 4 ---- tests/sanity/ignore-2.11.txt | 4 ---- tests/sanity/ignore-2.9.txt | 4 ---- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/plugins/modules/remote_management/ipmi/ipmi_boot.py b/plugins/modules/remote_management/ipmi/ipmi_boot.py index 6509ca21da..f4bdbb2112 100644 --- a/plugins/modules/remote_management/ipmi/ipmi_boot.py +++ b/plugins/modules/remote_management/ipmi/ipmi_boot.py @@ -19,18 +19,22 @@ options: description: - Hostname or ip address of the BMC. required: true + type: str port: description: - Remote RMCP port. default: 623 + type: int user: description: - Username to use to connect to the BMC. required: true + type: str password: description: - Password to connect to the BMC. required: true + type: str bootdev: description: - Set boot device to use on next reboot @@ -51,6 +55,7 @@ options: - optical - setup - default + type: str state: description: - Whether to ensure that boot devices is desired. @@ -59,6 +64,7 @@ options: - absent -- Request system turn on" default: present choices: [ present, absent ] + type: str persistent: description: - If set, ask that system firmware uses this device beyond next boot. diff --git a/plugins/modules/remote_management/ipmi/ipmi_power.py b/plugins/modules/remote_management/ipmi/ipmi_power.py index 4784015441..8a88679697 100644 --- a/plugins/modules/remote_management/ipmi/ipmi_power.py +++ b/plugins/modules/remote_management/ipmi/ipmi_power.py @@ -19,18 +19,22 @@ options: description: - Hostname or ip address of the BMC. required: true + type: str port: description: - Remote RMCP port. default: 623 + type: int user: description: - Username to use to connect to the BMC. 
required: true + type: str password: description: - Password to connect to the BMC. required: true + type: str state: description: - Whether to ensure that the machine in desired state. @@ -42,10 +46,12 @@ options: - boot -- If system is off, then 'on', else 'reset'" choices: ['on', 'off', shutdown, reset, boot] required: true + type: str timeout: description: - Maximum number of seconds before interrupt request. default: 300 + type: int requirements: - "python >= 2.6" - pyghmi diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 46142bf001..0ee04a42c9 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -241,10 +241,6 @@ plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/imc/imc_rest.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:doc-missing-type -plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:doc-missing-type -plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 46142bf001..0ee04a42c9 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -241,10 +241,6 @@ plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter 
plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/imc/imc_rest.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:doc-missing-type -plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:doc-missing-type -plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 604672ab19..f746a78949 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -206,10 +206,6 @@ plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/imc/imc_rest.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:doc-missing-type -plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:doc-missing-type -plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type 
plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec From e898e52d1bbb348b1ee8791fd8c0634d400bfc6d Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Wed, 10 Feb 2021 10:52:04 +0300 Subject: [PATCH 0028/3093] azure-pipelines: update container version (#1770) --- .azure-pipelines/azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index cd12a86079..feee3f2c27 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -36,7 +36,7 @@ variables: resources: containers: - container: default - image: quay.io/ansible/azure-pipelines-test-container:1.7.1 + image: quay.io/ansible/azure-pipelines-test-container:1.8.0 pool: Standard From d1e54d2fd17cc4be9dae0027b9e1de69e8baee19 Mon Sep 17 00:00:00 2001 From: quidame Date: Wed, 10 Feb 2021 12:50:43 +0100 Subject: [PATCH 0029/3093] document what filesystem types are supported by 'resizefs' option (#1753) * document what filesystem types are supported by 'resizefs' option * add changelog fragment * remove info about lvol documentation changes in changelog fragment --- .../fragments/1753-document-fstypes-supported-by-resizefs.yml | 3 +++ plugins/modules/system/filesystem.py | 3 ++- plugins/modules/system/lvol.py | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml diff --git a/changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml b/changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml new file mode 100644 index 0000000000..9b1329412c --- /dev/null +++ b/changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - filesystem - remove ``swap`` from list of FS supported by 
``resizefs=yes`` (https://github.com/ansible-collections/community.general/issues/790). diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/system/filesystem.py index e78eec4e86..48c68a35ad 100644 --- a/plugins/modules/system/filesystem.py +++ b/plugins/modules/system/filesystem.py @@ -57,7 +57,8 @@ options: resizefs: description: - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. - - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(vfat), C(swap) filesystems. + - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs) and C(vfat) filesystems. + Attempts to resize other filesystem types will fail. - XFS Will only grow if mounted. Currently, the module is based on commands from C(util-linux) package to perform operations, so resizing of XFS is not supported on FreeBSD systems. diff --git a/plugins/modules/system/lvol.py b/plugins/modules/system/lvol.py index fa50007ebf..852d0f5cd7 100644 --- a/plugins/modules/system/lvol.py +++ b/plugins/modules/system/lvol.py @@ -76,6 +76,8 @@ options: resizefs: description: - Resize the underlying filesystem together with the logical volume. + - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems. + Attempts to resize other filesystem types will fail. 
type: bool default: 'no' notes: From af64c9a432417b6694ae936fb5566fcfba763786 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 11 Feb 2021 07:42:40 +1300 Subject: [PATCH 0030/3093] xfconf: feature flag to disable facts and deprecation warning (#1747) * Added feature flag to disable facts and its associated deprecatoin warning * added changelog fragment * Update plugins/modules/system/xfconf.py Co-authored-by: Felix Fontein * Update plugins/modules/system/xfconf.py Co-authored-by: Felix Fontein * Update plugins/modules/system/xfconf.py Co-authored-by: Felix Fontein * Fixed deprecation message * Fixed changelog frag * Update changelogs/fragments/1475-xfconf-facts.yml * Update plugins/modules/system/xfconf.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/1475-xfconf-facts.yml | 4 ++++ plugins/modules/system/xfconf.py | 22 ++++++++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/1475-xfconf-facts.yml diff --git a/changelogs/fragments/1475-xfconf-facts.yml b/changelogs/fragments/1475-xfconf-facts.yml new file mode 100644 index 0000000000..cffc6f023e --- /dev/null +++ b/changelogs/fragments/1475-xfconf-facts.yml @@ -0,0 +1,4 @@ +minor_changes: + - xfconf - added option ``disable_facts`` to disable facts and its associated deprecation warning (https://github.com/ansible-collections/community.general/issues/1475). +deprecated_features: + - xfconf - returning output as facts is deprecated, this will be removed in community.general 4.0.0. Please register the task output in a variable and use it instead. You can already switch to the new behavior now by using the new ``disable_facts`` option (https://github.com/ansible-collections/community.general/pull/1747). 
diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index 8d0700ae11..ce85a2ba47 100644 --- a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -57,6 +57,14 @@ options: default: 'no' aliases: ['array'] version_added: 1.0.0 + disable_facts: + description: + - For backward compatibility, output results are also returned as C(ansible_facts), but this behaviour is deprecated + and will be removed in community.general 4.0.0. + - This flag disables the output as facts and also disables the deprecation warning. + type: bool + default: no + version_added: 2.1.0 ''' EXAMPLES = """ @@ -158,13 +166,13 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): elements='str', choices=('int', 'uint', 'bool', 'float', 'double', 'string')), value=dict(required=False, type='list', elements='raw'), force_array=dict(default=False, type='bool', aliases=['array']), + disable_facts=dict(type='bool', default=False), ), required_if=[('state', 'present', ['value', 'value_type'])], required_together=[('value', 'value_type')], supports_check_mode=True, ) - facts_name = "xfconf" default_state = 'present' command = 'xfconf-query' command_args_formats = dict( @@ -178,7 +186,8 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): def update_xfconf_output(self, **kwargs): self.update_output(**kwargs) - self.update_facts(**kwargs) + if not self.module.params['disable_facts']: + self.update_facts(**kwargs) def __init_module__(self): self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.module.params['property'], @@ -187,6 +196,15 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): self.update_xfconf_output(property=self.module.params['property'], channel=self.module.params['channel'], previous_value=None) + if not self.module.params['disable_facts']: + self.facts_name = "xfconf" + self.module.deprecate( + msg="Returning results as facts is deprecated. 
" + "Please register the module output to a variable instead." + " You can use the disable_facts option to switch to the " + "new behavior already now and disable this warning", + version="4.0.0", collection_name="community.general" + ) def process_command_output(self, rc, out, err): if err.rstrip() == self.does_not: From 9aef0ed17e0072b82408243eb2a4bc79f2c26d98 Mon Sep 17 00:00:00 2001 From: Anatoly Pugachev Date: Wed, 10 Feb 2021 21:44:09 +0300 Subject: [PATCH 0031/3093] timezone: add gentoo and alpine linux support (#1722) * add alpine linux as NosystemdTimezone * syntax error fix and more self variables, so it works now... * use timezone name instead of timezone path for setup-timezone command * alpine linux zoneinfo links to /etc/zoneinfo instead of /usr/share/zoneinfo, so correct re.search() pattern * add changelog fragment * add gentoo linux support * Update 1722_timezone.yml * refactor code --- changelogs/fragments/1722_timezone.yml | 2 ++ plugins/modules/system/timezone.py | 17 ++++++++++++----- 2 files changed, 14 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/1722_timezone.yml diff --git a/changelogs/fragments/1722_timezone.yml b/changelogs/fragments/1722_timezone.yml new file mode 100644 index 0000000000..cae337effd --- /dev/null +++ b/changelogs/fragments/1722_timezone.yml @@ -0,0 +1,2 @@ +minor_changes: +- timezone - add Gentoo and Alpine Linux support (https://github.com/ansible-collections/community.general/issues/781). diff --git a/plugins/modules/system/timezone.py b/plugins/modules/system/timezone.py index d10dd9bb8b..18f8bd418b 100644 --- a/plugins/modules/system/timezone.py +++ b/plugins/modules/system/timezone.py @@ -355,20 +355,26 @@ class NosystemdTimezone(Timezone): # Validate given timezone if 'name' in self.value: tzfile = self._verify_timezone() + planned_tz = self.value['name']['planned'] # `--remove-destination` is needed if /etc/localtime is a symlink so # that it overwrites it instead of following it. 
self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)] self.update_hwclock = self.module.get_bin_path('hwclock', required=True) + distribution = get_distribution() + self.conf_files['name'] = '/etc/timezone' + self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE) + self.tzline_format = '%s\n' # Distribution-specific configurations if self.module.get_bin_path('dpkg-reconfigure') is not None: # Debian/Ubuntu if 'name' in self.value: self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile), '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)] - self.conf_files['name'] = '/etc/timezone' self.conf_files['hwclock'] = '/etc/default/rcS' - self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE) - self.tzline_format = '%s\n' + elif distribution == 'Alpine' or distribution == 'Gentoo': + self.conf_files['hwclock'] = '/etc/conf.d/hwclock' + if distribution == 'Alpine': + self.update_timezone = ['%s -z %s' % (self.module.get_bin_path('setup-timezone', required=True), planned_tz)] else: # RHEL/CentOS/SUSE if self.module.get_bin_path('tzdata-update') is not None: @@ -386,7 +392,6 @@ class NosystemdTimezone(Timezone): except IOError as err: if self._allow_ioerror(err, 'name'): # If the config file doesn't exist detect the distribution and set regexps. - distribution = get_distribution() if distribution == 'SuSE': # For SUSE self.regexps['name'] = self.dist_regexps['SuSE'] @@ -536,7 +541,9 @@ class NosystemdTimezone(Timezone): # to other zone files, so it's hard to get which TZ is actually set # if we follow the symlink. 
path = os.readlink('/etc/localtime') - linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE) + # most linuxes has it in /usr/share/zoneinfo + # alpine linux links under /etc/zoneinfo + linktz = re.search(r'(?:/(?:usr/share|etc)/zoneinfo/)(.*)', path, re.MULTILINE) if linktz: valuelink = linktz.group(1) if valuelink != planned: From 93e0aa7557232ebb1636153b22e1a1091754e8c1 Mon Sep 17 00:00:00 2001 From: lukurde <47138492+lukurde@users.noreply.github.com> Date: Wed, 10 Feb 2021 19:47:09 +0100 Subject: [PATCH 0032/3093] redfish module_utils: case insesitive search for situations where the hostname/FQDN on iLO is in caps (#1744) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * case insesitive search for situations where the hostname/FQDN on iLO is in caps * handle explicit and implicit nic_addr variable, added changelog fragment * changelog linter fix * changelog typo fix Co-authored-by: Łukasz Kurdziel --- .../fragments/1744-case-insensitive-hostname-fqdn-matching.yml | 2 ++ plugins/module_utils/redfish_utils.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml diff --git a/changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml b/changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml new file mode 100644 index 0000000000..0e9c086b96 --- /dev/null +++ b/changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_config - case insensitive search for situations where the hostname/FQDN case on iLO doesn't match variable's case (https://github.com/ansible-collections/community.general/pull/1744). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 8f14dbad78..01b1f9a29f 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -2632,7 +2632,7 @@ class RedfishUtils(object): if response['ret'] is False: return response data = response['data'] - if '"' + nic_addr + '"' in str(data) or "'" + nic_addr + "'" in str(data): + if '"' + nic_addr.lower() + '"' in str(data).lower() or "'" + nic_addr.lower() + "'" in str(data).lower(): target_ethernet_uri = uri target_ethernet_current_setting = data break From 562ff7efb7e6c69bfaa4c534693c70425cdc1e6c Mon Sep 17 00:00:00 2001 From: topperharly Date: Wed, 10 Feb 2021 22:07:27 +0100 Subject: [PATCH 0033/3093] only set param features when variable is not empty (#1763) * only set param features when variable is not empty * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Topper Harly Co-authored-by: Felix Fontein --- .../816-only-invocate-feature-when-variable-is-set.yml | 2 ++ plugins/modules/cloud/misc/proxmox.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml diff --git a/changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml b/changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml new file mode 100644 index 0000000000..7d48c77298 --- /dev/null +++ b/changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml @@ -0,0 +1,2 @@ +bugfixes: + - proxmox lxc - only add the features flag when module parameter ``features`` is set. Before an empty string was send to proxmox in case the parameter was not used, which required to use ``root@pam`` for module execution (https://github.com/ansible-collections/community.general/pull/1763). 
diff --git a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py index d3cfda3d92..c4cfd9b5e7 100644 --- a/plugins/modules/cloud/misc/proxmox.py +++ b/plugins/modules/cloud/misc/proxmox.py @@ -622,7 +622,7 @@ def main(): searchdomain=module.params['searchdomain'], force=int(module.params['force']), pubkey=module.params['pubkey'], - features=",".join(module.params['features'] or []), + features=",".join(module.params['features']) if module.params['features'] is not None else None, unprivileged=int(module.params['unprivileged']), description=module.params['description'], hookscript=module.params['hookscript']) From 9a6031ab4ee00de1caa14e329d15699c1c72cf9f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 11 Feb 2021 07:04:45 +0100 Subject: [PATCH 0034/3093] Fix typo in aerospike_migration module. (#1740) --- changelogs/fragments/1740-aerospike_migration.yml | 2 ++ plugins/modules/database/aerospike/aerospike_migrations.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1740-aerospike_migration.yml diff --git a/changelogs/fragments/1740-aerospike_migration.yml b/changelogs/fragments/1740-aerospike_migration.yml new file mode 100644 index 0000000000..e66963aae7 --- /dev/null +++ b/changelogs/fragments/1740-aerospike_migration.yml @@ -0,0 +1,2 @@ +bugfixes: +- "aerospike_migration - fix typo that caused ``migrate_tx_key`` instead of ``migrate_rx_key`` being used (https://github.com/ansible-collections/community.general/pull/1739)." 
diff --git a/plugins/modules/database/aerospike/aerospike_migrations.py b/plugins/modules/database/aerospike/aerospike_migrations.py index 27bfc1a95b..bf6ac60e8f 100644 --- a/plugins/modules/database/aerospike/aerospike_migrations.py +++ b/plugins/modules/database/aerospike/aerospike_migrations.py @@ -338,7 +338,7 @@ class Migrations: namespace_tx = \ int(namespace_stats[self.module.params['migrate_tx_key']]) namespace_rx = \ - int(namespace_stats[self.module.params['migrate_tx_key']]) + int(namespace_stats[self.module.params['migrate_rx_key']]) except KeyError: self.module.fail_json( msg="Did not find partition remaining key:" + From e9551df5ed8000f5b13ae14d22dfef24be829b5f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 11 Feb 2021 10:24:58 +0100 Subject: [PATCH 0035/3093] Update CI (#1782) * Update targets for CI for devel branch; move some targets to stable-2.10. * Skipping test on RHEL 8.3 (it is already skipped on RHEL 8.2). * Linting. * Shut 2.9/2.10 pylint complaints up. 
--- .azure-pipelines/azure-pipelines.yml | 22 ++++++++++--------- plugins/inventory/proxmox.py | 6 ++--- plugins/inventory/virtualbox.py | 2 +- plugins/module_utils/cloud.py | 2 +- plugins/module_utils/module_helper.py | 2 +- .../modules/cloud/ovirt/ovirt_tag_facts.py | 8 ++----- plugins/modules/packaging/os/openbsd_pkg.py | 2 +- scripts/inventory/freeipa.py | 4 ++-- scripts/inventory/lxc_inventory.py | 2 +- .../integration/targets/django_manage/aliases | 1 + tests/sanity/ignore-2.10.txt | 1 + tests/sanity/ignore-2.9.txt | 1 + .../plugins/modules/monitoring/test_monit.py | 2 +- .../interfaces_file/test_interfaces_file.py | 2 +- 14 files changed, 28 insertions(+), 29 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index feee3f2c27..61569b0c2d 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -140,16 +140,12 @@ stages: parameters: testFormat: devel/{0} targets: - - name: OS X 10.11 - test: osx/10.11 - - name: macOS 10.15 - test: macos/10.15 - name: macOS 11.1 test: macos/11.1 - - name: RHEL 7.8 - test: rhel/7.8 - - name: RHEL 8.2 - test: rhel/8.2 + - name: RHEL 7.9 + test: rhel/7.9 + - name: RHEL 8.3 + test: rhel/8.3 - name: FreeBSD 11.4 test: freebsd/11.4 - name: FreeBSD 12.2 @@ -166,8 +162,14 @@ stages: parameters: testFormat: 2.10/{0} targets: + - name: OS X 10.11 + test: osx/10.11 + - name: macOS 10.15 + test: macos/10.15 - name: macOS 11.1 test: macos/11.1 + - name: RHEL 7.8 + test: rhel/7.8 - name: RHEL 8.2 test: rhel/8.2 - name: FreeBSD 12.1 @@ -214,8 +216,6 @@ stages: test: opensuse15py2 - name: openSUSE 15 py3 test: opensuse15 - - name: Ubuntu 16.04 - test: ubuntu1604 - name: Ubuntu 18.04 test: ubuntu1804 - name: Ubuntu 20.04 @@ -238,6 +238,8 @@ stages: test: fedora32 - name: openSUSE 15 py3 test: opensuse15 + - name: Ubuntu 16.04 + test: ubuntu1604 groups: - 2 - 3 diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 
60623cd2f6..da727b7a20 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -226,10 +226,8 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')): value = ('disk_image=' + value) - if isinstance(value, int) or ',' not in value: - value = value - # split off strings with commas to a dict - else: + if not (isinstance(value, int) or ',' not in value): + # split off strings with commas to a dict # skip over any keys that cannot be processed try: value = dict(key.split("=") for key in value.split(",")) diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 683e4ddd69..3827aa0d1a 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -216,7 +216,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): yield host not in v yield True - return all([found_host for found_host in find_host(host, inventory)]) + return all(find_host(host, inventory)) def verify_file(self, path): diff --git a/plugins/module_utils/cloud.py b/plugins/module_utils/cloud.py index 33b3308452..d90d1f5234 100644 --- a/plugins/module_utils/cloud.py +++ b/plugins/module_utils/cloud.py @@ -130,7 +130,7 @@ class CloudRetry(object): try: return f(*args, **kwargs) except Exception as e: - if isinstance(e, cls.base_class): + if isinstance(e, cls.base_class): # pylint: disable=isinstance-second-argument-not-valid-type response_code = cls.status_code_from_exception(e) if cls.found(response_code, catch_extra_error_codes): msg = "{0}: Retrying in {1} seconds...".format(str(e), delay) diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index 42fb17d210..f35db8283d 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -284,7 +284,7 @@ class CmdMixin(object): def _calculate_args(self, extra_params=None, params=None): def 
add_arg_formatted_param(_cmd_args, arg_format, _value): - args = [x for x in arg_format.to_text(_value)] + args = list(arg_format.to_text(_value)) return _cmd_args + args def find_format(_param): diff --git a/plugins/modules/cloud/ovirt/ovirt_tag_facts.py b/plugins/modules/cloud/ovirt/ovirt_tag_facts.py index c6e9b7441a..a6ce97dd42 100644 --- a/plugins/modules/cloud/ovirt/ovirt_tag_facts.py +++ b/plugins/modules/cloud/ovirt/ovirt_tag_facts.py @@ -137,17 +137,13 @@ def main(): host = search_by_name(hosts_service, module.params['host']) if host is None: raise Exception("Host '%s' was not found." % module.params['host']) - tags.extend([ - tag for tag in hosts_service.host_service(host.id).tags_service().list() - ]) + tags.extend(hosts_service.host_service(host.id).tags_service().list()) if module.params['vm']: vms_service = connection.system_service().vms_service() vm = search_by_name(vms_service, module.params['vm']) if vm is None: raise Exception("Vm '%s' was not found." % module.params['vm']) - tags.extend([ - tag for tag in vms_service.vm_service(vm.id).tags_service().list() - ]) + tags.extend(vms_service.vm_service(vm.id).tags_service().list()) if not (module.params['vm'] or module.params['host'] or module.params['name']): tags = all_tags diff --git a/plugins/modules/packaging/os/openbsd_pkg.py b/plugins/modules/packaging/os/openbsd_pkg.py index 7432c48a63..61e2a5e52b 100644 --- a/plugins/modules/packaging/os/openbsd_pkg.py +++ b/plugins/modules/packaging/os/openbsd_pkg.py @@ -164,7 +164,7 @@ def get_package_state(names, pkg_spec, module): if stdout: # If the requested package name is just a stem, like "python", we may # find multiple packages with that name. 
- pkg_spec[name]['installed_names'] = [installed_name for installed_name in stdout.splitlines()] + pkg_spec[name]['installed_names'] = stdout.splitlines() module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names']) pkg_spec[name]['installed_state'] = True else: diff --git a/scripts/inventory/freeipa.py b/scripts/inventory/freeipa.py index 4a5bf67883..f7ffe1d223 100644 --- a/scripts/inventory/freeipa.py +++ b/scripts/inventory/freeipa.py @@ -67,10 +67,10 @@ def list_groups(api): members = [] if 'member_host' in hostgroup: - members = [host for host in hostgroup['member_host']] + members = list(hostgroup['member_host']) if 'memberindirect_host' in hostgroup: members += (host for host in hostgroup['memberindirect_host']) - inventory[hostgroup['cn'][0]] = {'hosts': [host for host in members]} + inventory[hostgroup['cn'][0]] = {'hosts': list(members)} for member in members: hostvars[member] = {} diff --git a/scripts/inventory/lxc_inventory.py b/scripts/inventory/lxc_inventory.py index 00de15c2be..5a40b40837 100644 --- a/scripts/inventory/lxc_inventory.py +++ b/scripts/inventory/lxc_inventory.py @@ -38,7 +38,7 @@ def build_dict(): for c in lxc.list_containers()]) # Extract the groups, flatten the list, and remove duplicates - groups = set(sum([g for g in containers.values()], [])) + groups = set(sum(containers.values(), [])) # Create a dictionary for each group (including the 'all' group return dict([(g, {'hosts': [k for k, v in containers.items() if g in v], diff --git a/tests/integration/targets/django_manage/aliases b/tests/integration/targets/django_manage/aliases index fcd92e1a91..e9c002109c 100644 --- a/tests/integration/targets/django_manage/aliases +++ b/tests/integration/targets/django_manage/aliases @@ -4,3 +4,4 @@ skip/freebsd skip/macos skip/osx skip/rhel8.2 +skip/rhel8.3 diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 0ee04a42c9..cee8ab22ca 100644 --- a/tests/sanity/ignore-2.10.txt +++ 
b/tests/sanity/ignore-2.10.txt @@ -1,5 +1,6 @@ plugins/callback/hipchat.py pylint:blacklisted-name plugins/connection/lxc.py pylint:blacklisted-name +plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time plugins/module_utils/compat/ipaddress.py no-assert plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index f746a78949..c14feab0c5 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -1,5 +1,6 @@ plugins/callback/hipchat.py pylint:blacklisted-name plugins/connection/lxc.py pylint:blacklisted-name +plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time plugins/module_utils/compat/ipaddress.py no-assert plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate diff --git a/tests/unit/plugins/modules/monitoring/test_monit.py b/tests/unit/plugins/modules/monitoring/test_monit.py index 1d30812efe..f0d6ac4935 100644 --- a/tests/unit/plugins/modules/monitoring/test_monit.py +++ b/tests/unit/plugins/modules/monitoring/test_monit.py @@ -97,7 +97,7 @@ class MonitTest(unittest.TestCase): self.monit.wait_for_monit_to_stop_pending() -@pytest.mark.parametrize('status_name', [name for name in monit.StatusValue.ALL_STATUS]) +@pytest.mark.parametrize('status_name', monit.StatusValue.ALL_STATUS) def test_status_value(status_name): value = getattr(monit.StatusValue, status_name.upper()) status = monit.StatusValue(value) diff --git a/tests/unit/plugins/modules/system/interfaces_file/test_interfaces_file.py b/tests/unit/plugins/modules/system/interfaces_file/test_interfaces_file.py index a96737c7a8..c39842f5a1 100644 --- a/tests/unit/plugins/modules/system/interfaces_file/test_interfaces_file.py +++ 
b/tests/unit/plugins/modules/system/interfaces_file/test_interfaces_file.py @@ -60,7 +60,7 @@ class TestInterfacesFileModule(unittest.TestCase): tofile=os.path.basename(backup)) # Restore backup move(backup, path) - deltas = [d for d in diffs] + deltas = list(diffs) self.assertTrue(len(deltas) == 0) def compareInterfacesLinesToFile(self, interfaces_lines, path, testname=None): From 436bbb0077e21a68f4e226662362516e2e8404ba Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 12 Feb 2021 18:13:05 +1300 Subject: [PATCH 0036/3093] Prevented the expansion of parameters in run_command() (#1794) --- .../fragments/1776-git_config-tilde_value.yml | 2 ++ plugins/modules/source_control/git_config.py | 10 +++--- .../targets/git_config/tasks/main.yml | 2 ++ .../git_config/tasks/set_value_with_tilde.yml | 33 +++++++++++++++++++ 4 files changed, 41 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/1776-git_config-tilde_value.yml create mode 100644 tests/integration/targets/git_config/tasks/set_value_with_tilde.yml diff --git a/changelogs/fragments/1776-git_config-tilde_value.yml b/changelogs/fragments/1776-git_config-tilde_value.yml new file mode 100644 index 0000000000..c98912a24d --- /dev/null +++ b/changelogs/fragments/1776-git_config-tilde_value.yml @@ -0,0 +1,2 @@ +bugfixes: + - git_config - prevent ``run_command`` from expanding values (https://github.com/ansible-collections/community.general/issues/1776). 
diff --git a/plugins/modules/source_control/git_config.py b/plugins/modules/source_control/git_config.py index 21c3af6802..cbc8219cf0 100644 --- a/plugins/modules/source_control/git_config.py +++ b/plugins/modules/source_control/git_config.py @@ -157,7 +157,6 @@ config_values: import os from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote def main(): @@ -230,7 +229,7 @@ def main(): # Run from root directory to avoid accidentally picking up any local config settings dir = "/" - (rc, out, err) = module.run_command(' '.join(args), cwd=dir) + (rc, out, err) = module.run_command(args, cwd=dir, expand_user_and_vars=False) if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err: # This just means nothing has been set at the given scope module.exit_json(changed=False, msg='', config_values={}) @@ -259,15 +258,14 @@ def main(): args.insert(len(args) - 1, "--" + unset) cmd = args else: - new_value_quoted = shlex_quote(new_value) - cmd = args + [new_value_quoted] + cmd = args + [new_value] try: # try using extra parameter from ansible-base 2.10.4 onwards - (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False) + (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False, expand_user_and_vars=False) except TypeError: # @TODO remove try/except when community.general drop support for 2.10.x if not os.path.isdir(dir): module.fail_json(msg="Cannot find directory '{0}'".format(dir)) - (rc, out, err) = module.run_command(cmd, cwd=dir) + (rc, out, err) = module.run_command(cmd, cwd=dir, expand_user_and_vars=False) if err: module.fail_json(rc=rc, msg=err, cmd=cmd) diff --git a/tests/integration/targets/git_config/tasks/main.yml b/tests/integration/targets/git_config/tasks/main.yml index 36eee37013..74127eb5c6 100644 --- a/tests/integration/targets/git_config/tasks/main.yml +++ b/tests/integration/targets/git_config/tasks/main.yml @@ -24,5 +24,7 @@ - import_tasks: 
precedence_between_unset_and_value.yml # testing state=absent with check mode - import_tasks: unset_check_mode.yml + # testing for case in issue #1776 + - import_tasks: set_value_with_tilde.yml when: git_installed is succeeded and git_version.stdout is version(git_version_supporting_includes, ">=") ... diff --git a/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml b/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml new file mode 100644 index 0000000000..f55eb73066 --- /dev/null +++ b/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml @@ -0,0 +1,33 @@ +--- +#- import_tasks: setup_no_value.yml + +- name: setting value + git_config: + name: core.hooksPath + value: '~/foo/bar' + state: present + scope: global + register: set_result + +- name: setting value again + git_config: + name: core.hooksPath + value: '~/foo/bar' + state: present + scope: global + register: set_result2 + +- name: getting value + git_config: + name: core.hooksPath + scope: global + register: get_result + +- name: assert set changed and value is correct + assert: + that: + - set_result is changed + - set_result2 is not changed + - get_result is not changed + - get_result.config_value == '~/foo/bar' +... 
From ba4a98b1be31699dc6b467ed20324c9407a7bdf9 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 12 Feb 2021 18:47:23 +1300 Subject: [PATCH 0037/3093] Removed parameter-list-no-elements validation errors from redfish modules (#1761) * Removed parameter-list-no-elements validation errors from redfish modules * added changelog fragment per PR * Update changelogs/fragments/1761-redfish-tidy-up-validation.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/1761-redfish-tidy-up-validation.yml | 2 ++ .../remote_management/redfish/idrac_redfish_command.py | 3 ++- .../remote_management/redfish/idrac_redfish_config.py | 3 ++- .../modules/remote_management/redfish/idrac_redfish_info.py | 3 ++- .../modules/remote_management/redfish/redfish_command.py | 3 ++- plugins/modules/remote_management/redfish/redfish_config.py | 3 ++- plugins/modules/remote_management/redfish/redfish_info.py | 6 ++++-- tests/sanity/ignore-2.10.txt | 6 ------ tests/sanity/ignore-2.11.txt | 6 ------ 9 files changed, 16 insertions(+), 19 deletions(-) create mode 100644 changelogs/fragments/1761-redfish-tidy-up-validation.yml diff --git a/changelogs/fragments/1761-redfish-tidy-up-validation.yml b/changelogs/fragments/1761-redfish-tidy-up-validation.yml new file mode 100644 index 0000000000..751c7ca30d --- /dev/null +++ b/changelogs/fragments/1761-redfish-tidy-up-validation.yml @@ -0,0 +1,2 @@ +minor_changes: + - redfish modules - explicitly setting lists' elements to ``str`` (https://github.com/ansible-collections/community.general/pull/1761). 
diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_command.py b/plugins/modules/remote_management/redfish/idrac_redfish_command.py index ea97ecdcd5..fb32248271 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_command.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_command.py @@ -26,6 +26,7 @@ options: description: - List of commands to execute on OOB controller type: list + elements: str baseuri: required: true description: @@ -134,7 +135,7 @@ def main(): module = AnsibleModule( argument_spec=dict( category=dict(required=True), - command=dict(required=True, type='list'), + command=dict(required=True, type='list', elements='str'), baseuri=dict(required=True), username=dict(required=True), password=dict(required=True, no_log=True), diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/plugins/modules/remote_management/redfish/idrac_redfish_config.py index 485d54cd78..9248da2030 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_config.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_config.py @@ -29,6 +29,7 @@ options: I(SetSystemAttributes) are mutually exclusive commands when C(category) is I(Manager) type: list + elements: str baseuri: required: true description: @@ -245,7 +246,7 @@ def main(): module = AnsibleModule( argument_spec=dict( category=dict(required=True), - command=dict(required=True, type='list'), + command=dict(required=True, type='list', elements='str'), baseuri=dict(required=True), username=dict(required=True), password=dict(required=True, no_log=True), diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/plugins/modules/remote_management/redfish/idrac_redfish_info.py index f5b7fe1af9..9044aa56bf 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_info.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_info.py @@ -30,6 +30,7 @@ options: - C(GetManagerAttributes) returns the list 
of dicts containing iDRAC, LifecycleController and System attributes type: list + elements: str baseuri: required: true description: @@ -171,7 +172,7 @@ def main(): module = AnsibleModule( argument_spec=dict( category=dict(required=True), - command=dict(required=True, type='list'), + command=dict(required=True, type='list', elements='str'), baseuri=dict(required=True), username=dict(required=True), password=dict(required=True, no_log=True), diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index 78007f1de9..d409c0e023 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -28,6 +28,7 @@ options: description: - List of commands to execute on OOB controller type: list + elements: str baseuri: required: true description: @@ -550,7 +551,7 @@ def main(): module = AnsibleModule( argument_spec=dict( category=dict(required=True), - command=dict(required=True, type='list'), + command=dict(required=True, type='list', elements='str'), baseuri=dict(required=True), username=dict(required=True), password=dict(required=True, no_log=True), diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py index 26b692a6f3..60612da35f 100644 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ b/plugins/modules/remote_management/redfish/redfish_config.py @@ -27,6 +27,7 @@ options: description: - List of commands to execute on OOB controller type: list + elements: str baseuri: required: true description: @@ -228,7 +229,7 @@ def main(): module = AnsibleModule( argument_spec=dict( category=dict(required=True), - command=dict(required=True, type='list'), + command=dict(required=True, type='list', elements='str'), baseuri=dict(required=True), username=dict(required=True), password=dict(required=True, no_log=True), diff --git 
a/plugins/modules/remote_management/redfish/redfish_info.py b/plugins/modules/remote_management/redfish/redfish_info.py index cfdb1aef5b..7bf209b7f6 100644 --- a/plugins/modules/remote_management/redfish/redfish_info.py +++ b/plugins/modules/remote_management/redfish/redfish_info.py @@ -24,11 +24,13 @@ options: - List of categories to execute on OOB controller default: ['Systems'] type: list + elements: str command: required: false description: - List of commands to execute on OOB controller type: list + elements: str baseuri: required: true description: @@ -296,8 +298,8 @@ def main(): category_list = [] module = AnsibleModule( argument_spec=dict( - category=dict(type='list', default=['Systems']), - command=dict(type='list'), + category=dict(type='list', elements='str', default=['Systems']), + command=dict(type='list', elements='str'), baseuri=dict(required=True), username=dict(required=True), password=dict(required=True, no_log=True), diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index cee8ab22ca..6297878d97 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -290,12 +290,6 @@ plugins/modules/remote_management/oneview/oneview_san_manager.py validate-module plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:undocumented-parameter plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/redfish/idrac_redfish_command.py validate-modules:parameter-list-no-elements -plugins/modules/remote_management/redfish/idrac_redfish_config.py validate-modules:parameter-list-no-elements -plugins/modules/remote_management/redfish/idrac_redfish_info.py validate-modules:parameter-list-no-elements -plugins/modules/remote_management/redfish/redfish_command.py validate-modules:parameter-list-no-elements 
-plugins/modules/remote_management/redfish/redfish_config.py validate-modules:parameter-list-no-elements -plugins/modules/remote_management/redfish/redfish_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 0ee04a42c9..495ce2de09 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -289,12 +289,6 @@ plugins/modules/remote_management/oneview/oneview_san_manager.py validate-module plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:undocumented-parameter plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/redfish/idrac_redfish_command.py validate-modules:parameter-list-no-elements -plugins/modules/remote_management/redfish/idrac_redfish_config.py validate-modules:parameter-list-no-elements -plugins/modules/remote_management/redfish/idrac_redfish_info.py validate-modules:parameter-list-no-elements -plugins/modules/remote_management/redfish/redfish_command.py validate-modules:parameter-list-no-elements -plugins/modules/remote_management/redfish/redfish_config.py validate-modules:parameter-list-no-elements -plugins/modules/remote_management/redfish/redfish_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter 
plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc From f12df1d21bd4711ef21116ec7182b794e3153cf9 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 12 Feb 2021 07:58:30 +0100 Subject: [PATCH 0038/3093] Change type str -> path in argspec. (#1741) --- changelogs/fragments/1741-use-path-argspec.yml | 4 ++++ plugins/doc_fragments/oracle.py | 2 +- plugins/module_utils/oracle/oci_utils.py | 2 +- plugins/modules/cloud/lxd/lxd_container.py | 8 ++++---- plugins/modules/cloud/lxd/lxd_profile.py | 8 ++++---- 5 files changed, 14 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/1741-use-path-argspec.yml diff --git a/changelogs/fragments/1741-use-path-argspec.yml b/changelogs/fragments/1741-use-path-argspec.yml new file mode 100644 index 0000000000..ed05fee16a --- /dev/null +++ b/changelogs/fragments/1741-use-path-argspec.yml @@ -0,0 +1,4 @@ +minor_changes: +- "oci_vcn - ``api_user_key_file`` is now of type ``path`` and no longer ``str``. A side effect is that certain expansions are made, like ``~`` is replaced by the user's home directory, and environment variables like ``$HOME`` or ``$TEMP`` are evaluated (https://github.com/ansible-collections/community.general/pull/1741)." +- "lxd_container - ``client_key`` and ``client_cert`` are now of type ``path`` and no longer ``str``. A side effect is that certain expansions are made, like ``~`` is replaced by the user's home directory, and environment variables like ``$HOME`` or ``$TEMP`` are evaluated (https://github.com/ansible-collections/community.general/pull/1741)." +- "lxd_profile - ``client_key`` and ``client_cert`` are now of type ``path`` and no longer ``str``. A side effect is that certain expansions are made, like ``~`` is replaced by the user's home directory, and environment variables like ``$HOME`` or ``$TEMP`` are evaluated (https://github.com/ansible-collections/community.general/pull/1741)." 
diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py index 776c8f52f1..5ad04a2220 100644 --- a/plugins/doc_fragments/oracle.py +++ b/plugins/doc_fragments/oracle.py @@ -47,7 +47,7 @@ class ModuleDocFragment(object): OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is not specified through a configuration file (See C(config_file_location)). If the key is encrypted with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided. - type: str + type: path api_user_key_pass_phrase: description: - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py index 72a872fcb3..973d68845e 100644 --- a/plugins/module_utils/oracle/oci_utils.py +++ b/plugins/module_utils/oracle/oci_utils.py @@ -90,7 +90,7 @@ def get_common_arg_spec(supports_create=False, supports_wait=False): config_profile_name=dict(type="str", default="DEFAULT"), api_user=dict(type="str"), api_user_fingerprint=dict(type="str", no_log=True), - api_user_key_file=dict(type="str"), + api_user_key_file=dict(type="path"), api_user_key_pass_phrase=dict(type="str", no_log=True), auth_type=dict( type="str", diff --git a/plugins/modules/cloud/lxd/lxd_container.py b/plugins/modules/cloud/lxd/lxd_container.py index 119387f97b..1e73c5cf12 100644 --- a/plugins/modules/cloud/lxd/lxd_container.py +++ b/plugins/modules/cloud/lxd/lxd_container.py @@ -132,14 +132,14 @@ options: - If not specified, it defaults to C(${HOME}/.config/lxc/client.key). required: false aliases: [ key_file ] - type: str + type: path client_cert: description: - The client certificate file path. - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt). required: false aliases: [ cert_file ] - type: str + type: path trust_password: description: - The client trusted password. 
@@ -690,11 +690,11 @@ def main(): default='unix:/var/snap/lxd/common/lxd/unix.socket' ), client_key=dict( - type='str', + type='path', aliases=['key_file'] ), client_cert=dict( - type='str', + type='path', aliases=['cert_file'] ), trust_password=dict(type='str', no_log=True) diff --git a/plugins/modules/cloud/lxd/lxd_profile.py b/plugins/modules/cloud/lxd/lxd_profile.py index ccd74d42f9..9a119d26a2 100644 --- a/plugins/modules/cloud/lxd/lxd_profile.py +++ b/plugins/modules/cloud/lxd/lxd_profile.py @@ -79,14 +79,14 @@ options: - If not specified, it defaults to C($HOME/.config/lxc/client.key). required: false aliases: [ key_file ] - type: str + type: path client_cert: description: - The client certificate file path. - If not specified, it defaults to C($HOME/.config/lxc/client.crt). required: false aliases: [ cert_file ] - type: str + type: path trust_password: description: - The client trusted password. @@ -384,11 +384,11 @@ def main(): default='unix:/var/snap/lxd/common/lxd/unix.socket' ), client_key=dict( - type='str', + type='path', aliases=['key_file'] ), client_cert=dict( - type='str', + type='path', aliases=['cert_file'] ), trust_password=dict(type='str', no_log=True) From 0a5f79724c5b63ec099e6f46cbf6286a2d00bb18 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 13 Feb 2021 01:01:52 +1300 Subject: [PATCH 0039/3093] Improved parameter handling on proxmox modules (#1765) * Improved parameter handling on proxmox modules * removed unused imports * rollback change in plugins/modules/cloud/misc/proxmox_user_info.py * added changelog fragment * Update changelogs/fragments/1765-proxmox-params.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/1765-proxmox-params.yml | 2 ++ plugins/modules/cloud/misc/proxmox.py | 20 ++++++---------- plugins/modules/cloud/misc/proxmox_kvm.py | 24 +++++++------------ plugins/modules/cloud/misc/proxmox_snap.py | 13 +++------- 
.../modules/cloud/misc/proxmox_template.py | 19 ++++++--------- 5 files changed, 28 insertions(+), 50 deletions(-) create mode 100644 changelogs/fragments/1765-proxmox-params.yml diff --git a/changelogs/fragments/1765-proxmox-params.yml b/changelogs/fragments/1765-proxmox-params.yml new file mode 100644 index 0000000000..fd6d63c788 --- /dev/null +++ b/changelogs/fragments/1765-proxmox-params.yml @@ -0,0 +1,2 @@ +bugfixes: + - proxmox* modules - refactored some parameter validation code into use of ``env_fallback``, ``required_if``, ``required_together``, ``required_one_of`` (https://github.com/ansible-collections/community.general/pull/1765). diff --git a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py index c4cfd9b5e7..4f495da34e 100644 --- a/plugins/modules/cloud/misc/proxmox.py +++ b/plugins/modules/cloud/misc/proxmox.py @@ -345,7 +345,6 @@ EXAMPLES = r''' state: absent ''' -import os import time import traceback from distutils.version import LooseVersion @@ -356,7 +355,7 @@ try: except ImportError: HAS_PROXMOXER = False -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible.module_utils._text import to_native @@ -481,7 +480,7 @@ def main(): module = AnsibleModule( argument_spec=dict( api_host=dict(required=True), - api_password=dict(no_log=True), + api_password=dict(no_log=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])), api_token_id=dict(no_log=True), api_token_secret=dict(no_log=True), api_user=dict(required=True), @@ -514,7 +513,10 @@ def main(): description=dict(type='str'), hookscript=dict(type='str'), proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']), - ) + ), + required_if=[('state', 'present', ['node', 'hostname', 'password', 'ostemplate'])], + required_together=[('api_token_id', 'api_token_secret')], + required_one_of=[('api_password', 'api_token_id')], ) if not HAS_PROXMOXER: @@ -561,13 +563,7 @@ def 
main(): module.params[param] = value auth_args = {'user': api_user} - if not (api_token_id and api_token_secret): - # If password not set get it from PROXMOX_PASSWORD env - if not api_password: - try: - api_password = os.environ['PROXMOX_PASSWORD'] - except KeyError as e: - module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') + if not api_token_id: auth_args['password'] = api_password else: auth_args['token_name'] = api_token_id @@ -599,8 +595,6 @@ def main(): # If no vmid was passed, there cannot be another VM named 'hostname' if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']: module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0])) - elif not (node, module.params['hostname'] and module.params['password'] and module.params['ostemplate']): - module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm') elif not node_check(proxmox, node): module.fail_json(msg="node '%s' not exists in cluster" % node) elif not content_check(proxmox, node, module.params['ostemplate'], template_store): diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index c239a8b85d..348725661b 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -769,7 +769,6 @@ status: }' ''' -import os import re import time import traceback @@ -782,7 +781,7 @@ try: except ImportError: HAS_PROXMOXER = False -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible.module_utils._text import to_native @@ -1015,7 +1014,7 @@ def main(): agent=dict(type='bool'), args=dict(type='str'), api_host=dict(required=True), - api_password=dict(no_log=True), + api_password=dict(no_log=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])), 
api_token_id=dict(no_log=True), api_token_secret=dict(no_log=True), api_user=dict(required=True), @@ -1028,12 +1027,12 @@ def main(): cipassword=dict(type='str', no_log=True), citype=dict(type='str', choices=['nocloud', 'configdrive2']), ciuser=dict(type='str'), - clone=dict(type='str', default=None), + clone=dict(type='str'), cores=dict(type='int'), cpu=dict(type='str'), cpulimit=dict(type='int'), cpuunits=dict(type='int'), - delete=dict(type='str', default=None), + delete=dict(type='str'), description=dict(type='str'), digest=dict(type='str'), force=dict(type='bool'), @@ -1056,7 +1055,7 @@ def main(): name=dict(type='str'), nameservers=dict(type='list', elements='str'), net=dict(type='dict'), - newid=dict(type='int', default=None), + newid=dict(type='int'), node=dict(), numa=dict(type='dict'), numa_enabled=dict(type='bool'), @@ -1092,13 +1091,14 @@ def main(): vcpus=dict(type='int'), vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']), virtio=dict(type='dict'), - vmid=dict(type='int', default=None), + vmid=dict(type='int'), watchdog=dict(), proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']), ), mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')], - required_one_of=[('name', 'vmid',)], - required_if=[('state', 'present', ['node'])] + required_together=[('api_token_id', 'api_token_secret')], + required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')], + required_if=[('state', 'present', ['node'])], ) if not HAS_PROXMOXER: @@ -1159,12 +1159,6 @@ def main(): auth_args = {'user': api_user} if not (api_token_id and api_token_secret): - # If password not set get it from PROXMOX_PASSWORD env - if not api_password: - try: - api_password = os.environ['PROXMOX_PASSWORD'] - except KeyError: - module.fail_json(msg='You should set api_password param or use 
PROXMOX_PASSWORD environment variable') auth_args['password'] = api_password else: auth_args['token_name'] = api_token_id diff --git a/plugins/modules/cloud/misc/proxmox_snap.py b/plugins/modules/cloud/misc/proxmox_snap.py index 7cf2f471a6..17c6ef335a 100644 --- a/plugins/modules/cloud/misc/proxmox_snap.py +++ b/plugins/modules/cloud/misc/proxmox_snap.py @@ -31,6 +31,7 @@ options: - The password to authenticate with. - You can use PROXMOX_PASSWORD environment variable. type: str + required: yes hostname: description: - The instance name. @@ -106,7 +107,6 @@ EXAMPLES = r''' RETURN = r'''#''' -import os import time import traceback @@ -118,7 +118,7 @@ except ImportError: PROXMOXER_IMP_ERR = traceback.format_exc() HAS_PROXMOXER = False -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback from ansible.module_utils._text import to_native @@ -182,7 +182,7 @@ def main(): argument_spec=dict( api_host=dict(required=True), api_user=dict(required=True), - api_password=dict(no_log=True), + api_password=dict(no_log=True, required=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])), vmid=dict(required=False), validate_certs=dict(type='bool', default='no'), hostname=dict(), @@ -213,13 +213,6 @@ def main(): force = module.params['force'] vmstate = module.params['vmstate'] - # If password not set get it from PROXMOX_PASSWORD env - if not api_password: - try: - api_password = os.environ['PROXMOX_PASSWORD'] - except KeyError as e: - module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable' % to_native(e)) - try: proxmox = setup_api(api_host, api_user, api_password, validate_certs) diff --git a/plugins/modules/cloud/misc/proxmox_template.py b/plugins/modules/cloud/misc/proxmox_template.py index 76228e9a44..d7fb9341e6 100644 --- a/plugins/modules/cloud/misc/proxmox_template.py +++ b/plugins/modules/cloud/misc/proxmox_template.py 
@@ -122,7 +122,7 @@ try: except ImportError: HAS_PROXMOXER = False -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import AnsibleModule, env_fallback def get_template(proxmox, node, storage, content_type, template): @@ -175,7 +175,7 @@ def main(): module = AnsibleModule( argument_spec=dict( api_host=dict(required=True), - api_password=dict(no_log=True), + api_password=dict(no_log=True, fallback=(env_fallback, ['PROXMOX_PASSWORD'])), api_token_id=dict(no_log=True), api_token_secret=dict(no_log=True), api_user=dict(required=True), @@ -188,7 +188,10 @@ def main(): timeout=dict(type='int', default=30), force=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent']), - ) + ), + required_together=[('api_token_id', 'api_token_secret')], + required_one_of=[('api_password', 'api_token_id')], + required_if=[('state', 'absent', ['template'])] ) if not HAS_PROXMOXER: @@ -207,12 +210,6 @@ def main(): auth_args = {'user': api_user} if not (api_token_id and api_token_secret): - # If password not set get it from PROXMOX_PASSWORD env - if not api_password: - try: - api_password = os.environ['PROXMOX_PASSWORD'] - except KeyError as e: - module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') auth_args['password'] = api_password else: auth_args['token_name'] = api_token_id @@ -261,9 +258,7 @@ def main(): content_type = module.params['content_type'] template = module.params['template'] - if not template: - module.fail_json(msg='template param is mandatory') - elif not get_template(proxmox, node, storage, content_type, template): + if not get_template(proxmox, node, storage, content_type, template): module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template)) if delete_template(module, proxmox, node, storage, content_type, template, timeout): From 367c3c43ff205e0263cbaa0637ba068d0d2bf787 Mon Sep 17 
00:00:00 2001 From: Felix Fontein Date: Sat, 13 Feb 2021 11:09:40 +0100 Subject: [PATCH 0040/3093] Make selective callback work with ansible-core 2.11. (#1807) --- changelogs/fragments/selective-core-2.11.yml | 2 ++ plugins/callback/selective.py | 11 ++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/selective-core-2.11.yml diff --git a/changelogs/fragments/selective-core-2.11.yml b/changelogs/fragments/selective-core-2.11.yml new file mode 100644 index 0000000000..994e555c7c --- /dev/null +++ b/changelogs/fragments/selective-core-2.11.yml @@ -0,0 +1,2 @@ +bugfixes: +- "selective callback plugin - adjust import so that the plugin also works with ansible-core 2.11 (https://github.com/ansible-collections/community.general/pull/1807)." diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index e46391d099..71620c18c4 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -41,7 +41,16 @@ import difflib from ansible import constants as C from ansible.plugins.callback import CallbackBase from ansible.module_utils._text import to_text -from ansible.utils.color import codeCodes + +try: + codeCodes = C.COLOR_CODES +except AttributeError: + # This constant was moved to ansible.constants in + # https://github.com/ansible/ansible/commit/1202dd000f10b0e8959019484f1c3b3f9628fc67 + # (will be included in ansible-core 2.11.0). For older Ansible/ansible-base versions, + # we include from the original location. 
+ from ansible.utils.color import codeCodes + DONT_COLORIZE = False COLORS = { From e247300523d58c698e8cc4de0a1d985bfc690f76 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 14 Feb 2021 00:09:54 +1300 Subject: [PATCH 0041/3093] Adjusted django_manage integration test files to reduce ignore lines in sanity tests (#1805) --- .../single_app_project/manage.py | 8 +++++++- .../files/base_test/simple_project/p1/manage.py | 4 ++++ .../files/base_test/simple_project/p1/p1/settings.py | 3 +++ .../files/base_test/simple_project/p1/p1/urls.py | 3 +++ tests/sanity/ignore-2.10.txt | 11 ----------- tests/sanity/ignore-2.11.txt | 11 ----------- tests/sanity/ignore-2.9.txt | 11 ----------- 7 files changed, 17 insertions(+), 34 deletions(-) diff --git a/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py b/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py index 979a04d8dd..1f5463a26b 100755 --- a/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py +++ b/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py @@ -1,12 +1,18 @@ -#! 
/usr/bin/env python3 +#!/usr/bin/env python + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + # single_app_project/manage.py import os import sys + def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'single_app_project.core.settings') from django.core.management import execute_from_command_line execute_from_command_line(sys.argv) + if __name__ == '__main__': main() diff --git a/tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py b/tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py index 475ed1dd5d..ea2cb4cd22 100755 --- a/tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py +++ b/tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py @@ -1,4 +1,8 @@ #!/usr/bin/env python + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + """Django's command-line utility for administrative tasks.""" import os import sys diff --git a/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py b/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py index 9655a45337..0a11583aba 100644 --- a/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py +++ b/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py @@ -1,3 +1,6 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + """ Django settings for p1 project. 
diff --git a/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py b/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py index 83774947c3..6710c0b7aa 100644 --- a/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py +++ b/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py @@ -1,3 +1,6 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + """p1 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 6297878d97..ec69dc3f86 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -365,18 +365,7 @@ plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path plugins/modules/web_infrastructure/rundeck_acl_policy.py pylint:blacklisted-name -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py future-import-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py pep8:E302 # django generated code -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py pep8:E305 # django generated code -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py shebang # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code 
tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py future-import-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py future-import-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py future-import-boilerplate # django generated code tests/utils/shippable/check_matrix.py replace-urlopen tests/utils/shippable/timing.py shebang diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 495ce2de09..696082a772 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -364,18 +364,7 @@ plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path plugins/modules/web_infrastructure/rundeck_acl_policy.py pylint:blacklisted-name -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py future-import-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py pep8:E302 # django generated code 
-tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py pep8:E305 # django generated code -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py shebang # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py future-import-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py future-import-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py future-import-boilerplate # django generated code tests/utils/shippable/check_matrix.py replace-urlopen tests/utils/shippable/timing.py shebang diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index c14feab0c5..09610a5cd5 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -320,18 +320,7 @@ plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path plugins/modules/web_infrastructure/nginx_status_facts.py validate-modules:deprecation-mismatch plugins/modules/web_infrastructure/nginx_status_facts.py validate-modules:invalid-documentation plugins/modules/web_infrastructure/rundeck_acl_policy.py pylint:blacklisted-name 
-tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py future-import-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py pep8:E302 # django generated code -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py pep8:E305 # django generated code -tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py shebang # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py future-import-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py future-import-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py metaclass-boilerplate # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py future-import-boilerplate # django generated code tests/utils/shippable/check_matrix.py replace-urlopen tests/utils/shippable/timing.py shebang From 865acdd4cf91699c6d3589ce9b315514d357d79d Mon Sep 17 00:00:00 2001 From: Alexei Znamensky 
<103110+russoz@users.noreply.github.com> Date: Sun, 14 Feb 2021 00:14:06 +1300 Subject: [PATCH 0042/3093] Tidy up validate-modules ignores for cloud/centurylink modules (#1771) * fixed validation-modules for plugins/modules/cloud/centurylink/clc_alert_policy.py * fixed validation-modules for plugins/modules/cloud/centurylink/clc_blueprint_package.py * fixed validation-modules for plugins/modules/cloud/centurylink/clc_firewall_policy.py * fixed validation-modules for plugins/modules/cloud/centurylink/clc_loadbalancer.py * fixed validation-modules for plugins/modules/cloud/centurylink/clc_modify_server.py * fixed validation-modules for plugins/modules/cloud/centurylink/clc_publicip.py * fixed validation-modules for plugins/modules/cloud/centurylink/clc_server_snapshot.py * fixed validation-modules for plugins/modules/cloud/centurylink/clc_server.py * Tidy up validate-modules ignores for cloud/centurylink modules * added changelog fragment per PR --- .../1771-centurylink-validation-elements.yml | 2 ++ .../modules/cloud/centurylink/clc_alert_policy.py | 3 ++- .../cloud/centurylink/clc_blueprint_package.py | 3 ++- .../cloud/centurylink/clc_firewall_policy.py | 9 ++++++--- .../modules/cloud/centurylink/clc_loadbalancer.py | 3 ++- .../cloud/centurylink/clc_modify_server.py | 3 ++- plugins/modules/cloud/centurylink/clc_publicip.py | 6 ++++-- plugins/modules/cloud/centurylink/clc_server.py | 15 ++++++++++----- .../cloud/centurylink/clc_server_snapshot.py | 3 ++- tests/sanity/ignore-2.10.txt | 8 -------- tests/sanity/ignore-2.11.txt | 8 -------- 11 files changed, 32 insertions(+), 31 deletions(-) create mode 100644 changelogs/fragments/1771-centurylink-validation-elements.yml diff --git a/changelogs/fragments/1771-centurylink-validation-elements.yml b/changelogs/fragments/1771-centurylink-validation-elements.yml new file mode 100644 index 0000000000..4c7a9bbbe4 --- /dev/null +++ b/changelogs/fragments/1771-centurylink-validation-elements.yml @@ -0,0 +1,2 @@ +minor_changes: + 
- clc_* modules - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1771). diff --git a/plugins/modules/cloud/centurylink/clc_alert_policy.py b/plugins/modules/cloud/centurylink/clc_alert_policy.py index 7a10c0b386..374f1cada1 100644 --- a/plugins/modules/cloud/centurylink/clc_alert_policy.py +++ b/plugins/modules/cloud/centurylink/clc_alert_policy.py @@ -32,6 +32,7 @@ options: - A list of recipient email ids to notify the alert. This is required for state 'present' type: list + elements: str metric: description: - The metric on which to measure the condition that will trigger the alert. @@ -220,7 +221,7 @@ class ClcAlertPolicy: name=dict(), id=dict(), alias=dict(required=True), - alert_recipients=dict(type='list'), + alert_recipients=dict(type='list', elements='str'), metric=dict( choices=[ 'cpu', diff --git a/plugins/modules/cloud/centurylink/clc_blueprint_package.py b/plugins/modules/cloud/centurylink/clc_blueprint_package.py index c45ca91959..4071b67c7c 100644 --- a/plugins/modules/cloud/centurylink/clc_blueprint_package.py +++ b/plugins/modules/cloud/centurylink/clc_blueprint_package.py @@ -18,6 +18,7 @@ options: - A list of server Ids to deploy the blue print package. type: list required: True + elements: str package_id: description: - The package id of the blue print. @@ -164,7 +165,7 @@ class ClcBlueprintPackage: :return: the package dictionary object """ argument_spec = dict( - server_ids=dict(type='list', required=True), + server_ids=dict(type='list', elements='str', required=True), package_id=dict(required=True), package_params=dict(type='dict', default={}), wait=dict(default=True), # @FIXME should be bool? 
diff --git a/plugins/modules/cloud/centurylink/clc_firewall_policy.py b/plugins/modules/cloud/centurylink/clc_firewall_policy.py index 105d793ce3..ad26dc92f7 100644 --- a/plugins/modules/cloud/centurylink/clc_firewall_policy.py +++ b/plugins/modules/cloud/centurylink/clc_firewall_policy.py @@ -29,17 +29,20 @@ options: - The list of source addresses for traffic on the originating firewall. This is required when state is 'present' type: list + elements: str destination: description: - The list of destination addresses for traffic on the terminating firewall. This is required when state is 'present' type: list + elements: str ports: description: - The list of ports associated with the policy. TCP and UDP can take in single ports or port ranges. - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])." type: list + elements: str firewall_policy_id: description: - Id of the firewall policy. This is required to update or delete an existing firewall policy @@ -217,9 +220,9 @@ class ClcFirewallPolicy: source_account_alias=dict(required=True), destination_account_alias=dict(), firewall_policy_id=dict(), - ports=dict(type='list'), - source=dict(type='list'), - destination=dict(type='list'), + ports=dict(type='list', elements='str'), + source=dict(type='list', elements='str'), + destination=dict(type='list', elements='str'), wait=dict(default=True), # @FIXME type=bool state=dict(default='present', choices=['present', 'absent']), enabled=dict(default=True, choices=[True, False]) diff --git a/plugins/modules/cloud/centurylink/clc_loadbalancer.py b/plugins/modules/cloud/centurylink/clc_loadbalancer.py index 2a8d2e9b6c..400a8b9c3f 100644 --- a/plugins/modules/cloud/centurylink/clc_loadbalancer.py +++ b/plugins/modules/cloud/centurylink/clc_loadbalancer.py @@ -53,6 +53,7 @@ options: - A list of nodes that needs to be added to the load balancer pool type: list default: [] + elements: dict status: description: - The status of the loadbalancer @@ -869,7 
+870,7 @@ class ClcLoadBalancer: port=dict(choices=[80, 443]), method=dict(choices=['leastConnection', 'roundRobin']), persistence=dict(choices=['standard', 'sticky']), - nodes=dict(type='list', default=[]), + nodes=dict(type='list', default=[], elements='dict'), status=dict(default='enabled', choices=['enabled', 'disabled']), state=dict( default='present', diff --git a/plugins/modules/cloud/centurylink/clc_modify_server.py b/plugins/modules/cloud/centurylink/clc_modify_server.py index 3c1b08cdc6..c0730a9c2b 100644 --- a/plugins/modules/cloud/centurylink/clc_modify_server.py +++ b/plugins/modules/cloud/centurylink/clc_modify_server.py @@ -18,6 +18,7 @@ options: - A list of server Ids to modify. type: list required: True + elements: str cpu: description: - How many CPUs to update on the server @@ -396,7 +397,7 @@ class ClcModifyServer: :return: argument spec dictionary """ argument_spec = dict( - server_ids=dict(type='list', required=True), + server_ids=dict(type='list', required=True, elements='str'), state=dict(default='present', choices=['present', 'absent']), cpu=dict(), memory=dict(), diff --git a/plugins/modules/cloud/centurylink/clc_publicip.py b/plugins/modules/cloud/centurylink/clc_publicip.py index e31546b2a0..8b5ac4cb4e 100644 --- a/plugins/modules/cloud/centurylink/clc_publicip.py +++ b/plugins/modules/cloud/centurylink/clc_publicip.py @@ -23,11 +23,13 @@ options: description: - A list of ports to expose. This is required when state is 'present' type: list + elements: int server_ids: description: - A list of servers to create public ips on. type: list required: True + elements: str state: description: - Determine whether to create or delete public IPs. 
If present module will not create a second public ip if one @@ -193,9 +195,9 @@ class ClcPublicIp(object): :return: argument spec dictionary """ argument_spec = dict( - server_ids=dict(type='list', required=True), + server_ids=dict(type='list', required=True, elements='str'), protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']), - ports=dict(type='list'), + ports=dict(type='list', elements='int'), wait=dict(type='bool', default=True), state=dict(default='present', choices=['present', 'absent']), ) diff --git a/plugins/modules/cloud/centurylink/clc_server.py b/plugins/modules/cloud/centurylink/clc_server.py index 6b7e9c4b95..4e02421892 100644 --- a/plugins/modules/cloud/centurylink/clc_server.py +++ b/plugins/modules/cloud/centurylink/clc_server.py @@ -17,6 +17,7 @@ options: description: - The list of additional disks for the server type: list + elements: dict default: [] add_public_ip: description: @@ -66,6 +67,7 @@ options: - The list of custom fields to set on the server. type: list default: [] + elements: dict description: description: - The description to set for the server. @@ -111,6 +113,7 @@ options: description: - The list of blue print packages to run on the server after its created. type: list + elements: dict default: [] password: description: @@ -130,6 +133,7 @@ options: description: - A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True. type: list + elements: dict default: [] secondary_dns: description: @@ -141,6 +145,7 @@ options: A list of server Ids to insure are started, stopped, or absent. type: list default: [] + elements: str source_server_password: description: - The password for the source server if a clone is specified. 
@@ -575,8 +580,8 @@ class ClcServer: type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']), primary_dns=dict(default=None), secondary_dns=dict(default=None), - additional_disks=dict(type='list', default=[]), - custom_fields=dict(type='list', default=[]), + additional_disks=dict(type='list', default=[], elements='dict'), + custom_fields=dict(type='list', default=[], elements='dict'), ttl=dict(default=None), managed_os=dict(type='bool', default=False), description=dict(default=None), @@ -586,7 +591,7 @@ class ClcServer: anti_affinity_policy_name=dict(default=None), alert_policy_id=dict(default=None), alert_policy_name=dict(default=None), - packages=dict(type='list', default=[]), + packages=dict(type='list', default=[], elements='dict'), state=dict( default='present', choices=[ @@ -597,7 +602,7 @@ class ClcServer: count=dict(type='int', default=1), exact_count=dict(type='int', default=None), count_group=dict(), - server_ids=dict(type='list', default=[]), + server_ids=dict(type='list', default=[], elements='str'), add_public_ip=dict(type='bool', default=False), public_ip_protocol=dict( default='TCP', @@ -605,7 +610,7 @@ class ClcServer: 'TCP', 'UDP', 'ICMP']), - public_ip_ports=dict(type='list', default=[]), + public_ip_ports=dict(type='list', default=[], elements='dict'), configuration_id=dict(default=None), os_type=dict(default=None, choices=[ diff --git a/plugins/modules/cloud/centurylink/clc_server_snapshot.py b/plugins/modules/cloud/centurylink/clc_server_snapshot.py index 1d289f6657..1c706b07a4 100644 --- a/plugins/modules/cloud/centurylink/clc_server_snapshot.py +++ b/plugins/modules/cloud/centurylink/clc_server_snapshot.py @@ -18,6 +18,7 @@ options: - The list of CLC server Ids. type: list required: True + elements: str expiration_days: description: - The number of days to keep the server snapshot before it expires. 
@@ -330,7 +331,7 @@ class ClcSnapshot: :return: the package dictionary object """ argument_spec = dict( - server_ids=dict(type='list', required=True), + server_ids=dict(type='list', required=True, elements='str'), expiration_days=dict(default=7, type='int'), wait=dict(default=True), state=dict( diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index ec69dc3f86..17e7e43fc8 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -5,14 +5,6 @@ plugins/module_utils/compat/ipaddress.py no-assert plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/centurylink/clc_alert_policy.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_blueprint_package.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_firewall_policy.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_loadbalancer.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_modify_server.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_publicip.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_server.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_server_snapshot.py validate-modules:parameter-list-no-elements plugins/modules/cloud/heroku/heroku_collaborator.py validate-modules:parameter-list-no-elements plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 696082a772..1964623ba5 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -4,14 +4,6 @@ 
plugins/module_utils/compat/ipaddress.py no-assert plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/centurylink/clc_alert_policy.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_blueprint_package.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_firewall_policy.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_loadbalancer.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_modify_server.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_publicip.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_server.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/centurylink/clc_server_snapshot.py validate-modules:parameter-list-no-elements plugins/modules/cloud/heroku/heroku_collaborator.py validate-modules:parameter-list-no-elements plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc From 41bc7816f36de27b5cf151eb523c8bd08fe06f05 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 15 Feb 2021 03:08:32 +1300 Subject: [PATCH 0043/3093] Implemented fix pointed in issue (#1760) --- .../fragments/1714-gitlab_runner-required-reg-token.yml | 2 ++ plugins/modules/source_control/gitlab/gitlab_runner.py | 7 +++++-- 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/1714-gitlab_runner-required-reg-token.yml diff --git a/changelogs/fragments/1714-gitlab_runner-required-reg-token.yml b/changelogs/fragments/1714-gitlab_runner-required-reg-token.yml new file mode 100644 index 0000000000..ec73bf422c --- /dev/null +++ 
b/changelogs/fragments/1714-gitlab_runner-required-reg-token.yml @@ -0,0 +1,2 @@ +bugfixes: + - gitlab_runner - parameter ``registration_token`` was required but is used only when ``state`` is ``present`` (https://github.com/ansible-collections/community.general/issues/1714). diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py index 52354645df..8470739fd8 100644 --- a/plugins/modules/source_control/gitlab/gitlab_runner.py +++ b/plugins/modules/source_control/gitlab/gitlab_runner.py @@ -55,7 +55,7 @@ options: registration_token: description: - The registration token is used to register new runners. - required: True + - Required if I(state) is C(present). type: str owned: description: @@ -309,7 +309,7 @@ def main(): locked=dict(type='bool', default=False), access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]), maximum_timeout=dict(type='int', default=3600), - registration_token=dict(type='str', required=True, no_log=True), + registration_token=dict(type='str', no_log=True), state=dict(type='str', default="present", choices=["absent", "present"]), )) @@ -325,6 +325,9 @@ def main(): required_one_of=[ ['api_username', 'api_token'], ], + required_if=[ + ('state', 'present', ['registration_token']), + ], supports_check_mode=True, ) From bd372939bc18a8fed3c3caed0023339fc649117b Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 15 Feb 2021 19:16:08 +1300 Subject: [PATCH 0044/3093] Tidy up validation for storage/zfs modules (#1766) * fixed validation-modules for plugins/modules/storage/zfs/zfs.py * fixed validation-modules for plugins/modules/storage/zfs/zfs_delegate_admin.py * fixed validation-modules for plugins/modules/storage/zfs/zfs_facts.py * fixed validation-modules for plugins/modules/storage/zfs/zpool_facts.py * Tidy up validate-modules ignores for storage/zfs modules * removed ignore lines in 2.11 
files as well * added changelog fragment per PR * Update changelogs/fragments/1766-zfs-fixed-sanity.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/1766-zfs-fixed-sanity.yml | 2 ++ plugins/modules/storage/zfs/zfs.py | 4 ++++ plugins/modules/storage/zfs/zfs_delegate_admin.py | 10 +++++++--- plugins/modules/storage/zfs/zfs_facts.py | 4 ++++ plugins/modules/storage/zfs/zpool_facts.py | 2 ++ tests/sanity/ignore-2.10.txt | 5 ----- tests/sanity/ignore-2.11.txt | 5 ----- tests/sanity/ignore-2.9.txt | 4 ---- 8 files changed, 19 insertions(+), 17 deletions(-) create mode 100644 changelogs/fragments/1766-zfs-fixed-sanity.yml diff --git a/changelogs/fragments/1766-zfs-fixed-sanity.yml b/changelogs/fragments/1766-zfs-fixed-sanity.yml new file mode 100644 index 0000000000..ac31084e2c --- /dev/null +++ b/changelogs/fragments/1766-zfs-fixed-sanity.yml @@ -0,0 +1,2 @@ +bugfixes: + - zfs_delegate_admin - the elements of ``users``, ``groups`` and ``permissions`` are now enforced to be strings (https://github.com/ansible-collections/community.general/pull/1766). diff --git a/plugins/modules/storage/zfs/zfs.py b/plugins/modules/storage/zfs/zfs.py index 6b2260fb14..8013dd1128 100644 --- a/plugins/modules/storage/zfs/zfs.py +++ b/plugins/modules/storage/zfs/zfs.py @@ -19,6 +19,7 @@ options: description: - File system, snapshot or volume name e.g. C(rpool/myfs). required: true + type: str state: description: - Whether to create (C(present)), or remove (C(absent)) a @@ -26,13 +27,16 @@ options: will be created/destroyed as needed to reach the desired state. choices: [ absent, present ] required: true + type: str origin: description: - Snapshot from which to create a clone. + type: str extra_zfs_properties: description: - A dictionary of zfs properties to be set. - See the zfs(8) man page for more information. 
+ type: dict author: - Johan Wiren (@johanwiren) ''' diff --git a/plugins/modules/storage/zfs/zfs_delegate_admin.py b/plugins/modules/storage/zfs/zfs_delegate_admin.py index 223d7f72ad..71225fa155 100644 --- a/plugins/modules/storage/zfs/zfs_delegate_admin.py +++ b/plugins/modules/storage/zfs/zfs_delegate_admin.py @@ -32,14 +32,17 @@ options: - When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified. choices: [ absent, present ] default: present + type: str users: description: - List of users to whom permission(s) should be granted. type: list + elements: str groups: description: - List of groups to whom permission(s) should be granted. type: list + elements: str everyone: description: - Apply permissions to everyone. @@ -50,6 +53,7 @@ options: - The list of permission(s) to delegate (required if C(state) is C(present)). type: list choices: [ allow, clone, create, destroy, diff, hold, mount, promote, readonly, receive, release, rename, rollback, send, share, snapshot, unallow ] + elements: str local: description: - Apply permissions to C(name) locally (C(zfs allow -l)). 
@@ -241,10 +245,10 @@ def main(): argument_spec=dict( name=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), - users=dict(type='list'), - groups=dict(type='list'), + users=dict(type='list', elements='str'), + groups=dict(type='list', elements='str'), everyone=dict(type='bool', default=False), - permissions=dict(type='list', + permissions=dict(type='list', elements='str', choices=['allow', 'clone', 'create', 'destroy', 'diff', 'hold', 'mount', 'promote', 'readonly', 'receive', 'release', 'rename', 'rollback', 'send', 'share', 'snapshot', 'unallow']), diff --git a/plugins/modules/storage/zfs/zfs_facts.py b/plugins/modules/storage/zfs/zfs_facts.py index e7719f688f..930214743a 100644 --- a/plugins/modules/storage/zfs/zfs_facts.py +++ b/plugins/modules/storage/zfs/zfs_facts.py @@ -21,6 +21,7 @@ options: - ZFS dataset name. required: yes aliases: [ "ds", "dataset" ] + type: str recurse: description: - Specifies if properties for any children should be recursively @@ -38,15 +39,18 @@ options: - Specifies which dataset properties should be queried in comma-separated format. For more information about dataset properties, check zfs(1M) man page. default: all + type: str type: description: - Specifies which datasets types to display. Multiple values have to be provided in comma-separated form. choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ] default: all + type: str depth: description: - Specifies recursion depth. + type: int ''' EXAMPLES = ''' diff --git a/plugins/modules/storage/zfs/zpool_facts.py b/plugins/modules/storage/zfs/zpool_facts.py index 728c077902..eced85000e 100644 --- a/plugins/modules/storage/zfs/zpool_facts.py +++ b/plugins/modules/storage/zfs/zpool_facts.py @@ -19,6 +19,7 @@ options: name: description: - ZFS pool name. 
+ type: str aliases: [ "pool", "zpool" ] required: false parsable: @@ -32,6 +33,7 @@ options: description: - Specifies which dataset properties should be queried in comma-separated format. For more information about dataset properties, check zpool(1M) man page. + type: str default: all required: false ''' diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 17e7e43fc8..4a4280ee85 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -322,11 +322,6 @@ plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-l plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error plugins/modules/storage/purestorage/purefb_facts.py validate-modules:parameter-list-no-elements plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error -plugins/modules/storage/zfs/zfs.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/zfs/zfs_delegate_admin.py validate-modules:parameter-list-no-elements -plugins/modules/storage/zfs/zfs_delegate_admin.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/zfs/zfs_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/zfs/zpool_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/system/alternatives.py pylint:blacklisted-name plugins/modules/system/beadm.py pylint:blacklisted-name plugins/modules/system/cronvar.py pylint:blacklisted-name diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 1964623ba5..9ac48e2871 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -321,11 +321,6 @@ plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-l plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error plugins/modules/storage/purestorage/purefb_facts.py validate-modules:parameter-list-no-elements plugins/modules/storage/purestorage/purefb_facts.py 
validate-modules:return-syntax-error -plugins/modules/storage/zfs/zfs.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/zfs/zfs_delegate_admin.py validate-modules:parameter-list-no-elements -plugins/modules/storage/zfs/zfs_delegate_admin.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/zfs/zfs_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/zfs/zpool_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/system/alternatives.py pylint:blacklisted-name plugins/modules/system/beadm.py pylint:blacklisted-name plugins/modules/system/cronvar.py pylint:blacklisted-name diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 09610a5cd5..181a1cc3a2 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -289,10 +289,6 @@ plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-synt plugins/modules/storage/purestorage/purefb_facts.py validate-modules:deprecation-mismatch plugins/modules/storage/purestorage/purefb_facts.py validate-modules:invalid-documentation plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error -plugins/modules/storage/zfs/zfs.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/zfs/zfs_delegate_admin.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/zfs/zfs_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/zfs/zpool_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/system/alternatives.py pylint:blacklisted-name plugins/modules/system/beadm.py pylint:blacklisted-name plugins/modules/system/cronvar.py pylint:blacklisted-name From d09a558fda7255a55ad2e79eb464ad38b29cf45b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADctor=20Su=C3=A1rez=20Fern=C3=A1ndez?= <15276132+vicsufer@users.noreply.github.com> Date: Mon, 15 Feb 2021 08:27:33 +0100 Subject: [PATCH 0045/3093] Fix undeclared result for 
nomad_job_info module (#1721) * Fix unassigned variable * Create 1721-fix-nomad_job_info-no-jobs-failure.yml * refactor usage of variables in nomad_job_info * Update changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/1721-fix-nomad_job_info-no-jobs-failure.yml | 2 ++ plugins/modules/clustering/nomad/nomad_job_info.py | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml diff --git a/changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml b/changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml new file mode 100644 index 0000000000..c3c3d804e3 --- /dev/null +++ b/changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml @@ -0,0 +1,2 @@ +bugfixes: + - nomad_job_info - fix module failure when nomad client returns no jobs (https://github.com/ansible-collections/community.general/pull/1721). diff --git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py index 9e93532843..d10c0a0438 100644 --- a/plugins/modules/clustering/nomad/nomad_job_info.py +++ b/plugins/modules/clustering/nomad/nomad_job_info.py @@ -312,12 +312,11 @@ def run(): ) changed = False - nomad_jobs = list() + result = list() try: job_list = nomad_client.jobs.get_jobs() for job in job_list: - nomad_jobs.append(nomad_client.job.get_job(job.get('ID'))) - result = nomad_jobs + result.append(nomad_client.job.get_job(job.get('ID'))) except Exception as e: module.fail_json(msg=to_native(e)) From 1cce279424288d9dcbeac54b38c1d3a94044258c Mon Sep 17 00:00:00 2001 From: Jeffrey van Pelt Date: Mon, 15 Feb 2021 08:27:43 +0100 Subject: [PATCH 0046/3093] Added VMID to all returns (#1715) * Added VMID to all returns Also added in the docs promised return of MAC and devices when state == current. 
Fixes: #1641 * Revert devices and mac as get_vminfo works differently then I expected * Added status output to a few more calls * Update RETURNS docs * Remove vmid where it is not available * Added changelog fragment * Update changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml You're right, this message is way better then mine.. :-) Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../1715-proxmox_kvm-add-vmid-to-returns.yml | 2 + plugins/modules/cloud/misc/proxmox_kvm.py | 84 +++++++------------ 2 files changed, 33 insertions(+), 53 deletions(-) create mode 100644 changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml diff --git a/changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml b/changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml new file mode 100644 index 0000000000..b4561f5145 --- /dev/null +++ b/changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox_kvm module - actually implemented ``vmid`` and ``status`` return values. Updated documentation to reflect current situation (https://github.com/ansible-collections/community.general/issues/1410, https://github.com/ansible-collections/community.general/pull/1715). diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 348725661b..3e4e6449da 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -730,27 +730,6 @@ EXAMPLES = ''' ''' RETURN = ''' -devices: - description: The list of devices created or used. - returned: success - type: dict - sample: ' - { - "ide0": "VMS_LVM:vm-115-disk-1", - "ide1": "VMs:115/vm-115-disk-3.raw", - "virtio0": "VMS_LVM:vm-115-disk-2", - "virtio1": "VMs:115/vm-115-disk-1.qcow2", - "virtio2": "VMs:115/vm-115-disk-2.raw" - }' -mac: - description: List of mac address created and net[n] attached. Useful when you want to use provision systems like Foreman via PXE. 
- returned: success - type: dict - sample: ' - { - "net0": "3E:6E:97:D2:31:9F", - "net1": "B6:A1:FC:EF:78:A4" - }' vmid: description: The VM vmid. returned: success @@ -759,7 +738,6 @@ vmid: status: description: - The current virtual machine status. - - Returned only when C(state=current) returned: success type: dict sample: '{ @@ -1201,36 +1179,36 @@ def main(): # Ensure source VM id exists when cloning if not get_vm(proxmox, vmid): - module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) + module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid) # Ensure the choosen VM name doesn't already exist when cloning if get_vmid(proxmox, name): - module.exit_json(changed=False, msg="VM with name <%s> already exists" % name) + module.exit_json(changed=False, vmid=vmid, msg="VM with name <%s> already exists" % name) # Ensure the choosen VM id doesn't already exist when cloning if get_vm(proxmox, newid): - module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name)) + module.exit_json(changed=False, vmid=vmid, msg="vmid %s with VM name %s already exists" % (newid, name)) if delete is not None: try: settings(module, proxmox, vmid, node, name, delete=delete) - module.exit_json(changed=True, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid)) + module.exit_json(changed=True, vmid=vmid, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid)) except Exception as e: - module.fail_json(msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e)) + module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e)) if revert is not None: try: settings(module, proxmox, vmid, node, name, revert=revert) - module.exit_json(changed=True, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid)) + module.exit_json(changed=True, vmid=vmid, msg="Settings has reverted on VM {0} with vmid 
{1}".format(name, vmid)) except Exception as e: - module.fail_json(msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... '.format(name, vmid) + str(e)) + module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... '.format(name, vmid) + str(e)) if state == 'present': try: if get_vm(proxmox, vmid) and not (update or clone): - module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid) + module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid) elif get_vmid(proxmox, name) and not (update or clone): - module.exit_json(changed=False, msg="VM with name <%s> already exists" % name) + module.exit_json(changed=False, vmid=vmid, msg="VM with name <%s> already exists" % name) elif not (node, name): module.fail_json(msg='node, name is mandatory for creating/updating vm') elif not node_check(proxmox, node): @@ -1303,18 +1281,18 @@ def main(): scsi=module.params['scsi'], virtio=module.params['virtio']) if update: - module.exit_json(changed=True, msg="VM %s with vmid %s updated" % (name, vmid)) + module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid)) elif clone is not None: - module.exit_json(changed=True, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) + module.exit_json(changed=True, vmid=vmid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) else: - module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) + module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s deployed" % (name, vmid), **results) except Exception as e: if update: - module.fail_json(msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e)) + module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e)) elif clone is not None: - module.fail_json(msg="Unable to clone vm {0} from 
vmid {1}=".format(name, vmid) + str(e)) + module.fail_json(vmid=vmid, msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e)) else: - module.fail_json(msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e)) + module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e)) elif state == 'started': try: @@ -1322,14 +1300,14 @@ def main(): module.fail_json(msg='VM with name = %s does not exist in cluster' % name) vm = get_vm(proxmox, vmid) if not vm: - module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid) + module.fail_json(vmid=vmid, msg='VM with vmid <%s> does not exist in cluster' % vmid) if vm[0]['status'] == 'running': - module.exit_json(changed=False, msg="VM %s is already running" % vmid) + module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status) if start_vm(module, proxmox, vm): - module.exit_json(changed=True, msg="VM %s started" % vmid) + module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid, **status) except Exception as e: - module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e)) + module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status) elif state == 'stopped': try: @@ -1338,15 +1316,15 @@ def main(): vm = get_vm(proxmox, vmid) if not vm: - module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) + module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid) if vm[0]['status'] == 'stopped': - module.exit_json(changed=False, msg="VM %s is already stopped" % vmid) + module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status) if stop_vm(module, proxmox, vm, force=module.params['force']): - module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) + module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status) except 
Exception as e: - module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e)) + module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status) elif state == 'restarted': try: @@ -1355,33 +1333,33 @@ def main(): vm = get_vm(proxmox, vmid) if not vm: - module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) + module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid) if vm[0]['status'] == 'stopped': - module.exit_json(changed=False, msg="VM %s is not running" % vmid) + module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status) if stop_vm(module, proxmox, vm, force=module.params['force']) and start_vm(module, proxmox, vm): - module.exit_json(changed=True, msg="VM %s is restarted" % vmid) + module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status) except Exception as e: - module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e)) + module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e), **status) elif state == 'absent': try: vm = get_vm(proxmox, vmid) if not vm: - module.exit_json(changed=False) + module.exit_json(changed=False, vmid=vmid) proxmox_node = proxmox.nodes(vm[0]['node']) if vm[0]['status'] == 'running': if module.params['force']: stop_vm(module, proxmox, vm, True) else: - module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion or use force=yes." % vmid) + module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion or use force=yes." % vmid) taskid = proxmox_node.qemu.delete(vmid) if not wait_for_task(module, proxmox, vm[0]['node'], taskid): module.fail_json(msg='Reached timeout while waiting for removing VM. 
Last line in task before timeout: %s' % proxmox_node.tasks(taskid).log.get()[:1]) else: - module.exit_json(changed=True, msg="VM %s removed" % vmid) + module.exit_json(changed=True, vmid=vmid, msg="VM %s removed" % vmid) except Exception as e: module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e)) @@ -1395,7 +1373,7 @@ def main(): current = proxmox.nodes(vm[0]['node']).qemu(vmid).status.current.get()['status'] status['status'] = current if status: - module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status) + module.exit_json(changed=False, vmid=vmid, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status) if __name__ == '__main__': From 8fae693d9cf003ac34bd51814a418fa49038f263 Mon Sep 17 00:00:00 2001 From: shieni Date: Mon, 15 Feb 2021 09:29:04 +0200 Subject: [PATCH 0047/3093] ease limitation for nios_host_record DNS Bypass (#1788) * ease limitation for nios_host_record DNS Bypass, the bypass should be allowed when configure_dns is disabled and view is set other than default * add changelog fragment Co-authored-by: Nils --- .../1788-ease-nios_host_record-dns-bypass-check.yml | 3 +++ plugins/module_utils/net_tools/nios/api.py | 5 +---- 2 files changed, 4 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml diff --git a/changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml b/changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml new file mode 100644 index 0000000000..6b1a43cc25 --- /dev/null +++ b/changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - nios_host_record - allow DNS Bypass for views other than default (https://github.com/ansible-collections/community.general/issues/1786). 
diff --git a/plugins/module_utils/net_tools/nios/api.py b/plugins/module_utils/net_tools/nios/api.py index 3eac0fe1e8..440ea391c9 100644 --- a/plugins/module_utils/net_tools/nios/api.py +++ b/plugins/module_utils/net_tools/nios/api.py @@ -260,13 +260,10 @@ class WapiModule(WapiBase): else: proposed_object[key] = self.module.params[key] - # If configure_by_dns is set to False, then delete the default dns set in the param else throw exception + # If configure_by_dns is set to False and view is 'default', then delete the default dns if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\ and ib_obj_type == NIOS_HOST_RECORD: del proposed_object['view'] - elif not proposed_object.get('configure_for_dns') and proposed_object.get('view') != 'default'\ - and ib_obj_type == NIOS_HOST_RECORD: - self.module.fail_json(msg='DNS Bypass is not allowed if DNS view is set other than \'default\'') if ib_obj_ref: if len(ib_obj_ref) > 1: From 5aac81bdd1d1711009d74e6e263ddd86ae915c62 Mon Sep 17 00:00:00 2001 From: almdudler777 <43605831+almdudler777@users.noreply.github.com> Date: Mon, 15 Feb 2021 22:40:51 +0100 Subject: [PATCH 0048/3093] proxmox_kvm: 500 error args parameter (#1783) * don't add args if set to 'no_defaults' * never add force even if false, will require archive parameter other which is not implemented * remove trailing whitespace * add changelog fragment * Update changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/1783-proxmox-kvm-fix-args-500-error.yaml | 3 +++ plugins/modules/cloud/misc/proxmox_kvm.py | 8 +++++--- 2 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml diff --git a/changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml b/changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml new file mode 100644 index 0000000000..5e46b066a8 --- /dev/null +++ 
b/changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml @@ -0,0 +1,3 @@ +bugfixes: + - proxmox_kvm - do not add ``args`` if ``proxmox_default_behavior`` is set to no_defaults (https://github.com/ansible-collections/community.general/issues/1641). + - proxmox_kvm - stop implicitly adding ``force`` equal to ``false``. Proxmox API requires not implemented parameters otherwise, and assumes ``force`` to be ``false`` by default anyways (https://github.com/ansible-collections/community.general/pull/1783). diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 3e4e6449da..eee698405e 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -31,6 +31,9 @@ options: description: - Pass arbitrary arguments to kvm. - This option is for experts only! + - If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this + option has a default of C(-serial unix:/var/run/qemu-server/.serial,server,nowait). + Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0. 
type: str autostart: description: @@ -925,9 +928,9 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock if searchdomains: kwargs['searchdomain'] = ' '.join(searchdomains) - # -args and skiplock require root@pam user + # -args and skiplock require root@pam user - but can not use api tokens if module.params['api_user'] == "root@pam" and module.params['args'] is None: - if not update: + if not update and module.params['proxmox_default_behavior'] == 'compatibility': kwargs['args'] = vm_args elif module.params['api_user'] == "root@pam" and module.params['args'] is not None: kwargs['args'] = module.params['args'] @@ -1118,7 +1121,6 @@ def main(): cores=1, cpu='kvm64', cpuunits=1000, - force=False, format='qcow2', kvm=True, memory=512, From f33323ca8930ee566cb29de4f8218b71e0a1417a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 16 Feb 2021 19:11:37 +1300 Subject: [PATCH 0049/3093] Tidy up validate-modules:parameter-list-no-elements (batch 1) (#1795) * fixed validation-modules for plugins/modules/storage/netapp/na_ontap_gather_facts.py * fixed validation-modules for plugins/modules/source_control/gitlab/gitlab_runner.py * fixed validation-modules for plugins/modules/packaging/os/redhat_subscription.py * fixed validation-modules for plugins/modules/notification/twilio.py * fixed validation-modules for plugins/modules/notification/slack.py * fixed validation-modules for plugins/modules/notification/sendgrid.py * fixed validation-modules for plugins/modules/notification/rocketchat.py * fixed validation-modules for plugins/modules/notification/office_365_connector_card.py * fixed validation-modules for plugins/modules/notification/nexmo.py * fixed validation-modules for plugins/modules/notification/mail.py * fixed validation-modules for plugins/modules/net_tools/omapi_host.py * fixed validation-modules for plugins/modules/net_tools/nsupdate.py * fixed validation-modules for 
plugins/modules/net_tools/dnsimple.py * fixed validation-modules for plugins/modules/monitoring/pagerduty.py * fixed validation-modules for plugins/modules/monitoring/librato_annotation.py * fixed validation-modules for plugins/modules/identity/onepassword_info.py * fixed validation-modules for plugins/modules/identity/keycloak/keycloak_client.py * fixed validation-modules for plugins/modules/files/xml.py * fixed validation-modules for plugins/modules/cloud/softlayer/sl_vm.py * fixed validation-modules for plugins/modules/cloud/smartos/vmadm.py * fixed validation-modules for plugins/modules/cloud/pubnub/pubnub_blocks.py * fixed validation-modules for plugins/modules/cloud/packet/packet_device.py * fixed validation-modules for plugins/modules/cloud/lxd/lxd_container.py * fixed validation-modules for plugins/module_utils/oracle/oci_utils.py * fixed validation-modules for plugins/doc_fragments/oracle_creatable_resource.py * Tidy up validate-modules:parameter-list-no-elements for some modules * fixed validation-modules for plugins/modules/monitoring/statusio_maintenance.py * Fixed pending issues from CI validation * Fixed xml module elements for add_children & set_children * added changelog fragment * typo * fix wording in changelog frag --- .../fragments/1795-list-elements-batch1.yml | 27 ++++++++++++++++ .../oracle_creatable_resource.py | 1 + plugins/module_utils/oracle/oci_utils.py | 2 +- plugins/modules/cloud/lxd/lxd_container.py | 2 ++ plugins/modules/cloud/packet/packet_device.py | 23 ++++++++++++-- plugins/modules/cloud/pubnub/pubnub_blocks.py | 3 +- plugins/modules/cloud/smartos/vmadm.py | 9 +++++- plugins/modules/cloud/softlayer/sl_vm.py | 6 ++-- plugins/modules/files/xml.py | 6 ++-- .../identity/keycloak/keycloak_client.py | 9 ++++-- plugins/modules/identity/onepassword_info.py | 3 +- .../modules/monitoring/librato_annotation.py | 3 +- plugins/modules/monitoring/pagerduty.py | 3 +- .../monitoring/statusio_maintenance.py | 6 ++-- 
plugins/modules/net_tools/dnsimple.py | 3 +- plugins/modules/net_tools/nsupdate.py | 15 ++++++++- plugins/modules/net_tools/omapi_host.py | 3 +- plugins/modules/notification/mail.py | 15 ++++++--- plugins/modules/notification/nexmo.py | 3 +- .../notification/office_365_connector_card.py | 6 ++-- plugins/modules/notification/rocketchat.py | 3 +- plugins/modules/notification/sendgrid.py | 12 ++++--- plugins/modules/notification/slack.py | 3 +- plugins/modules/notification/twilio.py | 3 +- .../packaging/os/redhat_subscription.py | 8 +++-- .../source_control/gitlab/gitlab_runner.py | 3 +- .../storage/netapp/na_ontap_gather_facts.py | 27 +++++++++------- tests/sanity/ignore-2.10.txt | 31 ------------------- tests/sanity/ignore-2.11.txt | 31 ------------------- tests/sanity/ignore-2.9.txt | 5 --- 30 files changed, 156 insertions(+), 118 deletions(-) create mode 100644 changelogs/fragments/1795-list-elements-batch1.yml diff --git a/changelogs/fragments/1795-list-elements-batch1.yml b/changelogs/fragments/1795-list-elements-batch1.yml new file mode 100644 index 0000000000..9b057c7712 --- /dev/null +++ b/changelogs/fragments/1795-list-elements-batch1.yml @@ -0,0 +1,27 @@ +minor_changes: + - plugins/module_utils/oracle/oci_utils.py - elements of list parameter ``key_by`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - lxd_container - elements of list parameter ``profiles`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - packet_device - elements of list parameters ``device_ids``, ``hostnames`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - pubnub_blocks - elements of list parameters ``event_handlers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). 
+ - vmadm - elements of list parameters ``disks``, ``nics``, ``resolvers``, ``filesystems`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - sl_vm - elements of list parameters ``disks``, ``ssh_keys`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - xml - elements of list parameters ``add_children``, ``set_children`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - keycloak_client - elements of list parameters ``default_roles``, ``redirect_uris``, ``web_origins`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - onepassword_info - elements of list parameters ``search_terms`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - librato_annotation - elements of list parameters ``links`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - pagerduty - elements of list parameters ``service`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - statusio_maintenance - elements of list parameters ``components``, ``containers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - dnsimple - elements of list parameters ``record_ids`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - nsupdate - elements of list parameters ``value`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - omapi_host - elements of list parameters ``statements`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - mail - elements of list parameters ``to``, ``cc``, ``bcc``, ``attach``, ``headers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). 
+ - nexmo - elements of list parameters ``dest`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - rocketchat - elements of list parameters ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - sendgrid - elements of list parameters ``to_addresses``, ``cc``, ``bcc``, ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - slack - elements of list parameters ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - twilio - elements of list parameters ``to_numbers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - redhat_subscription - elements of list parameters ``pool_ids``, ``addons`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - gitlab_runner - elements of list parameters ``tag_list`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). + - na_ontap_gather_facts - elements of list parameters ``gather_subset`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). +bugfixes: + - redhat_subscription - ``mutually_exclusive`` was referring to parameter alias instead of name (https://github.com/ansible-collections/community.general/pull/1795). diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py index d8f2210104..468eaabe3f 100644 --- a/plugins/doc_fragments/oracle_creatable_resource.py +++ b/plugins/doc_fragments/oracle_creatable_resource.py @@ -20,4 +20,5 @@ class ModuleDocFragment(object): identify an instance of the resource. By default, all the attributes of a resource except I(freeform_tags) are used to uniquely identify a resource. 
type: list + elements: str """ diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py index 973d68845e..7252f68110 100644 --- a/plugins/module_utils/oracle/oci_utils.py +++ b/plugins/module_utils/oracle/oci_utils.py @@ -104,7 +104,7 @@ def get_common_arg_spec(supports_create=False, supports_wait=False): if supports_create: common_args.update( - key_by=dict(type="list"), + key_by=dict(type="list", elements="str"), force_create=dict(type="bool", default=False), ) diff --git a/plugins/modules/cloud/lxd/lxd_container.py b/plugins/modules/cloud/lxd/lxd_container.py index 1e73c5cf12..0cadaa9ac0 100644 --- a/plugins/modules/cloud/lxd/lxd_container.py +++ b/plugins/modules/cloud/lxd/lxd_container.py @@ -45,6 +45,7 @@ options: description: - Profile to be used by the container type: list + elements: str devices: description: - 'The devices for the container @@ -658,6 +659,7 @@ def main(): ), profiles=dict( type='list', + elements='str', ), source=dict( type='dict', diff --git a/plugins/modules/cloud/packet/packet_device.py b/plugins/modules/cloud/packet/packet_device.py index c76530f552..5dc662a255 100644 --- a/plugins/modules/cloud/packet/packet_device.py +++ b/plugins/modules/cloud/packet/packet_device.py @@ -31,20 +31,25 @@ options: auth_token: description: - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + type: str count: description: - The number of devices to create. Count number can be included in hostname via the %d string formatter. default: 1 + type: int count_offset: description: - From which number to start the count. default: 1 + type: int device_ids: description: - List of device IDs on which to operate. + type: list + elements: str tags: description: @@ -57,10 +62,12 @@ options: facility: description: - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/). 
+ type: str features: description: - Dict with "features" for device creation. See Packet API docs for details. + type: dict hostnames: description: @@ -68,6 +75,8 @@ options: - If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count). - If only one hostname, it might be expanded to list if I(count)>1. aliases: [name] + type: list + elements: str locked: description: @@ -79,15 +88,18 @@ options: operating_system: description: - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/). + type: str plan: description: - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/). + type: str project_id: description: - ID of project of the device. required: true + type: str state: description: @@ -96,10 +108,12 @@ options: - If set to C(active), the module call will block until all the specified devices are in state active due to the Packet API, or until I(wait_timeout). choices: [present, absent, active, inactive, rebooted] default: present + type: str user_data: description: - Userdata blob made available to the machine + type: str wait_for_public_IPv: description: @@ -107,16 +121,21 @@ options: - If set to 4, it will wait until IPv4 is assigned to the instance. - If set to 6, wait until public IPv6 is assigned to the instance. choices: [4,6] + type: int wait_timeout: description: - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state). - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice. default: 900 + type: int + ipxe_script_url: description: - URL of custom iPXE script for provisioning. - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe). 
+ type: str + always_pxe: description: - Persist PXE as the first boot option. @@ -601,10 +620,10 @@ def main(): no_log=True), count=dict(type='int', default=1), count_offset=dict(type='int', default=1), - device_ids=dict(type='list'), + device_ids=dict(type='list', elements='str'), facility=dict(), features=dict(type='dict'), - hostnames=dict(type='list', aliases=['name']), + hostnames=dict(type='list', elements='str', aliases=['name']), tags=dict(type='list', elements='str'), locked=dict(type='bool', default=False, aliases=['lock']), operating_system=dict(), diff --git a/plugins/modules/cloud/pubnub/pubnub_blocks.py b/plugins/modules/cloud/pubnub/pubnub_blocks.py index 8d9374a862..640f6d925e 100644 --- a/plugins/modules/cloud/pubnub/pubnub_blocks.py +++ b/plugins/modules/cloud/pubnub/pubnub_blocks.py @@ -111,6 +111,7 @@ options: required: false default: [] type: list + elements: dict changes: description: - "List of fields which should be changed by block itself (doesn't @@ -552,7 +553,7 @@ def main(): state=dict(default='present', type='str', choices=['started', 'stopped', 'present', 'absent']), name=dict(required=True, type='str'), description=dict(type='str'), - event_handlers=dict(default=list(), type='list'), + event_handlers=dict(default=list(), type='list', elements='dict'), changes=dict(default=dict(), type='dict'), cache=dict(default=dict(), type='dict'), validate_certs=dict(default=True, type='bool')) diff --git a/plugins/modules/cloud/smartos/vmadm.py b/plugins/modules/cloud/smartos/vmadm.py index 553e6efcca..45dc86cc14 100644 --- a/plugins/modules/cloud/smartos/vmadm.py +++ b/plugins/modules/cloud/smartos/vmadm.py @@ -78,6 +78,7 @@ options: description: - A list of disks to add, valid properties are documented in vmadm(1M). type: list + elements: dict dns_domain: required: false description: @@ -93,6 +94,7 @@ options: description: - Mount additional filesystems into an OS VM. 
type: list + elements: dict firewall_enabled: required: false description: @@ -198,6 +200,7 @@ options: description: - A list of nics to add, valid properties are documented in vmadm(1M). type: list + elements: dict nowait: required: false description: @@ -230,6 +233,7 @@ options: description: - List of resolvers to be put into C(/etc/resolv.conf). type: list + elements: dict routes: required: false description: @@ -670,7 +674,6 @@ def main(): 'zfs_snapshot_limit' ], 'dict': ['customer_metadata', 'internal_metadata', 'routes'], - 'list': ['disks', 'nics', 'resolvers', 'filesystems'] } # Start with the options that are not as trivial as those above. @@ -697,6 +700,10 @@ def main(): # Regular strings, however these require additional options. spice_password=dict(type='str', no_log=True), vnc_password=dict(type='str', no_log=True), + disks=dict(type='list', elements='dict'), + nics=dict(type='list', elements='dict'), + resolvers=dict(type='list', elements='dict'), + filesystems=dict(type='list', elements='dict'), ) # Add our 'simple' options to options dict. diff --git a/plugins/modules/cloud/softlayer/sl_vm.py b/plugins/modules/cloud/softlayer/sl_vm.py index 22556d9156..72d520ddeb 100644 --- a/plugins/modules/cloud/softlayer/sl_vm.py +++ b/plugins/modules/cloud/softlayer/sl_vm.py @@ -115,6 +115,7 @@ options: - List of disk sizes to be assigned to new virtual instance. default: [ 25 ] type: list + elements: int os_code: description: - OS Code to be used for new virtual instance. @@ -140,6 +141,7 @@ options: description: - List of ssh keys by their Id to be assigned to a virtual instance. type: list + elements: str post_uri: description: - URL of a post provisioning script to be loaded and executed on virtual instance. 
@@ -396,13 +398,13 @@ def main(): cpus=dict(type='int', choices=CPU_SIZES), memory=dict(type='int', choices=MEMORY_SIZES), flavor=dict(type='str'), - disks=dict(type='list', default=[25]), + disks=dict(type='list', elements='int', default=[25]), os_code=dict(type='str'), image_id=dict(type='str'), nic_speed=dict(type='int', choices=NIC_SPEEDS), public_vlan=dict(type='str'), private_vlan=dict(type='str'), - ssh_keys=dict(type='list', default=[]), + ssh_keys=dict(type='list', elements='str', default=[]), post_uri=dict(type='str'), state=dict(type='str', default='present', choices=STATES), wait=dict(type='bool', default=True), diff --git a/plugins/modules/files/xml.py b/plugins/modules/files/xml.py index 2ee7a98d0f..df3562df8c 100644 --- a/plugins/modules/files/xml.py +++ b/plugins/modules/files/xml.py @@ -66,6 +66,7 @@ options: or a hash where the key is an element name and the value is the element value. - This parameter requires C(xpath) to be set. type: list + elements: raw set_children: description: - Set the child-element(s) of a selected element for a given C(xpath). @@ -73,6 +74,7 @@ options: - Child elements must be specified as in C(add_children). - This parameter requires C(xpath) to be set. type: list + elements: raw count: description: - Search for a given C(xpath) and provide the count of any matches. 
@@ -809,8 +811,8 @@ def main(): state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']), value=dict(type='raw'), attribute=dict(type='raw'), - add_children=dict(type='list'), - set_children=dict(type='list'), + add_children=dict(type='list', elements='raw'), + set_children=dict(type='list', elements='raw'), count=dict(type='bool', default=False), print_match=dict(type='bool', default=False), pretty_print=dict(type='bool', default=False), diff --git a/plugins/modules/identity/keycloak/keycloak_client.py b/plugins/modules/identity/keycloak/keycloak_client.py index b27155ba4a..e49edcf1d2 100644 --- a/plugins/modules/identity/keycloak/keycloak_client.py +++ b/plugins/modules/identity/keycloak/keycloak_client.py @@ -137,6 +137,7 @@ options: aliases: - defaultRoles type: list + elements: str redirect_uris: description: @@ -145,6 +146,7 @@ options: aliases: - redirectUris type: list + elements: str web_origins: description: @@ -153,6 +155,7 @@ options: aliases: - webOrigins type: list + elements: str not_before: description: @@ -708,9 +711,9 @@ def main(): client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']), secret=dict(type='str', no_log=True), registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True), - default_roles=dict(type='list', aliases=['defaultRoles']), - redirect_uris=dict(type='list', aliases=['redirectUris']), - web_origins=dict(type='list', aliases=['webOrigins']), + default_roles=dict(type='list', elements='str', aliases=['defaultRoles']), + redirect_uris=dict(type='list', elements='str', aliases=['redirectUris']), + web_origins=dict(type='list', elements='str', aliases=['webOrigins']), not_before=dict(type='int', aliases=['notBefore']), bearer_only=dict(type='bool', aliases=['bearerOnly']), consent_required=dict(type='bool', aliases=['consentRequired']), diff --git a/plugins/modules/identity/onepassword_info.py 
b/plugins/modules/identity/onepassword_info.py index 6a5c3d9290..a085331e7d 100644 --- a/plugins/modules/identity/onepassword_info.py +++ b/plugins/modules/identity/onepassword_info.py @@ -34,6 +34,7 @@ description: options: search_terms: type: list + elements: dict description: - A list of one or more search terms. - Each search term can either be a simple string or it can be a dictionary for more control. @@ -372,7 +373,7 @@ def main(): master_password=dict(required=True, type='str', no_log=True), secret_key=dict(type='str', no_log=True), ), default=None), - search_terms=dict(required=True, type='list') + search_terms=dict(required=True, type='list', elements='dict'), ), supports_check_mode=True ) diff --git a/plugins/modules/monitoring/librato_annotation.py b/plugins/modules/monitoring/librato_annotation.py index d0fd406d4f..6fcabcf34e 100644 --- a/plugins/modules/monitoring/librato_annotation.py +++ b/plugins/modules/monitoring/librato_annotation.py @@ -63,6 +63,7 @@ options: required: false links: type: list + elements: dict description: - See examples ''' @@ -155,7 +156,7 @@ def main(): description=dict(required=False), start_time=dict(required=False, default=None, type='int'), end_time=dict(required=False, default=None, type='int'), - links=dict(type='list') + links=dict(type='list', elements='dict') ) ) diff --git a/plugins/modules/monitoring/pagerduty.py b/plugins/modules/monitoring/pagerduty.py index 306b596b51..dba931ab96 100644 --- a/plugins/modules/monitoring/pagerduty.py +++ b/plugins/modules/monitoring/pagerduty.py @@ -47,6 +47,7 @@ options: - ID of user making the request. Only needed when creating a maintenance_window. service: type: list + elements: str description: - A comma separated list of PagerDuty service IDs. 
aliases: [ services ] @@ -233,7 +234,7 @@ def main(): name=dict(required=False), user=dict(required=False), token=dict(required=True, no_log=True), - service=dict(required=False, type='list', aliases=["services"]), + service=dict(required=False, type='list', elements='str', aliases=["services"]), window_id=dict(required=False), requester_id=dict(required=False), hours=dict(default='1', required=False), # @TODO change to int? diff --git a/plugins/modules/monitoring/statusio_maintenance.py b/plugins/modules/monitoring/statusio_maintenance.py index 0414f6e861..01411cf17f 100644 --- a/plugins/modules/monitoring/statusio_maintenance.py +++ b/plugins/modules/monitoring/statusio_maintenance.py @@ -59,11 +59,13 @@ options: default: "https://api.status.io" components: type: list + elements: str description: - The given name of your component (server name) aliases: ['component'] containers: type: list + elements: str description: - The given name of your container (data center) aliases: ['container'] @@ -339,9 +341,9 @@ def main(): state=dict(required=False, default='present', choices=['present', 'absent']), url=dict(default='https://api.status.io', required=False), - components=dict(type='list', required=False, default=None, + components=dict(type='list', elements='str', required=False, default=None, aliases=['component']), - containers=dict(type='list', required=False, default=None, + containers=dict(type='list', elements='str', required=False, default=None, aliases=['container']), all_infrastructure_affected=dict(type='bool', default=False, required=False), diff --git a/plugins/modules/net_tools/dnsimple.py b/plugins/modules/net_tools/dnsimple.py index 1c814a9b66..f802d35f51 100644 --- a/plugins/modules/net_tools/dnsimple.py +++ b/plugins/modules/net_tools/dnsimple.py @@ -40,6 +40,7 @@ options: description: - List of records to ensure they either exist or do not exist. type: list + elements: str type: description: - The type of DNS record to create. 
@@ -167,7 +168,7 @@ def main(): account_api_token=dict(type='str', no_log=True), domain=dict(type='str'), record=dict(type='str'), - record_ids=dict(type='list'), + record_ids=dict(type='list', elements='str'), type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']), ttl=dict(type='int', default=3600), diff --git a/plugins/modules/net_tools/nsupdate.py b/plugins/modules/net_tools/nsupdate.py index 9d4a5186b0..b110c6fe20 100644 --- a/plugins/modules/net_tools/nsupdate.py +++ b/plugins/modules/net_tools/nsupdate.py @@ -28,51 +28,64 @@ options: - Manage DNS record. choices: ['present', 'absent'] default: 'present' + type: str server: description: - Apply DNS modification on this server, specified by IPv4 or IPv6 address. required: true + type: str port: description: - Use this TCP port when connecting to C(server). default: 53 + type: int key_name: description: - Use TSIG key name to authenticate against DNS C(server) + type: str key_secret: description: - Use TSIG key secret, associated with C(key_name), to authenticate against C(server) + type: str key_algorithm: description: - Specify key algorithm used by C(key_secret). choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384', 'hmac-sha512'] default: 'hmac-md5' + type: str zone: description: - DNS record will be modified on this C(zone). - When omitted DNS will be queried to attempt finding the correct zone. - Starting with Ansible 2.7 this parameter is optional. + type: str record: description: - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot). required: true + type: str type: description: - Sets the record type. default: 'A' + type: str ttl: description: - Sets the record TTL. default: 3600 + type: int value: description: - Sets the record value. 
+ type: list + elements: str protocol: description: - Sets the transport protocol (TCP or UDP). TCP is the recommended and a more robust option. default: 'tcp' choices: ['tcp', 'udp'] + type: str ''' EXAMPLES = ''' @@ -432,7 +445,7 @@ def main(): record=dict(required=True, type='str'), type=dict(required=False, default='A', type='str'), ttl=dict(required=False, default=3600, type='int'), - value=dict(required=False, default=None, type='list'), + value=dict(required=False, default=None, type='list', elements='str'), protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str') ), supports_check_mode=True diff --git a/plugins/modules/net_tools/omapi_host.py b/plugins/modules/net_tools/omapi_host.py index 4e6738cdf4..41c68a471a 100644 --- a/plugins/modules/net_tools/omapi_host.py +++ b/plugins/modules/net_tools/omapi_host.py @@ -63,6 +63,7 @@ options: description: - Attach a list of OMAPI DHCP statements with host lease (without ending semicolon). type: list + elements: str default: [] ddns: description: @@ -282,7 +283,7 @@ def main(): hostname=dict(type='str', aliases=['name']), ip=dict(type='str'), ddns=dict(type='bool', default=False), - statements=dict(type='list', default=[]), + statements=dict(type='list', elements='str', default=[]), ), supports_check_mode=False, ) diff --git a/plugins/modules/notification/mail.py b/plugins/modules/notification/mail.py index 574f847856..3b5936d134 100644 --- a/plugins/modules/notification/mail.py +++ b/plugins/modules/notification/mail.py @@ -39,6 +39,7 @@ options: - The email-address(es) the mail is being sent to. - This is a list, which may contain address and phrase portions. type: list + elements: str default: root aliases: [ recipients ] cc: @@ -46,11 +47,13 @@ options: - The email-address(es) the mail is being copied to. - This is a list, which may contain address and phrase portions. type: list + elements: str bcc: description: - The email-address(es) the mail is being 'blind' copied to. 
- This is a list, which may contain address and phrase portions. type: list + elements: str subject: description: - The subject of the email being sent. @@ -85,12 +88,14 @@ options: - A list of pathnames of files to attach to the message. - Attached files will have their content-type set to C(application/octet-stream). type: list + elements: path default: [] headers: description: - A list of headers which should be added to the message. - Each individual header is specified as C(header=value) (see example below). type: list + elements: str default: [] charset: description: @@ -211,13 +216,13 @@ def main(): host=dict(type='str', default='localhost'), port=dict(type='int', default=25), sender=dict(type='str', default='root', aliases=['from']), - to=dict(type='list', default=['root'], aliases=['recipients']), - cc=dict(type='list', default=[]), - bcc=dict(type='list', default=[]), + to=dict(type='list', elements='str', default=['root'], aliases=['recipients']), + cc=dict(type='list', elements='str', default=[]), + bcc=dict(type='list', elements='str', default=[]), subject=dict(type='str', required=True, aliases=['msg']), body=dict(type='str'), - attach=dict(type='list', default=[]), - headers=dict(type='list', default=[]), + attach=dict(type='list', elements='path', default=[]), + headers=dict(type='list', elements='str', default=[]), charset=dict(type='str', default='utf-8'), subtype=dict(type='str', default='plain', choices=['html', 'plain']), secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']), diff --git a/plugins/modules/notification/nexmo.py b/plugins/modules/notification/nexmo.py index e6135cc2b6..d239bb4456 100644 --- a/plugins/modules/notification/nexmo.py +++ b/plugins/modules/notification/nexmo.py @@ -32,6 +32,7 @@ options: required: true dest: type: list + elements: int description: - Phone number(s) to send SMS message to required: true @@ -119,7 +120,7 @@ def main(): api_key=dict(required=True, no_log=True), 
api_secret=dict(required=True, no_log=True), src=dict(required=True, type='int'), - dest=dict(required=True, type='list'), + dest=dict(required=True, type='list', elements='int'), msg=dict(required=True), ), ) diff --git a/plugins/modules/notification/office_365_connector_card.py b/plugins/modules/notification/office_365_connector_card.py index 2574a75055..04d5e385d4 100644 --- a/plugins/modules/notification/office_365_connector_card.py +++ b/plugins/modules/notification/office_365_connector_card.py @@ -45,11 +45,13 @@ options: - and above any sections or actions present. actions: type: list + elements: dict description: - This array of objects will power the action links - found at the bottom of the card. sections: type: list + elements: dict description: - Contains a list of sections to display in the card. - For more information see https://dev.outlook.com/Connectors/reference. @@ -264,8 +266,8 @@ def main(): color=dict(type='str'), title=dict(type='str'), text=dict(type='str'), - actions=dict(type='list'), - sections=dict(type='list') + actions=dict(type='list', elements='dict'), + sections=dict(type='list', elements='dict') ), supports_check_mode=True ) diff --git a/plugins/modules/notification/rocketchat.py b/plugins/modules/notification/rocketchat.py index 13a93dd808..500560e417 100644 --- a/plugins/modules/notification/rocketchat.py +++ b/plugins/modules/notification/rocketchat.py @@ -89,6 +89,7 @@ options: - 'danger' attachments: type: list + elements: dict description: - Define a list of attachments. 
''' @@ -215,7 +216,7 @@ def main(): link_names=dict(type='int', default=1, choices=[0, 1]), validate_certs=dict(default=True, type='bool'), color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']), - attachments=dict(type='list', required=False) + attachments=dict(type='list', elements='dict', required=False) ) ) diff --git a/plugins/modules/notification/sendgrid.py b/plugins/modules/notification/sendgrid.py index 67132771c0..02ab072270 100644 --- a/plugins/modules/notification/sendgrid.py +++ b/plugins/modules/notification/sendgrid.py @@ -44,6 +44,7 @@ options: required: true to_addresses: type: list + elements: str description: - A list with one or more recipient email addresses. required: true @@ -58,14 +59,17 @@ options: - Sendgrid API key to use instead of username/password. cc: type: list + elements: str description: - A list of email addresses to cc. bcc: type: list + elements: str description: - A list of email addresses to bcc. attachments: type: list + elements: path description: - A list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs). 
from_name: @@ -209,16 +213,16 @@ def main(): username=dict(required=False), password=dict(required=False, no_log=True), api_key=dict(required=False, no_log=True), - bcc=dict(required=False, type='list'), - cc=dict(required=False, type='list'), + bcc=dict(required=False, type='list', elements='str'), + cc=dict(required=False, type='list', elements='str'), headers=dict(required=False, type='dict'), from_address=dict(required=True), from_name=dict(required=False), - to_addresses=dict(required=True, type='list'), + to_addresses=dict(required=True, type='list', elements='str'), subject=dict(required=True), body=dict(required=True), html_body=dict(required=False, default=False, type='bool'), - attachments=dict(required=False, type='list') + attachments=dict(required=False, type='list', elements='path') ), supports_check_mode=True, mutually_exclusive=[ diff --git a/plugins/modules/notification/slack.py b/plugins/modules/notification/slack.py index 946fc9aa8b..197e5f9498 100644 --- a/plugins/modules/notification/slack.py +++ b/plugins/modules/notification/slack.py @@ -116,6 +116,7 @@ options: default: 'normal' attachments: type: list + elements: dict description: - Define a list of attachments. This list mirrors the Slack JSON API. - For more information, see U(https://api.slack.com/docs/attachments). 
@@ -420,7 +421,7 @@ def main(): parse=dict(type='str', default=None, choices=['none', 'full']), validate_certs=dict(default=True, type='bool'), color=dict(type='str', default='normal'), - attachments=dict(type='list', required=False, default=None), + attachments=dict(type='list', elements='dict', required=False, default=None), blocks=dict(type='list', elements='dict'), message_id=dict(type='str', default=None), ), diff --git a/plugins/modules/notification/twilio.py b/plugins/modules/notification/twilio.py index 5ec995f4c9..88851a6ad3 100644 --- a/plugins/modules/notification/twilio.py +++ b/plugins/modules/notification/twilio.py @@ -37,6 +37,7 @@ options: required: true to_numbers: type: list + elements: str description: one or more phone numbers to send the text message to, format +15551112222 @@ -143,7 +144,7 @@ def main(): auth_token=dict(required=True, no_log=True), msg=dict(required=True), from_number=dict(required=True), - to_numbers=dict(required=True, aliases=['to_number'], type='list'), + to_numbers=dict(required=True, aliases=['to_number'], type='list', elements='str'), media_url=dict(default=None, required=False), ), supports_check_mode=True diff --git a/plugins/modules/packaging/os/redhat_subscription.py b/plugins/modules/packaging/os/redhat_subscription.py index a4599588d6..18e20df7db 100644 --- a/plugins/modules/packaging/os/redhat_subscription.py +++ b/plugins/modules/packaging/os/redhat_subscription.py @@ -105,6 +105,7 @@ options: entitlements from a pool (the pool must support this). Mutually exclusive with I(pool). 
default: [] type: list + elements: raw consumer_type: description: - The type of unit to register, defaults to system @@ -153,6 +154,7 @@ options: addons: description: Syspurpose attribute addons type: list + elements: str sync: description: - When this option is true, then syspurpose attributes are synchronized with @@ -787,7 +789,7 @@ def main(): 'org_id': {}, 'environment': {}, 'pool': {'default': '^$'}, - 'pool_ids': {'default': [], 'type': 'list'}, + 'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'}, 'consumer_type': {}, 'consumer_name': {}, 'consumer_id': {}, @@ -803,7 +805,7 @@ def main(): 'role': {}, 'usage': {}, 'service_level_agreement': {}, - 'addons': {'type': 'list'}, + 'addons': {'type': 'list', 'elements': 'str'}, 'sync': {'type': 'bool', 'default': False} } } @@ -814,7 +816,7 @@ def main(): mutually_exclusive=[['activationkey', 'username'], ['activationkey', 'consumer_id'], ['activationkey', 'environment'], - ['activationkey', 'autosubscribe'], + ['activationkey', 'auto_attach'], ['pool', 'pool_ids']], required_if=[['state', 'present', ['username', 'activationkey'], True]], ) diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py index 8470739fd8..8803990f22 100644 --- a/plugins/modules/source_control/gitlab/gitlab_runner.py +++ b/plugins/modules/source_control/gitlab/gitlab_runner.py @@ -99,6 +99,7 @@ options: required: False default: [] type: list + elements: str ''' EXAMPLES = ''' @@ -304,7 +305,7 @@ def main(): description=dict(type='str', required=True, aliases=["name"]), active=dict(type='bool', default=True), owned=dict(type='bool', default=False), - tag_list=dict(type='list', default=[]), + tag_list=dict(type='list', elements='str', default=[]), run_untagged=dict(type='bool', default=True), locked=dict(type='bool', default=False), access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]), diff --git 
a/plugins/modules/storage/netapp/na_ontap_gather_facts.py b/plugins/modules/storage/netapp/na_ontap_gather_facts.py index 0fc61afbbb..c7b541ff08 100644 --- a/plugins/modules/storage/netapp/na_ontap_gather_facts.py +++ b/plugins/modules/storage/netapp/na_ontap_gather_facts.py @@ -26,25 +26,28 @@ options: state: description: - Returns "info" - default: "info" - choices: ['info'] + default: info + choices: [info] + type: str gather_subset: description: - When supplied, this argument will restrict the facts collected to a given subset. Possible values for this argument include - "aggregate_info", "cluster_node_info", "igroup_info", "lun_info", "net_dns_info", - "net_ifgrp_info", - "net_interface_info", "net_port_info", "nvme_info", "nvme_interface_info", - "nvme_namespace_info", "nvme_subsystem_info", "ontap_version", - "qos_adaptive_policy_info", "qos_policy_info", "security_key_manager_key_info", - "security_login_account_info", "storage_failover_info", "volume_info", - "vserver_info", "vserver_login_banner_info", "vserver_motd_info", "vserver_nfs_info" + C(aggregate_info), C(cluster_node_info), C(igroup_info), C(lun_info), C(net_dns_info), + C(net_ifgrp_info), + C(net_interface_info), C(net_port_info), C(nvme_info), C(nvme_interface_info), + C(nvme_namespace_info), C(nvme_subsystem_info), C(ontap_version), + C(qos_adaptive_policy_info), C(qos_policy_info), C(security_key_manager_key_info), + C(security_login_account_info), C(storage_failover_info), C(volume_info), + C(vserver_info), C(vserver_login_banner_info), C(vserver_motd_info), C(vserver_nfs_info) Can specify a list of values to include a larger subset. Values can also be used with an initial C(M(!)) to specify that a specific subset should not be collected. - nvme is supported with ONTAP 9.4 onwards. - - use "help" to get a list of supported facts for your system. - default: "all" + - use C(help) to get a list of supported facts for your system. 
+ default: all + type: list + elements: str ''' EXAMPLES = ''' @@ -582,7 +585,7 @@ def main(): argument_spec = netapp_utils.na_ontap_host_argument_spec() argument_spec.update(dict( state=dict(default='info', choices=['info']), - gather_subset=dict(default=['all'], type='list'), + gather_subset=dict(default=['all'], type='list', elements='str'), )) module = AnsibleModule( diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 4a4280ee85..72c5682525 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -15,7 +15,6 @@ plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:invalid-ansiblemodule-schema plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/lxd/lxd_container.py validate-modules:invalid-ansiblemodule-schema -plugins/modules/cloud/lxd/lxd_container.py validate-modules:parameter-list-no-elements plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/oneandone/oneandone_firewall_policy.py validate-modules:parameter-list-no-elements plugins/modules/cloud/oneandone/oneandone_load_balancer.py validate-modules:parameter-list-no-elements @@ -29,7 +28,6 @@ plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax- plugins/modules/cloud/opennebula/one_host.py validate-modules:parameter-list-no-elements plugins/modules/cloud/opennebula/one_image_info.py validate-modules:parameter-list-no-elements plugins/modules/cloud/opennebula/one_vm.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/oracle/oci_vcn.py validate-modules:parameter-list-no-elements plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:parameter-list-no-elements plugins/modules/cloud/ovirt/ovirt_api_facts.py 
validate-modules:parameter-list-no-elements @@ -80,9 +78,6 @@ plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/packet/packet_device.py validate-modules:doc-missing-type -plugins/modules/cloud/packet/packet_device.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/packet/packet_device.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/packet/packet_sshkey.py validate-modules:doc-missing-type plugins/modules/cloud/packet/packet_sshkey.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/packet/packet_sshkey.py validate-modules:undocumented-parameter @@ -91,7 +86,6 @@ plugins/modules/cloud/profitbricks/profitbricks.py validate-modules:parameter-li plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:doc-missing-type plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:parameter-list-no-elements plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:undocumented-parameter -plugins/modules/cloud/pubnub/pubnub_blocks.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type plugins/modules/cloud/rackspace/rax.py validate-modules:parameter-list-no-elements @@ -119,10 +113,8 @@ plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/smartos_image_info.py 
validate-modules:doc-missing-type -plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-list-no-elements plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter -plugins/modules/cloud/softlayer/sl_vm.py validate-modules:parameter-list-no-elements plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter @@ -175,9 +167,6 @@ plugins/modules/database/vertica/vertica_schema.py validate-modules:undocumented plugins/modules/database/vertica/vertica_user.py validate-modules:doc-missing-type plugins/modules/database/vertica/vertica_user.py validate-modules:undocumented-parameter plugins/modules/files/iso_extract.py validate-modules:doc-default-does-not-match-spec -plugins/modules/files/xml.py validate-modules:parameter-list-no-elements -plugins/modules/identity/keycloak/keycloak_client.py validate-modules:parameter-list-no-elements -plugins/modules/identity/onepassword_info.py validate-modules:parameter-list-no-elements plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:doc-missing-type plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:parameter-type-not-in-doc plugins/modules/monitoring/bigpanda.py validate-modules:invalid-argument-name @@ -185,35 +174,21 @@ plugins/modules/monitoring/datadog/datadog_event.py validate-modules:parameter-l plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:invalid-argument-name plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter -plugins/modules/monitoring/librato_annotation.py 
validate-modules:parameter-list-no-elements plugins/modules/monitoring/logstash_plugin.py validate-modules:invalid-ansiblemodule-schema -plugins/modules/monitoring/pagerduty.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_check.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_client.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_handler.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/statusio_maintenance.py pylint:blacklisted-name -plugins/modules/monitoring/statusio_maintenance.py validate-modules:parameter-list-no-elements -plugins/modules/net_tools/dnsimple.py validate-modules:parameter-list-no-elements plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code -plugins/modules/net_tools/nsupdate.py validate-modules:parameter-list-no-elements -plugins/modules/net_tools/nsupdate.py validate-modules:parameter-type-not-in-doc -plugins/modules/net_tools/omapi_host.py validate-modules:parameter-list-no-elements plugins/modules/notification/cisco_webex.py validate-modules:invalid-argument-name plugins/modules/notification/grove.py validate-modules:invalid-argument-name plugins/modules/notification/grove.py validate-modules:nonexistent-parameter-documented -plugins/modules/notification/mail.py validate-modules:parameter-list-no-elements -plugins/modules/notification/nexmo.py 
validate-modules:parameter-list-no-elements -plugins/modules/notification/office_365_connector_card.py validate-modules:parameter-list-no-elements plugins/modules/notification/pushbullet.py validate-modules:parameter-type-not-in-doc plugins/modules/notification/pushbullet.py validate-modules:undocumented-parameter -plugins/modules/notification/rocketchat.py validate-modules:parameter-list-no-elements -plugins/modules/notification/sendgrid.py validate-modules:parameter-list-no-elements -plugins/modules/notification/slack.py validate-modules:parameter-list-no-elements -plugins/modules/notification/twilio.py validate-modules:parameter-list-no-elements plugins/modules/packaging/language/bundler.py validate-modules:parameter-list-no-elements plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid plugins/modules/packaging/language/maven_artifact.py validate-modules:parameter-type-not-in-doc @@ -224,8 +199,6 @@ plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid -plugins/modules/packaging/os/redhat_subscription.py validate-modules:mutually_exclusive-unknown -plugins/modules/packaging/os/redhat_subscription.py validate-modules:parameter-list-no-elements plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid @@ -299,7 +272,6 @@ plugins/modules/source_control/github/github_release.py validate-modules:doc-mis plugins/modules/source_control/github/github_release.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_webhook.py validate-modules:parameter-type-not-in-doc 
plugins/modules/source_control/github/github_webhook_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/gitlab/gitlab_runner.py validate-modules:parameter-list-no-elements plugins/modules/source_control/hg.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:doc-missing-type plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:parameter-type-not-in-doc @@ -313,10 +285,7 @@ plugins/modules/storage/ibm/ibm_sa_host_ports.py validate-modules:doc-missing-ty plugins/modules/storage/ibm/ibm_sa_pool.py validate-modules:doc-missing-type plugins/modules/storage/ibm/ibm_sa_vol.py validate-modules:doc-missing-type plugins/modules/storage/ibm/ibm_sa_vol_map.py validate-modules:doc-missing-type -plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:doc-missing-type -plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-list-no-elements plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-state-invalid-choice -plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/purestorage/purefa_facts.py validate-modules:doc-required-mismatch plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-list-no-elements plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 9ac48e2871..37b0d6f239 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -14,7 +14,6 @@ plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:invalid-ansiblemodule-schema plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/lxd/lxd_container.py validate-modules:invalid-ansiblemodule-schema 
-plugins/modules/cloud/lxd/lxd_container.py validate-modules:parameter-list-no-elements plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/oneandone/oneandone_firewall_policy.py validate-modules:parameter-list-no-elements plugins/modules/cloud/oneandone/oneandone_load_balancer.py validate-modules:parameter-list-no-elements @@ -28,7 +27,6 @@ plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax- plugins/modules/cloud/opennebula/one_host.py validate-modules:parameter-list-no-elements plugins/modules/cloud/opennebula/one_image_info.py validate-modules:parameter-list-no-elements plugins/modules/cloud/opennebula/one_vm.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/oracle/oci_vcn.py validate-modules:parameter-list-no-elements plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:parameter-list-no-elements plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:parameter-list-no-elements @@ -79,9 +77,6 @@ plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/packet/packet_device.py validate-modules:doc-missing-type -plugins/modules/cloud/packet/packet_device.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/packet/packet_device.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/packet/packet_sshkey.py validate-modules:doc-missing-type plugins/modules/cloud/packet/packet_sshkey.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/packet/packet_sshkey.py 
validate-modules:undocumented-parameter @@ -90,7 +85,6 @@ plugins/modules/cloud/profitbricks/profitbricks.py validate-modules:parameter-li plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:doc-missing-type plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:parameter-list-no-elements plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:undocumented-parameter -plugins/modules/cloud/pubnub/pubnub_blocks.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type plugins/modules/cloud/rackspace/rax.py validate-modules:parameter-list-no-elements @@ -118,10 +112,8 @@ plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/smartos_image_info.py validate-modules:doc-missing-type -plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-list-no-elements plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter -plugins/modules/cloud/softlayer/sl_vm.py validate-modules:parameter-list-no-elements plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter @@ -174,9 +166,6 @@ plugins/modules/database/vertica/vertica_schema.py validate-modules:undocumented plugins/modules/database/vertica/vertica_user.py validate-modules:doc-missing-type plugins/modules/database/vertica/vertica_user.py 
validate-modules:undocumented-parameter plugins/modules/files/iso_extract.py validate-modules:doc-default-does-not-match-spec -plugins/modules/files/xml.py validate-modules:parameter-list-no-elements -plugins/modules/identity/keycloak/keycloak_client.py validate-modules:parameter-list-no-elements -plugins/modules/identity/onepassword_info.py validate-modules:parameter-list-no-elements plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:doc-missing-type plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:parameter-type-not-in-doc plugins/modules/monitoring/bigpanda.py validate-modules:invalid-argument-name @@ -184,35 +173,21 @@ plugins/modules/monitoring/datadog/datadog_event.py validate-modules:parameter-l plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:invalid-argument-name plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter -plugins/modules/monitoring/librato_annotation.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/logstash_plugin.py validate-modules:invalid-ansiblemodule-schema -plugins/modules/monitoring/pagerduty.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_check.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_client.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_handler.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/statusio_maintenance.py pylint:blacklisted-name -plugins/modules/monitoring/statusio_maintenance.py validate-modules:parameter-list-no-elements -plugins/modules/net_tools/dnsimple.py validate-modules:parameter-list-no-elements plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented 
plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code -plugins/modules/net_tools/nsupdate.py validate-modules:parameter-list-no-elements -plugins/modules/net_tools/nsupdate.py validate-modules:parameter-type-not-in-doc -plugins/modules/net_tools/omapi_host.py validate-modules:parameter-list-no-elements plugins/modules/notification/cisco_webex.py validate-modules:invalid-argument-name plugins/modules/notification/grove.py validate-modules:invalid-argument-name plugins/modules/notification/grove.py validate-modules:nonexistent-parameter-documented -plugins/modules/notification/mail.py validate-modules:parameter-list-no-elements -plugins/modules/notification/nexmo.py validate-modules:parameter-list-no-elements -plugins/modules/notification/office_365_connector_card.py validate-modules:parameter-list-no-elements plugins/modules/notification/pushbullet.py validate-modules:parameter-type-not-in-doc plugins/modules/notification/pushbullet.py validate-modules:undocumented-parameter -plugins/modules/notification/rocketchat.py validate-modules:parameter-list-no-elements -plugins/modules/notification/sendgrid.py validate-modules:parameter-list-no-elements -plugins/modules/notification/slack.py validate-modules:parameter-list-no-elements -plugins/modules/notification/twilio.py validate-modules:parameter-list-no-elements plugins/modules/packaging/language/bundler.py validate-modules:parameter-list-no-elements plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid plugins/modules/packaging/language/maven_artifact.py 
validate-modules:parameter-type-not-in-doc @@ -223,8 +198,6 @@ plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid -plugins/modules/packaging/os/redhat_subscription.py validate-modules:mutually_exclusive-unknown -plugins/modules/packaging/os/redhat_subscription.py validate-modules:parameter-list-no-elements plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid @@ -298,7 +271,6 @@ plugins/modules/source_control/github/github_release.py validate-modules:doc-mis plugins/modules/source_control/github/github_release.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_webhook.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_webhook_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/gitlab/gitlab_runner.py validate-modules:parameter-list-no-elements plugins/modules/source_control/hg.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:doc-missing-type plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:parameter-type-not-in-doc @@ -312,10 +284,7 @@ plugins/modules/storage/ibm/ibm_sa_host_ports.py validate-modules:doc-missing-ty plugins/modules/storage/ibm/ibm_sa_pool.py validate-modules:doc-missing-type plugins/modules/storage/ibm/ibm_sa_vol.py validate-modules:doc-missing-type plugins/modules/storage/ibm/ibm_sa_vol_map.py validate-modules:doc-missing-type -plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:doc-missing-type 
-plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-list-no-elements plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-state-invalid-choice -plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/purestorage/purefa_facts.py validate-modules:doc-required-mismatch plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-list-no-elements plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 181a1cc3a2..72a51e8e0a 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -96,8 +96,6 @@ plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-no plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:deprecation-mismatch plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/packet/packet_device.py validate-modules:doc-missing-type -plugins/modules/cloud/packet/packet_device.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/packet/packet_sshkey.py validate-modules:doc-missing-type plugins/modules/cloud/packet/packet_sshkey.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/packet/packet_sshkey.py validate-modules:undocumented-parameter @@ -189,7 +187,6 @@ plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parame plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code 
-plugins/modules/net_tools/nsupdate.py validate-modules:parameter-type-not-in-doc plugins/modules/notification/pushbullet.py validate-modules:parameter-type-not-in-doc plugins/modules/notification/pushbullet.py validate-modules:undocumented-parameter plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid @@ -280,9 +277,7 @@ plugins/modules/storage/ibm/ibm_sa_pool.py validate-modules:doc-missing-type plugins/modules/storage/ibm/ibm_sa_vol.py validate-modules:doc-missing-type plugins/modules/storage/ibm/ibm_sa_vol_map.py validate-modules:doc-missing-type plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:deprecation-mismatch -plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:doc-missing-type plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:invalid-documentation -plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/purestorage/purefa_facts.py validate-modules:deprecation-mismatch plugins/modules/storage/purestorage/purefa_facts.py validate-modules:invalid-documentation plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error From 03fd6bd008abee3f8335acc7d1162bca033811b7 Mon Sep 17 00:00:00 2001 From: Craig Roberts Date: Tue, 16 Feb 2021 14:15:19 +0800 Subject: [PATCH 0050/3093] dnsimple: Add support for CAA records (#1814) --- .../fragments/1814-dnsimple-add-support-for-caa-records.yml | 2 ++ plugins/modules/net_tools/dnsimple.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml diff --git a/changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml b/changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml new file mode 100644 index 0000000000..bc4915b7b9 --- /dev/null +++ b/changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml @@ -0,0 +1,2 @@ 
+minor_changes: + - dnsimple - add CAA records to the whitelist of valid record types (https://github.com/ansible-collections/community.general/pull/1814). diff --git a/plugins/modules/net_tools/dnsimple.py b/plugins/modules/net_tools/dnsimple.py index f802d35f51..c4314b6539 100644 --- a/plugins/modules/net_tools/dnsimple.py +++ b/plugins/modules/net_tools/dnsimple.py @@ -44,7 +44,7 @@ options: type: description: - The type of DNS record to create. - choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ] + choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA' ] type: str ttl: description: @@ -170,7 +170,7 @@ def main(): record=dict(type='str'), record_ids=dict(type='list', elements='str'), type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', - 'POOL']), + 'POOL', 'CAA']), ttl=dict(type='int', default=3600), value=dict(type='str'), priority=dict(type='int'), From 6dd4cd0eb782be4caf9d708143121a4272b9600e Mon Sep 17 00:00:00 2001 From: Frank Dornheim <524257+conloos@users.noreply.github.com> Date: Tue, 16 Feb 2021 07:15:50 +0100 Subject: [PATCH 0051/3093] Previously LXD profiles were overwritten, now these are merged. 
(#1813) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * added ``merge_profile`` parameter to merge configurations from the play to an existing profile * add fragment * cosmetic changes Co-authored-by: Frank Dornheim <“dornheim@posteo.de@users.noreply.github.com”> --- .../1813-lxd_profile-merge-profiles.yml | 2 + plugins/modules/cloud/lxd/lxd_profile.py | 124 +++++++++++++++++- 2 files changed, 121 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/1813-lxd_profile-merge-profiles.yml diff --git a/changelogs/fragments/1813-lxd_profile-merge-profiles.yml b/changelogs/fragments/1813-lxd_profile-merge-profiles.yml new file mode 100644 index 0000000000..d374347a5e --- /dev/null +++ b/changelogs/fragments/1813-lxd_profile-merge-profiles.yml @@ -0,0 +1,2 @@ +minor_changes: +- lxd_profile - added ``merge_profile`` parameter to merge configurations from the play to an existing profile (https://github.com/ansible-collections/community.general/pull/1813). diff --git a/plugins/modules/cloud/lxd/lxd_profile.py b/plugins/modules/cloud/lxd/lxd_profile.py index 9a119d26a2..3094898f2c 100644 --- a/plugins/modules/cloud/lxd/lxd_profile.py +++ b/plugins/modules/cloud/lxd/lxd_profile.py @@ -2,12 +2,12 @@ # -*- coding: utf-8 -*- # Copyright: (c) 2016, Hiroaki Nakamura +# Copyright: (c) 2020, Frank Dornheim # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type - DOCUMENTATION = ''' --- module: lxd_profile @@ -52,6 +52,14 @@ options: See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11) required: false type: str + merge_profile: + description: + - Merge the configuration of the present profile with the new desired configuration, + instead of replacing it. 
+ required: false + default: false + type: bool + version_added: 2.1.0 state: choices: - present @@ -142,6 +150,23 @@ EXAMPLES = ''' parent: br0 type: nic +# An example for modify/merge a profile +- hosts: localhost + connection: local + tasks: + - name: Merge a profile + community.general.lxd_profile: + merge_profile: true + name: macvlan + state: present + config: {} + description: my macvlan profile + devices: + eth0: + nictype: macvlan + parent: br0 + type: nic + # An example for deleting a profile - hosts: localhost connection: local @@ -181,7 +206,6 @@ actions: ''' import os - from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException @@ -266,7 +290,7 @@ class LXDProfileManagement(object): self._create_profile() else: self.module.fail_json( - msg='new_name must not be set when the profile does not exist and the specified state is present', + msg='new_name must not be set when the profile does not exist and the state is present', changed=False) else: if self.new_name is not None and self.new_name != self.name: @@ -307,10 +331,96 @@ class LXDProfileManagement(object): self._needs_to_change_profile_config('devices') ) - def _apply_profile_configs(self): - config = self.old_profile_json.copy() + def _merge_dicts(self, source, destination): + """Merge Dictionarys + + Get a list of filehandle numbers from logger to be handed to + DaemonContext.files_preserve + + Args: + dict(source): source dict + dict(destination): destination dict + Kwargs: + None + Raises: + None + Returns: + dict(destination): merged dict""" + for key, value in source.items(): + if isinstance(value, dict): + # get node or create one + node = destination.setdefault(key, {}) + self._merge_dicts(value, node) + else: + destination[key] = value + return destination + + def _merge_config(self, config): + """ merge profile + + Merge Configuration of the present profile and the new desired configitems + + 
Args: + dict(config): Dict with the old config in 'metadata' and new config in 'config' + Kwargs: + None + Raises: + None + Returns: + dict(config): new config""" + # merge or copy the sections from the existing profile to 'config' + for item in ['config', 'description', 'devices', 'name', 'used_by']: + if item in config: + config[item] = self._merge_dicts(config['metadata'][item], config[item]) + else: + config[item] = config['metadata'][item] + # merge or copy the sections from the ansible-task to 'config' + return self._merge_dicts(self.config, config) + + def _generate_new_config(self, config): + """ rebuild profile + + Rebuild the Profile by the configuration provided in the play. + Existing configurations are discarded. + + This ist the default behavior. + + Args: + dict(config): Dict with the old config in 'metadata' and new config in 'config' + Kwargs: + None + Raises: + None + Returns: + dict(config): new config""" for k, v in self.config.items(): config[k] = v + return config + + def _apply_profile_configs(self): + """ Selection of the procedure: rebuild or merge + + The standard behavior is that all information not contained + in the play is discarded. + + If "merge_profile" is provides in the play and "True", then existing + configurations from the profile and new ones defined are merged. 
+ + Args: + None + Kwargs: + None + Raises: + None + Returns: + None""" + config = self.old_profile_json.copy() + if self.module.params['merge_profile']: + config = self._merge_config(config) + else: + config = self._generate_new_config(config) + + # upload config to lxd self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config) self.actions.append('apply_profile_configs') @@ -371,6 +481,10 @@ def main(): devices=dict( type='dict', ), + merge_profile=dict( + type='bool', + default=False + ), state=dict( choices=PROFILES_STATES, default='present' From 03b7b39424d639821a7994e66459ee25e94365e8 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 16 Feb 2021 20:03:51 +1300 Subject: [PATCH 0052/3093] Tidy up all pylint:blacklisted-name ignore lines (#1819) * fixed validation-modules for plugins/callback/hipchat.py * fixed validation-modules for plugins/connection/lxc.py * fixed validation-modules for plugins/modules/cloud/lxc/lxc_container.py * fixed validation-modules for plugins/modules/monitoring/statusio_maintenance.py * fixed validation-modules for plugins/modules/system/alternatives.py * fixed validation-modules for plugins/modules/system/beadm.py * fixed validation-modules for plugins/modules/system/cronvar.py * fixed validation-modules for plugins/modules/system/dconf.py * fixed validation-modules for plugins/modules/system/interfaces_file.py * fixed validation-modules for plugins/modules/system/java_cert.py * fixed validation-modules for plugins/modules/system/lvg.py * fixed validation-modules for plugins/modules/system/lvol.py * fixed validation-modules for plugins/modules/system/parted.py * fixed validation-modules for plugins/modules/system/timezone.py * fixed validation-modules for plugins/modules/web_infrastructure/rundeck_acl_policy.py * Tidy up all pylint:blacklisted-name sanity checks ignore lines * Missed one in statusio_maintenace.py * fixed validation-modules for 
plugins/modules/system/filesystem.py * Missed one in gconftool2.py * Missed one in alternatives.py * Using dummies now * fixed indentation * Made all the changes about replacing _ with dummy * Rollback bug fixed * Rollback bug fixed, part II * added changelog fragment * Improved changelog frag message per PR --- .../1819-tidyup-pylint-blacklistnames.yml | 17 ++++++++++++++++ plugins/callback/hipchat.py | 3 +-- plugins/connection/lxc.py | 2 +- plugins/modules/cloud/lxc/lxc_container.py | 6 +++--- .../monitoring/statusio_maintenance.py | 4 ++-- plugins/modules/system/alternatives.py | 4 ++-- plugins/modules/system/beadm.py | 6 +++--- plugins/modules/system/cronvar.py | 8 ++++---- plugins/modules/system/dconf.py | 4 ++-- plugins/modules/system/filesystem.py | 20 +++++++++---------- plugins/modules/system/gconftool2.py | 2 +- plugins/modules/system/interfaces_file.py | 2 +- plugins/modules/system/java_cert.py | 4 ++-- plugins/modules/system/lvg.py | 14 ++++++------- plugins/modules/system/lvol.py | 8 ++++---- plugins/modules/system/parted.py | 2 +- plugins/modules/system/timezone.py | 4 ++-- .../web_infrastructure/rundeck_acl_policy.py | 12 +++++------ tests/sanity/ignore-2.10.txt | 17 ---------------- tests/sanity/ignore-2.11.txt | 17 ---------------- tests/sanity/ignore-2.9.txt | 17 ---------------- 21 files changed, 69 insertions(+), 104 deletions(-) create mode 100644 changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml diff --git a/changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml b/changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml new file mode 100644 index 0000000000..fdbc850528 --- /dev/null +++ b/changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml @@ -0,0 +1,17 @@ +bugfixes: + - "alternatives - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." 
+ - "beadm - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "cronvar - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "dconf - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "filesystem - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "hipchat - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "interfaces_file - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "java_cert - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "lvg - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "lvol - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "lxc - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "lxc_container - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "parted - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "rundeck_acl_policy - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." 
+ - "statusio_maintenance - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." + - "timezone - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py index 3046a9c1ca..e097ac8eb6 100644 --- a/plugins/callback/hipchat.py +++ b/plugins/callback/hipchat.py @@ -173,8 +173,7 @@ class CallbackModule(CallbackBase): # Displays info about playbook being started by a person on an # inventory, as well as Tags, Skip Tags and Limits if not self.printed_playbook: - self.playbook_name, _ = os.path.splitext( - os.path.basename(self.play.playbook.filename)) + self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename)) host_list = self.play.playbook.inventory.host_list inventory = os.path.basename(os.path.realpath(host_list)) self.send_msg("%s: Playbook initiated by %s against %s" % diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index 4b893c020b..8de1acc35d 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -86,7 +86,7 @@ class Connection(ConnectionBase): write_fds = [] while len(read_fds) > 0 or len(write_fds) > 0: try: - ready_reads, ready_writes, _ = select.select(read_fds, write_fds, []) + ready_reads, ready_writes, dummy = select.select(read_fds, write_fds, []) except select.error as e: if e.args[0] == errno.EINTR: continue diff --git a/plugins/modules/cloud/lxc/lxc_container.py b/plugins/modules/cloud/lxc/lxc_container.py index c1a3d1c424..2b5efe9cd7 100644 --- a/plugins/modules/cloud/lxc/lxc_container.py +++ b/plugins/modules/cloud/lxc/lxc_container.py @@ -730,7 +730,7 @@ class LxcContainerManagement(object): for option_line in container_config: # Look for key in config if keyre.match(option_line): - _, _value = option_line.split('=', 1) + dummy, _value = option_line.split('=', 
1) config_value = ' '.join(_value.split()) line_index = container_config.index(option_line) # If the sanitized values don't match replace them @@ -953,7 +953,7 @@ class LxcContainerManagement(object): """ self.container = self.get_container_bind() - for _ in xrange(timeout): + for dummy in xrange(timeout): if self._get_state() != 'running': self.container.start() self.state_change = True @@ -1006,7 +1006,7 @@ class LxcContainerManagement(object): :type timeout: ``int`` """ - for _ in xrange(timeout): + for dummy in xrange(timeout): if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): break diff --git a/plugins/modules/monitoring/statusio_maintenance.py b/plugins/modules/monitoring/statusio_maintenance.py index 01411cf17f..3a6124f8b0 100644 --- a/plugins/modules/monitoring/statusio_maintenance.py +++ b/plugins/modules/monitoring/statusio_maintenance.py @@ -425,7 +425,7 @@ def main(): if module.check_mode: module.exit_json(changed=True) else: - (rc, _, error) = create_maintenance( + (rc, dummy, error) = create_maintenance( auth_headers, url, statuspage, host_ids, all_infrastructure_affected, automation, title, desc, returned_date, maintenance_notify_now, @@ -451,7 +451,7 @@ def main(): if module.check_mode: module.exit_json(changed=True) else: - (rc, _, error) = delete_maintenance( + (rc, dummy, error) = delete_maintenance( auth_headers, url, statuspage, maintenance_id) if rc == 0: module.exit_json( diff --git a/plugins/modules/system/alternatives.py b/plugins/modules/system/alternatives.py index 56db6dc69c..5831382680 100644 --- a/plugins/modules/system/alternatives.py +++ b/plugins/modules/system/alternatives.py @@ -94,7 +94,7 @@ def main(): all_alternatives = [] # Run `update-alternatives --display ` to find existing alternatives - (rc, display_output, _) = module.run_command( + (rc, display_output, dummy) = module.run_command( ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name] ) @@ -117,7 +117,7 @@ def main(): # # 
This is only compatible on Debian-based systems, as the other # alternatives don't have --query available - rc, query_output, _ = module.run_command( + rc, query_output, dummy = module.run_command( ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name] ) if rc == 0: diff --git a/plugins/modules/system/beadm.py b/plugins/modules/system/beadm.py index ab53d0661a..d34c5e7d96 100644 --- a/plugins/modules/system/beadm.py +++ b/plugins/modules/system/beadm.py @@ -191,7 +191,7 @@ class BE(object): return None def exists(self): - (rc, out, _) = self._beadm_list() + (rc, out, dummy) = self._beadm_list() if rc == 0: if self._find_be_by_name(out): @@ -202,7 +202,7 @@ class BE(object): return False def is_activated(self): - (rc, out, _) = self._beadm_list() + (rc, out, dummy) = self._beadm_list() if rc == 0: line = self._find_be_by_name(out) @@ -257,7 +257,7 @@ class BE(object): return self.module.run_command(cmd) def is_mounted(self): - (rc, out, _) = self._beadm_list() + (rc, out, dummy) = self._beadm_list() if rc == 0: line = self._find_be_by_name(out) diff --git a/plugins/modules/system/cronvar.py b/plugins/modules/system/cronvar.py index a76f6a7891..9871668ac0 100644 --- a/plugins/modules/system/cronvar.py +++ b/plugins/modules/system/cronvar.py @@ -228,7 +228,7 @@ class CronVar(object): var_names = [] for l in self.lines: try: - (var_name, _) = self.parse_for_var(l) + var_name, dummy = self.parse_for_var(l) var_names.append(var_name) except CronVarError: pass @@ -242,7 +242,7 @@ class CronVar(object): newlines = [] for l in self.lines: try: - (varname, _) = self.parse_for_var(l) # Throws if not a var line + varname, dummy = self.parse_for_var(l) # Throws if not a var line if varname == insertbefore: newlines.append("%s=%s" % (name, value)) newlines.append(l) @@ -263,7 +263,7 @@ class CronVar(object): newlines = [] for l in self.lines: try: - (varname, _) = self.parse_for_var(l) # Throws if not a var line + varname, dummy = self.parse_for_var(l) # Throws if not a var 
line if varname != name: raise CronVarError # Append. if not remove: @@ -377,7 +377,7 @@ def main(): # if requested make a backup before making a change if backup: - (_, backup_file) = tempfile.mkstemp(prefix='cronvar') + dummy, backup_file = tempfile.mkstemp(prefix='cronvar') cronvar.write(backup_file) if cronvar.cron_file and not name and not ensure_present: diff --git a/plugins/modules/system/dconf.py b/plugins/modules/system/dconf.py index 6284faaa2d..50f4369f4f 100644 --- a/plugins/modules/system/dconf.py +++ b/plugins/modules/system/dconf.py @@ -181,14 +181,14 @@ class DBusWrapper(object): for pid in psutil.pids(): process = psutil.Process(pid) - process_real_uid, _, _ = process.uids() + process_real_uid, dummy, dummy = process.uids() try: if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ(): dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS'] self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate) dbus_send_cmd = self.module.get_bin_path('dbus-send', required=True) command = [dbus_send_cmd, '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test'] - rc, _, _ = self.module.run_command(command) + rc, dummy, dummy = self.module.run_command(command) if rc == 0: self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate) diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/system/filesystem.py index 48c68a35ad..6944178da1 100644 --- a/plugins/modules/system/filesystem.py +++ b/plugins/modules/system/filesystem.py @@ -114,7 +114,7 @@ class Device(object): statinfo = os.stat(self.path) if stat.S_ISBLK(statinfo.st_mode): blockdev_cmd = self.module.get_bin_path("blockdev", required=True) - _, devsize_in_bytes, _ = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) + dummy, devsize_in_bytes, dummy = 
self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) return int(devsize_in_bytes) elif os.path.isfile(self.path): return os.path.getsize(self.path) @@ -126,8 +126,8 @@ class Device(object): cmd_findmnt = self.module.get_bin_path("findmnt", required=True) # find mountpoint - rc, mountpoint, _ = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output", - "TARGET", "--source", self.path], check_rc=False) + rc, mountpoint, dummy = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output", + "TARGET", "--source", self.path], check_rc=False) if rc != 0: mountpoint = None else: @@ -203,7 +203,7 @@ class Filesystem(object): elif self.module.check_mode: self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev)) else: - _, out, _ = self.module.run_command(self.grow_cmd(dev), check_rc=True) + dummy, out, dummy = self.module.run_command(self.grow_cmd(dev), check_rc=True) return out @@ -214,7 +214,7 @@ class Ext(Filesystem): def get_fs_size(self, dev): cmd = self.module.get_bin_path('tune2fs', required=True) # Get Block count and Block size - _, size, _ = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + dummy, size, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) for line in size.splitlines(): if 'Block count:' in line: block_count = int(line.split(':')[1].strip()) @@ -290,7 +290,7 @@ class Btrfs(Filesystem): def __init__(self, module): super(Btrfs, self).__init__(module) - _, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True) + dummy, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True) match = re.search(r" v([0-9.]+)", stdout) if not match: # v0.20-rc1 use stderr @@ -320,7 +320,7 @@ class F2fs(Filesystem): def MKFS_FORCE_FLAGS(self): mkfs = self.module.get_bin_path(self.MKFS, required=True) cmd = "%s %s" % (mkfs, 
os.devnull) - _, out, _ = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV) + dummy, out, dummy = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV) # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)" # mkfs.f2fs displays version since v1.2.0 match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out) @@ -335,7 +335,7 @@ class F2fs(Filesystem): def get_fs_size(self, dev): cmd = self.module.get_bin_path('dump.f2fs', required=True) # Get sector count and sector size - _, dump, _ = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) + dummy, dump, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) sector_size = None sector_count = None for line in dump.splitlines(): @@ -364,7 +364,7 @@ class VFAT(Filesystem): def get_fs_size(self, dev): cmd = self.module.get_bin_path(self.GROW, required=True) - _, output, _ = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + dummy, output, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) for line in output.splitlines()[1:]: param, value = line.split(':', 1) if param.strip() == 'Size': @@ -383,7 +383,7 @@ class LVM(Filesystem): def get_fs_size(self, dev): cmd = self.module.get_bin_path('pvs', required=True) - _, size, _ = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True) + dummy, size, dummy = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True) block_count = int(size) return block_count diff --git a/plugins/modules/system/gconftool2.py b/plugins/modules/system/gconftool2.py index b1df1da85a..a4acad5580 100644 --- a/plugins/modules/system/gconftool2.py +++ b/plugins/modules/system/gconftool2.py @@ -200,7 +200,7 @@ def main(): gconf_pref = 
GConf2Preference(module, key, value_type, value, direct, config_source) # Now we get the current value, if not found don't fail - _, current_value = gconf_pref.call("get", fail_onerr=False) + dummy, current_value = gconf_pref.call("get", fail_onerr=False) # Check if the current value equals the value we want to set. If not, make # a change diff --git a/plugins/modules/system/interfaces_file.py b/plugins/modules/system/interfaces_file.py index d1e3757304..618a472d91 100644 --- a/plugins/modules/system/interfaces_file.py +++ b/plugins/modules/system/interfaces_file.py @@ -385,7 +385,7 @@ def main(): changed, lines = setInterfaceOption(module, lines, iface, option, value, state, address_family) if changed: - _, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d]) + dummy, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d]) if changed and not module.check_mode: if backup: diff --git a/plugins/modules/system/java_cert.py b/plugins/modules/system/java_cert.py index 7333397bfd..6594ed235b 100644 --- a/plugins/modules/system/java_cert.py +++ b/plugins/modules/system/java_cert.py @@ -185,7 +185,7 @@ def check_cert_present(module, executable, keystore_path, keystore_pass, alias, test_cmd = ("%s -noprompt -list -keystore '%s' -storepass '%s' " "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type)) - (check_rc, _, _) = module.run_command(test_cmd) + check_rc, dummy, dummy = module.run_command(test_cmd) if check_rc == 0: return True return False @@ -221,7 +221,7 @@ def import_cert_url(module, executable, url, port, keystore_path, keystore_pass, import_cmd = import_cmd + " -trustcacerts" # Fetch SSL certificate from remote host. 
- (_, fetch_out, _) = module.run_command(fetch_cmd, check_rc=True) + dummy, fetch_out, dummy = module.run_command(fetch_cmd, check_rc=True) # Use remote certificate from remote host and import it to a java keystore (import_rc, import_out, import_err) = module.run_command(import_cmd, diff --git a/plugins/modules/system/lvg.py b/plugins/modules/system/lvg.py index 25f261aef8..47d3d6c230 100644 --- a/plugins/modules/system/lvg.py +++ b/plugins/modules/system/lvg.py @@ -232,13 +232,13 @@ def main(): # create PV pvcreate_cmd = module.get_bin_path('pvcreate', True) for current_dev in dev_list: - rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) if rc == 0: changed = True else: module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) vgcreate_cmd = module.get_bin_path('vgcreate') - rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list) + rc, dummy, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list) if rc == 0: changed = True else: @@ -251,7 +251,7 @@ def main(): if this_vg['lv_count'] == 0 or force: # remove VG vgremove_cmd = module.get_bin_path('vgremove', True) - rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) + rc, dummy, err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) if rc == 0: module.exit_json(changed=True) else: @@ -283,7 +283,7 @@ def main(): if module.check_mode: changed = True else: - rc, _, err = module.run_command([pvresize_cmd, device]) + rc, dummy, err = module.run_command([pvresize_cmd, device]) if rc != 0: module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err) else: @@ -298,14 +298,14 @@ def main(): # create PV pvcreate_cmd = module.get_bin_path('pvcreate', True) for current_dev in devs_to_add: - rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', 
str(current_dev)]) + rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) if rc == 0: changed = True else: module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) # add PV to our VG vgextend_cmd = module.get_bin_path('vgextend', True) - rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) + rc, dummy, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) if rc == 0: changed = True else: @@ -315,7 +315,7 @@ def main(): if devs_to_remove: devs_to_remove_string = ' '.join(devs_to_remove) vgreduce_cmd = module.get_bin_path('vgreduce', True) - rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) + rc, dummy, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) if rc == 0: changed = True else: diff --git a/plugins/modules/system/lvol.py b/plugins/modules/system/lvol.py index 852d0f5cd7..bef515b8ec 100644 --- a/plugins/modules/system/lvol.py +++ b/plugins/modules/system/lvol.py @@ -447,7 +447,7 @@ def main(): cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool) else: cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs) - rc, _, err = module.run_command(cmd) + rc, dummy, err = module.run_command(cmd) if rc == 0: changed = True else: @@ -458,7 +458,7 @@ def main(): if not force: module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." 
% (this_lv['name'])) lvremove_cmd = module.get_bin_path("lvremove", required=True) - rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name'])) + rc, dummy, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=True) else: @@ -548,14 +548,14 @@ def main(): if this_lv is not None: if active: lvchange_cmd = module.get_bin_path("lvchange", required=True) - rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + rc, dummy, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) else: module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err) else: lvchange_cmd = module.get_bin_path("lvchange", required=True) - rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + rc, dummy, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) else: diff --git a/plugins/modules/system/parted.py b/plugins/modules/system/parted.py index daf68c298a..bbb8c1408b 100644 --- a/plugins/modules/system/parted.py +++ b/plugins/modules/system/parted.py @@ -488,7 +488,7 @@ def check_parted_label(device): global parted_exec # Check the version - parted_major, parted_minor, _ = parted_version() + parted_major, parted_minor, dummy = parted_version() if (parted_major == 3 and parted_minor >= 1) or parted_major > 3: return False diff --git a/plugins/modules/system/timezone.py b/plugins/modules/system/timezone.py index 18f8bd418b..3cb7601441 100644 --- a/plugins/modules/system/timezone.py +++ b/plugins/modules/system/timezone.py @@ -117,7 +117,7 @@ class Timezone(object): # running in the global zone where 
changing the timezone has no effect. zonename_cmd = module.get_bin_path('zonename') if zonename_cmd is not None: - (rc, stdout, _) = module.run_command(zonename_cmd) + (rc, stdout, dummy) = module.run_command(zonename_cmd) if rc == 0 and stdout.strip() == 'global': module.fail_json(msg='Adjusting timezone is not supported in Global Zone') @@ -734,7 +734,7 @@ class BSDTimezone(Timezone): # Strategy 3: # (If /etc/localtime is not symlinked) # Check all files in /usr/share/zoneinfo and return first non-link match. - for dname, _, fnames in sorted(os.walk(zoneinfo_dir)): + for dname, dummy, fnames in sorted(os.walk(zoneinfo_dir)): for fname in sorted(fnames): zoneinfo_file = os.path.join(dname, fname) if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file): diff --git a/plugins/modules/web_infrastructure/rundeck_acl_policy.py b/plugins/modules/web_infrastructure/rundeck_acl_policy.py index 1caa159b1a..8c2043d22c 100644 --- a/plugins/modules/web_infrastructure/rundeck_acl_policy.py +++ b/plugins/modules/web_infrastructure/rundeck_acl_policy.py @@ -173,9 +173,9 @@ class RundeckACLManager: if self.module.check_mode: self.module.exit_json(changed=True, before={}, after=self.module.params["policy"]) - _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], - method="POST", - data={"contents": self.module.params["policy"]}) + dummy, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], + method="POST", + data={"contents": self.module.params["policy"]}) if info["status"] == 201: self.module.exit_json(changed=True, before={}, after=self.get_acl()) @@ -194,9 +194,9 @@ class RundeckACLManager: if self.module.check_mode: self.module.exit_json(changed=True, before=facts, after=facts) - _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], - method="PUT", - data={"contents": self.module.params["policy"]}) + dummy, info = 
self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], + method="PUT", + data={"contents": self.module.params["policy"]}) if info["status"] == 200: self.module.exit_json(changed=True, before=facts, after=self.get_acl()) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 72c5682525..99c8ff9b9c 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -1,5 +1,3 @@ -plugins/callback/hipchat.py pylint:blacklisted-name -plugins/connection/lxc.py pylint:blacklisted-name plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time plugins/module_utils/compat/ipaddress.py no-assert plugins/module_utils/compat/ipaddress.py no-unicode-literals @@ -10,7 +8,6 @@ plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elemen plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/linode/linode_v4.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/lxc/lxc_container.py pylint:blacklisted-name plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:invalid-ansiblemodule-schema plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen @@ -178,7 +175,6 @@ plugins/modules/monitoring/logstash_plugin.py validate-modules:invalid-ansiblemo plugins/modules/monitoring/sensu/sensu_check.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_client.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_handler.py validate-modules:parameter-list-no-elements -plugins/modules/monitoring/statusio_maintenance.py pylint:blacklisted-name plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is 
undocumented plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type @@ -291,21 +287,10 @@ plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-l plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error plugins/modules/storage/purestorage/purefb_facts.py validate-modules:parameter-list-no-elements plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error -plugins/modules/system/alternatives.py pylint:blacklisted-name -plugins/modules/system/beadm.py pylint:blacklisted-name -plugins/modules/system/cronvar.py pylint:blacklisted-name -plugins/modules/system/dconf.py pylint:blacklisted-name -plugins/modules/system/filesystem.py pylint:blacklisted-name -plugins/modules/system/gconftool2.py pylint:blacklisted-name plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice -plugins/modules/system/interfaces_file.py pylint:blacklisted-name plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter -plugins/modules/system/java_cert.py pylint:blacklisted-name plugins/modules/system/launchd.py use-argspec-type-path # False positive -plugins/modules/system/lvg.py pylint:blacklisted-name -plugins/modules/system/lvol.py pylint:blacklisted-name plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice -plugins/modules/system/parted.py pylint:blacklisted-name plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented @@ -316,11 +301,9 @@ plugins/modules/system/runit.py validate-modules:doc-default-does-not-match-spec plugins/modules/system/runit.py 
validate-modules:parameter-type-not-in-doc plugins/modules/system/runit.py validate-modules:undocumented-parameter plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path -plugins/modules/system/timezone.py pylint:blacklisted-name plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path -plugins/modules/web_infrastructure/rundeck_acl_policy.py pylint:blacklisted-name tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code tests/utils/shippable/check_matrix.py replace-urlopen diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 37b0d6f239..bf06b98030 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -1,5 +1,3 @@ -plugins/callback/hipchat.py pylint:blacklisted-name -plugins/connection/lxc.py pylint:blacklisted-name plugins/module_utils/compat/ipaddress.py no-assert plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate @@ -9,7 +7,6 @@ plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elemen plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/linode/linode_v4.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/lxc/lxc_container.py pylint:blacklisted-name plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:invalid-ansiblemodule-schema plugins/modules/cloud/lxc/lxc_container.py 
validate-modules:use-run-command-not-popen @@ -177,7 +174,6 @@ plugins/modules/monitoring/logstash_plugin.py validate-modules:invalid-ansiblemo plugins/modules/monitoring/sensu/sensu_check.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_client.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_handler.py validate-modules:parameter-list-no-elements -plugins/modules/monitoring/statusio_maintenance.py pylint:blacklisted-name plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type @@ -290,21 +286,10 @@ plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-l plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error plugins/modules/storage/purestorage/purefb_facts.py validate-modules:parameter-list-no-elements plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error -plugins/modules/system/alternatives.py pylint:blacklisted-name -plugins/modules/system/beadm.py pylint:blacklisted-name -plugins/modules/system/cronvar.py pylint:blacklisted-name -plugins/modules/system/dconf.py pylint:blacklisted-name -plugins/modules/system/filesystem.py pylint:blacklisted-name -plugins/modules/system/gconftool2.py pylint:blacklisted-name plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice -plugins/modules/system/interfaces_file.py pylint:blacklisted-name plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter -plugins/modules/system/java_cert.py pylint:blacklisted-name plugins/modules/system/launchd.py use-argspec-type-path # False positive 
-plugins/modules/system/lvg.py pylint:blacklisted-name -plugins/modules/system/lvol.py pylint:blacklisted-name plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice -plugins/modules/system/parted.py pylint:blacklisted-name plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented @@ -315,11 +300,9 @@ plugins/modules/system/runit.py validate-modules:doc-default-does-not-match-spec plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc plugins/modules/system/runit.py validate-modules:undocumented-parameter plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path -plugins/modules/system/timezone.py pylint:blacklisted-name plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path -plugins/modules/web_infrastructure/rundeck_acl_policy.py pylint:blacklisted-name tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code tests/utils/shippable/check_matrix.py replace-urlopen diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 72a51e8e0a..269526cb2e 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -1,5 +1,3 @@ -plugins/callback/hipchat.py pylint:blacklisted-name -plugins/connection/lxc.py pylint:blacklisted-name plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time plugins/module_utils/compat/ipaddress.py no-assert 
plugins/module_utils/compat/ipaddress.py no-unicode-literals @@ -7,7 +5,6 @@ plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter -plugins/modules/cloud/lxc/lxc_container.py pylint:blacklisted-name plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/helm.py validate-modules:deprecation-mismatch @@ -179,7 +176,6 @@ plugins/modules/files/iso_extract.py validate-modules:doc-default-does-not-match plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:doc-missing-type plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:parameter-type-not-in-doc plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter -plugins/modules/monitoring/statusio_maintenance.py pylint:blacklisted-name plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:deprecation-mismatch plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:invalid-documentation plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc @@ -284,19 +280,8 @@ plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-synt plugins/modules/storage/purestorage/purefb_facts.py validate-modules:deprecation-mismatch plugins/modules/storage/purestorage/purefb_facts.py validate-modules:invalid-documentation plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error -plugins/modules/system/alternatives.py pylint:blacklisted-name -plugins/modules/system/beadm.py pylint:blacklisted-name -plugins/modules/system/cronvar.py pylint:blacklisted-name -plugins/modules/system/dconf.py pylint:blacklisted-name -plugins/modules/system/filesystem.py 
pylint:blacklisted-name -plugins/modules/system/gconftool2.py pylint:blacklisted-name -plugins/modules/system/interfaces_file.py pylint:blacklisted-name plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter -plugins/modules/system/java_cert.py pylint:blacklisted-name plugins/modules/system/launchd.py use-argspec-type-path # False positive -plugins/modules/system/lvg.py pylint:blacklisted-name -plugins/modules/system/lvol.py pylint:blacklisted-name -plugins/modules/system/parted.py pylint:blacklisted-name plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:parameter-invalid plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc @@ -305,12 +290,10 @@ plugins/modules/system/runit.py validate-modules:doc-default-does-not-match-spec plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc plugins/modules/system/runit.py validate-modules:undocumented-parameter plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path -plugins/modules/system/timezone.py pylint:blacklisted-name plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path plugins/modules/web_infrastructure/nginx_status_facts.py validate-modules:deprecation-mismatch plugins/modules/web_infrastructure/nginx_status_facts.py validate-modules:invalid-documentation -plugins/modules/web_infrastructure/rundeck_acl_policy.py pylint:blacklisted-name tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code tests/utils/shippable/check_matrix.py replace-urlopen From 3778eac1bab4eb223ecb2478945aae60f55a89db Mon Sep 17 00:00:00 2001 From: Alexei Znamensky 
<103110+russoz@users.noreply.github.com> Date: Tue, 16 Feb 2021 21:27:24 +1300 Subject: [PATCH 0053/3093] Tidy up validations + bug fixes + deprecations (#1830) * fixed validation-modules for plugins/modules/database/misc/elasticsearch_plugin.py * fixed validation-modules for plugins/modules/database/misc/kibana_plugin.py * fixed validation-modules for plugins/modules/database/misc/riak.py * fixed validation-modules for plugins/modules/database/vertica/vertica_info.py * fixed validation-modules for plugins/modules/database/vertica/vertica_role.py * fixed validation-modules for plugins/modules/database/vertica/vertica_schema.py * fixed validation-modules for plugins/modules/database/vertica/vertica_user.py * fixed validation-modules for plugins/modules/storage/ibm/ibm_sa_domain.py * fixed validation-modules for plugins/modules/storage/ibm/ibm_sa_host_ports.py * fixed validation-modules for plugins/modules/storage/ibm/ibm_sa_host.py * fixed validation-modules for plugins/modules/storage/ibm/ibm_sa_pool.py * fixed validation-modules for plugins/modules/storage/ibm/ibm_sa_vol.py * fixed validation-modules for plugins/modules/storage/ibm/ibm_sa_vol_map.py * fixed validation-modules for plugins/modules/storage/ibm/ibm_sa_host_ports.py * fixed validation-modules for plugins/modules/system/runit.py * fixed validation-modules for plugins/modules/source_control/bzr.py * fixed validation-modules for plugins/modules/source_control/hg.py * fixed validation-modules for plugins/modules/storage/emc/emc_vnx_sg_member.py * fixed validation-modules for plugins/modules/identity/opendj/opendj_backendprop.py * fixed validation-modules for plugins/modules/files/iso_extract.py * fixed validation-modules for plugins/modules/monitoring/logstash_plugin.py * fixed validation-modules for plugins/modules/database/aerospike/aerospike_migrations.py * Tidy up a number of sanity checks for some modules * added changelog fragment * Some parameters in vertica_* had their aliases documented as the 
name, and sometimes vice-versa as well * Adjustments per PR * Rolled back sanity ignores for runit * Update changelogs/fragments/1830-valmod_docmissingtype_batch1.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../1830-valmod_docmissingtype_batch1.yml | 7 +++++ .../aerospike/aerospike_migrations.py | 1 - .../database/misc/elasticsearch_plugin.py | 12 ++++++- .../modules/database/misc/kibana_plugin.py | 9 +++++- plugins/modules/database/misc/riak.py | 13 ++++++-- .../database/vertica/vertica_configuration.py | 4 +-- .../modules/database/vertica/vertica_info.py | 7 ++++- .../modules/database/vertica/vertica_role.py | 15 +++++++-- .../database/vertica/vertica_schema.py | 23 ++++++++++---- .../modules/database/vertica/vertica_user.py | 30 ++++++++++++------ plugins/modules/files/iso_extract.py | 9 ++---- .../identity/opendj/opendj_backendprop.py | 10 ++++++ plugins/modules/monitoring/logstash_plugin.py | 8 ++--- plugins/modules/source_control/bzr.py | 4 +++ plugins/modules/source_control/hg.py | 4 +++ .../modules/storage/emc/emc_vnx_sg_member.py | 3 ++ plugins/modules/storage/ibm/ibm_sa_domain.py | 12 +++++++ plugins/modules/storage/ibm/ibm_sa_host.py | 6 ++++ .../modules/storage/ibm/ibm_sa_host_ports.py | 6 +++- plugins/modules/storage/ibm/ibm_sa_pool.py | 6 ++++ plugins/modules/storage/ibm/ibm_sa_vol.py | 4 +++ plugins/modules/storage/ibm/ibm_sa_vol_map.py | 6 ++++ plugins/modules/system/runit.py | 27 ++-------------- tests/sanity/ignore-2.10.txt | 31 ------------------- tests/sanity/ignore-2.11.txt | 31 ------------------- tests/sanity/ignore-2.9.txt | 28 ----------------- 26 files changed, 164 insertions(+), 152 deletions(-) create mode 100644 changelogs/fragments/1830-valmod_docmissingtype_batch1.yml diff --git a/changelogs/fragments/1830-valmod_docmissingtype_batch1.yml b/changelogs/fragments/1830-valmod_docmissingtype_batch1.yml new file mode 100644 index 0000000000..83a27f7e77 --- /dev/null +++ 
b/changelogs/fragments/1830-valmod_docmissingtype_batch1.yml @@ -0,0 +1,7 @@ +bugfixes: + - kibana_plugin - ``state`` parameter choices must use ``list()`` in python3 (https://github.com/ansible-collections/community.general/pull/1830). + - elasticsearch_plugin - ``state`` parameter choices must use ``list()`` in python3 (https://github.com/ansible-collections/community.general/pull/1830). + - riak - parameters ``wait_for_handoffs`` and ``wait_for_ring`` are ``int`` but the default value was ``false`` (https://github.com/ansible-collections/community.general/pull/1830). + - logstash_plugin - wrapped ``dict.keys()`` with ``list`` for use in ``choices`` setting (https://github.com/ansible-collections/community.general/pull/1830). + - iso_extract - use proper alias deprecation mechanism for ``thirsty`` alias of ``force`` (https://github.com/ansible-collections/community.general/pull/1830). + - runit - removed unused code, and passing command as ``list`` instead of ``str`` to ``run_command()`` (https://github.com/ansible-collections/community.general/pull/1830). diff --git a/plugins/modules/database/aerospike/aerospike_migrations.py b/plugins/modules/database/aerospike/aerospike_migrations.py index bf6ac60e8f..95eda4775c 100644 --- a/plugins/modules/database/aerospike/aerospike_migrations.py +++ b/plugins/modules/database/aerospike/aerospike_migrations.py @@ -115,7 +115,6 @@ EXAMPLES = ''' local_only: False # example playbook: ---- - name: Upgrade aerospike hosts: all become: true diff --git a/plugins/modules/database/misc/elasticsearch_plugin.py b/plugins/modules/database/misc/elasticsearch_plugin.py index 27a67406a8..bc7df931b6 100644 --- a/plugins/modules/database/misc/elasticsearch_plugin.py +++ b/plugins/modules/database/misc/elasticsearch_plugin.py @@ -22,11 +22,13 @@ options: description: - Name of the plugin to install. required: True + type: str state: description: - Desired state of a plugin. 
choices: ["present", "absent"] default: present + type: str src: description: - Optionally set the source location to retrieve the plugin from. This can be a file:// @@ -38,16 +40,19 @@ options: effect. - For ES 1.x use url. required: False + type: str url: description: - Set exact URL to download the plugin from (Only works for ES 1.x). - For ES 2.x and higher, use src. required: False + type: str timeout: description: - "Timeout setting: 30s, 1m, 1h..." - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0. default: 1m + type: str force: description: - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails." @@ -57,20 +62,25 @@ options: description: - Location of the plugin binary. If this file is not found, the default plugin binaries will be used. - The default changed in Ansible 2.4 to None. + type: path plugin_dir: description: - Your configured plugin directory specified in Elasticsearch default: /usr/share/elasticsearch/plugins/ + type: path proxy_host: description: - Proxy host to use during plugin installation + type: str proxy_port: description: - Proxy port to use during plugin installation + type: str version: description: - Version of the plugin to be installed. 
If plugin exists with previous version, it will NOT be updated + type: str ''' EXAMPLES = ''' @@ -241,7 +251,7 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()), + state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), src=dict(default=None), url=dict(default=None), timeout=dict(default="1m"), diff --git a/plugins/modules/database/misc/kibana_plugin.py b/plugins/modules/database/misc/kibana_plugin.py index e84d8a6bf0..33bc86229b 100644 --- a/plugins/modules/database/misc/kibana_plugin.py +++ b/plugins/modules/database/misc/kibana_plugin.py @@ -22,31 +22,38 @@ options: description: - Name of the plugin to install. required: True + type: str state: description: - Desired state of a plugin. choices: ["present", "absent"] default: present + type: str url: description: - Set exact URL to download the plugin from. - For local file, prefix its absolute path with file:// + type: str timeout: description: - "Timeout setting: 30s, 1m, 1h etc." default: 1m + type: str plugin_bin: description: - Location of the Kibana binary. default: /opt/kibana/bin/kibana + type: path plugin_dir: description: - Your configured plugin directory specified in Kibana. default: /opt/kibana/installedPlugins/ + type: path version: description: - Version of the plugin to be installed. - If plugin exists with previous version, plugin will NOT be updated unless C(force) is set to yes. + type: str force: description: - Delete and re-install the plugin. Can be useful for plugins update. 
@@ -209,7 +216,7 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()), + state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), url=dict(default=None), timeout=dict(default="1m"), plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"), diff --git a/plugins/modules/database/misc/riak.py b/plugins/modules/database/misc/riak.py index 848a5e3f16..4ee7b5b674 100644 --- a/plugins/modules/database/misc/riak.py +++ b/plugins/modules/database/misc/riak.py @@ -23,28 +23,37 @@ options: description: - The command you would like to perform against the cluster. choices: ['ping', 'kv_test', 'join', 'plan', 'commit'] + type: str config_dir: description: - The path to the riak configuration directory default: /etc/riak + type: path http_conn: description: - The ip address and port that is listening for Riak HTTP queries default: 127.0.0.1:8098 + type: str target_node: description: - The target node for certain operations (join, ping) default: riak@127.0.0.1 + type: str wait_for_handoffs: description: - Number of seconds to wait for handoffs to complete. + type: int + default: 0 wait_for_ring: description: - Number of seconds to wait for all nodes to agree on the ring. + type: int + default: 0 wait_for_service: description: - Waits for a riak service to come online before continuing. choices: ['kv'] + type: str validate_certs: description: - If C(no), SSL certificates will not be validated. 
This should only be used @@ -93,8 +102,8 @@ def main(): config_dir=dict(default='/etc/riak', type='path'), http_conn=dict(required=False, default='127.0.0.1:8098'), target_node=dict(default='riak@127.0.0.1', required=False), - wait_for_handoffs=dict(default=False, type='int'), - wait_for_ring=dict(default=False, type='int'), + wait_for_handoffs=dict(default=0, type='int'), + wait_for_ring=dict(default=0, type='int'), wait_for_service=dict( required=False, default=None, choices=['kv']), validate_certs=dict(default=True, type='bool')) diff --git a/plugins/modules/database/vertica/vertica_configuration.py b/plugins/modules/database/vertica/vertica_configuration.py index 3d0788e67f..1d67a831d9 100644 --- a/plugins/modules/database/vertica/vertica_configuration.py +++ b/plugins/modules/database/vertica/vertica_configuration.py @@ -14,11 +14,11 @@ short_description: Updates Vertica configuration parameters. description: - Updates Vertica configuration parameters. options: - name: + parameter: description: - Name of the parameter to update. required: true - aliases: [parameter] + aliases: [name] type: str value: description: diff --git a/plugins/modules/database/vertica/vertica_info.py b/plugins/modules/database/vertica/vertica_info.py index a5741719b7..ace130b89d 100644 --- a/plugins/modules/database/vertica/vertica_info.py +++ b/plugins/modules/database/vertica/vertica_info.py @@ -21,25 +21,30 @@ options: description: - Name of the cluster running the schema. default: localhost + type: str port: description: Database port to connect to. default: 5433 + type: str db: description: - Name of the database running the schema. + type: str login_user: description: - The username used to authenticate with. default: dbadmin + type: str login_password: description: - The password used to authenticate with. + type: str notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. 
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure - that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. + that C(unixODBC) and C(pyodbc) are installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) diff --git a/plugins/modules/database/vertica/vertica_role.py b/plugins/modules/database/vertica/vertica_role.py index bba411d03f..fc80907cc6 100644 --- a/plugins/modules/database/vertica/vertica_role.py +++ b/plugins/modules/database/vertica/vertica_role.py @@ -15,37 +15,46 @@ short_description: Adds or removes Vertica database roles and assigns roles to t description: - Adds or removes Vertica database role and, optionally, assign other roles. options: - name: + role: description: - Name of the role to add or remove. required: true + type: str + aliases: ['name'] assigned_roles: description: - Comma separated list of roles to assign to the role. aliases: ['assigned_role'] + type: str state: description: - Whether to create C(present), drop C(absent) or lock C(locked) a role. choices: ['present', 'absent'] default: present + type: str db: description: - Name of the Vertica database. + type: str cluster: description: - Name of the Vertica cluster. default: localhost + type: str port: description: - Vertica cluster port to connect to. default: 5433 + type: str login_user: description: - The username used to authenticate with. default: dbadmin + type: str login_password: description: - The password used to authenticate with. + type: str notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. 
@@ -168,11 +177,11 @@ def main(): role=dict(required=True, aliases=['name']), assigned_roles=dict(default=None, aliases=['assigned_role']), state=dict(default='present', choices=['absent', 'present']), - db=dict(default=None), + db=dict(), cluster=dict(default='localhost'), port=dict(default='5433'), login_user=dict(default='dbadmin'), - login_password=dict(default=None, no_log=True), + login_password=dict(no_log=True), ), supports_check_mode=True) if not pyodbc_found: diff --git a/plugins/modules/database/vertica/vertica_schema.py b/plugins/modules/database/vertica/vertica_schema.py index 424de564dc..0c85e3e091 100644 --- a/plugins/modules/database/vertica/vertica_schema.py +++ b/plugins/modules/database/vertica/vertica_schema.py @@ -20,44 +20,55 @@ description: will fail and only remove roles created for the schema if they have no dependencies. options: - name: + schema: description: - Name of the schema to add or remove. required: true + aliases: ['name'] + type: str usage_roles: description: - Comma separated list of roles to create and grant usage access to the schema. aliases: ['usage_role'] + type: str create_roles: description: - Comma separated list of roles to create and grant usage and create access to the schema. aliases: ['create_role'] + type: str owner: description: - Name of the user to set as owner of the schema. + type: str state: description: - Whether to create C(present), or drop C(absent) a schema. default: present choices: ['present', 'absent'] + type: str db: description: - Name of the Vertica database. + type: str cluster: description: - Name of the Vertica cluster. default: localhost + type: str port: description: - Vertica cluster port to connect to. default: 5433 + type: str login_user: description: - The username used to authenticate with. default: dbadmin + type: str login_password: description: - The password used to authenticate with. 
+ type: str notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. @@ -230,15 +241,15 @@ def main(): module = AnsibleModule( argument_spec=dict( schema=dict(required=True, aliases=['name']), - usage_roles=dict(default=None, aliases=['usage_role']), - create_roles=dict(default=None, aliases=['create_role']), - owner=dict(default=None), + usage_roles=dict(aliases=['usage_role']), + create_roles=dict(aliases=['create_role']), + owner=dict(), state=dict(default='present', choices=['absent', 'present']), - db=dict(default=None), + db=dict(), cluster=dict(default='localhost'), port=dict(default='5433'), login_user=dict(default='dbadmin'), - login_password=dict(default=None, no_log=True), + login_password=dict(no_log=True), ), supports_check_mode=True) if not pyodbc_found: diff --git a/plugins/modules/database/vertica/vertica_user.py b/plugins/modules/database/vertica/vertica_user.py index f550f190c7..791ef5fef9 100644 --- a/plugins/modules/database/vertica/vertica_user.py +++ b/plugins/modules/database/vertica/vertica_user.py @@ -17,22 +17,27 @@ description: - In such a situation, if the module tries to remove the user it will fail and only remove roles granted to the user. options: - name: + user: description: - Name of the user to add or remove. required: true + type: str + aliases: ['name'] profile: description: - Sets the user's profile. + type: str resource_pool: description: - Sets the user's resource pool. + type: str password: description: - The user's password encrypted by the MD5 algorithm. - The password must be generated with the format C("md5" + md5[password + username]), resulting in a total of 35 characters. An easy way to do this is by querying the Vertica database with select 'md5'||md5(''). + type: str expired: description: - Sets the user's password expiration. @@ -46,29 +51,36 @@ options: description: - Comma separated list of roles to assign to the user. 
aliases: ['role'] + type: str state: description: - Whether to create C(present), drop C(absent) or lock C(locked) a user. choices: ['present', 'absent', 'locked'] default: present + type: str db: description: - Name of the Vertica database. + type: str cluster: description: - Name of the Vertica cluster. default: localhost + type: str port: description: - Vertica cluster port to connect to. default: 5433 + type: str login_user: description: - The username used to authenticate with. default: dbadmin + type: str login_password: description: - The password used to authenticate with. + type: str notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. @@ -282,18 +294,18 @@ def main(): module = AnsibleModule( argument_spec=dict( user=dict(required=True, aliases=['name']), - profile=dict(default=None), - resource_pool=dict(default=None), - password=dict(default=None, no_log=True), - expired=dict(type='bool', default=None), - ldap=dict(type='bool', default=None), - roles=dict(default=None, aliases=['role']), + profile=dict(), + resource_pool=dict(), + password=dict(no_log=True), + expired=dict(type='bool'), + ldap=dict(type='bool'), + roles=dict(aliases=['role']), state=dict(default='present', choices=['absent', 'present', 'locked']), - db=dict(default=None), + db=dict(), cluster=dict(default='localhost'), port=dict(default='5433'), login_user=dict(default='dbadmin'), - login_password=dict(default=None, no_log=True), + login_password=dict(no_log=True), ), supports_check_mode=True) if not pyodbc_found: diff --git a/plugins/modules/files/iso_extract.py b/plugins/modules/files/iso_extract.py index 0c73ac96ee..b84db39756 100644 --- a/plugins/modules/files/iso_extract.py +++ b/plugins/modules/files/iso_extract.py @@ -59,8 +59,8 @@ options: executable: description: - The path to the C(7z) executable to use for extracting files from the ISO. + - If not provided, it will assume the value C(7z). 
type: path - default: '7z' notes: - Only the file checksum (content) is taken into account when extracting files from the ISO image. If C(force=no), only checks the presence of the file. @@ -101,7 +101,8 @@ def main(): image=dict(type='path', required=True, aliases=['path', 'src']), dest=dict(type='path', required=True), files=dict(type='list', elements='str', required=True), - force=dict(type='bool', default=True, aliases=['thirsty']), + force=dict(type='bool', default=True, aliases=['thirsty'], + deprecated_aliases=[dict(name='thirsty', version='3.0.0', collection_name='community.general')]), executable=dict(type='path'), # No default on purpose ), supports_check_mode=True, @@ -112,10 +113,6 @@ def main(): force = module.params['force'] executable = module.params['executable'] - if module.params.get('thirsty'): - module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead', - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - result = dict( changed=False, dest=dest, diff --git a/plugins/modules/identity/opendj/opendj_backendprop.py b/plugins/modules/identity/opendj/opendj_backendprop.py index aa477e42d7..be118a505d 100644 --- a/plugins/modules/identity/opendj/opendj_backendprop.py +++ b/plugins/modules/identity/opendj/opendj_backendprop.py @@ -22,46 +22,56 @@ options: - The path to the bin directory of OpenDJ. required: false default: /opt/opendj/bin + type: path hostname: description: - The hostname of the OpenDJ server. required: true + type: str port: description: - The Admin port on which the OpenDJ instance is available. required: true + type: str username: description: - The username to connect to. required: false default: cn=Directory Manager + type: str password: description: - The password for the cn=Directory Manager user. - Either password or passwordfile is needed. 
required: false + type: str passwordfile: description: - Location to the password file which holds the password for the cn=Directory Manager user. - Either password or passwordfile is needed. required: false + type: path backend: description: - The name of the backend on which the property needs to be updated. required: true + type: str name: description: - The configuration setting to update. required: true + type: str value: description: - The value for the configuration item. required: true + type: str state: description: - If configuration needs to be added/updated required: false default: "present" + type: str ''' EXAMPLES = ''' diff --git a/plugins/modules/monitoring/logstash_plugin.py b/plugins/modules/monitoring/logstash_plugin.py index 4a45c04a77..5d1cd488ab 100644 --- a/plugins/modules/monitoring/logstash_plugin.py +++ b/plugins/modules/monitoring/logstash_plugin.py @@ -138,11 +138,11 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()), + state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"), - proxy_host=dict(default=None), - proxy_port=dict(default=None), - version=dict(default=None) + proxy_host=dict(), + proxy_port=dict(), + version=dict() ), supports_check_mode=True ) diff --git a/plugins/modules/source_control/bzr.py b/plugins/modules/source_control/bzr.py index 7af3f27993..a4ce4bc075 100644 --- a/plugins/modules/source_control/bzr.py +++ b/plugins/modules/source_control/bzr.py @@ -22,15 +22,18 @@ options: - SSH or HTTP protocol address of the parent branch. aliases: [ parent ] required: yes + type: str dest: description: - Absolute path of where the branch should be cloned to. required: yes + type: path version: description: - What version of the branch to clone. This can be the bzr revno or revid. 
default: head + type: str force: description: - If C(yes), any modified files in the working @@ -42,6 +45,7 @@ options: description: - Path to bzr executable to use. If not supplied, the normal mechanism for resolving binary paths will be used. + type: str ''' EXAMPLES = ''' diff --git a/plugins/modules/source_control/hg.py b/plugins/modules/source_control/hg.py index 5c084d3ad1..810b918bd6 100644 --- a/plugins/modules/source_control/hg.py +++ b/plugins/modules/source_control/hg.py @@ -21,15 +21,18 @@ options: - The repository address. required: yes aliases: [ name ] + type: str dest: description: - Absolute path of where the repository should be cloned to. This parameter is required, unless clone and update are set to no + type: path revision: description: - Equivalent C(-r) option in hg command which could be the changeset, revision number, branch name or even tag. aliases: [ version ] + type: str force: description: - Discards uncommitted changes. Runs C(hg update -C). Prior to @@ -55,6 +58,7 @@ options: description: - Path to hg executable to use. If not supplied, the normal mechanism for resolving binary paths will be used. + type: str notes: - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156). - "If the task seems to be hanging, first verify remote host is in C(known_hosts). diff --git a/plugins/modules/storage/emc/emc_vnx_sg_member.py b/plugins/modules/storage/emc/emc_vnx_sg_member.py index dfac03ef40..b5b68d4ef4 100644 --- a/plugins/modules/storage/emc/emc_vnx_sg_member.py +++ b/plugins/modules/storage/emc/emc_vnx_sg_member.py @@ -29,10 +29,12 @@ options: description: - Name of the Storage group to manage. required: true + type: str lunid: description: - Lun id to be added. required: true + type: int state: description: - Indicates the desired lunid state. @@ -40,6 +42,7 @@ options: - C(absent) ensures specified lunid is absent from Storage Group. 
default: present choices: [ "present", "absent"] + type: str author: diff --git a/plugins/modules/storage/ibm/ibm_sa_domain.py b/plugins/modules/storage/ibm/ibm_sa_domain.py index 29690497c3..9c5e6c50c8 100644 --- a/plugins/modules/storage/ibm/ibm_sa_domain.py +++ b/plugins/modules/storage/ibm/ibm_sa_domain.py @@ -23,51 +23,63 @@ options: description: - Name of the domain to be managed. required: true + type: str state: description: - The desired state of the domain. default: "present" choices: [ "present", "absent" ] + type: str ldap_id: description: - ldap id to add to the domain. required: false + type: str size: description: - Size of the domain. required: false + type: str hard_capacity: description: - Hard capacity of the domain. required: false + type: str soft_capacity: description: - Soft capacity of the domain. required: false + type: str max_cgs: description: - Number of max cgs. required: false + type: str max_dms: description: - Number of max dms. required: false + type: str max_mirrors: description: - Number of max_mirrors. required: false + type: str max_pools: description: - Number of max_pools. required: false + type: str max_volumes: description: - Number of max_volumes. required: false + type: str perf_class: description: - Add the domain to a performance class. required: false + type: str extends_documentation_fragment: - community.general.ibm_storage diff --git a/plugins/modules/storage/ibm/ibm_sa_host.py b/plugins/modules/storage/ibm/ibm_sa_host.py index 5ce12992bc..27a7287f8a 100644 --- a/plugins/modules/storage/ibm/ibm_sa_host.py +++ b/plugins/modules/storage/ibm/ibm_sa_host.py @@ -22,15 +22,18 @@ options: description: - Host name. required: true + type: str state: description: - Host state. default: "present" choices: [ "present", "absent" ] + type: str cluster: description: - The name of the cluster to include the host. required: false + type: str domain: description: - The domains the cluster will be attached to. 
@@ -38,15 +41,18 @@ options: separate domain names with commas. To include all existing domains, use an asterisk ("*"). required: false + type: str iscsi_chap_name: description: - The host's CHAP name identifier required: false + type: str iscsi_chap_secret: description: - The password of the initiator used to authenticate to the system when CHAP is enable required: false + type: str extends_documentation_fragment: - community.general.ibm_storage diff --git a/plugins/modules/storage/ibm/ibm_sa_host_ports.py b/plugins/modules/storage/ibm/ibm_sa_host_ports.py index 981bc553ff..32daa9f3c7 100644 --- a/plugins/modules/storage/ibm/ibm_sa_host_ports.py +++ b/plugins/modules/storage/ibm/ibm_sa_host_ports.py @@ -23,28 +23,32 @@ options: description: - Host name. required: true + type: str state: description: - Host ports state. default: "present" choices: [ "present", "absent" ] + type: str iscsi_name: description: - iSCSI initiator name. required: false + type: str fcaddress: description: - Fiber channel address. required: false + type: str num_of_visible_targets: description: - Number of visible targets. required: false + type: str extends_documentation_fragment: - community.general.ibm_storage - author: - Tzur Eliyahu (@tzure) ''' diff --git a/plugins/modules/storage/ibm/ibm_sa_pool.py b/plugins/modules/storage/ibm/ibm_sa_pool.py index 812904eb77..67c963ace1 100644 --- a/plugins/modules/storage/ibm/ibm_sa_pool.py +++ b/plugins/modules/storage/ibm/ibm_sa_pool.py @@ -22,27 +22,33 @@ options: description: - Pool name. required: true + type: str state: description: - Pool state. default: "present" choices: [ "present", "absent" ] + type: str size: description: - Pool size in GB required: false + type: str snapshot_size: description: - Pool snapshot size in GB required: false + type: str domain: description: - Adds the pool to the specified domain. required: false + type: str perf_class: description: - Assigns a perf_class to the pool. 
required: false + type: str extends_documentation_fragment: - community.general.ibm_storage diff --git a/plugins/modules/storage/ibm/ibm_sa_vol.py b/plugins/modules/storage/ibm/ibm_sa_vol.py index bf578ceef7..7820d26828 100644 --- a/plugins/modules/storage/ibm/ibm_sa_vol.py +++ b/plugins/modules/storage/ibm/ibm_sa_vol.py @@ -22,19 +22,23 @@ options: description: - Volume name. required: true + type: str pool: description: - Volume pool. required: false + type: str state: description: - Volume state. default: "present" choices: [ "present", "absent" ] + type: str size: description: - Volume size. required: false + type: str extends_documentation_fragment: - community.general.ibm_storage diff --git a/plugins/modules/storage/ibm/ibm_sa_vol_map.py b/plugins/modules/storage/ibm/ibm_sa_vol_map.py index f1f5a807d3..b449ba8de4 100644 --- a/plugins/modules/storage/ibm/ibm_sa_vol_map.py +++ b/plugins/modules/storage/ibm/ibm_sa_vol_map.py @@ -24,29 +24,35 @@ options: description: - Volume name. required: true + type: str state: default: "present" choices: [ "present", "absent" ] description: - When the state is present the volume is mapped. When the state is absent, the volume is meant to be unmapped. + type: str cluster: description: - Maps the volume to a cluster. required: false + type: str host: description: - Maps the volume to a host. required: false + type: str lun: description: - The LUN identifier. required: false + type: str override: description: - Overrides the existing volume mapping. required: false + type: str extends_documentation_fragment: - community.general.ibm_storage diff --git a/plugins/modules/system/runit.py b/plugins/modules/system/runit.py index b80ed8cb24..f8f3ada3da 100644 --- a/plugins/modules/system/runit.py +++ b/plugins/modules/system/runit.py @@ -87,34 +87,12 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -def _load_dist_subclass(cls, *args, **kwargs): # @FIXME remove unused function? 
- ''' - Used for derivative implementations - ''' - subclass = None - - distro = kwargs['module'].params['distro'] - - # get the most specific superclass for this platform - if distro is not None: - for sc in cls.__subclasses__(): - if sc.distro is not None and sc.distro == distro: - subclass = sc - if subclass is None: - subclass = cls - - return super(cls, subclass).__new__(subclass) - - class Sv(object): """ Main class that handles daemontools, can be subclassed and overridden in case we want to use a 'derivative' like encore, s6, etc """ - # def __new__(cls, *args, **kwargs): - # return _load_dist_subclass(cls, args, kwargs) - def __init__(self, module): self.extra_paths = [] self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state'] @@ -220,10 +198,10 @@ class Sv(object): def execute_command(self, cmd): try: - (rc, out, err) = self.module.run_command(' '.join(cmd)) + (rc, out, err) = self.module.run_command(cmd) except Exception as e: self.module.fail_json(msg="failed to execute: %s" % to_native(e)) - return (rc, out, err) + return rc, out, err def report(self): self.get_status() @@ -253,7 +231,6 @@ def main(): sv = Sv(module) changed = False - orig_state = sv.report() if enabled is not None and enabled != sv.enabled: changed = True diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 99c8ff9b9c..ca6abc7413 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -146,32 +146,11 @@ plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-s plugins/modules/clustering/etcd3.py validate-modules:parameter-type-not-in-doc plugins/modules/clustering/znode.py validate-modules:doc-missing-type plugins/modules/clustering/znode.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/aerospike/aerospike_migrations.py yamllint:unparsable-with-libyaml -plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:doc-missing-type 
-plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:invalid-ansiblemodule-schema -plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/misc/kibana_plugin.py validate-modules:doc-missing-type -plugins/modules/database/misc/kibana_plugin.py validate-modules:invalid-ansiblemodule-schema -plugins/modules/database/misc/kibana_plugin.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/misc/riak.py validate-modules:doc-default-does-not-match-spec -plugins/modules/database/misc/riak.py validate-modules:doc-missing-type -plugins/modules/database/misc/riak.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/vertica/vertica_info.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_role.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_role.py validate-modules:undocumented-parameter -plugins/modules/database/vertica/vertica_schema.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_schema.py validate-modules:undocumented-parameter -plugins/modules/database/vertica/vertica_user.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_user.py validate-modules:undocumented-parameter -plugins/modules/files/iso_extract.py validate-modules:doc-default-does-not-match-spec -plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:doc-missing-type -plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:parameter-type-not-in-doc plugins/modules/monitoring/bigpanda.py validate-modules:invalid-argument-name plugins/modules/monitoring/datadog/datadog_event.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:invalid-argument-name plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:parameter-list-no-elements 
plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter -plugins/modules/monitoring/logstash_plugin.py validate-modules:invalid-ansiblemodule-schema plugins/modules/monitoring/sensu/sensu_check.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_client.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_handler.py validate-modules:parameter-list-no-elements @@ -255,7 +234,6 @@ plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-def plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter -plugins/modules/source_control/bzr.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/git_config.py validate-modules:doc-missing-type plugins/modules/source_control/git_config.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_deploy_key.py validate-modules:doc-missing-type @@ -268,19 +246,10 @@ plugins/modules/source_control/github/github_release.py validate-modules:doc-mis plugins/modules/source_control/github/github_release.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_webhook.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_webhook_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/hg.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:doc-missing-type -plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc 
plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:parameter-list-no-elements plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-list-no-elements plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/ibm/ibm_sa_domain.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_host.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_host_ports.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_pool.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_vol.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_vol_map.py validate-modules:doc-missing-type plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-state-invalid-choice plugins/modules/storage/purestorage/purefa_facts.py validate-modules:doc-required-mismatch plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index bf06b98030..f34db6b314 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -145,32 +145,11 @@ plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-s plugins/modules/clustering/etcd3.py validate-modules:parameter-type-not-in-doc plugins/modules/clustering/znode.py validate-modules:doc-missing-type plugins/modules/clustering/znode.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/aerospike/aerospike_migrations.py yamllint:unparsable-with-libyaml -plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:doc-missing-type -plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:invalid-ansiblemodule-schema -plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/misc/kibana_plugin.py 
validate-modules:doc-missing-type -plugins/modules/database/misc/kibana_plugin.py validate-modules:invalid-ansiblemodule-schema -plugins/modules/database/misc/kibana_plugin.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/misc/riak.py validate-modules:doc-default-does-not-match-spec -plugins/modules/database/misc/riak.py validate-modules:doc-missing-type -plugins/modules/database/misc/riak.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/vertica/vertica_info.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_role.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_role.py validate-modules:undocumented-parameter -plugins/modules/database/vertica/vertica_schema.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_schema.py validate-modules:undocumented-parameter -plugins/modules/database/vertica/vertica_user.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_user.py validate-modules:undocumented-parameter -plugins/modules/files/iso_extract.py validate-modules:doc-default-does-not-match-spec -plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:doc-missing-type -plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:parameter-type-not-in-doc plugins/modules/monitoring/bigpanda.py validate-modules:invalid-argument-name plugins/modules/monitoring/datadog/datadog_event.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:invalid-argument-name plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter -plugins/modules/monitoring/logstash_plugin.py validate-modules:invalid-ansiblemodule-schema plugins/modules/monitoring/sensu/sensu_check.py validate-modules:parameter-list-no-elements 
plugins/modules/monitoring/sensu/sensu_client.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/sensu/sensu_handler.py validate-modules:parameter-list-no-elements @@ -254,7 +233,6 @@ plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-def plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter -plugins/modules/source_control/bzr.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/git_config.py validate-modules:doc-missing-type plugins/modules/source_control/git_config.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_deploy_key.py validate-modules:doc-missing-type @@ -267,19 +245,10 @@ plugins/modules/source_control/github/github_release.py validate-modules:doc-mis plugins/modules/source_control/github/github_release.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_webhook.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_webhook_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/hg.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:doc-missing-type -plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:parameter-list-no-elements plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-list-no-elements plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-type-not-in-doc 
-plugins/modules/storage/ibm/ibm_sa_domain.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_host.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_host_ports.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_pool.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_vol.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_vol_map.py validate-modules:doc-missing-type plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-state-invalid-choice plugins/modules/storage/purestorage/purefa_facts.py validate-modules:doc-required-mismatch plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 269526cb2e..6ad3960bf1 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -157,24 +157,6 @@ plugins/modules/clustering/consul/consul_kv.py validate-modules:parameter-type-n plugins/modules/clustering/etcd3.py validate-modules:parameter-type-not-in-doc plugins/modules/clustering/znode.py validate-modules:doc-missing-type plugins/modules/clustering/znode.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:doc-missing-type -plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/misc/kibana_plugin.py validate-modules:doc-missing-type -plugins/modules/database/misc/kibana_plugin.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/misc/riak.py validate-modules:doc-default-does-not-match-spec -plugins/modules/database/misc/riak.py validate-modules:doc-missing-type -plugins/modules/database/misc/riak.py validate-modules:parameter-type-not-in-doc -plugins/modules/database/vertica/vertica_configuration.py validate-modules:doc-missing-type 
-plugins/modules/database/vertica/vertica_info.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_role.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_role.py validate-modules:undocumented-parameter -plugins/modules/database/vertica/vertica_schema.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_schema.py validate-modules:undocumented-parameter -plugins/modules/database/vertica/vertica_user.py validate-modules:doc-missing-type -plugins/modules/database/vertica/vertica_user.py validate-modules:undocumented-parameter -plugins/modules/files/iso_extract.py validate-modules:doc-default-does-not-match-spec -plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:doc-missing-type -plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:parameter-type-not-in-doc plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:deprecation-mismatch plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:invalid-documentation @@ -242,7 +224,6 @@ plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-def plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter -plugins/modules/source_control/bzr.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/git_config.py validate-modules:doc-missing-type plugins/modules/source_control/git_config.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_deploy_key.py validate-modules:doc-missing-type @@ -255,9 +236,6 @@ plugins/modules/source_control/github/github_release.py validate-modules:doc-mis 
plugins/modules/source_control/github/github_release.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_webhook.py validate-modules:parameter-type-not-in-doc plugins/modules/source_control/github/github_webhook_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/hg.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:doc-missing-type -plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:deprecation-mismatch plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:invalid-documentation plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc @@ -266,12 +244,6 @@ plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:invalid-docum plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:deprecation-mismatch plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:invalid-documentation plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/ibm/ibm_sa_domain.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_host.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_host_ports.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_pool.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_vol.py validate-modules:doc-missing-type -plugins/modules/storage/ibm/ibm_sa_vol_map.py validate-modules:doc-missing-type plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:deprecation-mismatch plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:invalid-documentation plugins/modules/storage/purestorage/purefa_facts.py validate-modules:deprecation-mismatch From 
616543868920964fb8b285a391f83ea9a6b47aca Mon Sep 17 00:00:00 2001 From: Mark Mercado Date: Tue, 16 Feb 2021 05:46:39 -0500 Subject: [PATCH 0054/3093] StatsD Module (#1793) * Pushing my WIP * Update DOCUMENTATION * Update EXAMPLES * More friendly name * Finish up the counter and gauge logic * Cleanup DOCUMENTATION and add metric_type * Apply autopep8 * Fixup the exits * Stubbing out unit tests * Whitespace * Whitespace * Removing unused modules * Remove unused modules * Might have have a prefix * Rearrange imported modules * Cleanup the if/elif blob * Require python >= 2.7 * Update DOCUMENTATION Co-authored-by: Felix Fontein * Update DOCUMENTATION Co-authored-by: Felix Fontein * Add import guarding on statsd * Add missing future import * Include missing_required_lib * Fixing sanity tests * Fixing delta default and choices * Formatting * Close tcp connection * Refactoring and unit tests * Fix pep8 sanity tests * Putting requirements.txt back to main * Apply suggestions from code review Co-authored-by: Mark Mercado Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/modules/monitoring/statsd.py | 170 ++++++++++++++++++ plugins/modules/statsd.py | 1 + .../plugins/modules/monitoring/test_statsd.py | 101 +++++++++++ 4 files changed, 274 insertions(+) create mode 100644 plugins/modules/monitoring/statsd.py create mode 120000 plugins/modules/statsd.py create mode 100644 tests/unit/plugins/modules/monitoring/test_statsd.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 00a27cd837..bbd52b544e 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -418,6 +418,8 @@ files: maintainers: orgito $modules/monitoring/stackdriver.py: maintainers: bwhaley + $modules/monitoring/statsd.py: + maintainers: mamercad $modules/monitoring/statusio_maintenance.py: maintainers: bhcopeland $modules/monitoring/uptimerobot.py: diff --git a/plugins/modules/monitoring/statsd.py b/plugins/modules/monitoring/statsd.py new file mode 100644 index 
0000000000..b07851641b --- /dev/null +++ b/plugins/modules/monitoring/statsd.py @@ -0,0 +1,170 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: statsd +short_description: Send metrics to StatsD +version_added: 2.1.0 +description: + - The C(statsd) module sends metrics to StatsD. + - For more information, see U(https://statsd-metrics.readthedocs.io/en/latest/). + - Supported metric types are C(counter) and C(gauge). + Currently unupported metric types are C(timer), C(set), and C(gaugedelta). +author: "Mark Mercado (@mamercad)" +requirements: + - statsd +options: + state: + type: str + description: + - State of the check, only C(present) makes sense. + choices: ["present"] + default: present + host: + type: str + default: localhost + description: + - StatsD host (hostname or IP) to send metrics to. + port: + type: int + default: 8125 + description: + - The port on C(host) which StatsD is listening on. + protocol: + type: str + default: udp + choices: ["udp", "tcp"] + description: + - The transport protocol to send metrics over. + timeout: + type: float + default: 1.0 + description: + - Sender timeout, only applicable if C(protocol) is C(tcp). + metric: + type: str + required: true + description: + - The name of the metric. + metric_type: + type: str + required: true + choices: ["counter", "gauge"] + description: + - The type of metric. + metric_prefix: + type: str + description: + - The prefix to add to the metric. + value: + type: int + required: true + description: + - The value of the metric. + delta: + type: bool + default: false + description: + - If the metric is of type C(gauge), change the value by C(delta). 
+''' + +EXAMPLES = ''' +- name: Increment the metric my_counter by 1 + community.general.statsd: + host: localhost + port: 9125 + protocol: tcp + metric: my_counter + metric_type: counter + value: 1 + +- name: Set the gauge my_gauge to 7 + community.general.statsd: + host: localhost + port: 9125 + protocol: tcp + metric: my_gauge + metric_type: gauge + value: 7 +''' + + +from ansible.module_utils.basic import (AnsibleModule, missing_required_lib) + +try: + from statsd import StatsClient, TCPStatsClient + HAS_STATSD = True +except ImportError: + HAS_STATSD = False + + +def udp_statsd_client(**client_params): + return StatsClient(**client_params) + + +def tcp_statsd_client(**client_params): + return TCPStatsClient(**client_params) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present']), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=8125), + protocol=dict(type='str', default='udp', choices=['udp', 'tcp']), + timeout=dict(type='float', default=1.0), + metric=dict(type='str', required=True), + metric_type=dict(type='str', required=True, choices=['counter', 'gauge']), + metric_prefix=dict(type='str', default=''), + value=dict(type='int', required=True), + delta=dict(type='bool', default=False), + ), + supports_check_mode=False + ) + + if not HAS_STATSD: + module.fail_json(msg=missing_required_lib('statsd')) + + host = module.params.get('host') + port = module.params.get('port') + protocol = module.params.get('protocol') + timeout = module.params.get('timeout') + metric = module.params.get('metric') + metric_type = module.params.get('metric_type') + metric_prefix = module.params.get('metric_prefix') + value = module.params.get('value') + delta = module.params.get('delta') + + if protocol == 'udp': + client = udp_statsd_client(host=host, port=port, prefix=metric_prefix, maxudpsize=512, ipv6=False) + elif protocol == 'tcp': + client = tcp_statsd_client(host=host, 
port=port, timeout=timeout, prefix=metric_prefix, ipv6=False) + + metric_name = '%s/%s' % (metric_prefix, metric) if metric_prefix else metric + metric_display_value = '%s (delta=%s)' % (value, delta) if metric_type == 'gauge' else value + + try: + if metric_type == 'counter': + client.incr(metric, value) + elif metric_type == 'gauge': + client.gauge(metric, value, delta=delta) + + except Exception as exc: + module.fail_json(msg='Failed sending to StatsD %s' % str(exc)) + + finally: + if protocol == 'tcp': + client.close() + + module.exit_json(msg="Sent %s %s -> %s to StatsD" % (metric_type, metric_name, str(metric_display_value)), changed=True) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/statsd.py b/plugins/modules/statsd.py new file mode 120000 index 0000000000..a906f4df1a --- /dev/null +++ b/plugins/modules/statsd.py @@ -0,0 +1 @@ +monitoring/statsd.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/monitoring/test_statsd.py b/tests/unit/plugins/modules/monitoring/test_statsd.py new file mode 100644 index 0000000000..205080e754 --- /dev/null +++ b/tests/unit/plugins/modules/monitoring/test_statsd.py @@ -0,0 +1,101 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible_collections.community.general.plugins.modules.monitoring import statsd +from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + + +class FakeStatsD(MagicMock): + + def incr(self, *args, **kwargs): + pass + + def gauge(self, *args, **kwargs): + pass + + def close(self, *args, **kwargs): + pass + + +class TestStatsDModule(ModuleTestCase): + + def setUp(self): + super(TestStatsDModule, self).setUp() + 
statsd.HAS_STATSD = True + self.module = statsd + + def tearDown(self): + super(TestStatsDModule, self).tearDown() + + def patch_udp_statsd_client(self, **kwargs): + return patch('ansible_collections.community.general.plugins.modules.monitoring.statsd.udp_statsd_client', autospec=True, **kwargs) + + def patch_tcp_statsd_client(self, **kwargs): + return patch('ansible_collections.community.general.plugins.modules.monitoring.statsd.tcp_statsd_client', autospec=True, **kwargs) + + def test_udp_without_parameters(self): + """Test udp without parameters""" + with self.patch_udp_statsd_client(side_effect=FakeStatsD) as fake_statsd: + with self.assertRaises(AnsibleFailJson) as result: + set_module_args({}) + self.module.main() + + def test_tcp_without_parameters(self): + """Test tcp without parameters""" + with self.patch_tcp_statsd_client(side_effect=FakeStatsD) as fake_statsd: + with self.assertRaises(AnsibleFailJson) as result: + set_module_args({}) + self.module.main() + + def test_udp_with_parameters(self): + """Test udp with parameters""" + with self.patch_udp_statsd_client(side_effect=FakeStatsD) as fake_statsd: + with self.assertRaises(AnsibleExitJson) as result: + set_module_args({ + 'metric': 'my_counter', + 'metric_type': 'counter', + 'value': 1, + }) + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], 'Sent counter my_counter -> 1 to StatsD') + self.assertEqual(result.exception.args[0]['changed'], True) + with self.patch_udp_statsd_client(side_effect=FakeStatsD) as fake_statsd: + with self.assertRaises(AnsibleExitJson) as result: + set_module_args({ + 'metric': 'my_gauge', + 'metric_type': 'gauge', + 'value': 3, + }) + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], 'Sent gauge my_gauge -> 3 (delta=False) to StatsD') + self.assertEqual(result.exception.args[0]['changed'], True) + + def test_tcp_with_parameters(self): + """Test tcp with parameters""" + with self.patch_tcp_statsd_client(side_effect=FakeStatsD) as 
fake_statsd: + with self.assertRaises(AnsibleExitJson) as result: + set_module_args({ + 'protocol': 'tcp', + 'metric': 'my_counter', + 'metric_type': 'counter', + 'value': 1, + }) + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], 'Sent counter my_counter -> 1 to StatsD') + self.assertEqual(result.exception.args[0]['changed'], True) + with self.patch_tcp_statsd_client(side_effect=FakeStatsD) as fake_statsd: + with self.assertRaises(AnsibleExitJson) as result: + set_module_args({ + 'protocol': 'tcp', + 'metric': 'my_gauge', + 'metric_type': 'gauge', + 'value': 3, + }) + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], 'Sent gauge my_gauge -> 3 (delta=False) to StatsD') + self.assertEqual(result.exception.args[0]['changed'], True) From a1badbb5b28737884e1215b8a7675860e17eae18 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 16 Feb 2021 13:00:53 +0100 Subject: [PATCH 0055/3093] Next release will be 2.2.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 20b24b64e9..2440118c6f 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 2.1.0 +version: 2.2.0 readme: README.md authors: - Ansible (https://github.com/ansible) From e0dd4b240f8e845492e1c9812aee6ef331e3da86 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 17 Feb 2021 09:45:25 +1300 Subject: [PATCH 0056/3093] runit - deprecate param dist (#1838) * Deprecate param dist * added changelog fragment * added changelog fragment * Removed ignore lines for runit --- changelogs/fragments/1838-runit-deprecate-param-dist.yml | 2 ++ plugins/modules/system/runit.py | 2 +- tests/sanity/ignore-2.10.txt | 2 -- tests/sanity/ignore-2.11.txt | 2 -- tests/sanity/ignore-2.9.txt | 2 -- 5 files changed, 3 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/1838-runit-deprecate-param-dist.yml diff --git 
a/changelogs/fragments/1838-runit-deprecate-param-dist.yml b/changelogs/fragments/1838-runit-deprecate-param-dist.yml new file mode 100644 index 0000000000..5d133c074e --- /dev/null +++ b/changelogs/fragments/1838-runit-deprecate-param-dist.yml @@ -0,0 +1,2 @@ +deprecated_features: + - runit - unused parameter ``dist`` marked for deprecation (https://github.com/ansible-collections/community.general/pull/1830). diff --git a/plugins/modules/system/runit.py b/plugins/modules/system/runit.py index f8f3ada3da..30cd611b29 100644 --- a/plugins/modules/system/runit.py +++ b/plugins/modules/system/runit.py @@ -217,7 +217,7 @@ def main(): name=dict(type='str', required=True), state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']), enabled=dict(type='bool'), - dist=dict(type='str', default='runit'), # @FIXME unused param? + dist=dict(type='str', removed_in_version='4.0.0', removed_from_collection='community.general'), service_dir=dict(type='str', default='/var/service'), service_src=dict(type='str', default='/etc/sv'), ), diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index ca6abc7413..24e4392fdc 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -266,9 +266,7 @@ plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spe plugins/modules/system/puppet.py validate-modules:parameter-invalid plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc plugins/modules/system/puppet.py validate-modules:undocumented-parameter -plugins/modules/system/runit.py validate-modules:doc-default-does-not-match-spec plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/runit.py validate-modules:undocumented-parameter plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice 
plugins/modules/system/xfconf.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index f34db6b314..1c92d62579 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -265,9 +265,7 @@ plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spe plugins/modules/system/puppet.py validate-modules:parameter-invalid plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc plugins/modules/system/puppet.py validate-modules:undocumented-parameter -plugins/modules/system/runit.py validate-modules:doc-default-does-not-match-spec plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/runit.py validate-modules:undocumented-parameter plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 6ad3960bf1..82aeb58156 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -258,9 +258,7 @@ plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:parameter-invalid plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc plugins/modules/system/puppet.py validate-modules:undocumented-parameter -plugins/modules/system/runit.py validate-modules:doc-default-does-not-match-spec plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/runit.py validate-modules:undocumented-parameter plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:return-syntax-error 
plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path From 5135587c16efbcfa1c22dc62f344e93582adf8a1 Mon Sep 17 00:00:00 2001 From: Tristan Le Guern Date: Wed, 17 Feb 2021 21:25:05 +0100 Subject: [PATCH 0057/3093] proxmox_kvm: fix undefined local variable status (#1847) * proxmox_kvm: undefined local variable status * proxmox_kvm: Add a changelog fragment * Update changelogs/fragments/1847-proxmox-kvm-fix-status.yml Co-authored-by: Felix Fontein * proxmox_kvm: fix the RETURN document * proxmox_kvm: fix name variable when state=current The variable name is not always defined as the module can be called with just a vmid. Before: > "msg": "VM None with vmid = 118 is stopped" After: > "msg": "VM test-instance with vmid = 118 is stopped" Co-authored-by: Felix Fontein --- .../fragments/1847-proxmox-kvm-fix-status.yml | 2 ++ plugins/modules/cloud/misc/proxmox_kvm.py | 36 ++++++++++++------- 2 files changed, 25 insertions(+), 13 deletions(-) create mode 100644 changelogs/fragments/1847-proxmox-kvm-fix-status.yml diff --git a/changelogs/fragments/1847-proxmox-kvm-fix-status.yml b/changelogs/fragments/1847-proxmox-kvm-fix-status.yml new file mode 100644 index 0000000000..0863f1bed2 --- /dev/null +++ b/changelogs/fragments/1847-proxmox-kvm-fix-status.yml @@ -0,0 +1,2 @@ +bugfixes: + - proxmox_kvm - fix undefined local variable ``status`` when the parameter ``state`` is either ``stopped``, ``started``, ``restarted`` or ``absent`` (https://github.com/ansible-collections/community.general/pull/1847). diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index eee698405e..9f7bd58ff6 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -734,20 +734,20 @@ EXAMPLES = ''' RETURN = ''' vmid: - description: The VM vmid. - returned: success - type: int - sample: 115 + description: The VM vmid. 
+ returned: success + type: int + sample: 115 status: - description: - - The current virtual machine status. - returned: success - type: dict - sample: '{ - "changed": false, - "msg": "VM kropta with vmid = 110 is running", - "status": "running" - }' + description: The current virtual machine status. + returned: success, not clone, not absent, not update + type: str + sample: running +msg: + description: A short message + returned: always + type: str + sample: "VM kropta with vmid = 110 is running" ''' import re @@ -1297,12 +1297,14 @@ def main(): module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e)) elif state == 'started': + status = {} try: if -1 == vmid: module.fail_json(msg='VM with name = %s does not exist in cluster' % name) vm = get_vm(proxmox, vmid) if not vm: module.fail_json(vmid=vmid, msg='VM with vmid <%s> does not exist in cluster' % vmid) + status['status'] = vm[0]['status'] if vm[0]['status'] == 'running': module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status) @@ -1312,6 +1314,7 @@ def main(): module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status) elif state == 'stopped': + status = {} try: if -1 == vmid: module.fail_json(msg='VM with name = %s does not exist in cluster' % name) @@ -1320,6 +1323,7 @@ def main(): if not vm: module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid) + status['status'] = vm[0]['status'] if vm[0]['status'] == 'stopped': module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status) @@ -1329,6 +1333,7 @@ def main(): module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status) elif state == 'restarted': + status = {} try: if -1 == vmid: module.fail_json(msg='VM with name = %s does not exist in cluster' % name) @@ -1336,6 +1341,7 @@ def main(): vm = get_vm(proxmox, vmid) if not vm: 
module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid) + status['status'] = vm[0]['status'] if vm[0]['status'] == 'stopped': module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status) @@ -1345,12 +1351,14 @@ def main(): module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e), **status) elif state == 'absent': + status = {} try: vm = get_vm(proxmox, vmid) if not vm: module.exit_json(changed=False, vmid=vmid) proxmox_node = proxmox.nodes(vm[0]['node']) + status['status'] = vm[0]['status'] if vm[0]['status'] == 'running': if module.params['force']: stop_vm(module, proxmox, vm, True) @@ -1372,6 +1380,8 @@ def main(): vm = get_vm(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) + if not name: + name = vm[0]['name'] current = proxmox.nodes(vm[0]['node']).qemu(vmid).status.current.get()['status'] status['status'] = current if status: From 682674dd5fb39f89d98830a34d45d3a55bfbf5d6 Mon Sep 17 00:00:00 2001 From: Tristan Le Guern Date: Thu, 18 Feb 2021 11:54:30 +0100 Subject: [PATCH 0058/3093] proxmox_kvm: add integration tests (#1849) --- .../targets/proxmox/tasks/main.yml | 167 ++++++++++++++++++ 1 file changed, 167 insertions(+) diff --git a/tests/integration/targets/proxmox/tasks/main.yml b/tests/integration/targets/proxmox/tasks/main.yml index c615faf516..235e412d98 100644 --- a/tests/integration/targets/proxmox/tasks/main.yml +++ b/tests/integration/targets/proxmox/tasks/main.yml @@ -109,3 +109,170 @@ - results_userid.proxmox_users[0].domain == "{{ domain }}" - results_userid.proxmox_users[0].user == "{{ user }}" - results_userid.proxmox_users[0].userid == "{{ user }}@{{ domain }}" + +- name: VM creation + tags: [ 'create' ] + block: + - name: Create test vm test-instance + proxmox_kvm: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ 
api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + node: "{{ node }}" + storage: "{{ storage }}" + vmid: "{{ from_vmid }}" + name: test-instance + clone: 'yes' + state: present + timeout: 500 + register: results_kvm + + - set_fact: + vmid: "{{ results_kvm.msg.split(' ')[-7] }}" + + - assert: + that: + - results_kvm is changed + - results_kvm.vmid == from_vmid + - results_kvm.msg == "VM test-instance with newid {{ vmid }} cloned from vm with vmid {{ from_vmid }}" + + - pause: + seconds: 30 + +- name: VM start + tags: [ 'start' ] + block: + - name: Start test VM + proxmox_kvm: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + node: "{{ node }}" + vmid: "{{ vmid }}" + state: started + register: results_action_start + + - assert: + that: + - results_action_start is changed + - results_action_start.status == 'stopped' + - results_action_start.vmid == {{ vmid }} + - results_action_start.msg == "VM {{ vmid }} started" + + - pause: + seconds: 90 + + - name: Try to start test VM again + proxmox_kvm: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + node: "{{ node }}" + vmid: "{{ vmid }}" + state: started + register: results_action_start_again + + - assert: + that: + - results_action_start_again is not changed + - results_action_start_again.status == 'running' + - results_action_start_again.vmid == {{ vmid }} + - results_action_start_again.msg == "VM {{ vmid }} is already running" + + - name: Check current status + proxmox_kvm: + api_host: "{{ 
api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + node: "{{ node }}" + vmid: "{{ vmid }}" + state: current + register: results_action_current + + - assert: + that: + - results_action_current is not changed + - results_action_current.status == 'running' + - results_action_current.vmid == {{ vmid }} + - results_action_current.msg == "VM test-instance with vmid = {{ vmid }} is running" + +- name: VM stop + tags: [ 'stop' ] + block: + - name: Stop test VM + proxmox_kvm: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + node: "{{ node }}" + vmid: "{{ vmid }}" + state: stopped + register: results_action_stop + + - assert: + that: + - results_action_stop is changed + - results_action_stop.status == 'running' + - results_action_stop.vmid == {{ vmid }} + - results_action_stop.msg == "VM {{ vmid }} is shutting down" + + - pause: + seconds: 5 + + - name: Check current status again + proxmox_kvm: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + node: "{{ node }}" + vmid: "{{ vmid }}" + state: current + register: results_action_current + + - assert: + that: + - results_action_current is not changed + - results_action_current.status == 'stopped' + - results_action_current.vmid == {{ vmid }} + - results_action_current.msg == "VM test-instance with vmid = {{ vmid }} is stopped" + +- name: VM destroy + tags: [ 'destroy' ] + 
block: + - name: Destroy test VM + proxmox_kvm: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + proxmox_default_behavior: "no_defaults" + node: "{{ node }}" + vmid: "{{ vmid }}" + state: absent + register: results_kvm_destroy + + - assert: + that: + - results_kvm_destroy is changed + - results_kvm_destroy.vmid == {{ vmid }} + - results_kvm_destroy.msg == "VM {{ vmid }} removed" From a44ffdc20d5a845b9373722397f14c79bd3a8270 Mon Sep 17 00:00:00 2001 From: Denise Yu Date: Sat, 20 Feb 2021 03:51:43 -0500 Subject: [PATCH 0059/3093] Update bug_report.yml (#1858) This fixes the bug report template. --- .github/ISSUE_TEMPLATE/bug_report.yml | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 0e17d5b1d9..e7d5579433 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,9 +1,9 @@ name: Bug Report about: Create a report to help us improve -inputs: +body: - - type: description + - type: markdown attributes: value: | Verify first that your issue is not already reported on [GitHub](https://github.com/ansible-collections/community.general/issues) @@ -13,14 +13,16 @@ inputs: attributes: label: Summary description: 'Explain the problem briefly below' + validations: required: true - type: dropdown attributes: # FIXME: Once GitHub allows defining the default choice, update this label: Issue Type - choices: + options: - Bug Report + validations: required: true - type: textarea @@ -32,12 +34,12 @@ inputs: label: Component Name description: 'List the component, ie `template`, `mysql_users`' + validations: required: true - type: textarea attributes: label: Ansible Version - required: false description: | 
Paste verbatim output from `ansible --version` between quotes value: | @@ -47,7 +49,6 @@ inputs: - type: textarea attributes: label: Configuration - required: false description: | If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. This can be a piece of YAML from, e.g., an automation, script, scene or configuration. @@ -60,13 +61,11 @@ inputs: attributes: label: OS / Environment description: 'Provide all relevant information below, e.g. target OS versions, network device firmware, etc' - required: false - type: textarea attributes: label: Steps To Reproduce description: 'Describe exactly how to reproduce the problem, using a minimal test-case' - required: false value: | ```paste below @@ -76,13 +75,11 @@ inputs: label: Expected Results description: | Describe what you expected to happen when running the steps above - required: false - type: textarea attributes: label: Actual Results description: 'Describe what actually happened. If possible run with extra verbosity (`ansible-playbook -vvvv`)' - required: false value: | ```paste below From 57f56b02d850f475de616b63a09cec78efe1bfcd Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 20 Feb 2021 10:20:23 +0100 Subject: [PATCH 0060/3093] Disable flatpack_remote test due to expired key. 
(#1862) --- tests/integration/targets/flatpak_remote/aliases | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/targets/flatpak_remote/aliases b/tests/integration/targets/flatpak_remote/aliases index 39291d435b..3623baa5c2 100644 --- a/tests/integration/targets/flatpak_remote/aliases +++ b/tests/integration/targets/flatpak_remote/aliases @@ -6,3 +6,4 @@ skip/osx skip/macos skip/rhel needs/root +disabled # FIXME From fdb66d556767b66961503b525a3fa37d1732fb3c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 22 Feb 2021 05:53:30 +1300 Subject: [PATCH 0061/3093] Fixed imc_rest session logout (#1743) * Fixed imc_rest session logout * Update plugins/modules/remote_management/imc/imc_rest.py Co-authored-by: Felix Fontein * Update changelogs/fragments/1735-imc-sessions.yml Co-authored-by: Felix Fontein * Trying with try/finally Co-authored-by: Felix Fontein --- changelogs/fragments/1735-imc-sessions.yml | 2 + .../modules/remote_management/imc/imc_rest.py | 83 ++++++++++--------- tests/sanity/ignore-2.10.txt | 1 - tests/sanity/ignore-2.11.txt | 1 - tests/sanity/ignore-2.9.txt | 1 - 5 files changed, 47 insertions(+), 41 deletions(-) create mode 100644 changelogs/fragments/1735-imc-sessions.yml diff --git a/changelogs/fragments/1735-imc-sessions.yml b/changelogs/fragments/1735-imc-sessions.yml new file mode 100644 index 0000000000..057393d06c --- /dev/null +++ b/changelogs/fragments/1735-imc-sessions.yml @@ -0,0 +1,2 @@ +bugfixes: + - imc_rest - explicitly logging out instead of registering the call in ```atexit``` (https://github.com/ansible-collections/community.general/issues/1735). 
diff --git a/plugins/modules/remote_management/imc/imc_rest.py b/plugins/modules/remote_management/imc/imc_rest.py index ca318b4e68..239c76fab3 100644 --- a/plugins/modules/remote_management/imc/imc_rest.py +++ b/plugins/modules/remote_management/imc/imc_rest.py @@ -27,21 +27,25 @@ options: - IP Address or hostname of Cisco IMC, resolvable by Ansible control host. required: true aliases: [ host, ip ] + type: str username: description: - Username used to login to the switch. default: admin aliases: [ user ] + type: str password: description: - The password to use for authentication. default: password + type: str path: description: - Name of the absolute path of the filename that includes the body of the http request being sent to the Cisco IMC REST API. - Parameter C(path) is mutual exclusive with parameter C(content). aliases: [ 'src', 'config_file' ] + type: path content: description: - When used instead of C(path), sets the content of the API requests directly. @@ -49,11 +53,13 @@ options: - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream, the Cisco IMC output is subsequently merged. - Parameter C(content) is mutual exclusive with parameter C(path). + type: str protocol: description: - Connection protocol to use. default: https choices: [ http, https ] + type: str timeout: description: - The socket level timeout in seconds. @@ -61,6 +67,7 @@ options: If this C(timeout) is reached, the module will fail with a C(Connection failure) indicating that C(The read operation timed out). default: 60 + type: int validate_certs: description: - If C(no), SSL certificates will not be validated. 
@@ -253,11 +260,11 @@ output: errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/> ''' -import atexit import datetime import itertools import os import traceback +from functools import partial LXML_ETREE_IMP_ERR = None try: @@ -317,7 +324,6 @@ def merge(one, two): def main(): - module = AnsibleModule( argument_spec=dict( hostname=dict(type='str', required=True, aliases=['host', 'ip']), @@ -374,53 +380,54 @@ def main(): result.update(imc_response(module, resp.read())) # Store cookie for future requests + cookie = '' try: cookie = result['aaaLogin']['attributes']['outCookie'] except Exception: module.fail_json(msg='Could not find cookie in output', **result) - # If we would not log out properly, we run out of sessions quickly - atexit.register(logout, module, url, cookie, timeout) + try: + # Prepare request data + if content: + rawdata = content + elif file_exists: + with open(path, 'r') as config_object: + rawdata = config_object.read() - # Prepare request data - if content: - rawdata = content - elif file_exists: - with open(path, 'r') as config_object: - rawdata = config_object.read() + # Wrap the XML documents in a element + xmldata = lxml.etree.fromstring('%s' % rawdata.replace('\n', '')) - # Wrap the XML documents in a element - xmldata = lxml.etree.fromstring('%s' % rawdata.replace('\n', '')) + # Handle each XML document separately in the same session + for xmldoc in list(xmldata): + if xmldoc.tag is lxml.etree.Comment: + continue + # Add cookie to XML + xmldoc.set('cookie', cookie) + data = lxml.etree.tostring(xmldoc) - # Handle each XML document separately in the same session - for xmldoc in list(xmldata): - if xmldoc.tag is lxml.etree.Comment: - continue - # Add cookie to XML - xmldoc.set('cookie', cookie) - data = lxml.etree.tostring(xmldoc) + # Perform actual request + resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout) + if resp is None or 
info['status'] != 200: + result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result) - # Perform actual request - resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout) - if resp is None or info['status'] != 200: - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds - module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result) + # Merge results with previous results + rawoutput = resp.read() + result = merge(result, imc_response(module, rawoutput, rawinput=data)) + result['response'] = info['msg'] + result['status'] = info['status'] - # Merge results with previous results - rawoutput = resp.read() - result = merge(result, imc_response(module, rawoutput, rawinput=data)) - result['response'] = info['msg'] - result['status'] = info['status'] + # Check for any changes + # NOTE: Unfortunately IMC API always report status as 'modified' + xmloutput = lxml.etree.fromstring(rawoutput) + results = xmloutput.xpath('/configConfMo/outConfig/*/@status') + result['changed'] = ('modified' in results) - # Check for any changes - # NOTE: Unfortunately IMC API always report status as 'modified' - xmloutput = lxml.etree.fromstring(rawoutput) - results = xmloutput.xpath('/configConfMo/outConfig/*/@status') - result['changed'] = ('modified' in results) - - # Report success - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds - module.exit_json(**result) + # Report success + result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + module.exit_json(**result) + finally: + logout(module, url, cookie, timeout) if __name__ == '__main__': diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 24e4392fdc..6983355157 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -181,7 +181,6 @@ plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid 
plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/imc/imc_rest.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 1c92d62579..650f39c369 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -180,7 +180,6 @@ plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/imc/imc_rest.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 82aeb58156..3823989bbd 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -181,7 +181,6 @@ plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc 
plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/imc/imc_rest.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec From 389b004879c4d2c05656c81c8f122e523a4a2a78 Mon Sep 17 00:00:00 2001 From: Sergey Mikhaltsov Date: Sun, 21 Feb 2021 19:56:53 +0300 Subject: [PATCH 0062/3093] added gitlab_project_members (#1829) * added gitlab_project_members * fix code style * added some arg to doc * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * integration test for gitlab_project_members module Co-authored-by: Sergey Mikhaltsov Co-authored-by: Felix Fontein --- plugins/modules/gitlab_project_members.py | 1 + .../gitlab/gitlab_project_members.py | 280 ++++++++++++++++++ .../targets/gitlab_project_members/aliases | 1 + .../gitlab_project_members/defaults/main.yml | 5 + .../gitlab_project_members/tasks/main.yml | 80 +++++ 5 files changed, 367 insertions(+) create mode 120000 plugins/modules/gitlab_project_members.py create mode 100644 plugins/modules/source_control/gitlab/gitlab_project_members.py create mode 100644 tests/integration/targets/gitlab_project_members/aliases create mode 100644 tests/integration/targets/gitlab_project_members/defaults/main.yml create mode 100644 tests/integration/targets/gitlab_project_members/tasks/main.yml diff --git 
a/plugins/modules/gitlab_project_members.py b/plugins/modules/gitlab_project_members.py new file mode 120000 index 0000000000..2e1e69acf9 --- /dev/null +++ b/plugins/modules/gitlab_project_members.py @@ -0,0 +1 @@ +source_control/gitlab/gitlab_project_members.py \ No newline at end of file diff --git a/plugins/modules/source_control/gitlab/gitlab_project_members.py b/plugins/modules/source_control/gitlab/gitlab_project_members.py new file mode 100644 index 0000000000..163d6dbe63 --- /dev/null +++ b/plugins/modules/source_control/gitlab/gitlab_project_members.py @@ -0,0 +1,280 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Sergey Mikhaltsov +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: gitlab_project_members +short_description: Manage project members on GitLab Server +version_added: 2.2.0 +description: + - This module allows to add and remove members to/from a project, or change a member's access level in a project on GitLab. +author: Sergey Mikhaltsov (@metanovii) +requirements: + - python-gitlab python module <= 1.15.0 + - owner or maintainer rights to project on the GitLab server +options: + api_token: + description: + - A personal access token to authenticate with the GitLab API. + required: true + type: str + validate_certs: + description: + - Whether or not to validate TLS/SSL certificates when supplying a HTTPS endpoint. + - Should only be set to C(false) if you can guarantee that you are talking to the correct server + and no man-in-the-middle attack can happen. + default: true + type: bool + api_username: + description: + - The username to use for authentication against the API. + type: str + api_password: + description: + - The password to use for authentication against the API. + type: str + api_url: + description: + - The resolvable endpoint for the API. 
+ type: str + project: + description: + - The name of the GitLab project the member is added to/removed from. + required: true + type: str + gitlab_user: + description: + - The username of the member to add to/remove from the GitLab project. + required: true + type: str + access_level: + description: + - The access level for the user. + - Required if I(state=present), user state is set to present. + type: str + choices: ['guest', 'reporter', 'developer', 'maintainer'] + state: + description: + - State of the member in the project. + - On C(present), it adds a user to a GitLab project. + - On C(absent), it removes a user from a GitLab project. + choices: ['present', 'absent'] + default: 'present' + type: str +notes: + - Supports C(check_mode). +''' + +EXAMPLES = r''' +- name: Add a user to a GitLab Project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + validate_certs: True + project: projectname + gitlab_user: username + access_level: developer + state: present + +- name: Remove a user from a GitLab project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + validate_certs: False + project: projectname + gitlab_user: username + state: absent +''' + +RETURN = r''' # ''' + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication + +import traceback + +try: + import gitlab + HAS_PY_GITLAB = True +except ImportError: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_PY_GITLAB = False + + +class GitLabProjectMembers(object): + def __init__(self, module, gl): + self._module = module + self._gitlab = gl + + def get_project(self, project_name): + project_exists = self._gitlab.projects.list(search=project_name) + if project_exists: + return 
project_exists[0].id + + def get_user_id(self, gitlab_user): + user_exists = self._gitlab.users.list(username=gitlab_user) + if user_exists: + return user_exists[0].id + + # get all members in a project + def get_members_in_a_project(self, gitlab_project_id): + project = self._gitlab.projects.get(gitlab_project_id) + return project.members.list() + + # check if the user is a member of the project + def is_user_a_member(self, members, gitlab_user_id): + for member in members: + if member.id == gitlab_user_id: + return True + return False + + # add user to a project + def add_member_to_project(self, gitlab_user_id, gitlab_project_id, access_level): + try: + project = self._gitlab.projects.get(gitlab_project_id) + add_member = project.members.create( + {'user_id': gitlab_user_id, 'access_level': access_level}) + + if add_member: + return add_member.username + + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json( + msg="Failed to add member to the project, project ID %s: %s" % (gitlab_project_id, e)) + + # remove user from a project + def remove_user_from_project(self, gitlab_user_id, gitlab_project_id): + try: + project = self._gitlab.projects.get(gitlab_project_id) + project.members.delete(gitlab_user_id) + + except (gitlab.exceptions.GitlabDeleteError) as e: + self._module.fail_json( + msg="Failed to remove member from GitLab project, ID %s: %s" % (gitlab_project_id, e)) + + # get user's access level + def get_user_access_level(self, members, gitlab_user_id): + for member in members: + if member.id == gitlab_user_id: + return member.access_level + + # update user's access level in a project + def update_user_access_level(self, members, gitlab_user_id, access_level): + for member in members: + if member.id == gitlab_user_id: + try: + member.access_level = access_level + member.save() + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json( + msg="Failed to update the access level for the member, %s: %s" % 
(gitlab_user_id, e)) + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_token=dict(type='str', required=True, no_log=True), + project=dict(type='str', required=True), + gitlab_user=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + access_level=dict(type='str', required=False, choices=['guest', 'reporter', 'developer', 'maintainer']) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_password', 'api_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token'], + ], + required_if=[ + ['state', 'present', ['access_level']], + ], + supports_check_mode=True, + ) + + if not HAS_PY_GITLAB: + module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR) + + gitlab_project = module.params['project'] + gitlab_user = module.params['gitlab_user'] + state = module.params['state'] + access_level = module.params['access_level'] + + # convert access level string input to int + if access_level: + access_level_int = { + 'guest': gitlab.GUEST_ACCESS, + 'reporter': gitlab.REPORTER_ACCESS, + 'developer': gitlab.DEVELOPER_ACCESS, + 'maintainer': gitlab.MAINTAINER_ACCESS + } + + access_level = access_level_int[access_level] + + # connect to gitlab server + gl = gitlabAuthentication(module) + + project = GitLabProjectMembers(module, gl) + + gitlab_user_id = project.get_user_id(gitlab_user) + gitlab_project_id = project.get_project(gitlab_project) + + # project doesn't exist + if not gitlab_project_id: + module.fail_json(msg="project '%s' not found." 
% gitlab_project) + + # user doesn't exist + if not gitlab_user_id: + if state == 'absent': + module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the project" % gitlab_user) + else: + module.fail_json(msg="user '%s' not found." % gitlab_user) + + members = project.get_members_in_a_project(gitlab_project_id) + is_user_a_member = project.is_user_a_member(members, gitlab_user_id) + + # check if the user is a member in the project + if not is_user_a_member: + if state == 'present': + # add user to the project + if not module.check_mode: + project.add_member_to_project(gitlab_user_id, gitlab_project_id, access_level) + module.exit_json(changed=True, result="Successfully added user '%s' to the project." % gitlab_user) + # state as absent + else: + module.exit_json(changed=False, result="User, '%s', is not a member in the project. No change to report" % gitlab_user) + # in case that a user is a member + else: + if state == 'present': + # compare the access level + user_access_level = project.get_user_access_level(members, gitlab_user_id) + if user_access_level == access_level: + module.exit_json(changed=False, result="User, '%s', is already a member in the project. 
No change to report" % gitlab_user) + else: + # update the access level for the user + if not module.check_mode: + project.update_user_access_level(members, gitlab_user_id, access_level) + module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user) + else: + # remove the user from the project + if not module.check_mode: + project.remove_user_from_project(gitlab_user_id, gitlab_project_id) + module.exit_json(changed=True, result="Successfully removed user, '%s', from the project" % gitlab_user) + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/gitlab_project_members/aliases b/tests/integration/targets/gitlab_project_members/aliases new file mode 100644 index 0000000000..89aea537d1 --- /dev/null +++ b/tests/integration/targets/gitlab_project_members/aliases @@ -0,0 +1 @@ +unsupported \ No newline at end of file diff --git a/tests/integration/targets/gitlab_project_members/defaults/main.yml b/tests/integration/targets/gitlab_project_members/defaults/main.yml new file mode 100644 index 0000000000..a31fc0f2d6 --- /dev/null +++ b/tests/integration/targets/gitlab_project_members/defaults/main.yml @@ -0,0 +1,5 @@ +gitlab_server_url: https://gitlab.com +gitlab_api_access_token: "token" +gitlab_project: some_project +username: some_user +gitlab_access_level: developer diff --git a/tests/integration/targets/gitlab_project_members/tasks/main.yml b/tests/integration/targets/gitlab_project_members/tasks/main.yml new file mode 100644 index 0000000000..c3330bae41 --- /dev/null +++ b/tests/integration/targets/gitlab_project_members/tasks/main.yml @@ -0,0 +1,80 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Test code for gitlab_project_members module +# +# Copyright: (c) 
2021, Sergey Mikhaltsov +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: Install required library + pip: + name: python-gitlab + state: present + +- name: Clean UP before tests + community.general.gitlab_project_members: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_user: "{{ username }}" + state: absent + +- name: Add a User to A GitLab Project + community.general.gitlab_project_members: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_user: "{{ username }}" + access_level: "{{ gitlab_access_level }}" + state: present + register: gitlab_project_members_state + +- name: Test member added to project + assert: + that: + - gitlab_project_members_state is changed + +- name: Add a User to A GitLab Project ( Idempotency test ) + community.general.gitlab_project_members: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_user: "{{ username }}" + access_level: "{{ gitlab_access_level }}" + state: present + register: gitlab_project_members_state_again + +- name: Test module is idempotent + assert: + that: + - gitlab_project_members_state_again is not changed + +- name: Remove a User from A GitLab Project + community.general.gitlab_project_members: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_user: "{{ username }}" + state: absent + register: remove_gitlab_project_members_state + +- name: Test member removed from project + assert: + that: + - remove_gitlab_project_members_state is changed + +- name: Remove a User from A GitLab Project ( Idempotency test ) + community.general.gitlab_project_members: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + 
gitlab_user: "{{ username }}" + state: absent + register: remove_gitlab_project_members_state_again + +- name: Test module is idempotent + assert: + that: + - remove_gitlab_project_members_state_again is not changed From c0f3a63e1826b793c0955ed65620b2bc9ff3c023 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 21 Feb 2021 21:11:54 +0100 Subject: [PATCH 0063/3093] Improve infoblox inventory script dependencies. (#1871) --- .../fragments/1871-infoblox-inventory.yml | 2 ++ scripts/inventory/infoblox.py | 18 +++++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/1871-infoblox-inventory.yml diff --git a/changelogs/fragments/1871-infoblox-inventory.yml b/changelogs/fragments/1871-infoblox-inventory.yml new file mode 100644 index 0000000000..d49d176f1b --- /dev/null +++ b/changelogs/fragments/1871-infoblox-inventory.yml @@ -0,0 +1,2 @@ +bugfixes: +- "infoblox inventory script - make sure that the script also works with Ansible 2.9, and returns a more helpful error when community.general is not installed as part of Ansible 2.10/3 (https://github.com/ansible-collections/community.general/pull/1871)." 
diff --git a/scripts/inventory/infoblox.py b/scripts/inventory/infoblox.py index 9e985a9eb9..209509025e 100644 --- a/scripts/inventory/infoblox.py +++ b/scripts/inventory/infoblox.py @@ -13,10 +13,22 @@ import json import argparse from ansible.parsing.dataloader import DataLoader -from ansible.module_utils.six import iteritems +from ansible.module_utils.six import iteritems, raise_from from ansible.module_utils._text import to_text -from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiInventory -from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs +try: + from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiInventory + from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs +except ImportError as exc: + try: + # Fallback for Ansible 2.9 + from ansible.module_utils.net_tools.nios.api import WapiInventory + from ansible.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs + except ImportError: + raise_from( + Exception( + 'This inventory plugin only works with Ansible 2.9, 2.10, or 3, or when community.general is installed correctly in PYTHONPATH.' 
+ ' Try using the inventory plugin from infoblox.nios_modules instead.'), + exc) CONFIG_FILES = [ From abfbe2a48d45f22479e214dc5ff5dd7c80c8b034 Mon Sep 17 00:00:00 2001 From: Ramon de la Fuente Date: Mon, 22 Feb 2021 09:12:21 +0100 Subject: [PATCH 0064/3093] deploy_helper: fix a bug when not defining release on state=clean (#1859) * Fix a bug when not defining release on state=clean * Add changelog fragment --- .../1852-deploy-helper-fix-state-is-clean-without-release.yaml | 2 ++ plugins/modules/web_infrastructure/deploy_helper.py | 3 +++ 2 files changed, 5 insertions(+) create mode 100644 changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml diff --git a/changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml b/changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml new file mode 100644 index 0000000000..0946a4f38f --- /dev/null +++ b/changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml @@ -0,0 +1,2 @@ +bugfixes: + - deploy_helper - allow ``state=clean`` to be used without defining a ``release`` (https://github.com/ansible-collections/community.general/issues/1852). \ No newline at end of file diff --git a/plugins/modules/web_infrastructure/deploy_helper.py b/plugins/modules/web_infrastructure/deploy_helper.py index 641cc1d4ec..a07281819b 100644 --- a/plugins/modules/web_infrastructure/deploy_helper.py +++ b/plugins/modules/web_infrastructure/deploy_helper.py @@ -408,6 +408,9 @@ class DeployHelper(object): def remove_unfinished_link(self, path): changed = False + if not self.release: + return changed + tmp_link_name = os.path.join(path, self.release + '.' 
+ self.unfinished_filename) if not self.module.check_mode and os.path.exists(tmp_link_name): changed = True From 305748b3331c099981396460e90cff7ada3e45b3 Mon Sep 17 00:00:00 2001 From: Tristan Le Guern Date: Mon, 22 Feb 2021 17:59:27 +0100 Subject: [PATCH 0065/3093] New module proxmox_storage_info (#1844) * proxmox_storage_info: new module Simple info module dedicated to the retrieval of information about the storages available on a Proxmox VE cluster. * Update plugins/modules/cloud/misc/proxmox_storage_info.py Co-authored-by: Felix Fontein * Update plugins/modules/cloud/misc/proxmox_storage_info.py Co-authored-by: Felix Fontein * Update plugins/modules/cloud/misc/proxmox_storage_info.py Co-authored-by: Felix Fontein * Update plugins/modules/cloud/misc/proxmox_storage_info.py Co-authored-by: Felix Fontein * Update plugins/modules/cloud/misc/proxmox_storage_info.py Co-authored-by: Felix Fontein --- .../cloud/misc/proxmox_storage_info.py | 190 ++++++++++++++++++ plugins/modules/proxmox_storage_info.py | 1 + tests/integration/targets/proxmox/aliases | 1 + .../targets/proxmox/tasks/main.yml | 18 ++ 4 files changed, 210 insertions(+) create mode 100644 plugins/modules/cloud/misc/proxmox_storage_info.py create mode 120000 plugins/modules/proxmox_storage_info.py diff --git a/plugins/modules/cloud/misc/proxmox_storage_info.py b/plugins/modules/cloud/misc/proxmox_storage_info.py new file mode 100644 index 0000000000..fb495435e0 --- /dev/null +++ b/plugins/modules/cloud/misc/proxmox_storage_info.py @@ -0,0 +1,190 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright: Tristan Le Guern (@Aversiste) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: proxmox_storage_info +short_description: Retrieve information about one or more Proxmox VE storages +version_added: 2.2.0 +description: + - Retrieve 
information about one or more Proxmox VE storages. +options: + storage: + description: + - Only return informations on a specific storage. + aliases: ['name'] + type: str + type: + description: + - Filter on a specifc storage type. + type: str +author: Tristan Le Guern (@Aversiste) +extends_documentation_fragment: community.general.proxmox.documentation +notes: + - Storage specific options can be returned by this module, please look at the documentation at U(https://pve.proxmox.com/wiki/Storage). +''' + + +EXAMPLES = ''' +- name: List existing storages + community.general.proxmox_storage_info: + api_host: helldorado + api_user: root@pam + api_password: "{{ password | default(omit) }}" + api_token_id: "{{ token_id | default(omit) }}" + api_token_secret: "{{ token_secret | default(omit) }}" + register: proxmox_storages + +- name: List NFS storages only + community.general.proxmox_storage_info: + api_host: helldorado + api_user: root@pam + api_password: "{{ password | default(omit) }}" + api_token_id: "{{ token_id | default(omit) }}" + api_token_secret: "{{ token_secret | default(omit) }}" + type: nfs + register: proxmox_storages_nfs + +- name: Retrieve information about the lvm2 storage + community.general.proxmox_storage_info: + api_host: helldorado + api_user: root@pam + api_password: "{{ password | default(omit) }}" + api_token_id: "{{ token_id | default(omit) }}" + api_token_secret: "{{ token_secret | default(omit) }}" + storage: lvm2 + register: proxmox_storage_lvm +''' + + +RETURN = ''' +proxmox_storages: + description: List of storage pools. 
+ returned: on success + type: list + elements: dict + contains: + content: + description: Proxmox content types available in this storage + returned: on success + type: list + elements: str + digest: + description: Storage's digest + returned: on success + type: str + nodes: + description: List of nodes associated to this storage + returned: on success, if storage is not local + type: list + elements: str + path: + description: Physical path to this storage + returned: on success + type: str + prune-backups: + description: Backup retention options + returned: on success + type: list + elements: dict + shared: + description: Is this storage shared + returned: on success + type: bool + storage: + description: Storage name + returned: on success + type: str + type: + description: Storage type + returned: on success + type: str +''' + + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils.proxmox import ( + proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR, proxmox_to_ansible_bool) + + +class ProxmoxStorageInfoAnsible(ProxmoxAnsible): + def get_storage(self, storage): + try: + storage = self.proxmox_api.storage.get(storage) + except Exception: + self.module.fail_json(msg="Storage '%s' does not exist" % storage) + return ProxmoxStorage(storage) + + def get_storages(self, type=None): + storages = self.proxmox_api.storage.get(type=type) + storages = [ProxmoxStorage(storage) for storage in storages] + return storages + + +class ProxmoxStorage: + def __init__(self, storage): + self.storage = storage + # Convert proxmox representation of lists, dicts and boolean for easier + # manipulation within ansible. 
+ if 'shared' in self.storage: + self.storage['shared'] = proxmox_to_ansible_bool(self.storage['shared']) + if 'content' in self.storage: + self.storage['content'] = self.storage['content'].split(',') + if 'nodes' in self.storage: + self.storage['nodes'] = self.storage['nodes'].split(',') + if 'prune-backups' in storage: + options = storage['prune-backups'].split(',') + self.storage['prune-backups'] = dict() + for option in options: + k, v = option.split('=') + self.storage['prune-backups'][k] = v + + +def proxmox_storage_info_argument_spec(): + return dict( + storage=dict(type='str', aliases=['name']), + type=dict(type='str'), + ) + + +def main(): + module_args = proxmox_auth_argument_spec() + storage_info_args = proxmox_storage_info_argument_spec() + module_args.update(storage_info_args) + + module = AnsibleModule( + argument_spec=module_args, + required_one_of=[('api_password', 'api_token_id')], + required_together=[('api_token_id', 'api_token_secret')], + mutually_exclusive=[('storage', 'type')], + supports_check_mode=True + ) + result = dict( + changed=False + ) + + if not HAS_PROXMOXER: + module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR) + + proxmox = ProxmoxStorageInfoAnsible(module) + storage = module.params['storage'] + storagetype = module.params['type'] + + if storage: + storages = [proxmox.get_storage(storage)] + else: + storages = proxmox.get_storages(type=storagetype) + result['proxmox_storages'] = [storage.storage for storage in storages] + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/proxmox_storage_info.py b/plugins/modules/proxmox_storage_info.py new file mode 120000 index 0000000000..8128300547 --- /dev/null +++ b/plugins/modules/proxmox_storage_info.py @@ -0,0 +1 @@ +cloud/misc/proxmox_storage_info.py \ No newline at end of file diff --git a/tests/integration/targets/proxmox/aliases b/tests/integration/targets/proxmox/aliases index d5a5dcd139..caa35b7354 
100644 --- a/tests/integration/targets/proxmox/aliases +++ b/tests/integration/targets/proxmox/aliases @@ -2,3 +2,4 @@ unsupported proxmox_domain_info proxmox_group_info proxmox_user_info +proxmox_storage_info diff --git a/tests/integration/targets/proxmox/tasks/main.yml b/tests/integration/targets/proxmox/tasks/main.yml index 235e412d98..6301cb66ef 100644 --- a/tests/integration/targets/proxmox/tasks/main.yml +++ b/tests/integration/targets/proxmox/tasks/main.yml @@ -110,6 +110,24 @@ - results_userid.proxmox_users[0].user == "{{ user }}" - results_userid.proxmox_users[0].userid == "{{ user }}@{{ domain }}" +- name: Retrieve info about storage + proxmox_storage_info: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + storage: "{{ storage }}" + register: results_storage + +- assert: + that: + - results_storage is not changed + - results_storage.proxmox_storages is defined + - results_storage.proxmox_storages|length == 1 + - results_storage.proxmox_storages[0].storage == "{{ storage }}" + - name: VM creation tags: [ 'create' ] block: From 0b9893959f6fa27d6ec0c53c99d4534f1f613c2c Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 23 Feb 2021 08:57:40 +0100 Subject: [PATCH 0066/3093] Since gitlab_project_members is a copy if gitlab_group_members with small modifications, it needs to contain the copyright notices of that module as well as the authors. 
(#1874) --- .../modules/source_control/gitlab/gitlab_project_members.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/modules/source_control/gitlab/gitlab_project_members.py b/plugins/modules/source_control/gitlab/gitlab_project_members.py index 163d6dbe63..8e23dca426 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project_members.py +++ b/plugins/modules/source_control/gitlab/gitlab_project_members.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # Copyright: (c) 2021, Sergey Mikhaltsov +# Copyright: (c) 2020, Zainab Alsaffar # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function @@ -14,7 +15,9 @@ short_description: Manage project members on GitLab Server version_added: 2.2.0 description: - This module allows to add and remove members to/from a project, or change a member's access level in a project on GitLab. -author: Sergey Mikhaltsov (@metanovii) +author: + - Sergey Mikhaltsov (@metanovii) + - Zainab Alsaffar (@zanssa) requirements: - python-gitlab python module <= 1.15.0 - owner or maintainer rights to project on the GitLab server From e353390e6c3f044c0907f7825c430cd797eb84fc Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 23 Feb 2021 23:11:49 +0100 Subject: [PATCH 0067/3093] Remove unneeded fields (with typos). (#1887) --- plugins/inventory/linode.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 327b58ca8e..049d67c973 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -34,18 +34,15 @@ DOCUMENTATION = r''' description: Populate inventory with instances in this region. default: [] type: list - required: false tags: description: Populate inventory only with instances which have at least one of the tags listed here. 
default: [] type: list - reqired: false version_added: 2.0.0 types: description: Populate inventory with instances with this type. default: [] type: list - required: false strict: version_added: 2.0.0 compose: From 434f383ae92d9e34e0997755adb924724b69a8c6 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 24 Feb 2021 11:19:27 +1300 Subject: [PATCH 0068/3093] fixed Python 3 keys() usage (#1861) * fixed python3 keys() * added changelog fragment * Update plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py Co-authored-by: Felix Fontein * Update plugins/cache/redis.py Co-authored-by: Felix Fontein * rolledback redis.py per PR * Update plugins/modules/monitoring/sensu/sensu_check.py Co-authored-by: Felix Fontein * removed unnecessary ignore lines * adding memcached and one case in redis is indeed necessary * Update changelogs/fragments/1861-python3-keys.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/1861-python3-keys.yml * Update changelogs/fragments/1861-python3-keys.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/1861-python3-keys.yml | 22 +++++++++++++++++ plugins/cache/memcached.py | 2 +- plugins/cache/redis.py | 6 ++--- plugins/callback/diy.py | 2 +- plugins/callback/selective.py | 2 +- plugins/lookup/chef_databag.py | 2 +- plugins/module_utils/net_tools/nios/api.py | 16 ++++++------- plugins/module_utils/utm_utils.py | 2 +- plugins/modules/cloud/lxc/lxc_container.py | 6 ++--- plugins/modules/cloud/lxd/lxd_container.py | 2 +- .../oneandone/oneandone_monitoring_policy.py | 24 +++++++++---------- plugins/modules/cloud/oracle/oci_vcn.py | 2 +- .../spotinst/spotinst_aws_elastigroup.py | 6 ++--- .../packaging/os/redhat_subscription.py | 2 +- .../redfish/idrac_redfish_command.py | 2 +- .../redfish/idrac_redfish_config.py | 2 +- .../redfish/idrac_redfish_info.py | 2 +- .../redfish/redfish_command.py | 2 +- .../redfish/redfish_config.py | 2 +- 
plugins/modules/system/vdo.py | 2 +- scripts/inventory/nsot.py | 2 +- tests/sanity/ignore-2.10.txt | 2 -- tests/sanity/ignore-2.11.txt | 2 -- 23 files changed, 64 insertions(+), 50 deletions(-) create mode 100644 changelogs/fragments/1861-python3-keys.yml diff --git a/changelogs/fragments/1861-python3-keys.yml b/changelogs/fragments/1861-python3-keys.yml new file mode 100644 index 0000000000..029ed93575 --- /dev/null +++ b/changelogs/fragments/1861-python3-keys.yml @@ -0,0 +1,22 @@ +bugfixes: + - redis cache plugin - wrapped usages of ``keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - memcached cache plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - diy callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - selective callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - chef_databag lookup plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - net_tools.nios.api module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - utm_utils module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - lxc_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). 
+ - lxd_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - oneandone_monitoring_policy - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - oci_vcn - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - spotinst_aws_elastigroup - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - sensu_check - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - redhat_subscription - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - idrac_redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - idrac_redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - idrac_redfish_info - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). 
+ - vdo - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). + - nsot inventory script - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index e483238952..5c9e54aaa0 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -162,7 +162,7 @@ class CacheModuleKeys(MutableSet): self._cache.set(self.PREFIX, self._keyset) def remove_by_timerange(self, s_min, s_max): - for k in self._keyset.keys(): + for k in list(self._keyset.keys()): t = self._keyset[k] if s_min < t < s_max: del self._keyset[k] diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index c9a3c0fd7a..7a376d6d7c 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -217,14 +217,12 @@ class CacheModule(BaseCacheModule): self._db.zrem(self._keys_set, key) def flush(self): - for key in self.keys(): + for key in list(self.keys()): self.delete(key) def copy(self): # TODO: there is probably a better way to do this in redis - ret = dict() - for key in self.keys(): - ret[key] = self.get(key) + ret = dict([(k, self.get(k)) for k in self.keys()]) return ret def __getstate__(self): diff --git a/plugins/callback/diy.py b/plugins/callback/diy.py index 918262805c..dfed68b791 100644 --- a/plugins/callback/diy.py +++ b/plugins/callback/diy.py @@ -1013,7 +1013,7 @@ class CallbackModule(Default): for attr in _stats_attributes: _ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)}) - _ret[self.DIY_NS].update({'top_level_var_names': _ret.keys()}) + _ret[self.DIY_NS].update({'top_level_var_names': list(_ret.keys())}) return _ret diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index 71620c18c4..23813b0e3c 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ 
-67,7 +67,7 @@ COLORS = { def dict_diff(prv, nxt): """Return a dict of keys that differ with another config object.""" - keys = set(prv.keys() + nxt.keys()) + keys = set(list(prv.keys()) + list(nxt.keys())) result = {} for k in keys: if prv.get(k) != nxt.get(k): diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index d9f0fe02bd..0a1c6de3ed 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -81,7 +81,7 @@ class LookupModule(LookupBase): ) if args: raise AnsibleError( - "unrecognized arguments to with_sequence: %r" % args.keys() + "unrecognized arguments to with_sequence: %r" % list(args.keys()) ) def run(self, terms, variables=None, **kwargs): diff --git a/plugins/module_utils/net_tools/nios/api.py b/plugins/module_utils/net_tools/nios/api.py index 440ea391c9..eadc66fc37 100644 --- a/plugins/module_utils/net_tools/nios/api.py +++ b/plugins/module_utils/net_tools/nios/api.py @@ -499,12 +499,12 @@ class WapiModule(WapiBase): else: test_obj_filter = dict([('name', old_name)]) # get the object reference - ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys()) + ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys())) if ib_obj: obj_filter['name'] = new_name else: test_obj_filter['name'] = new_name - ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys()) + ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=list(ib_spec.keys())) update = True return ib_obj, update, new_name if (ib_obj_type == NIOS_HOST_RECORD): @@ -538,7 +538,7 @@ class WapiModule(WapiBase): # check if test_obj_filter is empty copy passed obj_filter else: test_obj_filter = obj_filter - ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys()) + ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys())) elif (ib_obj_type == NIOS_A_RECORD): # resolves issue where 
multiple a_records with same name and different IP address test_obj_filter = obj_filter @@ -548,7 +548,7 @@ class WapiModule(WapiBase): except TypeError: ipaddr = obj_filter['ipv4addr'] test_obj_filter['ipv4addr'] = ipaddr - ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys()) + ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys())) elif (ib_obj_type == NIOS_TXT_RECORD): # resolves issue where multiple txt_records with same name and different text test_obj_filter = obj_filter @@ -558,12 +558,12 @@ class WapiModule(WapiBase): except TypeError: txt = obj_filter['text'] test_obj_filter['text'] = txt - ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys()) + ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=list(ib_spec.keys())) elif (ib_obj_type == NIOS_ZONE): # del key 'restart_if_needed' as nios_zone get_object fails with the key present temp = ib_spec['restart_if_needed'] del ib_spec['restart_if_needed'] - ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys()) + ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys())) # reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref if not ib_obj: ib_spec['restart_if_needed'] = temp @@ -571,12 +571,12 @@ class WapiModule(WapiBase): # del key 'create_token' as nios_member get_object fails with the key present temp = ib_spec['create_token'] del ib_spec['create_token'] - ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys()) + ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=list(ib_spec.keys())) if temp: # reinstate 'create_token' key ib_spec['create_token'] = temp else: - ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys()) + ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), 
return_fields=list(ib_spec.keys())) return ib_obj, update, new_name def on_update(self, proposed_object, ib_spec): diff --git a/plugins/module_utils/utm_utils.py b/plugins/module_utils/utm_utils.py index 0966dc500f..591305a4b3 100644 --- a/plugins/module_utils/utm_utils.py +++ b/plugins/module_utils/utm_utils.py @@ -84,7 +84,7 @@ class UTM: raise UTMModuleConfigurationError( "The keys " + to_native( self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native( - module.params.keys())) + list(module.params.keys()))) def execute(self): try: diff --git a/plugins/modules/cloud/lxc/lxc_container.py b/plugins/modules/cloud/lxc/lxc_container.py index 2b5efe9cd7..636508dbda 100644 --- a/plugins/modules/cloud/lxc/lxc_container.py +++ b/plugins/modules/cloud/lxc/lxc_container.py @@ -1662,7 +1662,7 @@ def main(): ), backing_store=dict( type='str', - choices=LXC_BACKING_STORE.keys(), + choices=list(LXC_BACKING_STORE.keys()), default='dir' ), template_options=dict( @@ -1699,7 +1699,7 @@ def main(): type='path' ), state=dict( - choices=LXC_ANSIBLE_STATES.keys(), + choices=list(LXC_ANSIBLE_STATES.keys()), default='started' ), container_command=dict( @@ -1733,7 +1733,7 @@ def main(): type='path', ), archive_compression=dict( - choices=LXC_COMPRESSION_MAP.keys(), + choices=list(LXC_COMPRESSION_MAP.keys()), default='gzip' ) ), diff --git a/plugins/modules/cloud/lxd/lxd_container.py b/plugins/modules/cloud/lxd/lxd_container.py index 0cadaa9ac0..f4a97288ae 100644 --- a/plugins/modules/cloud/lxd/lxd_container.py +++ b/plugins/modules/cloud/lxd/lxd_container.py @@ -665,7 +665,7 @@ def main(): type='dict', ), state=dict( - choices=LXD_ANSIBLE_STATES.keys(), + choices=list(LXD_ANSIBLE_STATES.keys()), default='started' ), target=dict( diff --git a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py index 79fed9a66a..27ebebd6c7 100644 --- 
a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py +++ b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py @@ -695,15 +695,15 @@ def update_monitoring_policy(module, oneandone_conn): threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] _thresholds = [] - for treshold in thresholds: - key = treshold.keys()[0] + for threshold in thresholds: + key = list(threshold.keys())[0] if key in threshold_entities: _threshold = oneandone.client.Threshold( entity=key, - warning_value=treshold[key]['warning']['value'], - warning_alert=str(treshold[key]['warning']['alert']).lower(), - critical_value=treshold[key]['critical']['value'], - critical_alert=str(treshold[key]['critical']['alert']).lower()) + warning_value=threshold[key]['warning']['value'], + warning_alert=str(threshold[key]['warning']['alert']).lower(), + critical_value=threshold[key]['critical']['value'], + critical_alert=str(threshold[key]['critical']['alert']).lower()) _thresholds.append(_threshold) if name or description or email or thresholds: @@ -864,15 +864,15 @@ def create_monitoring_policy(module, oneandone_conn): threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer'] _thresholds = [] - for treshold in thresholds: - key = treshold.keys()[0] + for threshold in thresholds: + key = list(threshold.keys())[0] if key in threshold_entities: _threshold = oneandone.client.Threshold( entity=key, - warning_value=treshold[key]['warning']['value'], - warning_alert=str(treshold[key]['warning']['alert']).lower(), - critical_value=treshold[key]['critical']['value'], - critical_alert=str(treshold[key]['critical']['alert']).lower()) + warning_value=threshold[key]['warning']['value'], + warning_alert=str(threshold[key]['warning']['alert']).lower(), + critical_value=threshold[key]['critical']['value'], + critical_alert=str(threshold[key]['critical']['alert']).lower()) _thresholds.append(_threshold) _ports = [] diff --git a/plugins/modules/cloud/oracle/oci_vcn.py 
b/plugins/modules/cloud/oracle/oci_vcn.py index 06dc4af04e..e2906357ae 100644 --- a/plugins/modules/cloud/oracle/oci_vcn.py +++ b/plugins/modules/cloud/oracle/oci_vcn.py @@ -128,7 +128,7 @@ def update_vcn(virtual_network_client, module): primitive_params_update=["vcn_id"], kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"}, module=module, - update_attributes=UpdateVcnDetails().attribute_map.keys(), + update_attributes=list(UpdateVcnDetails().attribute_map.keys()), ) return result diff --git a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py index 8f05da7b09..a1dcd94efb 100644 --- a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py +++ b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py @@ -1305,10 +1305,8 @@ def expand_tags(eg_launchspec, tags): for tag in tags: eg_tag = spotinst.aws_elastigroup.Tag() - if tag.keys(): - eg_tag.tag_key = tag.keys()[0] - if tag.values(): - eg_tag.tag_value = tag.values()[0] + if tag: + eg_tag.tag_key, eg_tag.tag_value = list(tag.items())[0] eg_tags.append(eg_tag) diff --git a/plugins/modules/packaging/os/redhat_subscription.py b/plugins/modules/packaging/os/redhat_subscription.py index 18e20df7db..b62a7f391c 100644 --- a/plugins/modules/packaging/os/redhat_subscription.py +++ b/plugins/modules/packaging/os/redhat_subscription.py @@ -596,7 +596,7 @@ class Rhsm(RegistrationBase): if missing_pools or serials: changed = True - return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(), + return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()), 'unsubscribed_serials': serials} def sync_syspurpose(self): diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_command.py b/plugins/modules/remote_management/redfish/idrac_redfish_command.py index fb32248271..49e12e811a 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_command.py +++ 
b/plugins/modules/remote_management/redfish/idrac_redfish_command.py @@ -165,7 +165,7 @@ def main(): # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) # Check that all commands are valid for cmd in command_list: diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/plugins/modules/remote_management/redfish/idrac_redfish_config.py index 9248da2030..b8b8cce10a 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_config.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_config.py @@ -279,7 +279,7 @@ def main(): # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) # Check that all commands are valid for cmd in command_list: diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/plugins/modules/remote_management/redfish/idrac_redfish_info.py index 9044aa56bf..42a5efcba9 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_info.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_info.py @@ -202,7 +202,7 @@ def main(): # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + module.fail_json(msg=to_native("Invalid Category '%s'. 
Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) # Check that all commands are valid for cmd in command_list: diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index d409c0e023..9e23fd4626 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -639,7 +639,7 @@ def main(): # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) # Check that all commands are valid for cmd in command_list: diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py index 60612da35f..1ca30bfa86 100644 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ b/plugins/modules/remote_management/redfish/redfish_config.py @@ -288,7 +288,7 @@ def main(): # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: - module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + module.fail_json(msg=to_native("Invalid Category '%s'. 
Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) # Check that all commands are valid for cmd in command_list: diff --git a/plugins/modules/system/vdo.py b/plugins/modules/system/vdo.py index 15fd9c62b8..4049f82ed3 100644 --- a/plugins/modules/system/vdo.py +++ b/plugins/modules/system/vdo.py @@ -327,7 +327,7 @@ def inventory_vdos(module, vdocmd): vdoyamls = vdostatusyaml['VDOs'] if vdoyamls is not None: - vdolist = vdoyamls.keys() + vdolist = list(vdoyamls.keys()) return vdolist diff --git a/scripts/inventory/nsot.py b/scripts/inventory/nsot.py index 1394e3e3bd..10f24ad48b 100644 --- a/scripts/inventory/nsot.py +++ b/scripts/inventory/nsot.py @@ -181,7 +181,7 @@ class NSoTInventory(object): sys.exit('%s\n' % e) else: # Use defaults if env var missing self._config_default() - self.groups = self.config.keys() + self.groups = list(self.config.keys()) self.client = get_api_client() self._meta = {'hostvars': dict()} diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 6983355157..d77e3fa9f4 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -9,9 +9,7 @@ plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-do plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/linode/linode_v4.py validate-modules:parameter-list-no-elements plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path -plugins/modules/cloud/lxc/lxc_container.py validate-modules:invalid-ansiblemodule-schema plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen -plugins/modules/cloud/lxd/lxd_container.py validate-modules:invalid-ansiblemodule-schema plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/oneandone/oneandone_firewall_policy.py validate-modules:parameter-list-no-elements plugins/modules/cloud/oneandone/oneandone_load_balancer.py 
validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 650f39c369..abf4e7e7b3 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -8,9 +8,7 @@ plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-do plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/linode/linode_v4.py validate-modules:parameter-list-no-elements plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path -plugins/modules/cloud/lxc/lxc_container.py validate-modules:invalid-ansiblemodule-schema plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen -plugins/modules/cloud/lxd/lxd_container.py validate-modules:invalid-ansiblemodule-schema plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/oneandone/oneandone_firewall_policy.py validate-modules:parameter-list-no-elements plugins/modules/cloud/oneandone/oneandone_load_balancer.py validate-modules:parameter-list-no-elements From cf5e9bf44c46932d59cdf5c8140342ee2f80eed1 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 25 Feb 2021 21:39:48 +1300 Subject: [PATCH 0069/3093] Tidy up sanity checks ignore lines for source_control/* modules (#1893) * fixed validation-modules for plugins/modules/source_control/git* * Tidy up sanity checks ignore lines for source_control/* modules * removed unnecessary ignore lines from 2.9 and 2.11 --- plugins/modules/source_control/git_config.py | 5 +++++ .../source_control/github/github_deploy_key.py | 9 +++++++++ plugins/modules/source_control/github/github_issue.py | 4 ++++ plugins/modules/source_control/github/github_key.py | 4 ++++ .../modules/source_control/github/github_release.py | 9 +++++++++ .../modules/source_control/github/github_webhook.py | 9 +++++++++ .../source_control/github/github_webhook_info.py | 5 
+++++ tests/sanity/ignore-2.10.txt | 11 ----------- tests/sanity/ignore-2.11.txt | 11 ----------- tests/sanity/ignore-2.9.txt | 11 ----------- 10 files changed, 45 insertions(+), 33 deletions(-) diff --git a/plugins/modules/source_control/git_config.py b/plugins/modules/source_control/git_config.py index cbc8219cf0..16126b3bfa 100644 --- a/plugins/modules/source_control/git_config.py +++ b/plugins/modules/source_control/git_config.py @@ -34,10 +34,12 @@ options: description: - The name of the setting. If no value is supplied, the value will be read from the config if it has been set. + type: str repo: description: - Path to a git repository for reading and writing values from a specific repo. + type: path file: description: - Path to an adhoc git configuration file to be managed using the C(file) scope. @@ -51,6 +53,7 @@ options: - If this is set to C(file), you must also specify the C(file) parameter. - It defaults to system only when not using I(list_all)=C(yes). choices: [ "file", "local", "global", "system" ] + type: str state: description: - "Indicates the setting should be set/unset. @@ -58,10 +61,12 @@ options: when I(state)=absent and I(value) is defined, I(value) is discarded." choices: [ 'present', 'absent' ] default: 'present' + type: str value: description: - When specifying the name of a single setting, supply a value to set that setting to the given value. + type: str ''' EXAMPLES = ''' diff --git a/plugins/modules/source_control/github/github_deploy_key.py b/plugins/modules/source_control/github/github_deploy_key.py index 8836454e05..8954317b71 100644 --- a/plugins/modules/source_control/github/github_deploy_key.py +++ b/plugins/modules/source_control/github/github_deploy_key.py @@ -29,20 +29,24 @@ options: - The name of the individual account or organization that owns the GitHub repository. required: true aliases: [ 'account', 'organization' ] + type: str repo: description: - The name of the GitHub repository. 
required: true aliases: [ 'repository' ] + type: str name: description: - The name for the deploy key. required: true aliases: [ 'title', 'label' ] + type: str key: description: - The SSH public key to add to the repository as a deploy key. required: true + type: str read_only: description: - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write. @@ -53,6 +57,7 @@ options: - The state of the deploy key. default: "present" choices: [ "present", "absent" ] + type: str force: description: - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title. @@ -61,16 +66,20 @@ options: username: description: - The username to authenticate with. Should not be set when using personal access token + type: str password: description: - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination. + type: str token: description: - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password). + type: str otp: description: - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password). aliases: ['2fa_token'] + type: int notes: - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." ''' diff --git a/plugins/modules/source_control/github/github_issue.py b/plugins/modules/source_control/github/github_issue.py index 9c4b558bd5..66d26c8301 100644 --- a/plugins/modules/source_control/github/github_issue.py +++ b/plugins/modules/source_control/github/github_issue.py @@ -18,20 +18,24 @@ options: description: - Name of repository from which issue needs to be retrieved. required: true + type: str organization: description: - Name of the GitHub organization in which the repository is hosted. 
required: true + type: str issue: description: - Issue number for which information is required. required: true + type: int action: description: - Get various details about issue depending upon action specified. default: 'get_status' choices: - 'get_status' + type: str author: - Abhijeet Kasurde (@Akasurde) ''' diff --git a/plugins/modules/source_control/github/github_key.py b/plugins/modules/source_control/github/github_key.py index 415065f88e..616636edea 100644 --- a/plugins/modules/source_control/github/github_key.py +++ b/plugins/modules/source_control/github/github_key.py @@ -17,18 +17,22 @@ options: description: - GitHub Access Token with permission to list and create public keys. required: true + type: str name: description: - SSH key name required: true + type: str pubkey: description: - SSH public key value. Required when C(state=present). + type: str state: description: - Whether to remove a key, ensure that it exists, or update its value. choices: ['present', 'absent'] default: 'present' + type: str force: description: - The default is C(yes), which will replace the existing remote key diff --git a/plugins/modules/source_control/github/github_release.py b/plugins/modules/source_control/github/github_release.py index 5372d6e898..7813ba1d89 100644 --- a/plugins/modules/source_control/github/github_release.py +++ b/plugins/modules/source_control/github/github_release.py @@ -18,34 +18,43 @@ options: token: description: - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password). + type: str user: description: - The GitHub account that owns the repository + type: str required: true password: description: - The GitHub account password for the user. Mutually exclusive with C(token). + type: str repo: description: - Repository name + type: str required: true action: description: - Action to perform + type: str required: true choices: [ 'latest_release', 'create_release' ] tag: description: - Tag name when creating a release. 
Required when using action is set to C(create_release). + type: str target: description: - Target of release when creating a release + type: str name: description: - Name of release when creating a release + type: str body: description: - Description of the release when creating a release + type: str draft: description: - Sets if the release is a draft or not. (boolean) diff --git a/plugins/modules/source_control/github/github_webhook.py b/plugins/modules/source_control/github/github_webhook.py index ac15368986..2a737ef5a4 100644 --- a/plugins/modules/source_control/github/github_webhook.py +++ b/plugins/modules/source_control/github/github_webhook.py @@ -18,22 +18,26 @@ options: repository: description: - Full name of the repository to configure a hook for + type: str required: true aliases: - repo url: description: - URL to which payloads will be delivered + type: str required: true content_type: description: - The media type used to serialize the payloads + type: str required: false choices: [ form, json ] default: form secret: description: - The shared secret between GitHub and the payload URL. 
+ type: str required: false insecure_ssl: description: @@ -61,24 +65,29 @@ options: state: description: - Whether the hook should be present or absent + type: str required: false choices: [ absent, present ] default: present user: description: - User to authenticate to GitHub as + type: str required: true password: description: - Password to authenticate to GitHub with + type: str required: false token: description: - Token to authenticate to GitHub with + type: str required: false github_url: description: - Base URL of the GitHub API + type: str required: false default: https://api.github.com diff --git a/plugins/modules/source_control/github/github_webhook_info.py b/plugins/modules/source_control/github/github_webhook_info.py index f99a0a0328..0fd0b97bc2 100644 --- a/plugins/modules/source_control/github/github_webhook_info.py +++ b/plugins/modules/source_control/github/github_webhook_info.py @@ -19,24 +19,29 @@ options: repository: description: - Full name of the repository to configure a hook for + type: str required: true aliases: - repo user: description: - User to authenticate to GitHub as + type: str required: true password: description: - Password to authenticate to GitHub with + type: str required: false token: description: - Token to authenticate to GitHub with + type: str required: false github_url: description: - Base URL of the github api + type: str required: false default: https://api.github.com diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index d77e3fa9f4..e6dc30c96c 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -231,18 +231,7 @@ plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-def plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py 
validate-modules:undocumented-parameter -plugins/modules/source_control/git_config.py validate-modules:doc-missing-type -plugins/modules/source_control/git_config.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_deploy_key.py validate-modules:doc-missing-type plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid -plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_issue.py validate-modules:doc-missing-type -plugins/modules/source_control/github/github_issue.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_key.py validate-modules:doc-missing-type -plugins/modules/source_control/github/github_release.py validate-modules:doc-missing-type -plugins/modules/source_control/github/github_release.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_webhook.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_webhook_info.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:parameter-list-no-elements plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index abf4e7e7b3..9283c7841b 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -230,18 +230,7 @@ plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-def plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc 
plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter -plugins/modules/source_control/git_config.py validate-modules:doc-missing-type -plugins/modules/source_control/git_config.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_deploy_key.py validate-modules:doc-missing-type plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid -plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_issue.py validate-modules:doc-missing-type -plugins/modules/source_control/github/github_issue.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_key.py validate-modules:doc-missing-type -plugins/modules/source_control/github/github_release.py validate-modules:doc-missing-type -plugins/modules/source_control/github/github_release.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_webhook.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_webhook_info.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:parameter-list-no-elements plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 3823989bbd..836eec3514 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -223,18 +223,7 @@ plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-def plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc 
plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter -plugins/modules/source_control/git_config.py validate-modules:doc-missing-type -plugins/modules/source_control/git_config.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_deploy_key.py validate-modules:doc-missing-type plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid -plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_issue.py validate-modules:doc-missing-type -plugins/modules/source_control/github/github_issue.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_key.py validate-modules:doc-missing-type -plugins/modules/source_control/github/github_release.py validate-modules:doc-missing-type -plugins/modules/source_control/github/github_release.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_webhook.py validate-modules:parameter-type-not-in-doc -plugins/modules/source_control/github/github_webhook_info.py validate-modules:parameter-type-not-in-doc plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:deprecation-mismatch plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:invalid-documentation plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc From bb7ce740fe97e4aae8907a94c5a5e9d25f53bc13 Mon Sep 17 00:00:00 2001 From: Tristan Le Guern Date: Thu, 25 Feb 2021 12:34:53 +0100 Subject: [PATCH 0070/3093] proxmox_kvm: trivial patch for github issue #1875 (#1895) * proxmox_kvm: trivial patch for Github issue #1875 * proxmox_kvm: add a changelog fragment * Update changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- 
changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml | 3 +++ plugins/modules/cloud/misc/proxmox_kvm.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml diff --git a/changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml b/changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml new file mode 100644 index 0000000000..73d908cfa8 --- /dev/null +++ b/changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - proxmox_kvm - fix parameter ``vmid`` passed twice to ``exit_json`` while creating a virtual machine without cloning (https://github.com/ansible-collections/community.general/issues/1875, https://github.com/ansible-collections/community.general/pull/1895). diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 9f7bd58ff6..7fb997abc7 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -1287,7 +1287,7 @@ def main(): elif clone is not None: module.exit_json(changed=True, vmid=vmid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) else: - module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s deployed" % (name, vmid), **results) + module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) except Exception as e: if update: module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e)) From 36dea9ab97e53d20946afb7f62093cd05dd97b31 Mon Sep 17 00:00:00 2001 From: Nicolas Marcq Date: Thu, 25 Feb 2021 14:55:45 +0100 Subject: [PATCH 0071/3093] cobbler_sync cobbler_system fix TLS check when validate_certs (#1880) Ref: https://www.python.org/dev/peps/pep-0476/ Issue #1878 add changelog fragment Co-authored-by: Nicolas Marcq --- .../fragments/1880-fix_cobbler_system_ssl.yml | 2 ++ .../remote_management/cobbler/cobbler_sync.py | 14 ++++++++------ 
.../remote_management/cobbler/cobbler_system.py | 14 ++++++++------ 3 files changed, 18 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/1880-fix_cobbler_system_ssl.yml diff --git a/changelogs/fragments/1880-fix_cobbler_system_ssl.yml b/changelogs/fragments/1880-fix_cobbler_system_ssl.yml new file mode 100644 index 0000000000..849f703130 --- /dev/null +++ b/changelogs/fragments/1880-fix_cobbler_system_ssl.yml @@ -0,0 +1,2 @@ +bugfixes: + - cobbler_sync, cobbler_system - fix SSL/TLS certificate check when ``validate_certs`` set to ``false`` (https://github.com/ansible-collections/community.general/pull/1880). diff --git a/plugins/modules/remote_management/cobbler/cobbler_sync.py b/plugins/modules/remote_management/cobbler/cobbler_sync.py index 2e5f080d80..3ce1c25564 100644 --- a/plugins/modules/remote_management/cobbler/cobbler_sync.py +++ b/plugins/modules/remote_management/cobbler/cobbler_sync.py @@ -106,12 +106,14 @@ def main(): ssl_context = None if not validate_certs: - try: # Python 2.7.9 and newer - ssl_context = ssl.create_unverified_context() - except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default - ssl._create_default_context = ssl._create_unverified_context - else: # Python 2.7.8 and older - ssl._create_default_https_context = ssl._create_unverified_https_context + try: + ssl_context = ssl._create_unverified_context() + except AttributeError: + # Legacy Python that doesn't verify HTTPS certificates by default + pass + else: + # Handle target environment that doesn't support HTTPS verification + ssl._create_default_https_context = ssl._create_unverified_context url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) if ssl_context: diff --git a/plugins/modules/remote_management/cobbler/cobbler_system.py b/plugins/modules/remote_management/cobbler/cobbler_system.py index ecabcc8e4d..504369e56a 100644 --- a/plugins/modules/remote_management/cobbler/cobbler_system.py +++ 
b/plugins/modules/remote_management/cobbler/cobbler_system.py @@ -229,12 +229,14 @@ def main(): ssl_context = None if not validate_certs: - try: # Python 2.7.9 and newer - ssl_context = ssl.create_unverified_context() - except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default - ssl._create_default_context = ssl._create_unverified_context - else: # Python 2.7.8 and older - ssl._create_default_https_context = ssl._create_unverified_https_context + try: + ssl_context = ssl._create_unverified_context() + except AttributeError: + # Legacy Python that doesn't verify HTTPS certificates by default + pass + else: + # Handle target environment that doesn't support HTTPS verification + ssl._create_default_https_context = ssl._create_unverified_context url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params) if ssl_context: From 71706031c7daf1c6bdfe70b7c3e17e86a13c5d53 Mon Sep 17 00:00:00 2001 From: Victor Fauth Date: Thu, 25 Feb 2021 15:17:48 +0100 Subject: [PATCH 0072/3093] BOTMETA.yml: Remove vfauth as etcd3 maintainer (#1900) --- .github/BOTMETA.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index bbd52b544e..c9a62d5d29 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -294,7 +294,8 @@ files: $modules/clustering/consul/: maintainers: $team_consul $modules/clustering/etcd3.py: - maintainers: evrardjp vfauth + maintainers: evrardjp + ignore: vfauth $modules/clustering/nomad/: maintainers: chris93111 $modules/clustering/pacemaker_cluster.py: From afe9d0fdb3d91c607ffe42d42f25fcae569e6b00 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 26 Feb 2021 11:17:39 +0100 Subject: [PATCH 0073/3093] Disable way too slow yum_versionlock tests. 
(#1907) --- tests/integration/targets/yum_versionlock/aliases | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/targets/yum_versionlock/aliases b/tests/integration/targets/yum_versionlock/aliases index abe0a21e22..895088b2c6 100644 --- a/tests/integration/targets/yum_versionlock/aliases +++ b/tests/integration/targets/yum_versionlock/aliases @@ -1,3 +1,4 @@ +disabled # The tests are way too slow - the locking/unlocking steps need 10 minutes each! shippable/posix/group1 skip/aix skip/freebsd From 76d9fe4ec660210b808432738d7d9e14880b7d6f Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 26 Feb 2021 23:37:23 +1300 Subject: [PATCH 0074/3093] Tidy up sanity checks (2021/Batch 3 - who's counting) (#1885) * fixed validation-modules for plugins/modules/packaging/language/pip_package_info.py * fixed validation-modules for plugins/modules/packaging/language/maven_artifact.py * fixed validation-modules for plugins/modules/packaging/language/bundler.py * fixed validation-modules for plugins/modules/notification/pushbullet.py * fixed validation-modules for plugins/modules/monitoring/sensu/sensu_handler.py * fixed validation-modules for plugins/modules/monitoring/sensu/sensu_check.py * fixed validation-modules for plugins/modules/monitoring/sensu/sensu_client.py * fixed validation-modules for plugins/modules/monitoring/icinga2_host.py * fixed validation-modules for plugins/modules/monitoring/datadog/datadog_monitor.py * fixed validation-modules for plugins/modules/monitoring/datadog/datadog_event.py * fixed validation-modules for plugins/modules/clustering/znode.py * fixed validation-modules for plugins/modules/clustering/etcd3.py * fixed validation-modules for plugins/modules/clustering/consul/consul_session.py * fixed validation-modules for plugins/modules/clustering/consul/consul_kv.py * fixed validation-modules for plugins/modules/clustering/consul/consul.py * fixed validation-modules for 
plugins/modules/cloud/profitbricks/profitbricks.py * fixed validation-modules for plugins/modules/cloud/profitbricks/profitbricks_volume.py * fixed validation-modules for plugins/modules/cloud/packet/packet_sshkey.py * fixed validation-modules for plugins/modules/cloud/oneandone/oneandone_server.py * fixed validation-modules for plugins/modules/cloud/oneandone/oneandone_private_network.py * fixed validation-modules for plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py * fixed validation-modules for plugins/modules/cloud/oneandone/oneandone_load_balancer.py * fixed validation-modules for plugins/modules/cloud/oneandone/oneandone_firewall_policy.py * fixed validation-modules for plugins/modules/cloud/webfaction/webfaction_app.py * fixed validation-modules for plugins/modules/cloud/webfaction/webfaction_db.py * fixed validation-modules for plugins/modules/cloud/webfaction/webfaction_domain.py * fixed validation-modules for plugins/modules/cloud/webfaction/webfaction_mailbox.py * fixed validation-modules for plugins/modules/cloud/webfaction/webfaction_site.py * fixed validation-modules for plugins/modules/remote_management/lxca/lxca_cmms.py * fixed validation-modules for plugins/modules/remote_management/lxca/lxca_nodes.py * missed one "elements" in sensu_handler * Tidy up batch of sanity checks ignore lines * missed lines in ignore-2.9.txt * fixed validation-modules for plugins/modules/clustering/consul/consul_acl.py * Update ignore-2.9.txt Removed consul_acl.py from ignore-2.9.txt * Apply suggestions from code review Co-authored-by: Andrew Klychkov * Update plugins/modules/notification/pushbullet.py Co-authored-by: Felix Fontein * Update plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py Co-authored-by: Felix Fontein * added changelog fragment * Update plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py * Update changelogs/fragments/1885-sanity-check-fixes-batch3.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein 
Co-authored-by: Andrew Klychkov --- .../1885-sanity-check-fixes-batch3.yml | 18 ++++++ .../oneandone/oneandone_firewall_policy.py | 15 +++-- .../oneandone/oneandone_load_balancer.py | 15 +++-- .../oneandone/oneandone_monitoring_policy.py | 55 +++++++++++-------- .../oneandone/oneandone_private_network.py | 6 +- .../cloud/oneandone/oneandone_server.py | 3 +- plugins/modules/cloud/packet/packet_sshkey.py | 24 +++++--- .../cloud/profitbricks/profitbricks.py | 6 +- .../cloud/profitbricks/profitbricks_volume.py | 12 +++- .../cloud/webfaction/webfaction_app.py | 7 +++ .../modules/cloud/webfaction/webfaction_db.py | 7 +++ .../cloud/webfaction/webfaction_domain.py | 10 +++- .../cloud/webfaction/webfaction_mailbox.py | 5 ++ .../cloud/webfaction/webfaction_site.py | 15 ++++- plugins/modules/clustering/consul/consul.py | 44 ++++++++------- .../modules/clustering/consul/consul_acl.py | 20 +++++-- .../modules/clustering/consul/consul_kv.py | 1 + .../clustering/consul/consul_session.py | 3 +- plugins/modules/clustering/etcd3.py | 11 ++++ plugins/modules/clustering/znode.py | 14 +++-- .../monitoring/datadog/datadog_event.py | 18 +++--- .../monitoring/datadog/datadog_monitor.py | 40 +++++++------- plugins/modules/monitoring/icinga2_host.py | 1 + .../modules/monitoring/sensu/sensu_check.py | 12 ++-- .../modules/monitoring/sensu/sensu_client.py | 34 ++++++------ .../modules/monitoring/sensu/sensu_handler.py | 30 +++++----- plugins/modules/notification/pushbullet.py | 4 ++ plugins/modules/packaging/language/bundler.py | 3 +- .../packaging/language/maven_artifact.py | 9 ++- .../packaging/language/pip_package_info.py | 7 ++- .../remote_management/lxca/lxca_cmms.py | 3 + .../remote_management/lxca/lxca_nodes.py | 3 + tests/sanity/ignore-2.10.txt | 43 --------------- tests/sanity/ignore-2.11.txt | 43 --------------- tests/sanity/ignore-2.9.txt | 23 -------- 35 files changed, 299 insertions(+), 265 deletions(-) create mode 100644 changelogs/fragments/1885-sanity-check-fixes-batch3.yml 
diff --git a/changelogs/fragments/1885-sanity-check-fixes-batch3.yml b/changelogs/fragments/1885-sanity-check-fixes-batch3.yml new file mode 100644 index 0000000000..bf819a6e21 --- /dev/null +++ b/changelogs/fragments/1885-sanity-check-fixes-batch3.yml @@ -0,0 +1,18 @@ +minor_changes: + - oneandone_firewall_policy - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - oneandone_load_balancer - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - oneandone_monitoring_policy - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - oneandone_private_network - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - oneandone_server - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - profitbricks - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - profitbricks_volume - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - webfaction_domain - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - webfaction_site - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - consul - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - consul_acl - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - consul_session - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). 
+ - datadog_monitor - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - sensu_check - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - sensu_client - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - sensu_handler - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). + - bundler - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). diff --git a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py b/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py index 90694861a7..32e42ea865 100644 --- a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py +++ b/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py @@ -57,27 +57,32 @@ options: Each rule must contain protocol parameter, in addition to three optional parameters (port_from, port_to, and source) type: list + elements: dict add_server_ips: description: - A list of server identifiers (id or name) to be assigned to a firewall policy. Used in combination with update state. type: list + elements: str required: false remove_server_ips: description: - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state. type: list + elements: str required: false add_rules: description: - A list of rules that will be added to an existing firewall policy. It is syntax is the same as the one used for rules parameter. Used in combination with update state. type: list + elements: dict required: false remove_rules: description: - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state. 
type: list + elements: str required: false description: description: @@ -508,11 +513,11 @@ def main(): name=dict(type='str'), firewall_policy=dict(type='str'), description=dict(type='str'), - rules=dict(type='list', default=[]), - add_server_ips=dict(type='list', default=[]), - remove_server_ips=dict(type='list', default=[]), - add_rules=dict(type='list', default=[]), - remove_rules=dict(type='list', default=[]), + rules=dict(type='list', elements="dict", default=[]), + add_server_ips=dict(type='list', elements="str", default=[]), + remove_server_ips=dict(type='list', elements="str", default=[]), + add_rules=dict(type='list', elements="dict", default=[]), + remove_rules=dict(type='list', elements="str", default=[]), wait=dict(type='bool', default=True), wait_timeout=dict(type='int', default=600), wait_interval=dict(type='int', default=5), diff --git a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py b/plugins/modules/cloud/oneandone/oneandone_load_balancer.py index 62551560c2..71f1d96b9c 100644 --- a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py +++ b/plugins/modules/cloud/oneandone/oneandone_load_balancer.py @@ -95,6 +95,7 @@ options: - A list of rule objects that will be set for the load balancer. Each rule must contain protocol, port_balancer, and port_server parameters, in addition to source parameter, which is optional. type: list + elements: dict description: description: - Description of the load balancer. maxLength=256 @@ -105,22 +106,26 @@ options: - A list of server identifiers (id or name) to be assigned to a load balancer. Used in combination with update state. type: list + elements: str required: false remove_server_ips: description: - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state. type: list + elements: str required: false add_rules: description: - A list of rules that will be added to an existing load balancer. It is syntax is the same as the one used for rules parameter. 
Used in combination with update state. type: list + elements: dict required: false remove_rules: description: - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state. type: list + elements: str required: false wait: description: @@ -613,11 +618,11 @@ def main(): choices=METHODS), datacenter=dict( choices=DATACENTERS), - rules=dict(type='list', default=[]), - add_server_ips=dict(type='list', default=[]), - remove_server_ips=dict(type='list', default=[]), - add_rules=dict(type='list', default=[]), - remove_rules=dict(type='list', default=[]), + rules=dict(type='list', elements="dict", default=[]), + add_server_ips=dict(type='list', elements="str", default=[]), + remove_server_ips=dict(type='list', elements="str", default=[]), + add_rules=dict(type='list', elements="dict", default=[]), + remove_rules=dict(type='list', elements="str", default=[]), wait=dict(type='bool', default=True), wait_timeout=dict(type='int', default=600), wait_interval=dict(type='int', default=5), diff --git a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py index 27ebebd6c7..67f2ce9cc0 100644 --- a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py +++ b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py @@ -71,6 +71,7 @@ options: warning alerts, critical is used to set critical alerts. alert enables alert, and value is used to advise when the value is exceeded. type: list + elements: dict suboptions: cpu: description: @@ -96,6 +97,7 @@ options: description: - Array of ports that will be monitoring. type: list + elements: dict suboptions: protocol: description: @@ -119,6 +121,7 @@ options: description: - Array of processes that will be monitoring. type: list + elements: dict suboptions: process: description: @@ -133,41 +136,49 @@ options: description: - Ports to add to the monitoring policy. 
type: list + elements: dict required: false add_processes: description: - Processes to add to the monitoring policy. type: list + elements: dict required: false add_servers: description: - Servers to add to the monitoring policy. type: list + elements: str required: false remove_ports: description: - Ports to remove from the monitoring policy. type: list + elements: str required: false remove_processes: description: - Processes to remove from the monitoring policy. type: list + elements: str required: false remove_servers: description: - Servers to remove from the monitoring policy. type: list + elements: str required: false update_ports: description: - Ports to be updated on the monitoring policy. type: list + elements: dict required: false update_processes: description: - Processes to be updated on the monitoring policy. type: list + elements: dict required: false wait: description: @@ -197,7 +208,7 @@ author: EXAMPLES = ''' - name: Create a monitoring policy - oneandone_moitoring_policy: + community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key name: ansible monitoring policy description: Testing creation of a monitoring policy with ansible @@ -258,13 +269,13 @@ EXAMPLES = ''' wait: true - name: Destroy a monitoring policy - oneandone_moitoring_policy: + community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key state: absent name: ansible monitoring policy - name: Update a monitoring policy - oneandone_moitoring_policy: + community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy name: ansible monitoring policy updated @@ -315,7 +326,7 @@ EXAMPLES = ''' state: update - name: Add a port to a monitoring policy - oneandone_moitoring_policy: + community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated add_ports: @@ -328,7 +339,7 @@ EXAMPLES = ''' state: update - 
name: Update existing ports of a monitoring policy - oneandone_moitoring_policy: + community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated update_ports: @@ -348,7 +359,7 @@ EXAMPLES = ''' state: update - name: Remove a port from a monitoring policy - oneandone_moitoring_policy: + community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated remove_ports: @@ -356,7 +367,7 @@ EXAMPLES = ''' state: update - name: Add a process to a monitoring policy - oneandone_moitoring_policy: + community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated add_processes: @@ -368,7 +379,7 @@ EXAMPLES = ''' state: update - name: Update existing processes of a monitoring policy - oneandone_moitoring_policy: + community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated update_processes: @@ -386,7 +397,7 @@ EXAMPLES = ''' state: update - name: Remove a process from a monitoring policy - oneandone_moitoring_policy: + community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated remove_processes: @@ -395,7 +406,7 @@ EXAMPLES = ''' state: update - name: Add server to a monitoring policy - oneandone_moitoring_policy: + community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated add_servers: @@ -404,7 +415,7 @@ EXAMPLES = ''' state: update - name: Remove server from a monitoring policy - oneandone_moitoring_policy: + community.general.oneandone_monitoring_policy: auth_token: oneandone_private_api_key monitoring_policy: ansible monitoring policy updated remove_servers: @@ -957,17 +968,17 @@ def main(): agent=dict(type='str'), 
email=dict(type='str'), description=dict(type='str'), - thresholds=dict(type='list', default=[]), - ports=dict(type='list', default=[]), - processes=dict(type='list', default=[]), - add_ports=dict(type='list', default=[]), - update_ports=dict(type='list', default=[]), - remove_ports=dict(type='list', default=[]), - add_processes=dict(type='list', default=[]), - update_processes=dict(type='list', default=[]), - remove_processes=dict(type='list', default=[]), - add_servers=dict(type='list', default=[]), - remove_servers=dict(type='list', default=[]), + thresholds=dict(type='list', elements="dict", default=[]), + ports=dict(type='list', elements="dict", default=[]), + processes=dict(type='list', elements="dict", default=[]), + add_ports=dict(type='list', elements="dict", default=[]), + update_ports=dict(type='list', elements="dict", default=[]), + remove_ports=dict(type='list', elements="str", default=[]), + add_processes=dict(type='list', elements="dict", default=[]), + update_processes=dict(type='list', elements="dict", default=[]), + remove_processes=dict(type='list', elements="str", default=[]), + add_servers=dict(type='list', elements="str", default=[]), + remove_servers=dict(type='list', elements="str", default=[]), wait=dict(type='bool', default=True), wait_timeout=dict(type='int', default=600), wait_interval=dict(type='int', default=5), diff --git a/plugins/modules/cloud/oneandone/oneandone_private_network.py b/plugins/modules/cloud/oneandone/oneandone_private_network.py index 7eae6ea3dc..edbdc9f8ce 100644 --- a/plugins/modules/cloud/oneandone/oneandone_private_network.py +++ b/plugins/modules/cloud/oneandone/oneandone_private_network.py @@ -71,10 +71,12 @@ options: description: - List of server identifiers (name or id) to be added to the private network. type: list + elements: str remove_members: description: - List of server identifiers (name or id) to be removed from the private network. 
type: list + elements: str wait: description: - wait for the instance to be in state 'running' before returning @@ -394,8 +396,8 @@ def main(): description=dict(type='str'), network_address=dict(type='str'), subnet_mask=dict(type='str'), - add_members=dict(type='list', default=[]), - remove_members=dict(type='list', default=[]), + add_members=dict(type='list', elements="str", default=[]), + remove_members=dict(type='list', elements="str", default=[]), datacenter=dict( choices=DATACENTERS), wait=dict(type='bool', default=True), diff --git a/plugins/modules/cloud/oneandone/oneandone_server.py b/plugins/modules/cloud/oneandone/oneandone_server.py index 1e6caab5eb..a91fc55ac9 100644 --- a/plugins/modules/cloud/oneandone/oneandone_server.py +++ b/plugins/modules/cloud/oneandone/oneandone_server.py @@ -87,6 +87,7 @@ options: - A list of hard disks with nested "size" and "is_main" properties. It must be provided with vcore, cores_per_processor, and ram parameters. type: list + elements: dict private_network: description: - The private network name or ID. @@ -627,7 +628,7 @@ def main(): vcore=dict(type='int'), cores_per_processor=dict(type='int'), ram=dict(type='float'), - hdds=dict(type='list'), + hdds=dict(type='list', elements='dict'), count=dict(type='int', default=1), ssh_key=dict(type='raw'), auto_increment=dict(type='bool', default=True), diff --git a/plugins/modules/cloud/packet/packet_sshkey.py b/plugins/modules/cloud/packet/packet_sshkey.py index 73233d8933..97589cddb9 100644 --- a/plugins/modules/cloud/packet/packet_sshkey.py +++ b/plugins/modules/cloud/packet/packet_sshkey.py @@ -17,27 +17,35 @@ author: "Tomas Karasek (@t0mk) " options: state: description: - - Indicate desired state of the target. + - Indicate desired state of the target. default: present choices: ['present', 'absent'] + type: str auth_token: description: - - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN). + - Packet API token. 
You can also supply it in env var C(PACKET_API_TOKEN). + type: str label: - description: - - Label for the key. If you keep it empty, it will be read from key string. + description: + - Label for the key. If you keep it empty, it will be read from key string. + type: str + aliases: [name] id: description: - - UUID of the key which you want to remove. + - UUID of the key which you want to remove. + type: str fingerprint: description: - - Fingerprint of the key which you want to remove. + - Fingerprint of the key which you want to remove. + type: str key: description: - - Public Key string ({type} {base64 encoded key} {description}). + - Public Key string ({type} {base64 encoded key} {description}). + type: str key_file: description: - - File with the public key. + - File with the public key. + type: path requirements: - "python >= 2.6" diff --git a/plugins/modules/cloud/profitbricks/profitbricks.py b/plugins/modules/cloud/profitbricks/profitbricks.py index 90798672c0..507bea5339 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks.py +++ b/plugins/modules/cloud/profitbricks/profitbricks.py @@ -35,6 +35,7 @@ options: description: - Public SSH keys allowing access to the virtual machine. type: list + elements: str datacenter: description: - The datacenter to provision this virtual machine. @@ -70,6 +71,7 @@ options: description: - list of instance ids, currently only used when state='absent' to remove instances. type: list + elements: str count: description: - The number of virtual machines to create. 
@@ -581,12 +583,12 @@ def main(): volume_size=dict(type='int', default=10), disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), image_password=dict(default=None, no_log=True), - ssh_keys=dict(type='list', default=[]), + ssh_keys=dict(type='list', elements='str', default=[]), bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), lan=dict(type='int', default=1), count=dict(type='int', default=1), auto_increment=dict(type='bool', default=True), - instance_ids=dict(type='list', default=[]), + instance_ids=dict(type='list', elements='str', default=[]), subscription_user=dict(), subscription_password=dict(no_log=True), location=dict(choices=LOCATIONS, default='us/las'), diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume.py b/plugins/modules/cloud/profitbricks/profitbricks_volume.py index a63cbcdd3b..0719c025fa 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_volume.py +++ b/plugins/modules/cloud/profitbricks/profitbricks_volume.py @@ -47,6 +47,7 @@ options: description: - Public SSH keys allowing access to the virtual machine. type: list + elements: str required: false disk_type: description: @@ -77,6 +78,7 @@ options: description: - list of instance ids, currently only used when state='absent' to remove instances. type: list + elements: str required: false subscription_user: description: @@ -106,6 +108,10 @@ options: type: str required: false default: 'present' + server: + description: + - Server name to attach the volume to. 
+ type: str requirements: [ "profitbricks" ] author: Matt Baldwin (@baldwinSPC) @@ -369,13 +375,13 @@ def main(): size=dict(type='int', default=10), bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), image=dict(), - image_password=dict(default=None, no_log=True), - ssh_keys=dict(type='list', default=[]), + image_password=dict(no_log=True), + ssh_keys=dict(type='list', elements='str', default=[]), disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), licence_type=dict(default='UNKNOWN'), count=dict(type='int', default=1), auto_increment=dict(type='bool', default=True), - instance_ids=dict(type='list', default=[]), + instance_ids=dict(type='list', elements='str', default=[]), subscription_user=dict(), subscription_password=dict(no_log=True), wait=dict(type='bool', default=True), diff --git a/plugins/modules/cloud/webfaction/webfaction_app.py b/plugins/modules/cloud/webfaction/webfaction_app.py index 9a69ce54fc..1839db3810 100644 --- a/plugins/modules/cloud/webfaction/webfaction_app.py +++ b/plugins/modules/cloud/webfaction/webfaction_app.py @@ -35,17 +35,20 @@ options: description: - The name of the application required: true + type: str state: description: - Whether the application should exist choices: ['present', 'absent'] default: "present" + type: str type: description: - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list. 
required: true + type: str autostart: description: @@ -57,6 +60,7 @@ options: description: - Any extra parameters required by the app default: '' + type: str port_open: description: @@ -68,15 +72,18 @@ options: description: - The webfaction account to use required: true + type: str login_password: description: - The webfaction password to use required: true + type: str machine: description: - The machine name to use (optional for accounts with only one machine) + type: str ''' diff --git a/plugins/modules/cloud/webfaction/webfaction_db.py b/plugins/modules/cloud/webfaction/webfaction_db.py index 19bc6ea212..11563426d7 100644 --- a/plugins/modules/cloud/webfaction/webfaction_db.py +++ b/plugins/modules/cloud/webfaction/webfaction_db.py @@ -32,36 +32,43 @@ options: description: - The name of the database required: true + type: str state: description: - Whether the database should exist choices: ['present', 'absent'] default: "present" + type: str type: description: - The type of database to create. required: true choices: ['mysql', 'postgresql'] + type: str password: description: - The password for the new database user. + type: str login_name: description: - The webfaction account to use required: true + type: str login_password: description: - The webfaction password to use required: true + type: str machine: description: - The machine name to use (optional for accounts with only one machine) + type: str ''' EXAMPLES = ''' diff --git a/plugins/modules/cloud/webfaction/webfaction_domain.py b/plugins/modules/cloud/webfaction/webfaction_domain.py index a348ef5134..f9c3b7db7a 100644 --- a/plugins/modules/cloud/webfaction/webfaction_domain.py +++ b/plugins/modules/cloud/webfaction/webfaction_domain.py @@ -32,27 +32,33 @@ options: description: - The name of the domain required: true + type: str state: description: - Whether the domain should exist choices: ['present', 'absent'] default: "present" + type: str subdomains: description: - Any subdomains to create. 
default: [] + type: list + elements: str login_name: description: - The webfaction account to use required: true + type: str login_password: description: - The webfaction password to use required: true + type: str ''' EXAMPLES = ''' @@ -87,8 +93,8 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - state=dict(required=False, choices=['present', 'absent'], default='present'), - subdomains=dict(required=False, default=[], type='list'), + state=dict(choices=['present', 'absent'], default='present'), + subdomains=dict(default=[], type='list', elements='str'), login_name=dict(required=True), login_password=dict(required=True, no_log=True), ), diff --git a/plugins/modules/cloud/webfaction/webfaction_mailbox.py b/plugins/modules/cloud/webfaction/webfaction_mailbox.py index 144fad2983..37755763a2 100644 --- a/plugins/modules/cloud/webfaction/webfaction_mailbox.py +++ b/plugins/modules/cloud/webfaction/webfaction_mailbox.py @@ -29,27 +29,32 @@ options: description: - The name of the mailbox required: true + type: str mailbox_password: description: - The password for the mailbox required: true + type: str state: description: - Whether the mailbox should exist choices: ['present', 'absent'] default: "present" + type: str login_name: description: - The webfaction account to use required: true + type: str login_password: description: - The webfaction password to use required: true + type: str ''' EXAMPLES = ''' diff --git a/plugins/modules/cloud/webfaction/webfaction_site.py b/plugins/modules/cloud/webfaction/webfaction_site.py index 8ae982804d..87faade3e2 100644 --- a/plugins/modules/cloud/webfaction/webfaction_site.py +++ b/plugins/modules/cloud/webfaction/webfaction_site.py @@ -33,17 +33,20 @@ options: description: - The name of the website required: true + type: str state: description: - Whether the website should exist choices: ['present', 'absent'] default: "present" + type: str host: description: - The webfaction host on which the site 
should be created. required: true + type: str https: description: @@ -55,21 +58,27 @@ options: description: - A mapping of URLs to apps default: [] + type: list + elements: list subdomains: description: - A list of subdomains associated with this site. default: [] + type: list + elements: str login_name: description: - The webfaction account to use required: true + type: str login_password: description: - The webfaction password to use required: true + type: str ''' EXAMPLES = ''' @@ -101,12 +110,12 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - state=dict(required=False, choices=['present', 'absent'], default='present'), + state=dict(choices=['present', 'absent'], default='present'), # You can specify an IP address or hostname. host=dict(required=True), https=dict(required=False, type='bool', default=False), - subdomains=dict(required=False, type='list', default=[]), - site_apps=dict(required=False, type='list', default=[]), + subdomains=dict(type='list', elements='str', default=[]), + site_apps=dict(type='list', elements='list', default=[]), login_name=dict(required=True), login_password=dict(required=True, no_log=True), ), diff --git a/plugins/modules/clustering/consul/consul.py b/plugins/modules/clustering/consul/consul.py index dd8a5f50c3..cd695c4754 100644 --- a/plugins/modules/clustering/consul/consul.py +++ b/plugins/modules/clustering/consul/consul.py @@ -33,6 +33,7 @@ requirements: author: "Steve Gargan (@sgargan)" options: state: + type: str description: - register or deregister the consul service, defaults to present default: present @@ -86,6 +87,7 @@ options: documentation for further details. tags: type: list + elements: str description: - tags that will be attached to the service registration. 
script: @@ -345,7 +347,7 @@ def remove_service(module, service_id): module.exit_json(changed=False, id=service_id) -def get_consul_api(module, token=None): +def get_consul_api(module): consulClient = consul.Consul(host=module.params.get('host'), port=module.params.get('port'), scheme=module.params.get('scheme'), @@ -398,7 +400,7 @@ def parse_service(module): module.fail_json(msg="service_name is required to configure a service.") -class ConsulService(): +class ConsulService(object): def __init__(self, service_id=None, name=None, address=None, port=-1, tags=None, loaded=None): @@ -564,26 +566,26 @@ def main(): argument_spec=dict( host=dict(default='localhost'), port=dict(default=8500, type='int'), - scheme=dict(required=False, default='http'), - validate_certs=dict(required=False, default=True, type='bool'), - check_id=dict(required=False), - check_name=dict(required=False), - check_node=dict(required=False), - check_host=dict(required=False), - notes=dict(required=False), - script=dict(required=False), - service_id=dict(required=False), - service_name=dict(required=False), - service_address=dict(required=False, type='str', default=None), - service_port=dict(required=False, type='int', default=None), + scheme=dict(default='http'), + validate_certs=dict(default=True, type='bool'), + check_id=dict(), + check_name=dict(), + check_node=dict(), + check_host=dict(), + notes=dict(), + script=dict(), + service_id=dict(), + service_name=dict(), + service_address=dict(type='str'), + service_port=dict(type='int'), state=dict(default='present', choices=['present', 'absent']), - interval=dict(required=False, type='str'), - ttl=dict(required=False, type='str'), - tcp=dict(required=False, type='str'), - http=dict(required=False, type='str'), - timeout=dict(required=False, type='str'), - tags=dict(required=False, type='list'), - token=dict(required=False, no_log=True) + interval=dict(type='str'), + ttl=dict(type='str'), + tcp=dict(type='str'), + http=dict(type='str'), + 
timeout=dict(type='str'), + tags=dict(type='list', elements='str'), + token=dict(no_log=True) ), supports_check_mode=False, ) diff --git a/plugins/modules/clustering/consul/consul_acl.py b/plugins/modules/clustering/consul/consul_acl.py index 06feeea16d..c8d08f8e26 100644 --- a/plugins/modules/clustering/consul/consul_acl.py +++ b/plugins/modules/clustering/consul/consul_acl.py @@ -22,29 +22,35 @@ options: description: - a management token is required to manipulate the acl lists required: true + type: str state: description: - whether the ACL pair should be present or absent required: false choices: ['present', 'absent'] default: present + type: str token_type: description: - the type of token that should be created choices: ['client', 'management'] default: client + type: str name: description: - the name that should be associated with the acl key, this is opaque to Consul required: false + type: str token: description: - the token key identifying an ACL rule set. If generated by consul this will be a UUID required: false + type: str rules: type: list + elements: dict description: - rules that should be associated with a given token required: false @@ -53,6 +59,7 @@ options: - host of the consul agent defaults to localhost required: false default: localhost + type: str port: type: int description: @@ -64,6 +71,7 @@ options: - the protocol scheme on which the consul agent is running required: false default: http + type: str validate_certs: type: bool description: @@ -215,14 +223,14 @@ _POLICY_HCL_PROPERTY = "policy" _ARGUMENT_SPEC = { MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True), HOST_PARAMETER_NAME: dict(default='localhost'), - SCHEME_PARAMETER_NAME: dict(required=False, default='http'), - VALIDATE_CERTS_PARAMETER_NAME: dict(required=False, type='bool', default=True), - NAME_PARAMETER_NAME: dict(required=False), + SCHEME_PARAMETER_NAME: dict(default='http'), + VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True), + NAME_PARAMETER_NAME: 
dict(), PORT_PARAMETER_NAME: dict(default=8500, type='int'), - RULES_PARAMETER_NAME: dict(default=None, required=False, type='list'), + RULES_PARAMETER_NAME: dict(type='list', elements='dict'), STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]), - TOKEN_PARAMETER_NAME: dict(required=False), - TOKEN_TYPE_PARAMETER_NAME: dict(required=False, choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE], + TOKEN_PARAMETER_NAME: dict(), + TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE], default=CLIENT_TOKEN_TYPE_VALUE) } diff --git a/plugins/modules/clustering/consul/consul_kv.py b/plugins/modules/clustering/consul/consul_kv.py index ee5c397054..bafa7fd6d9 100644 --- a/plugins/modules/clustering/consul/consul_kv.py +++ b/plugins/modules/clustering/consul/consul_kv.py @@ -37,6 +37,7 @@ options: 'release' respectively. a valid session must be supplied to make the attempt changed will be true if the attempt is successful, false otherwise. + type: str choices: [ absent, acquire, present, release ] default: present key: diff --git a/plugins/modules/clustering/consul/consul_session.py b/plugins/modules/clustering/consul/consul_session.py index f28d3a5eca..7ace1f89a8 100644 --- a/plugins/modules/clustering/consul/consul_session.py +++ b/plugins/modules/clustering/consul/consul_session.py @@ -67,6 +67,7 @@ options: associated with the session will be release and can be acquired once the associated lock delay has expired. type: list + elements: str host: description: - The host of the consul agent defaults to localhost. 
@@ -237,7 +238,7 @@ def test_dependencies(module): def main(): argument_spec = dict( - checks=dict(type='list'), + checks=dict(type='list', elements='str'), delay=dict(type='int', default='15'), behavior=dict(type='str', default='release', choices=['release', 'delete']), host=dict(type='str', default='localhost'), diff --git a/plugins/modules/clustering/etcd3.py b/plugins/modules/clustering/etcd3.py index 7883842998..df7319ecfe 100644 --- a/plugins/modules/clustering/etcd3.py +++ b/plugins/modules/clustering/etcd3.py @@ -18,47 +18,58 @@ description: - Needs python etcd3 lib to work options: key: + type: str description: - the key where the information is stored in the cluster required: true value: + type: str description: - the information stored required: true host: + type: str description: - the IP address of the cluster default: 'localhost' port: + type: int description: - the port number used to connect to the cluster default: 2379 state: + type: str description: - the state of the value for the key. - can be present or absent required: true choices: [ present, absent ] user: + type: str description: - The etcd user to authenticate with. password: + type: str description: - The password to use for authentication. - Required if I(user) is defined. ca_cert: + type: path description: - The Certificate Authority to use to verify the etcd host. - Required if I(client_cert) and I(client_key) are defined. client_cert: + type: path description: - PEM formatted certificate chain file to be used for SSL client authentication. - Required if I(client_key) is defined. client_key: + type: path description: - PEM formatted file that contains your private key to be used for SSL client authentication. - Required if I(client_cert) is defined. timeout: + type: int description: - The socket level timeout in seconds. 
author: diff --git a/plugins/modules/clustering/znode.py b/plugins/modules/clustering/znode.py index 156a63767b..e85537e6e8 100644 --- a/plugins/modules/clustering/znode.py +++ b/plugins/modules/clustering/znode.py @@ -17,25 +17,31 @@ options: description: - A list of ZooKeeper servers (format '[server]:[port]'). required: true + type: str name: description: - The path of the znode. required: true + type: str value: description: - The value assigned to the znode. + type: str op: description: - An operation to perform. Mutually exclusive with state. choices: [ get, wait, list ] + type: str state: description: - The state to enforce. Mutually exclusive with op. choices: [ present, absent ] + type: str timeout: description: - The amount of time to wait for a node to appear. default: 300 + type: int recursive: description: - Recursively delete node and all its children. @@ -110,11 +116,11 @@ def main(): argument_spec=dict( hosts=dict(required=True, type='str'), name=dict(required=True, type='str'), - value=dict(required=False, default=None, type='str'), - op=dict(required=False, default=None, choices=['get', 'wait', 'list']), + value=dict(type='str'), + op=dict(choices=['get', 'wait', 'list']), state=dict(choices=['present', 'absent']), - timeout=dict(required=False, default=300, type='int'), - recursive=dict(required=False, default=False, type='bool') + timeout=dict(default=300, type='int'), + recursive=dict(default=False, type='bool') ), supports_check_mode=False ) diff --git a/plugins/modules/monitoring/datadog/datadog_event.py b/plugins/modules/monitoring/datadog/datadog_event.py index a6327dde4b..fd15eaf46c 100644 --- a/plugins/modules/monitoring/datadog/datadog_event.py +++ b/plugins/modules/monitoring/datadog/datadog_event.py @@ -56,6 +56,7 @@ options: - If not specified, it defaults to the remote system's hostname. 
tags: type: list + elements: str description: ["Comma separated list of tags to apply to the event."] alert_type: type: str @@ -114,17 +115,12 @@ def main(): app_key=dict(required=True, no_log=True), title=dict(required=True), text=dict(required=True), - date_happened=dict(required=False, default=None, type='int'), - priority=dict( - required=False, default='normal', choices=['normal', 'low'] - ), - host=dict(required=False, default=None), - tags=dict(required=False, default=None, type='list'), - alert_type=dict( - required=False, default='info', - choices=['error', 'warning', 'info', 'success'] - ), - aggregation_key=dict(required=False, default=None), + date_happened=dict(type='int'), + priority=dict(default='normal', choices=['normal', 'low']), + host=dict(), + tags=dict(type='list', elements='str'), + alert_type=dict(default='info', choices=['error', 'warning', 'info', 'success']), + aggregation_key=dict(), validate_certs=dict(default=True, type='bool'), ) ) diff --git a/plugins/modules/monitoring/datadog/datadog_monitor.py b/plugins/modules/monitoring/datadog/datadog_monitor.py index f63c66a57d..a737dd2085 100644 --- a/plugins/modules/monitoring/datadog/datadog_monitor.py +++ b/plugins/modules/monitoring/datadog/datadog_monitor.py @@ -46,6 +46,7 @@ options: - A list of tags to associate with your monitor when creating or updating. - This can help you categorize and filter monitors. type: list + elements: str type: description: - The type of the monitor. 
@@ -206,31 +207,30 @@ def main(): module = AnsibleModule( argument_spec=dict( api_key=dict(required=True, no_log=True), - api_host=dict(required=False), + api_host=dict(), app_key=dict(required=True, no_log=True), state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']), - type=dict(required=False, choices=['metric alert', 'service check', 'event alert', - 'process alert', 'log alert', 'query alert', - 'trace-analytics alert', 'rum alert']), + type=dict(choices=['metric alert', 'service check', 'event alert', 'process alert', + 'log alert', 'query alert', 'trace-analytics alert', 'rum alert']), name=dict(required=True), - query=dict(required=False), - notification_message=dict(required=False, no_log=True, default=None, aliases=['message'], + query=dict(), + notification_message=dict(no_log=True, aliases=['message'], deprecated_aliases=[dict(name='message', version='3.0.0', collection_name='community.general')]), # was Ansible 2.14 - silenced=dict(required=False, default=None, type='dict'), - notify_no_data=dict(required=False, default=False, type='bool'), - no_data_timeframe=dict(required=False, default=None), - timeout_h=dict(required=False, default=None), - renotify_interval=dict(required=False, default=None), - escalation_message=dict(required=False, default=None), - notify_audit=dict(required=False, default=False, type='bool'), - thresholds=dict(required=False, type='dict', default=None), - tags=dict(required=False, type='list', default=None), - locked=dict(required=False, default=False, type='bool'), - require_full_window=dict(required=False, default=None, type='bool'), - new_host_delay=dict(required=False, default=None), - evaluation_delay=dict(required=False, default=None), - id=dict(required=False), + silenced=dict(type='dict'), + notify_no_data=dict(default=False, type='bool'), + no_data_timeframe=dict(), + timeout_h=dict(), + renotify_interval=dict(), + escalation_message=dict(), + notify_audit=dict(default=False, type='bool'), + 
thresholds=dict(type='dict', default=None), + tags=dict(type='list', elements='str', default=None), + locked=dict(default=False, type='bool'), + require_full_window=dict(type='bool'), + new_host_delay=dict(), + evaluation_delay=dict(), + id=dict(), include_tags=dict(required=False, default=True, type='bool'), ) ) diff --git a/plugins/modules/monitoring/icinga2_host.py b/plugins/modules/monitoring/icinga2_host.py index 65c95812ca..b4c4cdbcfb 100644 --- a/plugins/modules/monitoring/icinga2_host.py +++ b/plugins/modules/monitoring/icinga2_host.py @@ -75,6 +75,7 @@ options: description: - Name used to create / delete the host. This does not need to be the FQDN, but does needs to be unique. required: true + aliases: [host] zone: type: str description: diff --git a/plugins/modules/monitoring/sensu/sensu_check.py b/plugins/modules/monitoring/sensu/sensu_check.py index 9ebe27653b..71e8f07228 100644 --- a/plugins/modules/monitoring/sensu/sensu_check.py +++ b/plugins/modules/monitoring/sensu/sensu_check.py @@ -49,11 +49,13 @@ options: - Path to the sensu check to run (not required when I(state=absent)) handlers: type: list + elements: str description: - List of handlers to notify when the check fails default: [] subscribers: type: list + elements: str description: - List of subscribers/channels this check should run for - See sensu_subscribers to subscribe a machine to a channel @@ -86,9 +88,9 @@ options: - When to enable handling of check failures dependencies: type: list + elements: str description: - - Other checks this check depends on, if dependencies fail, - - handling of this check will be disabled + - Other checks this check depends on, if dependencies fail handling of this check will be disabled default: [] metric: description: @@ -327,15 +329,15 @@ def main(): 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, 'backup': {'type': 'bool', 'default': 'no'}, 'command': {'type': 'str'}, - 'handlers': {'type': 'list'}, - 'subscribers': 
{'type': 'list'}, + 'handlers': {'type': 'list', 'elements': 'str'}, + 'subscribers': {'type': 'list', 'elements': 'str'}, 'interval': {'type': 'int'}, 'timeout': {'type': 'int'}, 'ttl': {'type': 'int'}, 'handle': {'type': 'bool'}, 'subdue_begin': {'type': 'str'}, 'subdue_end': {'type': 'str'}, - 'dependencies': {'type': 'list'}, + 'dependencies': {'type': 'list', 'elements': 'str'}, 'metric': {'type': 'bool', 'default': 'no'}, 'standalone': {'type': 'bool'}, 'publish': {'type': 'bool'}, diff --git a/plugins/modules/monitoring/sensu/sensu_client.py b/plugins/modules/monitoring/sensu/sensu_client.py index 35444f60e4..ee67a6e75b 100644 --- a/plugins/modules/monitoring/sensu/sensu_client.py +++ b/plugins/modules/monitoring/sensu/sensu_client.py @@ -33,6 +33,7 @@ options: - If not specified it defaults to non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu). subscriptions: type: list + elements: str description: - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver). - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions. @@ -44,6 +45,7 @@ options: default: 'no' redact: type: list + elements: str description: - Client definition attributes to redact (values) when logging and sending client keepalives. 
socket: @@ -160,22 +162,22 @@ def main(): module = AnsibleModule( supports_check_mode=True, argument_spec=dict( - state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), - name=dict(type='str', required=False), - address=dict(type='str', required=False), - subscriptions=dict(type='list', required=False), - safe_mode=dict(type='bool', required=False, default=False), - redact=dict(type='list', required=False), - socket=dict(type='dict', required=False), - keepalives=dict(type='bool', required=False, default=True), - keepalive=dict(type='dict', required=False), - registration=dict(type='dict', required=False), - deregister=dict(type='bool', required=False), - deregistration=dict(type='dict', required=False), - ec2=dict(type='dict', required=False), - chef=dict(type='dict', required=False), - puppet=dict(type='dict', required=False), - servicenow=dict(type='dict', required=False) + state=dict(type='str', choices=['present', 'absent'], default='present'), + name=dict(type='str', ), + address=dict(type='str', ), + subscriptions=dict(type='list', elements="str"), + safe_mode=dict(type='bool', default=False), + redact=dict(type='list', elements="str"), + socket=dict(type='dict'), + keepalives=dict(type='bool', default=True), + keepalive=dict(type='dict'), + registration=dict(type='dict'), + deregister=dict(type='bool'), + deregistration=dict(type='dict'), + ec2=dict(type='dict'), + chef=dict(type='dict'), + puppet=dict(type='dict'), + servicenow=dict(type='dict') ), required_if=[ ['state', 'present', ['subscriptions']] diff --git a/plugins/modules/monitoring/sensu/sensu_handler.py b/plugins/modules/monitoring/sensu/sensu_handler.py index 53152edc0f..0a56831ae0 100644 --- a/plugins/modules/monitoring/sensu/sensu_handler.py +++ b/plugins/modules/monitoring/sensu/sensu_handler.py @@ -37,11 +37,13 @@ options: - The Sensu event filter (name) to use when filtering events for the handler. 
filters: type: list + elements: str description: - An array of Sensu event filters (names) to use when filtering events for the handler. - Each array item must be a string. severities: type: list + elements: str description: - An array of check result severities the handler will handle. - 'NOTE: event resolution bypasses this filtering.' @@ -84,9 +86,9 @@ options: - 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").' handlers: type: list + elements: str description: - An array of Sensu event handlers (names) to use for events using the handler set. - - Each array item must be a string. - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").' notes: - Check mode is supported @@ -165,20 +167,20 @@ def main(): module = AnsibleModule( supports_check_mode=True, argument_spec=dict( - state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), + state=dict(type='str', choices=['present', 'absent'], default='present'), name=dict(type='str', required=True), - type=dict(type='str', required=False, choices=['pipe', 'tcp', 'udp', 'transport', 'set']), - filter=dict(type='str', required=False), - filters=dict(type='list', required=False), - severities=dict(type='list', required=False), - mutator=dict(type='str', required=False), - timeout=dict(type='int', required=False, default=10), - handle_silenced=dict(type='bool', required=False, default=False), - handle_flapping=dict(type='bool', required=False, default=False), - command=dict(type='str', required=False), - socket=dict(type='dict', required=False), - pipe=dict(type='dict', required=False), - handlers=dict(type='list', required=False), + type=dict(type='str', choices=['pipe', 'tcp', 'udp', 'transport', 'set']), + filter=dict(type='str'), + filters=dict(type='list', elements='str'), + severities=dict(type='list', elements='str'), + mutator=dict(type='str'), + 
timeout=dict(type='int', default=10), + handle_silenced=dict(type='bool', default=False), + handle_flapping=dict(type='bool', default=False), + command=dict(type='str'), + socket=dict(type='dict'), + pipe=dict(type='dict'), + handlers=dict(type='list', elements='str'), ), required_if=[ ['state', 'present', ['type']], diff --git a/plugins/modules/notification/pushbullet.py b/plugins/modules/notification/pushbullet.py index ab27fd5e11..435fcf2fcb 100644 --- a/plugins/modules/notification/pushbullet.py +++ b/plugins/modules/notification/pushbullet.py @@ -48,6 +48,10 @@ options: type: str description: - Body of the notification, e.g. Details of the fault you're alerting. + url: + type: str + description: + - URL field, used when I(push_type) is C(link). notes: - Requires pushbullet.py Python package on the remote host. diff --git a/plugins/modules/packaging/language/bundler.py b/plugins/modules/packaging/language/bundler.py index 8be17d6f08..43f8cfa2ee 100644 --- a/plugins/modules/packaging/language/bundler.py +++ b/plugins/modules/packaging/language/bundler.py @@ -33,6 +33,7 @@ options: - If not specified, it will default to the temporary working directory exclude_groups: type: list + elements: str description: - A list of Gemfile groups to exclude during operations. This only applies when state is C(present). 
Bundler considers this @@ -134,7 +135,7 @@ def main(): executable=dict(default=None, required=False), state=dict(default='present', required=False, choices=['present', 'latest']), chdir=dict(default=None, required=False, type='path'), - exclude_groups=dict(default=None, required=False, type='list'), + exclude_groups=dict(default=None, required=False, type='list', elements='str'), clean=dict(default=False, required=False, type='bool'), gemfile=dict(default=None, required=False, type='path'), local=dict(default=False, required=False, type='bool'), diff --git a/plugins/modules/packaging/language/maven_artifact.py b/plugins/modules/packaging/language/maven_artifact.py index 03c3d4d44f..50b808f57a 100644 --- a/plugins/modules/packaging/language/maven_artifact.py +++ b/plugins/modules/packaging/language/maven_artifact.py @@ -141,6 +141,10 @@ options: required: false default: 'download' choices: ['never', 'download', 'change', 'always'] + directory_mode: + type: str + description: + - Filesystem permission mode applied recursively to I(dest) when it is a directory. extends_documentation_fragment: - files ''' @@ -342,7 +346,7 @@ class Artifact(object): if len(parts) >= 3: g = parts[0] a = parts[1] - v = parts[len(parts) - 1] + v = parts[-1] t = None c = None if len(parts) == 4: @@ -595,8 +599,7 @@ def main(): client_key=dict(type="path", required=False), keep_name=dict(required=False, default=False, type='bool'), verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']), - directory_mode=dict(type='str'), # Used since https://github.com/ansible/ansible/pull/24965, not sure - # if this should really be here. 
+ directory_mode=dict(type='str'), ), add_file_common_args=True, mutually_exclusive=([('version', 'version_by_spec')]) diff --git a/plugins/modules/packaging/language/pip_package_info.py b/plugins/modules/packaging/language/pip_package_info.py index 08eb2e952c..b769afb866 100644 --- a/plugins/modules/packaging/language/pip_package_info.py +++ b/plugins/modules/packaging/language/pip_package_info.py @@ -20,6 +20,7 @@ options: default: ['pip'] required: False type: list + elements: path requirements: - The requested pip executables must be installed on the target. author: @@ -115,7 +116,11 @@ def main(): # start work global module - module = AnsibleModule(argument_spec=dict(clients={'type': 'list', 'default': ['pip']},), supports_check_mode=True) + module = AnsibleModule( + argument_spec=dict( + clients=dict(type='list', elements='path', default=['pip']), + ), + supports_check_mode=True) packages = {} results = {'packages': {}} clients = module.params['clients'] diff --git a/plugins/modules/remote_management/lxca/lxca_cmms.py b/plugins/modules/remote_management/lxca/lxca_cmms.py index 7bd7b9ffec..776ee49fd4 100644 --- a/plugins/modules/remote_management/lxca/lxca_cmms.py +++ b/plugins/modules/remote_management/lxca/lxca_cmms.py @@ -20,6 +20,7 @@ options: uuid: description: uuid of device, this is string with length greater than 16. + type: str command_options: description: @@ -29,10 +30,12 @@ options: - cmms - cmms_by_uuid - cmms_by_chassis_uuid + type: str chassis: description: uuid of chassis, this is string with length greater than 16. 
+ type: str extends_documentation_fragment: - community.general.lxca_common diff --git a/plugins/modules/remote_management/lxca/lxca_nodes.py b/plugins/modules/remote_management/lxca/lxca_nodes.py index febe2fd556..f788229d3d 100644 --- a/plugins/modules/remote_management/lxca/lxca_nodes.py +++ b/plugins/modules/remote_management/lxca/lxca_nodes.py @@ -20,6 +20,7 @@ options: uuid: description: uuid of device, this is string with length greater than 16. + type: str command_options: description: @@ -31,10 +32,12 @@ options: - nodes_by_chassis_uuid - nodes_status_managed - nodes_status_unmanaged + type: str chassis: description: uuid of chassis, this is string with length greater than 16. + type: str extends_documentation_fragment: - community.general.lxca_common diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index e6dc30c96c..2e6fcbaf8f 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -11,11 +11,6 @@ plugins/modules/cloud/linode/linode_v4.py validate-modules:parameter-list-no-ele plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/cloud/oneandone/oneandone_firewall_policy.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/oneandone/oneandone_load_balancer.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/oneandone/oneandone_private_network.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/oneandone/oneandone_server.py validate-modules:parameter-list-no-elements plugins/modules/cloud/online/online_server_facts.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error 
plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error @@ -73,14 +68,7 @@ plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/packet/packet_sshkey.py validate-modules:doc-missing-type -plugins/modules/cloud/packet/packet_sshkey.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/packet/packet_sshkey.py validate-modules:undocumented-parameter plugins/modules/cloud/packet/packet_volume_attachment.py pylint:ansible-bad-function -plugins/modules/cloud/profitbricks/profitbricks.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:doc-missing-type -plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:undocumented-parameter plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type plugins/modules/cloud/rackspace/rax.py validate-modules:parameter-list-no-elements @@ -119,39 +107,16 @@ plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/webfaction/webfaction_app.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_db.py validate-modules:doc-missing-type 
-plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/webfaction/webfaction_mailbox.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-required-mismatch plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type -plugins/modules/clustering/consul/consul.py validate-modules:parameter-list-no-elements plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter -plugins/modules/clustering/consul/consul_acl.py validate-modules:doc-missing-type -plugins/modules/clustering/consul/consul_acl.py validate-modules:parameter-list-no-elements -plugins/modules/clustering/consul/consul_kv.py validate-modules:parameter-type-not-in-doc -plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-list-no-elements plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/clustering/etcd3.py validate-modules:parameter-type-not-in-doc -plugins/modules/clustering/znode.py validate-modules:doc-missing-type 
-plugins/modules/clustering/znode.py validate-modules:parameter-type-not-in-doc plugins/modules/monitoring/bigpanda.py validate-modules:invalid-argument-name -plugins/modules/monitoring/datadog/datadog_event.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:invalid-argument-name -plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:parameter-list-no-elements -plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter -plugins/modules/monitoring/sensu/sensu_check.py validate-modules:parameter-list-no-elements -plugins/modules/monitoring/sensu/sensu_client.py validate-modules:parameter-list-no-elements -plugins/modules/monitoring/sensu/sensu_handler.py validate-modules:parameter-list-no-elements plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type @@ -160,13 +125,7 @@ plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-param plugins/modules/notification/cisco_webex.py validate-modules:invalid-argument-name plugins/modules/notification/grove.py validate-modules:invalid-argument-name plugins/modules/notification/grove.py validate-modules:nonexistent-parameter-documented -plugins/modules/notification/pushbullet.py validate-modules:parameter-type-not-in-doc -plugins/modules/notification/pushbullet.py validate-modules:undocumented-parameter -plugins/modules/packaging/language/bundler.py validate-modules:parameter-list-no-elements plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid -plugins/modules/packaging/language/maven_artifact.py validate-modules:parameter-type-not-in-doc 
-plugins/modules/packaging/language/maven_artifact.py validate-modules:undocumented-parameter -plugins/modules/packaging/language/pip_package_info.py validate-modules:parameter-list-no-elements plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid @@ -179,8 +138,6 @@ plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type -plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 9283c7841b..095b163096 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -10,11 +10,6 @@ plugins/modules/cloud/linode/linode_v4.py validate-modules:parameter-list-no-ele plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/cloud/oneandone/oneandone_firewall_policy.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/oneandone/oneandone_load_balancer.py 
validate-modules:parameter-list-no-elements -plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/oneandone/oneandone_private_network.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/oneandone/oneandone_server.py validate-modules:parameter-list-no-elements plugins/modules/cloud/online/online_server_facts.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error @@ -72,14 +67,7 @@ plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/packet/packet_sshkey.py validate-modules:doc-missing-type -plugins/modules/cloud/packet/packet_sshkey.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/packet/packet_sshkey.py validate-modules:undocumented-parameter plugins/modules/cloud/packet/packet_volume_attachment.py pylint:ansible-bad-function -plugins/modules/cloud/profitbricks/profitbricks.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:doc-missing-type -plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:undocumented-parameter plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type plugins/modules/cloud/rackspace/rax.py validate-modules:parameter-list-no-elements @@ -118,39 +106,16 @@ 
plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/webfaction/webfaction_app.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_db.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/webfaction/webfaction_mailbox.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-required-mismatch plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type -plugins/modules/clustering/consul/consul.py validate-modules:parameter-list-no-elements plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter -plugins/modules/clustering/consul/consul_acl.py validate-modules:doc-missing-type 
-plugins/modules/clustering/consul/consul_acl.py validate-modules:parameter-list-no-elements -plugins/modules/clustering/consul/consul_kv.py validate-modules:parameter-type-not-in-doc -plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-list-no-elements plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/clustering/etcd3.py validate-modules:parameter-type-not-in-doc -plugins/modules/clustering/znode.py validate-modules:doc-missing-type -plugins/modules/clustering/znode.py validate-modules:parameter-type-not-in-doc plugins/modules/monitoring/bigpanda.py validate-modules:invalid-argument-name -plugins/modules/monitoring/datadog/datadog_event.py validate-modules:parameter-list-no-elements plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:invalid-argument-name -plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:parameter-list-no-elements -plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter -plugins/modules/monitoring/sensu/sensu_check.py validate-modules:parameter-list-no-elements -plugins/modules/monitoring/sensu/sensu_client.py validate-modules:parameter-list-no-elements -plugins/modules/monitoring/sensu/sensu_handler.py validate-modules:parameter-list-no-elements plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type @@ -159,13 +124,7 @@ plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-param plugins/modules/notification/cisco_webex.py validate-modules:invalid-argument-name plugins/modules/notification/grove.py validate-modules:invalid-argument-name 
plugins/modules/notification/grove.py validate-modules:nonexistent-parameter-documented -plugins/modules/notification/pushbullet.py validate-modules:parameter-type-not-in-doc -plugins/modules/notification/pushbullet.py validate-modules:undocumented-parameter -plugins/modules/packaging/language/bundler.py validate-modules:parameter-list-no-elements plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid -plugins/modules/packaging/language/maven_artifact.py validate-modules:parameter-type-not-in-doc -plugins/modules/packaging/language/maven_artifact.py validate-modules:undocumented-parameter -plugins/modules/packaging/language/pip_package_info.py validate-modules:parameter-list-no-elements plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid @@ -178,8 +137,6 @@ plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type -plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 836eec3514..1e2b56a684 100644 --- a/tests/sanity/ignore-2.9.txt +++ 
b/tests/sanity/ignore-2.9.txt @@ -93,11 +93,6 @@ plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-no plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:deprecation-mismatch plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/packet/packet_sshkey.py validate-modules:doc-missing-type -plugins/modules/cloud/packet/packet_sshkey.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/packet/packet_sshkey.py validate-modules:undocumented-parameter -plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:doc-missing-type -plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:undocumented-parameter plugins/modules/cloud/rackspace/rax.py use-argspec-type-path plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type plugins/modules/cloud/rackspace/rax.py validate-modules:undocumented-parameter @@ -139,25 +134,12 @@ plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undo plugins/modules/cloud/univention/udm_dns_record.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter -plugins/modules/cloud/webfaction/webfaction_app.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_db.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/webfaction/webfaction_mailbox.py validate-modules:doc-missing-type -plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:doc-missing-type 
-plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter -plugins/modules/clustering/consul/consul_acl.py validate-modules:doc-missing-type -plugins/modules/clustering/consul/consul_kv.py validate-modules:parameter-type-not-in-doc -plugins/modules/clustering/etcd3.py validate-modules:parameter-type-not-in-doc -plugins/modules/clustering/znode.py validate-modules:doc-missing-type -plugins/modules/clustering/znode.py validate-modules:parameter-type-not-in-doc -plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:deprecation-mismatch plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:invalid-documentation plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc @@ -165,10 +147,7 @@ plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parame plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code -plugins/modules/notification/pushbullet.py validate-modules:parameter-type-not-in-doc -plugins/modules/notification/pushbullet.py validate-modules:undocumented-parameter 
plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid -plugins/modules/packaging/language/maven_artifact.py validate-modules:parameter-type-not-in-doc plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid @@ -181,8 +160,6 @@ plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type -plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc From 5447910a0ba42217b4b991f5f4ed6f13e7cfd94a Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 26 Feb 2021 21:35:53 +0100 Subject: [PATCH 0075/3093] Improve meta/runtime.yml deprecation messages. 
(#1918) --- .../fragments/meta-runtime-deprecations.yml | 2 + meta/runtime.yml | 120 +++++++++--------- 2 files changed, 62 insertions(+), 60 deletions(-) create mode 100644 changelogs/fragments/meta-runtime-deprecations.yml diff --git a/changelogs/fragments/meta-runtime-deprecations.yml b/changelogs/fragments/meta-runtime-deprecations.yml new file mode 100644 index 0000000000..8863f346af --- /dev/null +++ b/changelogs/fragments/meta-runtime-deprecations.yml @@ -0,0 +1,2 @@ +bugfixes: +- "meta/runtime.yml - improve deprecation messages (https://github.com/ansible-collections/community.general/pull/1918)." diff --git a/meta/runtime.yml b/meta/runtime.yml index 1d599d7728..b13cbc549c 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -41,7 +41,7 @@ plugin_routing: ali_instance_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.ali_instance_info instead. docker_compose: redirect: community.docker.docker_compose docker_config: @@ -198,19 +198,19 @@ plugin_routing: hpilo_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.hpilo_info instead. idrac_firmware: redirect: dellemc.openmanage.idrac_firmware idrac_redfish_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.idrac_redfish_info instead. idrac_server_config_profile: redirect: dellemc.openmanage.idrac_server_config_profile jenkins_job_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.jenkins_job_info instead. katello: tombstone: removal_version: 2.0.0 @@ -230,7 +230,7 @@ plugin_routing: ldap_attr: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.ldap_attrs instead. 
logicmonitor: tombstone: removal_version: 1.0.0 @@ -242,11 +242,11 @@ plugin_routing: memset_memstore_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.memset_memstore_info instead. memset_server_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.memset_server_info instead. na_cdot_aggregate: tombstone: removal_version: 2.0.0 @@ -282,161 +282,161 @@ plugin_routing: na_ontap_gather_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use netapp.ontap.na_ontap_info instead. nginx_status_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.nginx_status_info instead. ome_device_info: redirect: dellemc.openmanage.ome_device_info one_image_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.one_image_info instead. onepassword_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.onepassword_info instead. oneview_datacenter_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.oneview_datacenter_info instead. oneview_enclosure_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.oneview_enclosure_info instead. oneview_ethernet_network_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.oneview_ethernet_network_info instead. 
oneview_fc_network_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.oneview_fc_network_info instead. oneview_fcoe_network_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.oneview_fcoe_network_info instead. oneview_logical_interconnect_group_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.oneview_logical_interconnect_group_info instead. oneview_network_set_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.oneview_network_set_info instead. oneview_san_manager_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.oneview_san_manager_info instead. online_server_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.online_server_info instead. online_user_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.online_user_info instead. ovirt: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_vm instead. ovirt_affinity_label_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_affinity_label_info instead. ovirt_api_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_api_info instead. ovirt_cluster_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_cluster_info instead. 
ovirt_datacenter_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_datacenter_info instead. ovirt_disk_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_disk_info instead. ovirt_event_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_event_info instead. ovirt_external_provider_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_external_provider_info instead. ovirt_group_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_group_info instead. ovirt_host_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_host_info instead. ovirt_host_storage_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_host_storage_info instead. ovirt_network_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_network_info instead. ovirt_nic_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_nic_info instead. ovirt_permission_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_permission_info instead. ovirt_quota_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_quota_info instead. 
ovirt_scheduling_policy_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_scheduling_policy_info instead. ovirt_snapshot_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_snapshot_info instead. ovirt_storage_domain_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_storage_domain_info instead. ovirt_storage_template_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_storage_template_info instead. ovirt_storage_vm_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_storage_vm_info instead. ovirt_tag_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_tag_info instead. ovirt_template_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_template_info instead. ovirt_user_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_user_info instead. ovirt_vm_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_vm_info instead. ovirt_vmpool_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead. 
postgresql_copy: redirect: community.postgresql.postgresql_copy postgresql_db: @@ -484,47 +484,47 @@ plugin_routing: purefa_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use purestorage.flasharray.purefa_info instead. purefb_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use purestorage.flashblade.purefb_info instead. python_requirements_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.python_requirements_info instead. redfish_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.redfish_info instead. scaleway_image_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.scaleway_image_info instead. scaleway_ip_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.scaleway_ip_info instead. scaleway_organization_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.scaleway_organization_info instead. scaleway_security_group_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.scaleway_security_group_info instead. scaleway_server_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.scaleway_server_info instead. scaleway_snapshot_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.scaleway_snapshot_info instead. 
scaleway_volume_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.scaleway_volume_info instead. sf_account_manager: tombstone: removal_version: 2.0.0 @@ -548,15 +548,15 @@ plugin_routing: smartos_image_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.smartos_image_info instead. vertica_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.vertica_info instead. xenserver_guest_facts: deprecation: removal_version: 3.0.0 - warning_text: see plugin documentation for details + warning_text: Use community.general.xenserver_guest_info instead. doc_fragments: _gcp: redirect: community.google._gcp From 811b609b05012717290356f08321072cb1d12b56 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Sat, 27 Feb 2021 01:46:40 +0330 Subject: [PATCH 0076/3093] yum_versionlock: do lock/unlock concurrently (#1912) * Change all packages at once in yum_versionlock module * Re-enable tests * Convert package list to packages string * Fix module * Change variable name to make it appropriate * Fix module check_mode * Revert "Fix module" and apply felixfontein suggestion This reverts commit 5936da31987219a0cfc9a7b1bdaee0c093d2ccf8. 
* Rename package to packages * Only change packages which are needed * Ignore if list is empty * Add changelog --- ...yum_versionlock-lock_unlock_concurrently.yml | 3 +++ plugins/modules/packaging/os/yum_versionlock.py | 17 +++++++++++------ .../integration/targets/yum_versionlock/aliases | 1 - 3 files changed, 14 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml diff --git a/changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml b/changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml new file mode 100644 index 0000000000..36f40da0fe --- /dev/null +++ b/changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - yum_versionlock - Do the lock/unlock concurrently to speed up (https://github.com/ansible-collections/community.general/pull/1912). diff --git a/plugins/modules/packaging/os/yum_versionlock.py b/plugins/modules/packaging/os/yum_versionlock.py index 0830b3a4ab..13319f6711 100644 --- a/plugins/modules/packaging/os/yum_versionlock.py +++ b/plugins/modules/packaging/os/yum_versionlock.py @@ -13,7 +13,7 @@ module: yum_versionlock version_added: 2.0.0 short_description: Locks / unlocks a installed package(s) from being updated by yum package manager description: - - This module adds installed packages to yum versionlock to prevent the package from being updated. + - This module adds installed packages to yum versionlock to prevent the package(s) from being updated. 
options: name: description: @@ -93,9 +93,9 @@ class YumVersionLock: self.module.fail_json(msg="Error: Please install rpm package yum-plugin-versionlock : " + to_native(err) + to_native(out)) self.module.fail_json(msg="Error: " + to_native(err) + to_native(out)) - def ensure_state(self, package, command): - """ Ensure package state """ - rc, out, err = self.module.run_command([self.yum_bin, "-q", "versionlock", command, package]) + def ensure_state(self, packages, command): + """ Ensure packages state """ + rc, out, err = self.module.run_command([self.yum_bin, "-q", "versionlock", command] + packages) if rc == 0: return True self.module.fail_json(msg="Error: " + to_native(err) + to_native(out)) @@ -121,6 +121,7 @@ def main(): versionlock_packages = yum_v.get_versionlock_packages() # Ensure versionlock state of packages + packages_list = [] if state in ('present'): command = 'add' for single_pkg in packages: @@ -128,7 +129,9 @@ def main(): if module.check_mode: changed = True continue - changed = yum_v.ensure_state(single_pkg, command) + packages_list.append(single_pkg) + if packages_list: + changed = yum_v.ensure_state(packages_list, command) elif state in ('absent'): command = 'delete' for single_pkg in packages: @@ -136,7 +139,9 @@ def main(): if module.check_mode: changed = True continue - changed = yum_v.ensure_state(single_pkg, command) + packages_list.append(single_pkg) + if packages_list: + changed = yum_v.ensure_state(packages_list, command) module.exit_json( changed=changed, diff --git a/tests/integration/targets/yum_versionlock/aliases b/tests/integration/targets/yum_versionlock/aliases index 895088b2c6..abe0a21e22 100644 --- a/tests/integration/targets/yum_versionlock/aliases +++ b/tests/integration/targets/yum_versionlock/aliases @@ -1,4 +1,3 @@ -disabled # The tests are way too slow - the locking/unlocking steps need 10 minutes each! 
shippable/posix/group1 skip/aix skip/freebsd From ea65ce8e0d33b66f65d4beb5e952a5f2b461122b Mon Sep 17 00:00:00 2001 From: William Leemans Date: Sat, 27 Feb 2021 08:38:24 +0100 Subject: [PATCH 0077/3093] bugfix: xfs_quota feedback on projects not initialized has changed (#1596) * bugfix: xfs_quota feedback on projects not initialized has changed * changelog fragment * Update changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml Thanks for this, felixfontein Co-authored-by: Felix Fontein * xfs_quota is not necessarily in PATH * pep8 and formatting * Test was wrong. It needs to be changed * formatting * pep8 and formatting * xfs_quota is not necessarily in PATH * pep8 and formatting Co-authored-by: Felix Fontein --- ...n_projects_not_initialized_has_changed.yml | 3 + plugins/modules/system/xfs_quota.py | 297 ++++++++++-------- .../targets/xfs_quota/tasks/pquota.yml | 2 +- 3 files changed, 178 insertions(+), 124 deletions(-) create mode 100644 changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml diff --git a/changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml b/changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml new file mode 100644 index 0000000000..ba75a86a62 --- /dev/null +++ b/changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - xfs_quota - the feedback for initializing project quota using xfs_quota binary from ``xfsprogs`` has changed since the version it was written for (https://github.com/ansible-collections/community.general/pull/1596). 
diff --git a/plugins/modules/system/xfs_quota.py b/plugins/modules/system/xfs_quota.py index 907f1bae8f..7437ddfd70 100644 --- a/plugins/modules/system/xfs_quota.py +++ b/plugins/modules/system/xfs_quota.py @@ -7,9 +7,10 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: xfs_quota short_description: Manage quotas on XFS filesystems @@ -77,9 +78,9 @@ options: requirements: - xfsprogs -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set default project soft and hard limit on /opt of 1g community.general.xfs_quota: type: project @@ -101,9 +102,9 @@ EXAMPLES = r''' isoft: 1024 ihard: 2048 -''' +""" -RETURN = r''' +RETURN = r""" bhard: description: the current bhard setting in bytes returned: always @@ -134,7 +135,7 @@ rtbsoft: returned: always type: int sample: 1024 -''' +""" import grp import os @@ -146,30 +147,32 @@ from ansible.module_utils.basic import AnsibleModule, human_to_bytes def main(): module = AnsibleModule( argument_spec=dict( - bhard=dict(type='str'), - bsoft=dict(type='str'), - ihard=dict(type='int'), - isoft=dict(type='int'), - mountpoint=dict(type='str', required=True), - name=dict(type='str'), - rtbhard=dict(type='str'), - rtbsoft=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'present']), - type=dict(type='str', required=True, choices=['group', 'project', 'user']) + bhard=dict(type="str"), + bsoft=dict(type="str"), + ihard=dict(type="int"), + isoft=dict(type="int"), + mountpoint=dict(type="str", required=True), + name=dict(type="str"), + rtbhard=dict(type="str"), + rtbsoft=dict(type="str"), + state=dict(type="str", default="present", choices=["absent", "present"]), + type=dict(type="str", required=True, choices=["group", "project", "user"]), ), supports_check_mode=True, ) - quota_type = module.params['type'] - name = 
module.params['name'] - mountpoint = module.params['mountpoint'] - bhard = module.params['bhard'] - bsoft = module.params['bsoft'] - ihard = module.params['ihard'] - isoft = module.params['isoft'] - rtbhard = module.params['rtbhard'] - rtbsoft = module.params['rtbsoft'] - state = module.params['state'] + quota_type = module.params["type"] + name = module.params["name"] + mountpoint = module.params["mountpoint"] + bhard = module.params["bhard"] + bsoft = module.params["bsoft"] + ihard = module.params["ihard"] + isoft = module.params["isoft"] + rtbhard = module.params["rtbhard"] + rtbsoft = module.params["rtbsoft"] + state = module.params["state"] + + xfs_quota_bin = module.get_bin_path("xfs_quota", True) if bhard is not None: bhard = human_to_bytes(bhard) @@ -192,90 +195,122 @@ def main(): mp = get_fs_by_mountpoint(mountpoint) if mp is None: - module.fail_json(msg="Path '%s' is not a mount point or not located on an xfs file system." % mountpoint, **result) + module.fail_json( + msg="Path '%s' is not a mount point or not located on an xfs file system." + % mountpoint, + **result + ) - if quota_type == 'user': - type_arg = '-u' - quota_default = 'root' + if quota_type == "user": + type_arg = "-u" + quota_default = "root" if name is None: name = quota_default - if 'uquota' not in mp['mntopts'] and 'usrquota' not in mp['mntopts'] and 'quota' not in mp['mntopts'] and 'uqnoenforce' not in mp['mntopts'] and \ - 'qnoenforce' not in mp['mntopts']: + if ( + "uquota" not in mp["mntopts"] + and "usrquota" not in mp["mntopts"] + and "quota" not in mp["mntopts"] + and "uqnoenforce" not in mp["mntopts"] + and "qnoenforce" not in mp["mntopts"] + ): module.fail_json( - msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option." % mountpoint, **result + msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option." 
+ % mountpoint, + **result ) try: pwd.getpwnam(name) except KeyError as e: module.fail_json(msg="User '%s' does not exist." % name, **result) - elif quota_type == 'group': - type_arg = '-g' - quota_default = 'root' + elif quota_type == "group": + type_arg = "-g" + quota_default = "root" if name is None: name = quota_default - if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']: + if ( + "gquota" not in mp["mntopts"] + and "grpquota" not in mp["mntopts"] + and "gqnoenforce" not in mp["mntopts"] + ): module.fail_json( - msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)" % (mountpoint, mp['mntopts']), **result + msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)" + % (mountpoint, mp["mntopts"]), + **result ) try: grp.getgrnam(name) except KeyError as e: module.fail_json(msg="User '%s' does not exist." % name, **result) - elif quota_type == 'project': - type_arg = '-p' - quota_default = '#0' + elif quota_type == "project": + type_arg = "-p" + quota_default = "#0" if name is None: name = quota_default - if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']: - module.fail_json(msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option." % mountpoint, **result) + if ( + "pquota" not in mp["mntopts"] + and "prjquota" not in mp["mntopts"] + and "pqnoenforce" not in mp["mntopts"] + ): + module.fail_json( + msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option." 
+ % mountpoint, + **result + ) - if name != quota_default and not os.path.isfile('/etc/projects'): + if name != quota_default and not os.path.isfile("/etc/projects"): module.fail_json(msg="Path '/etc/projects' does not exist.", **result) - if name != quota_default and not os.path.isfile('/etc/projid'): + if name != quota_default and not os.path.isfile("/etc/projid"): module.fail_json(msg="Path '/etc/projid' does not exist.", **result) if name != quota_default and name is not None and get_project_id(name) is None: - module.fail_json(msg="Entry '%s' has not been defined in /etc/projid." % name, **result) + module.fail_json( + msg="Entry '%s' has not been defined in /etc/projid." % name, **result + ) prj_set = True if name != quota_default: - cmd = 'project %s' % name - rc, stdout, stderr = exec_quota(module, cmd, mountpoint) + cmd = "project %s" % name + rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint) if rc != 0: - result['cmd'] = cmd - result['rc'] = rc - result['stdout'] = stdout - result['stderr'] = stderr - module.fail_json(msg='Could not get project state.', **result) + result["cmd"] = cmd + result["rc"] = rc + result["stdout"] = stdout + result["stderr"] = stderr + module.fail_json(msg="Could not get project state.", **result) else: - for line in stdout.split('\n'): - if "Project Id '%s' - is not set." in line: + for line in stdout.split("\n"): + if ( + "Project Id '%s' - is not set." 
in line + or "project identifier is not set" in line + ): prj_set = False break if not prj_set and not module.check_mode: - cmd = 'project -s' - rc, stdout, stderr = exec_quota(module, cmd, mountpoint) + cmd = "project -s" + rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint) if rc != 0: - result['cmd'] = cmd - result['rc'] = rc - result['stdout'] = stdout - result['stderr'] = stderr - module.fail_json(msg='Could not get quota realtime block report.', **result) + result["cmd"] = cmd + result["rc"] = rc + result["stdout"] = stdout + result["stderr"] = stderr + module.fail_json( + msg="Could not get quota realtime block report.", **result + ) - result['changed'] = True + result["changed"] = True elif not prj_set and module.check_mode: - result['changed'] = True + result["changed"] = True # Set limits - if state == 'absent': + if state == "absent": bhard = 0 bsoft = 0 ihard = 0 @@ -283,91 +318,99 @@ def main(): rtbhard = 0 rtbsoft = 0 - current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b') - current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i') - current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb') + current_bsoft, current_bhard = quota_report( + module, xfs_quota_bin, mountpoint, name, quota_type, "b" + ) + current_isoft, current_ihard = quota_report( + module, xfs_quota_bin, mountpoint, name, quota_type, "i" + ) + current_rtbsoft, current_rtbhard = quota_report( + module, xfs_quota_bin, mountpoint, name, quota_type, "rtb" + ) - result['xfs_quota'] = dict( + result["xfs_quota"] = dict( bsoft=current_bsoft, bhard=current_bhard, isoft=current_isoft, ihard=current_ihard, rtbsoft=current_rtbsoft, - rtbhard=current_rtbhard + rtbhard=current_rtbhard, ) limit = [] if bsoft is not None and int(bsoft) != current_bsoft: - limit.append('bsoft=%s' % bsoft) - result['bsoft'] = int(bsoft) + limit.append("bsoft=%s" % bsoft) + result["bsoft"] = int(bsoft) if 
bhard is not None and int(bhard) != current_bhard: - limit.append('bhard=%s' % bhard) - result['bhard'] = int(bhard) + limit.append("bhard=%s" % bhard) + result["bhard"] = int(bhard) if isoft is not None and isoft != current_isoft: - limit.append('isoft=%s' % isoft) - result['isoft'] = isoft + limit.append("isoft=%s" % isoft) + result["isoft"] = isoft if ihard is not None and ihard != current_ihard: - limit.append('ihard=%s' % ihard) - result['ihard'] = ihard + limit.append("ihard=%s" % ihard) + result["ihard"] = ihard if rtbsoft is not None and int(rtbsoft) != current_rtbsoft: - limit.append('rtbsoft=%s' % rtbsoft) - result['rtbsoft'] = int(rtbsoft) + limit.append("rtbsoft=%s" % rtbsoft) + result["rtbsoft"] = int(rtbsoft) if rtbhard is not None and int(rtbhard) != current_rtbhard: - limit.append('rtbhard=%s' % rtbhard) - result['rtbhard'] = int(rtbhard) + limit.append("rtbhard=%s" % rtbhard) + result["rtbhard"] = int(rtbhard) if len(limit) > 0 and not module.check_mode: if name == quota_default: - cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit)) + cmd = "limit %s -d %s" % (type_arg, " ".join(limit)) else: - cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name) + cmd = "limit %s %s %s" % (type_arg, " ".join(limit), name) - rc, stdout, stderr = exec_quota(module, cmd, mountpoint) + rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint) if rc != 0: - result['cmd'] = cmd - result['rc'] = rc - result['stdout'] = stdout - result['stderr'] = stderr - module.fail_json(msg='Could not set limits.', **result) + result["cmd"] = cmd + result["rc"] = rc + result["stdout"] = stdout + result["stderr"] = stderr + module.fail_json(msg="Could not set limits.", **result) - result['changed'] = True + result["changed"] = True elif len(limit) > 0 and module.check_mode: - result['changed'] = True + result["changed"] = True module.exit_json(**result) -def quota_report(module, mountpoint, name, quota_type, used_type): +def quota_report(module, xfs_quota_bin, 
mountpoint, name, quota_type, used_type): soft = None hard = None - if quota_type == 'project': - type_arg = '-p' - elif quota_type == 'user': - type_arg = '-u' - elif quota_type == 'group': - type_arg = '-g' + if quota_type == "project": + type_arg = "-p" + elif quota_type == "user": + type_arg = "-u" + elif quota_type == "group": + type_arg = "-g" - if used_type == 'b': - used_arg = '-b' - used_name = 'blocks' + if used_type == "b": + used_arg = "-b" + used_name = "blocks" factor = 1024 - elif used_type == 'i': - used_arg = '-i' - used_name = 'inodes' + elif used_type == "i": + used_arg = "-i" + used_name = "inodes" factor = 1 - elif used_type == 'rtb': - used_arg = '-r' - used_name = 'realtime blocks' + elif used_type == "rtb": + used_arg = "-r" + used_name = "realtime blocks" factor = 1024 - rc, stdout, stderr = exec_quota(module, 'report %s %s' % (type_arg, used_arg), mountpoint) + rc, stdout, stderr = exec_quota( + module, xfs_quota_bin, "report %s %s" % (type_arg, used_arg), mountpoint + ) if rc != 0: result = dict( @@ -376,9 +419,9 @@ def quota_report(module, mountpoint, name, quota_type, used_type): stdout=stdout, stderr=stderr, ) - module.fail_json(msg='Could not get quota report for %s.' % used_name, **result) + module.fail_json(msg="Could not get quota report for %s." 
% used_name, **result) - for line in stdout.split('\n'): + for line in stdout.split("\n"): line = line.strip().split() if len(line) > 3 and line[0] == name: soft = int(line[2]) * factor @@ -388,33 +431,41 @@ def quota_report(module, mountpoint, name, quota_type, used_type): return soft, hard -def exec_quota(module, cmd, mountpoint): - cmd = ['xfs_quota', '-x', '-c'] + [cmd, mountpoint] +def exec_quota(module, xfs_quota_bin, cmd, mountpoint): + cmd = [xfs_quota_bin, "-x", "-c"] + [cmd, mountpoint] (rc, stdout, stderr) = module.run_command(cmd, use_unsafe_shell=True) - if "XFS_GETQUOTA: Operation not permitted" in stderr.split('\n') or \ - rc == 1 and 'xfs_quota: cannot set limits: Operation not permitted' in stderr.split('\n'): - module.fail_json(msg='You need to be root or have CAP_SYS_ADMIN capability to perform this operation') + if ( + "XFS_GETQUOTA: Operation not permitted" in stderr.split("\n") + or rc == 1 + and "xfs_quota: cannot set limits: Operation not permitted" + in stderr.split("\n") + ): + module.fail_json( + msg="You need to be root or have CAP_SYS_ADMIN capability to perform this operation" + ) return rc, stdout, stderr def get_fs_by_mountpoint(mountpoint): mpr = None - with open('/proc/mounts', 'r') as s: + with open("/proc/mounts", "r") as s: for line in s.readlines(): mp = line.strip().split() - if len(mp) == 6 and mp[1] == mountpoint and mp[2] == 'xfs': - mpr = dict(zip(['spec', 'file', 'vfstype', 'mntopts', 'freq', 'passno'], mp)) - mpr['mntopts'] = mpr['mntopts'].split(',') + if len(mp) == 6 and mp[1] == mountpoint and mp[2] == "xfs": + mpr = dict( + zip(["spec", "file", "vfstype", "mntopts", "freq", "passno"], mp) + ) + mpr["mntopts"] = mpr["mntopts"].split(",") break return mpr def get_project_id(name): prjid = None - with open('/etc/projid', 'r') as s: + with open("/etc/projid", "r") as s: for line in s.readlines(): - line = line.strip().partition(':') + line = line.strip().partition(":") if line[0] == name: prjid = line[2] break @@ -422,5 
+473,5 @@ def get_project_id(name): return prjid -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tests/integration/targets/xfs_quota/tasks/pquota.yml b/tests/integration/targets/xfs_quota/tasks/pquota.yml index 5d89ba44a4..4e82f059dc 100644 --- a/tests/integration/targets/xfs_quota/tasks/pquota.yml +++ b/tests/integration/targets/xfs_quota/tasks/pquota.yml @@ -118,7 +118,7 @@ - name: Assert project limits results for xft_quotaval after re-apply assert: that: - - not test_pquota_project_after.changed + - test_pquota_project_after.changed - name: Reset default project limits xfs_quota: mountpoint: '{{ remote_tmp_dir }}/pquota' From 20bd065e7700d029154f45187899f75813caa150 Mon Sep 17 00:00:00 2001 From: Jeffrey van Pelt Date: Sat, 27 Feb 2021 17:06:00 +0100 Subject: [PATCH 0078/3093] Proxmox inventory: Add some sanitization to url parameter (#1914) * Added rstrip to the URL field to prevent issues when users add a trailing / in the config of this module * Added changelog fragment * Sorry Mr. Linter, I have removed the empty line :-) * Fixed punctuation * Fixed punctuation --- changelogs/fragments/1914-add-sanitization-to-url.yml | 3 +++ plugins/inventory/proxmox.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1914-add-sanitization-to-url.yml diff --git a/changelogs/fragments/1914-add-sanitization-to-url.yml b/changelogs/fragments/1914-add-sanitization-to-url.yml new file mode 100644 index 0000000000..3b41bcb7af --- /dev/null +++ b/changelogs/fragments/1914-add-sanitization-to-url.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - proxmox inventory - added handling of extra trailing slashes in the URL (https://github.com/ansible-collections/community.general/pull/1914). 
diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index da727b7a20..d21688c4d4 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -352,7 +352,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): self._read_config_data(path) # get connection host - self.proxmox_url = self.get_option('url') + self.proxmox_url = self.get_option('url').rstrip('/') self.proxmox_user = self.get_option('user') self.proxmox_password = self.get_option('password') self.cache_key = self.get_cache_key(path) From 4a8d6cf7cca95145896d3bd7dfafba7abc140ba4 Mon Sep 17 00:00:00 2001 From: Eric L Date: Sat, 27 Feb 2021 17:42:45 +0100 Subject: [PATCH 0079/3093] Add version_sort filter to properly sort list of versions (#1916) * Add version_sort filter to properly sort list of versions * Fix all comments from Felix - add changelog fragment - fix test by removing runme.sh/yml and renaming to filter_version_sort - use fully qualified name of filter in test case * Remove wrong plugin.test changelog fragment Ups... 
* Properly name the file version_sort.py * Update changelogs/fragments/1916-add-version-sort-filter.yml Co-authored-by: Felix Fontein Co-authored-by: Eric L Co-authored-by: Felix Fontein --- .../1916-add-version-sort-filter.yml | 3 +++ plugins/filter/version_sort.py | 21 +++++++++++++++++++ .../targets/filter_version_sort/aliases | 2 ++ .../filter_version_sort/tasks/main.yml | 10 +++++++++ 4 files changed, 36 insertions(+) create mode 100644 changelogs/fragments/1916-add-version-sort-filter.yml create mode 100644 plugins/filter/version_sort.py create mode 100644 tests/integration/targets/filter_version_sort/aliases create mode 100644 tests/integration/targets/filter_version_sort/tasks/main.yml diff --git a/changelogs/fragments/1916-add-version-sort-filter.yml b/changelogs/fragments/1916-add-version-sort-filter.yml new file mode 100644 index 0000000000..a06b464e55 --- /dev/null +++ b/changelogs/fragments/1916-add-version-sort-filter.yml @@ -0,0 +1,3 @@ +add plugin.filter: + - name: version_sort + description: Sort a list according to version order instead of pure alphabetical one diff --git a/plugins/filter/version_sort.py b/plugins/filter/version_sort.py new file mode 100644 index 0000000000..598b8f2088 --- /dev/null +++ b/plugins/filter/version_sort.py @@ -0,0 +1,21 @@ +# Copyright (C) 2021 Eric Lavarde +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from distutils.version import LooseVersion + + +def version_sort(value, reverse=False): + '''Sort a list according to loose versions so that e.g. 
2.9 is smaller than 2.10''' + return sorted(value, key=LooseVersion, reverse=reverse) + + +class FilterModule(object): + ''' Version sort filter ''' + + def filters(self): + return { + 'version_sort': version_sort + } diff --git a/tests/integration/targets/filter_version_sort/aliases b/tests/integration/targets/filter_version_sort/aliases new file mode 100644 index 0000000000..f04737b845 --- /dev/null +++ b/tests/integration/targets/filter_version_sort/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/filter_version_sort/tasks/main.yml b/tests/integration/targets/filter_version_sort/tasks/main.yml new file mode 100644 index 0000000000..2edca18c9c --- /dev/null +++ b/tests/integration/targets/filter_version_sort/tasks/main.yml @@ -0,0 +1,10 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: validate that versions are properly sorted in a stable way + assert: + that: + - "['a-1.9.rpm', 'a-1.10-1.rpm', 'a-1.09.rpm', 'b-1.01.rpm', 'a-2.1-0.rpm', 'a-1.10-0.rpm'] | community.general.version_sort == ['a-1.9.rpm', 'a-1.09.rpm', 'a-1.10-0.rpm', 'a-1.10-1.rpm', 'a-2.1-0.rpm', 'b-1.01.rpm']" From b4c136125e6671b64ba0aac941205f47d9999e8f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 27 Feb 2021 21:23:07 +0100 Subject: [PATCH 0080/3093] Actually use option. 
(#1928) --- changelogs/fragments/1928-bigpanda-message.yml | 2 ++ plugins/modules/monitoring/bigpanda.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1928-bigpanda-message.yml diff --git a/changelogs/fragments/1928-bigpanda-message.yml b/changelogs/fragments/1928-bigpanda-message.yml new file mode 100644 index 0000000000..081b51cc0f --- /dev/null +++ b/changelogs/fragments/1928-bigpanda-message.yml @@ -0,0 +1,2 @@ +bugfixes: +- "bigpanda - actually use the ``deployment_message`` option (https://github.com/ansible-collections/community.general/pull/1928)." diff --git a/plugins/modules/monitoring/bigpanda.py b/plugins/modules/monitoring/bigpanda.py index ea693eb886..8faec5d030 100644 --- a/plugins/modules/monitoring/bigpanda.py +++ b/plugins/modules/monitoring/bigpanda.py @@ -183,7 +183,7 @@ def main(): request_url = url + '/data/events/deployments/start' else: - message = module.params['message'] + message = module.params['deployment_message'] if message is not None: body['errorMessage'] = message From bec43041a9e04e84b9fd1991bd385cad739c3097 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 27 Feb 2021 21:23:14 +0100 Subject: [PATCH 0081/3093] grove: message -> message_content (#1929) * grove: message -> message_content * Remove no longer needed ignore.txt lines. --- changelogs/fragments/1929-grove-message.yml | 4 ++++ plugins/modules/notification/grove.py | 13 +++++++++---- tests/sanity/ignore-2.10.txt | 1 - tests/sanity/ignore-2.11.txt | 1 - 4 files changed, 13 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/1929-grove-message.yml diff --git a/changelogs/fragments/1929-grove-message.yml b/changelogs/fragments/1929-grove-message.yml new file mode 100644 index 0000000000..402aa24639 --- /dev/null +++ b/changelogs/fragments/1929-grove-message.yml @@ -0,0 +1,4 @@ +minor_changes: +- "grove - the option ``message`` has been renamed to ``message_content``. 
The old name ``message`` is kept as an alias and will be removed for community.general 4.0.0. This was done because ``message`` is used internally by Ansible (https://github.com/ansible-collections/community.general/pull/1929)." +deprecated_features: +- "grove - the option ``message`` will be removed in community.general 4.0.0. Use the new option ``message_content`` instead (https://github.com/ansible-collections/community.general/pull/1929)." diff --git a/plugins/modules/notification/grove.py b/plugins/modules/notification/grove.py index c1816e63d2..56838e5a1b 100644 --- a/plugins/modules/notification/grove.py +++ b/plugins/modules/notification/grove.py @@ -27,11 +27,14 @@ options: - Name of the service (displayed as the "user" in the message) required: false default: ansible - message: + message_content: type: str description: - - Message content + - Message content. + - The alias I(message) is deprecated and will be removed in community.general 4.0.0. required: true + aliases: + - message url: type: str description: @@ -92,7 +95,9 @@ def main(): module = AnsibleModule( argument_spec=dict( channel_token=dict(type='str', required=True, no_log=True), - message=dict(type='str', required=True), + message_content=dict(type='str', required=True, aliases=['message'], + deprecated_aliases=[dict(name='message', version='4.0.0', + collection_name='community.general')]), service=dict(type='str', default='ansible'), url=dict(type='str', default=None), icon_url=dict(type='str', default=None), @@ -102,7 +107,7 @@ def main(): channel_token = module.params['channel_token'] service = module.params['service'] - message = module.params['message'] + message = module.params['message_content'] url = module.params['url'] icon_url = module.params['icon_url'] diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 2e6fcbaf8f..54f5b279ea 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -124,7 +124,6 @@ 
plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/notification/cisco_webex.py validate-modules:invalid-argument-name plugins/modules/notification/grove.py validate-modules:invalid-argument-name -plugins/modules/notification/grove.py validate-modules:nonexistent-parameter-documented plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 095b163096..9da66ae9dc 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -123,7 +123,6 @@ plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/notification/cisco_webex.py validate-modules:invalid-argument-name plugins/modules/notification/grove.py validate-modules:invalid-argument-name -plugins/modules/notification/grove.py validate-modules:nonexistent-parameter-documented plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid From 585dd0b6ed4f0fff0a99aef7ebd10da358f783c8 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 1 Mar 2021 03:06:36 +1300 Subject: [PATCH 0082/3093] Improved ModuleHelper.run_command() (#1867) * Improved run_command signature and behaviour - extra_params has been removed from the signature - params now can be either 
str or dict (containing the param value) * Reverted the removal of the method parameter, and added changelog fragment * Update changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml Co-authored-by: Felix Fontein * Update plugins/module_utils/module_helper.py Co-authored-by: Felix Fontein * adjustement per PR * Update plugins/module_utils/module_helper.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../1867-modhelper-cmdmixin-dict-params.yml | 2 ++ plugins/module_utils/module_helper.py | 29 +++++++++++++------ plugins/modules/system/xfconf.py | 14 ++++----- 3 files changed, 27 insertions(+), 18 deletions(-) create mode 100644 changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml diff --git a/changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml b/changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml new file mode 100644 index 0000000000..3f757b233a --- /dev/null +++ b/changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper module utils - ``CmdMixin.run_command()`` now accepts ``dict`` command arguments, providing the parameter and its value (https://github.com/ansible-collections/community.general/pull/1867). 
diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index f35db8283d..3d145e713e 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -296,17 +296,28 @@ class CmdMixin(object): param_list = params if params else self.module.params.keys() for param in param_list: - if param in self.module.argument_spec: - if param not in self.module.params: + if isinstance(param, dict): + if len(param) != 1: + raise ModuleHelperException("run_command parameter as a dict must " + "contain only one key: {0}".format(param)) + _param = list(param.keys())[0] + fmt = find_format(_param) + value = param[_param] + elif isinstance(param, str): + if param in self.module.argument_spec: + fmt = find_format(param) + value = self.module.params[param] + elif param in extra_params: + fmt = find_format(param) + value = extra_params[param] + else: + self.module.deprecate("Cannot determine value for parameter: {0}. " + "From version 4.0.0 onwards this will generate an exception".format(param), + version="4.0.0", collection_name="community.general") continue - fmt = find_format(param) - value = self.module.params[param] + else: - if param not in extra_params: - continue - fmt = find_format(param) - value = extra_params[param] - self.cmd_args = cmd_args + raise ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param)) cmd_args = add_arg_formatted_param(cmd_args, fmt, value) return cmd_args diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index ce85a2ba47..b6e6110e87 100644 --- a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -238,7 +238,7 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): def state_absent(self): self.vars.value = None - self.run_command(params=('channel', 'property', 'reset'), extra_params={"reset": True}) + self.run_command(params=('channel', 'property', {'reset': True})) 
self.update_xfconf_output(previous_value=self.vars.previous_value, value=None) @@ -267,17 +267,13 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): isinstance(self.vars.previous_value, list) or \ values_len > 1 - params = ['channel', 'property', 'create'] + params = ['channel', 'property', {'create': True}] if self.vars.is_array: - params.append('is_array') - params.append('values_and_types') - - extra_params = dict(values_and_types=(self.vars.value, value_type)) - extra_params['create'] = True - extra_params['is_array'] = self.vars.is_array + params.append({'is_array': True}) + params.append({'values_and_types': (self.vars.value, value_type)}) if not self.module.check_mode: - self.run_command(params=params, extra_params=extra_params) + self.run_command(params=params) if not self.vars.is_array: self.vars.value = self.vars.value[0] From 4f98136771e32a7ad87976e20bac7ff5eaf4666d Mon Sep 17 00:00:00 2001 From: Ox Date: Tue, 2 Mar 2021 12:46:21 +0100 Subject: [PATCH 0083/3093] Feat: nmcli - Add method4 and method6 (#1894) * Feat: nmcli - Add method4 and method6 Allows to manipulate ipv4.method and ipv6.method. Is mandatory to manage Bond interfaces with no native vlans but only tagged vlans. 
* Fix: nmcli - Add changelog fragment for 1894 * Fix: nmcli - Add choices for method4 and method6 * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Fix: nmcli - Update documentation * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Fix: nmcli - Simplify code * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Fix: nmcli - Update ip6 documentation Co-authored-by: Felix Fontein --- ...894-feat-nmcli-add-method4-and-method6.yml | 2 ++ plugins/modules/net_tools/nmcli.py | 32 +++++++++++++++---- 2 files changed, 28 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml diff --git a/changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml b/changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml new file mode 100644 index 0000000000..05daac483c --- /dev/null +++ b/changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml @@ -0,0 +1,2 @@ +bugfixes: + - nmcli - add ``method4`` and ``method6`` options (https://github.com/ansible-collections/community.general/pull/1894). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 5925fa7cb7..d469cbf1c2 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -69,6 +69,7 @@ options: description: - The IPv4 address to this interface. - Use the format C(192.0.2.24/24). + - If defined and I(method4) is not specified, automatically set C(ipv4.method) to C(manual). type: str gw4: description: @@ -106,10 +107,18 @@ options: - A list of DNS search domains. elements: str type: list + method4: + description: + - Configuration method to be used for IPv4. 
+ - If I(ip4) is set, C(ipv4.method) is automatically set to C(manual) and this parameter is not needed. + type: str + choices: [auto, link-local, manual, shared, disabled] + version_added: 2.2.0 ip6: description: - The IPv6 address to this interface. - Use the format C(abbe::cafe). + - If defined and I(method6) is not specified, automatically set C(ipv6.method) to C(manual). type: str gw6: description: @@ -127,6 +136,13 @@ options: - A list of DNS search domains. elements: str type: list + method6: + description: + - Configuration method to be used for IPv6 + - If I(ip6) is set, C(ipv6.method) is automatically set to C(manual) and this parameter is not needed. + type: str + choices: [ignore, auto, dhcp, link-local, manual, shared] + version_added: 2.2.0 mtu: description: - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created. @@ -611,10 +627,12 @@ class Nmcli(object): self.never_default4 = module.params['never_default4'] self.dns4 = module.params['dns4'] self.dns4_search = module.params['dns4_search'] + self.method4 = module.params['method4'] self.ip6 = module.params['ip6'] self.gw6 = module.params['gw6'] self.dns6 = module.params['dns6'] self.dns6_search = module.params['dns6_search'] + self.method6 = module.params['method6'] self.mtu = module.params['mtu'] self.stp = module.params['stp'] self.priority = module.params['priority'] @@ -648,18 +666,18 @@ class Nmcli(object): self.dhcp_client_id = module.params['dhcp_client_id'] self.zone = module.params['zone'] - if self.ip4: + if self.method4: + self.ipv4_method = self.method4 + elif self.ip4: self.ipv4_method = 'manual' else: - # supported values for 'ipv4.method': [auto, link-local, manual, shared, disabled] - # TODO: add a new module parameter to specify a non 'manual' value self.ipv4_method = None - if self.ip6: + if self.method6: + self.ipv6_method = self.method6 + elif self.ip6: self.ipv6_method = 'manual' else: - # supported values 
for 'ipv6.method': [ignore, auto, dhcp, link-local, manual, shared] - # TODO: add a new module parameter to specify a non 'manual' value self.ipv6_method = None def execute_command(self, cmd, use_unsafe_shell=False, data=None): @@ -1075,11 +1093,13 @@ def main(): never_default4=dict(type='bool', default=False), dns4=dict(type='list', elements='str'), dns4_search=dict(type='list', elements='str'), + method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), dhcp_client_id=dict(type='str'), ip6=dict(type='str'), gw6=dict(type='str'), dns6=dict(type='list', elements='str'), dns6_search=dict(type='list', elements='str'), + method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared']), # Bond Specific vars mode=dict(type='str', default='balance-rr', choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), From 677ab8e383cbe156a0f354582df56324606cfaed Mon Sep 17 00:00:00 2001 From: Jim Speir Date: Tue, 2 Mar 2021 20:58:55 +0000 Subject: [PATCH 0084/3093] Adding another example for tss lookup (#1945) * Adding another example for tss lookup A more detailed example using self-hosted secrets server as investigated in #1943 * Update plugins/lookup/tss.py Co-authored-by: Felix Fontein * Better line breaking * Update plugins/lookup/tss.py Seconded! 
Co-authored-by: Felix Fontein * Remove newline to pass tests * Update plugins/lookup/tss.py Co-authored-by: Felix Fontein --- plugins/lookup/tss.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index f24d79373a..2c25532699 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -82,6 +82,27 @@ EXAMPLES = r""" | items2dict(key_name='slug', value_name='itemValue'))['password'] }} + +- hosts: localhost + vars: + secret: >- + {{ + lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + username='user.name', + password='password' + ) + }} + tasks: + - ansible.builtin.debug: + msg: > + the password is {{ + (secret['items'] + | items2dict(key_name='slug', + value_name='itemValue'))['password'] + }} """ from ansible.errors import AnsibleError, AnsibleOptionsError From 3f882ee6a2e8e4e52bce8e1371dd81e61733befb Mon Sep 17 00:00:00 2001 From: atbore-phx Date: Thu, 4 Mar 2021 07:45:05 +0100 Subject: [PATCH 0085/3093] consul_io inventory script: fixes awx and python 3 kv_group (#620) * feat(env): extending configuration options by env variables * feat(env): extending config option docs * feat(consul_io): fix byte chain decoding for python3 * fix(pep8): E128 * changelog added * Update changelogs/fragments/620-consul_io-env-variables-conf-based.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/620-consul_io-env-variables-conf-based.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/620-consul_io-env-variables-conf-based.yml Co-authored-by: Felix Fontein * fix(typo): adding dots Co-authored-by: Davy Bondeau Co-authored-by: Felix Fontein --- ...620-consul_io-env-variables-conf-based.yml | 5 +++ scripts/inventory/consul_io.py | 36 ++++++++++++++++--- 2 files changed, 36 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/620-consul_io-env-variables-conf-based.yml diff --git 
a/changelogs/fragments/620-consul_io-env-variables-conf-based.yml b/changelogs/fragments/620-consul_io-env-variables-conf-based.yml new file mode 100644 index 0000000000..e3378428c5 --- /dev/null +++ b/changelogs/fragments/620-consul_io-env-variables-conf-based.yml @@ -0,0 +1,5 @@ +--- +bugfixes: + - consul_io inventory script - kv_groups - fix byte chain decoding for Python 3 (https://github.com/ansible-collections/community.general/pull/620). +minor_changes: + - consul_io inventory script - conf options - allow custom configuration options via env variables (https://github.com/ansible-collections/community.general/pull/620). diff --git a/scripts/inventory/consul_io.py b/scripts/inventory/consul_io.py index 00832241d0..6af0675707 100644 --- a/scripts/inventory/consul_io.py +++ b/scripts/inventory/consul_io.py @@ -28,7 +28,7 @@ This script can be run with the switches --host to restrict the inventory to a single named node. (requires datacenter config) The configuration for this plugin is read from a consul_io.ini file located in the -same directory as this inventory script. All config options in the config file +same directory as this inventory script or via environment variables. All config options in the config file are optional except the host and port, which must point to a valid agent or server running the http api. For more information on enabling the endpoint see. @@ -36,17 +36,23 @@ http://www.consul.io/docs/agent/options.html Other options include: +'bulk_load' + +boolean flag. Load all possible data before building inventory JSON +If true, script processes in-memory data. JSON generation reduces drastically +This can also be set with the environmental variable CONSUL_BULK_LOAD. + 'datacenter': which restricts the included nodes to those from the given datacenter -This can also be set with the environmental variable CONSUL_DATACENTER +This can also be set with the environmental variable CONSUL_DATACENTER. 'url': the URL of the Consul cluster. 
host, port and scheme are derived from the URL. If not specified, connection configuration defaults to http requests to localhost on port 8500. -This can also be set with the environmental variable CONSUL_URL +This can also be set with the environmental variable CONSUL_URL. 'domain': @@ -57,6 +63,15 @@ have consul hooked into your DNS server for these to resolve. See the consul DNS docs for more info. which restricts the included nodes to those from the given datacenter +This can also be set with the environmental variable CONSUL_DOMAIN. + +'suffixes': + +boolean flag. By default, final JSON is built based on all available info in consul. +Suffixes means that services groups will be added in addition to basic information. See servers_suffix for additional info +There are cases when speed is preferable than having services groups +False value will reduce script execution time drastically. +This can also be set with the environmental variable CONSUL_SUFFIXES. 'servers_suffix': @@ -64,6 +79,7 @@ defining the a suffix to add to the service name when creating the service group. e.g Service name of 'redis' and a suffix of '_servers' will add each nodes address to the group name 'redis_servers'. No suffix is added if this is not set +This can also be set with the environmental variable CONSUL_SERVERS_SUFFIX. 'tags': @@ -71,11 +87,13 @@ boolean flag defining if service tags should be used to create Inventory groups e.g. an nginx service with the tags ['master', 'v1'] will create groups nginx_master and nginx_v1 to which the node running the service will be added. No tag groups are created if this is missing. +This can also be set with the environmental variable CONSUL_TAGS. 'token': ACL token to use to authorize access to the key value store. May be required to retrieve the kv_groups and kv_metadata based on your consul configuration. +This can also be set with the environmental variable CONSUL_TOKEN. 
'kv_groups': @@ -86,6 +104,7 @@ names to which the node should be added e.g. if the inventory contains node 'nyc-web-1' in datacenter 'nyc-dc1' and kv_groups = 'ansible/groups' then the key 'ansible/groups/nyc-dc1/nyc-web-1' will be queried for a group list. If this query returned 'test,honeypot' then the node address to both groups. +This can also be set with the environmental variable CONSUL_KV_GROUPS. 'kv_metadata': @@ -95,6 +114,7 @@ find a json dictionary of metadata entries. If found, each key/value pair in the dictionary is added to the metadata for the node. eg node 'nyc-web-1' in datacenter 'nyc-dc1' and kv_metadata = 'ansible/metadata', then the key 'ansible/metadata/nyc-dc1/nyc-web-1' should contain '{"databse": "postgres"}' +This can also be set with the environmental variable CONSUL_KV_METADATA. 'availability': @@ -102,6 +122,7 @@ if true then availability groups will be created for each service. The node will be added to one of the groups based on the health status of the service. The group name is derived from the service name and the configurable availability suffixes +This can also be set with the environmental variable CONSUL_AVAILABILITY. 'available_suffix': @@ -109,10 +130,12 @@ suffix that should be appended to the service availability groups for available services e.g. if the suffix is '_up' and the service is nginx, then nodes with healthy nginx services will be added to the nginix_up group. Defaults to '_available' +This can also be set with the environmental variable CONSUL_AVAILABLE_SUFFIX. 'unavailable_suffix': as above but for unhealthy services, defaults to '_unavailable' +This can also be set with the environmental variable CONSUL_UNAVAILABLE_SUFFIX. 
Note that if the inventory discovers an 'ssh' service running on a node it will register the port as ansible_ssh_port in the node's metadata and this port will @@ -337,7 +360,7 @@ class ConsulInventory(object): else: index, groups = self.consul_api.kv.get(key) if groups and groups['Value']: - for group in groups['Value'].split(','): + for group in groups['Value'].decode().split(','): self.add_node_to_map(self.nodes_by_kv, group.strip(), node) def load_data_from_service(self, service_name, service, node_data): @@ -488,7 +511,10 @@ class ConsulConfig(dict): setattr(self, arg, getattr(args, arg)) def read_env_vars(self): - env_var_options = ['datacenter', 'url'] + env_var_options = ['host', 'token', 'datacenter', 'servers_suffix', + 'tags', 'kv_metadata', 'kv_groups', 'availability', + 'unavailable_suffix', 'available_suffix', 'url', + 'domain', 'suffixes', 'bulk_load'] for option in env_var_options: value = None env_var = 'CONSUL_' + option.upper() From ad8aa1b1e6130c457a74d1e8566b97ff2c700f3d Mon Sep 17 00:00:00 2001 From: Brett <19863984+brettmilford@users.noreply.github.com> Date: Thu, 4 Mar 2021 16:46:37 +1000 Subject: [PATCH 0086/3093] Jenkins build module (#745) * Jenkins build module A module for queuing and deleting jenkins builds. * CI fixes * More CI fixes. * Even more CI fixes * Fixing symlink * Update plugins/modules/web_infrastructure/jenkins_build.py Co-authored-by: Felix Fontein * removed ansible meta section * Added unit tests. * fix tests * more test fixes. * Completed tests. Mocked jenkins api calls. Fixed some logging. * Update plugins/modules/web_infrastructure/jenkins_build.py Co-authored-by: Andrew Klychkov * Update plugins/modules/web_infrastructure/jenkins_build.py Co-authored-by: Andrew Klychkov * Update plugins/modules/web_infrastructure/jenkins_build.py Co-authored-by: Andrew Klychkov * Update plugins/modules/web_infrastructure/jenkins_build.py Co-authored-by: Andrew Klychkov * Cleaned up default items And removed supports check mode flag. 
* setting name param required * Update plugins/modules/web_infrastructure/jenkins_build.py Co-authored-by: Brett Milford Co-authored-by: Felix Fontein Co-authored-by: Andrew Klychkov --- plugins/modules/jenkins_build.py | 1 + .../web_infrastructure/jenkins_build.py | 243 ++++++++++++++++++ .../web_infrastructure/test_jenkins_build.py | 110 ++++++++ 3 files changed, 354 insertions(+) create mode 120000 plugins/modules/jenkins_build.py create mode 100644 plugins/modules/web_infrastructure/jenkins_build.py create mode 100644 tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py diff --git a/plugins/modules/jenkins_build.py b/plugins/modules/jenkins_build.py new file mode 120000 index 0000000000..13e660e7b0 --- /dev/null +++ b/plugins/modules/jenkins_build.py @@ -0,0 +1 @@ +./web_infrastructure/jenkins_build.py \ No newline at end of file diff --git a/plugins/modules/web_infrastructure/jenkins_build.py b/plugins/modules/web_infrastructure/jenkins_build.py new file mode 100644 index 0000000000..7f1d32b602 --- /dev/null +++ b/plugins/modules/web_infrastructure/jenkins_build.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# +# Copyright: (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: jenkins_build +short_description: Manage jenkins builds +version_added: 2.2.0 +description: + - Manage Jenkins builds with Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" +author: Brett Milford (@brettmilford) +options: + args: + description: + - A list of parameters to pass to the build. + type: dict + name: + description: + - Name of the Jenkins job to build. + required: true + type: str + build_number: + description: + - An integer which specifies a build of a job. Is required to remove a build from the queue. 
+ type: int + password: + description: + - Password to authenticate with the Jenkins server. + type: str + state: + description: + - Attribute that specifies if the build is to be created or deleted. + default: present + choices: ['present', 'absent'] + type: str + token: + description: + - API token used to authenticate with the Jenkins server. + type: str + url: + description: + - URL of the Jenkins server. + default: http://localhost:8080 + type: str + user: + description: + - User to authenticate with the Jenkins server. + type: str +''' + +EXAMPLES = ''' +- name: Create a jenkins build using basic authentication + community.general.jenkins_build: + name: "test-check" + args: + cloud: "test" + availability_zone: "test_az" + user: admin + password: asdfg + url: http://localhost:8080 +''' + +RETURN = ''' +--- +name: + description: Name of the jenkins job. + returned: success + type: str + sample: "test-job" +state: + description: State of the jenkins job. + returned: success + type: str + sample: present +user: + description: User used for authentication. + returned: success + type: str + sample: admin +url: + description: Url to connect to the Jenkins server. + returned: success + type: str + sample: https://jenkins.mydomain.com +build_info: + description: Build info of the jenkins job. 
+ returned: success + type: dict +''' + +import traceback +from time import sleep + +JENKINS_IMP_ERR = None +try: + import jenkins + python_jenkins_installed = True +except ImportError: + JENKINS_IMP_ERR = traceback.format_exc() + python_jenkins_installed = False + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils._text import to_native + + +class JenkinsBuild: + + def __init__(self, module): + self.module = module + + self.name = module.params.get('name') + self.password = module.params.get('password') + self.args = module.params.get('args') + self.state = module.params.get('state') + self.token = module.params.get('token') + self.user = module.params.get('user') + self.jenkins_url = module.params.get('url') + self.build_number = module.params.get('build_number') + self.server = self.get_jenkins_connection() + + self.result = { + 'changed': False, + 'url': self.jenkins_url, + 'name': self.name, + 'user': self.user, + 'state': self.state, + } + + self.EXCL_STATE = "excluded state" + + def get_jenkins_connection(self): + try: + if (self.user and self.password): + return jenkins.Jenkins(self.jenkins_url, self.user, self.password) + elif (self.user and self.token): + return jenkins.Jenkins(self.jenkins_url, self.user, self.token) + elif (self.user and not (self.password or self.token)): + return jenkins.Jenkins(self.jenkins_url, self.user) + else: + return jenkins.Jenkins(self.jenkins_url) + except Exception as e: + self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e)) + + def get_next_build(self): + try: + build_number = self.server.get_job_info(self.name)['nextBuildNumber'] + except Exception as e: + self.module.fail_json(msg='Unable to get job info from Jenkins server, %s' % to_native(e), exception=traceback.format_exc()) + + return build_number + + def get_build_status(self): + try: + response = self.server.get_build_info(self.name, self.build_number) + return response + + except 
Exception as e: + self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e), exception=traceback.format_exc()) + + def present_build(self): + self.build_number = self.get_next_build() + + try: + if self.args is None: + self.server.build_job(self.name) + else: + self.server.build_job(self.name, self.args) + except Exception as e: + self.module.fail_json(msg='Unable to create build for %s: %s' % (self.jenkins_url, to_native(e)), + exception=traceback.format_exc()) + + def absent_build(self): + try: + self.server.delete_build(self.name, self.build_number) + except Exception as e: + self.module.fail_json(msg='Unable to delete build for %s: %s' % (self.jenkins_url, to_native(e)), + exception=traceback.format_exc()) + + def get_result(self): + result = self.result + build_status = self.get_build_status() + + if build_status['result'] is None: + sleep(10) + self.get_result() + else: + if build_status['result'] == "SUCCESS": + result['changed'] = True + result['build_info'] = build_status + else: + result['failed'] = True + result['build_info'] = build_status + + return result + + +def test_dependencies(module): + if not python_jenkins_installed: + module.fail_json( + msg=missing_required_lib("python-jenkins", + url="https://python-jenkins.readthedocs.io/en/latest/install.html"), + exception=JENKINS_IMP_ERR) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + args=dict(type='dict'), + build_number=dict(type='int'), + name=dict(required=True), + password=dict(no_log=True), + state=dict(choices=['present', 'absent'], default="present"), + token=dict(no_log=True), + url=dict(default="http://localhost:8080"), + user=dict(), + ), + mutually_exclusive=[ + ['password', 'token'], + ], + ) + + test_dependencies(module) + jenkins_build = JenkinsBuild(module) + + if module.params.get('state') == "present": + jenkins_build.present_build() + else: + jenkins_build.absent_build() + + sleep(10) + result = jenkins_build.get_result() + 
module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py new file mode 100644 index 0000000000..d0bbafcc91 --- /dev/null +++ b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py @@ -0,0 +1,110 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +from ansible_collections.community.general.plugins.modules.web_infrastructure import jenkins_build + +import json + + +def set_module_args(args): + """prepare arguments so that they will be picked up during module creation""" + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) + + +class AnsibleExitJson(Exception): + """Exception class to be raised by module.exit_json and caught by the test case""" + pass + + +class AnsibleFailJson(Exception): + """Exception class to be raised by module.fail_json and caught by the test case""" + pass + + +def exit_json(*args, **kwargs): + """function to patch over exit_json; package return data into an exception""" + if 'changed' not in kwargs: + kwargs['changed'] = False + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): + """function to patch over fail_json; package return data into an exception""" + kwargs['failed'] = True + raise AnsibleFailJson(kwargs) + + +class JenkinsMock(): + + def get_job_info(self, name): + return { + "nextBuildNumber": 1234 + } + + def get_build_info(self, name, build_number): + return { + "result": "SUCCESS" + } + + def get_build_status(self): + pass + 
+ def build_job(self, *args): + return None + + def delete_build(self, name, build_number): + return None + + +class TestJenkinsBuild(unittest.TestCase): + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') + def test_module_fail_when_required_args_missing(self, test_deps): + test_deps.return_value = None + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + jenkins_build.main() + + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.JenkinsBuild.get_jenkins_connection') + def test_module_create_build(self, jenkins_connection, test_deps): + test_deps.return_value = None + jenkins_connection.return_value = JenkinsMock() + + with self.assertRaises(AnsibleExitJson): + set_module_args({ + "name": "host-check", + "user": "abc", + "token": "xyz" + }) + jenkins_build.main() + + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.JenkinsBuild.get_jenkins_connection') + def test_module_delete_build(self, jenkins_connection, test_deps): + test_deps.return_value = None + jenkins_connection.return_value = JenkinsMock() + + with self.assertRaises(AnsibleExitJson): + set_module_args({ + "name": "host-check", + "build_number": "1234", + "state": "absent", + "user": "abc", + "token": "xyz" + }) + jenkins_build.main() From ce5aea790d5fc8c3664a323a3e6551da065619fc Mon Sep 17 00:00:00 2001 From: sam-lunt Date: Thu, 4 Mar 2021 00:47:45 -0600 Subject: [PATCH 0087/3093] zfs: avoid 
errors with creation-only properties (#1833) * avoid errors with creation-only properties * add changelog fragment * Apply suggestion to changelog fragment Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/1833-zfs-creation-only-properties.yaml | 2 ++ plugins/modules/storage/zfs/zfs.py | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1833-zfs-creation-only-properties.yaml diff --git a/changelogs/fragments/1833-zfs-creation-only-properties.yaml b/changelogs/fragments/1833-zfs-creation-only-properties.yaml new file mode 100644 index 0000000000..deb972a6d2 --- /dev/null +++ b/changelogs/fragments/1833-zfs-creation-only-properties.yaml @@ -0,0 +1,2 @@ +bugfixes: + - zfs - some ZFS properties could be passed when the dataset/volume did not exist, but would fail if the dataset already existed, even if the property matched what was specified in the ansible task (https://github.com/ansible-collections/community.general/issues/868, https://github.com/ansible-collections/community.general/pull/1833). 
diff --git a/plugins/modules/storage/zfs/zfs.py b/plugins/modules/storage/zfs/zfs.py index 8013dd1128..fe693a5045 100644 --- a/plugins/modules/storage/zfs/zfs.py +++ b/plugins/modules/storage/zfs/zfs.py @@ -203,7 +203,10 @@ class Zfs(object): rc, out, err = self.module.run_command(" ".join(cmd)) properties = dict() for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]: - if source == 'local': + # include source '-' so that creation-only properties are not removed + # to avoid errors when the dataset already exists and the property is not changed + # this scenario is most likely when the same playbook is run more than once + if source == 'local' or source == '-': properties[prop] = value # Add alias for enhanced sharing properties if self.enhanced_sharing: From dec345b818c47198c0e1c1f846a528ba02d22b63 Mon Sep 17 00:00:00 2001 From: Ox Date: Thu, 4 Mar 2021 07:49:38 +0100 Subject: [PATCH 0088/3093] Fix: nmcli - Ensure slave-type for bond-slave (#1882) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix: nmcli - Ensure slave-type for bond-slave Hello 🙂 When using bond-slave type, by default command sent to nmcli is: ['/usr/bin/nmcli', 'con', 'add', 'type', 'bond-slave', 'con-name', 'enp129s0f0', 'connection.interface-name', 'enp129s0f0', 'connection.autoconnect', 'yes', 'connection.master', 'bond0'] Which is not enough, nmcli will complain that connection.slave-type is missing. This small fix solves this issue. If this change is approved, I will add the changelog fragment. 
* Fix: nmcli - Adding changelog fragment for 1882 * Update changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml | 2 ++ plugins/modules/net_tools/nmcli.py | 4 ++++ 2 files changed, 6 insertions(+) create mode 100644 changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml diff --git a/changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml b/changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml new file mode 100644 index 0000000000..47569b6a24 --- /dev/null +++ b/changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml @@ -0,0 +1,2 @@ +bugfixes: + - nmcli - ensure the ``slave-type`` option is passed to ``nmcli`` for type ``bond-slave`` (https://github.com/ansible-collections/community.general/pull/1882). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index d469cbf1c2..2967996f3c 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -737,6 +737,10 @@ class Nmcli(object): 'primary': self.primary, 'updelay': self.updelay, }) + elif self.type == 'bond-slave': + options.update({ + 'connection.slave-type': 'bond', + }) elif self.type == 'bridge': options.update({ 'bridge.ageing-time': self.ageingtime, From ac95ff5b455ceb624abdd88f9065b98b469d901a Mon Sep 17 00:00:00 2001 From: phospi Date: Thu, 4 Mar 2021 07:51:39 +0100 Subject: [PATCH 0089/3093] Bugfix/manageiq inventory (#720) * Extending modules with resource_id * Added documentation * Revert previous PR * Added filter for active vm's * Added changelog fragment * Update changelogs/fragments/720-cloudforms_inventory.yml Co-authored-by: Felix Fontein --- .../fragments/720-cloudforms_inventory.yml | 2 + scripts/inventory/cloudforms.py | 49 +++++++++++++++---- 2 files changed, 41 insertions(+), 10 deletions(-) create mode 100644 
changelogs/fragments/720-cloudforms_inventory.yml diff --git a/changelogs/fragments/720-cloudforms_inventory.yml b/changelogs/fragments/720-cloudforms_inventory.yml new file mode 100644 index 0000000000..f5675205d1 --- /dev/null +++ b/changelogs/fragments/720-cloudforms_inventory.yml @@ -0,0 +1,2 @@ +bugfixes: + - cloudforms inventory - fixed issue that non-existing (archived) VMs were synced (https://github.com/ansible-collections/community.general/pull/720). diff --git a/scripts/inventory/cloudforms.py b/scripts/inventory/cloudforms.py index 72b4419cc8..3514698d59 100644 --- a/scripts/inventory/cloudforms.py +++ b/scripts/inventory/cloudforms.py @@ -216,9 +216,9 @@ class CloudFormsInventory(object): parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)') self.args = parser.parse_args() - def _get_json(self, url): + def _http_request(self, url): """ - Make a request and return the JSON + Make a request and return the result converted from JSON """ results = [] @@ -231,7 +231,8 @@ class CloudFormsInventory(object): try: results = json.loads(ret.text) except ValueError: - warnings.warn("Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason)) + warnings.warn( + "Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason)) results = {} if self.args.debug: @@ -245,11 +246,18 @@ class CloudFormsInventory(object): return results - def _get_hosts(self): + def _get_json(self, endpoint, url_suffix): """ - Get all hosts by paging through the results + Make a request by given url, split request by configured limit, + go through all sub-requests and return the aggregated data received + by cloudforms + + :param endpoint: api endpoint to access + :param url_suffix: additional api parameters + """ - limit = self.cloudforms_limit + + limit = int(self.cloudforms_limit) page = 0 last_page = False @@ -258,14 +266,35 @@ class 
CloudFormsInventory(object): while not last_page: offset = page * limit - ret = self._get_json("%s/api/vms?offset=%s&limit=%s&expand=resources,tags,hosts,&attributes=ipaddresses" % (self.cloudforms_url, offset, limit)) - results += ret['resources'] - if ret['subcount'] < limit: + url = "%s%s?offset=%s&limit=%s%s" % ( + self.cloudforms_url, endpoint, offset, limit, url_suffix) + + if self.args.debug: + print("Connecting to url '%s'" % url) + + ret = self._http_request(url) + results += [ret] + + if 'subcount' in ret: + if ret['subcount'] < limit: + last_page = True + page += 1 + else: last_page = True - page += 1 return results + def _get_hosts(self): + """ + Get all hosts + """ + endpoint = "/api/vms" + url_suffix = "&expand=resources,tags,hosts,&attributes=active,ipaddresses&filter[]=active=true" + results = self._get_json(endpoint, url_suffix) + resources = [item for sublist in results for item in sublist['resources']] + + return resources + def update_cache(self): """ Make calls to cloudforms and save the output in a cache From e9866a2ccdef996e73ecf820edfe7f2b7e73779d Mon Sep 17 00:00:00 2001 From: John R Barker Date: Thu, 4 Mar 2021 12:06:25 +0000 Subject: [PATCH 0090/3093] bug_report issue form (#1966) * Bring inline with ansible/ansible's issue template. 
* Add more placeholders --- .github/ISSUE_TEMPLATE/bug_report.yml | 185 ++++++++++++++++---------- 1 file changed, 113 insertions(+), 72 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index e7d5579433..24061f6028 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,86 +1,127 @@ -name: Bug Report -about: Create a report to help us improve +--- +name: Bug report +description: Create a report to help us improve +issue_body: false # default: true, adds a classic WSYWIG textarea, if on body: +- type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - - type: markdown - attributes: - value: | - Verify first that your issue is not already reported on [GitHub](https://github.com/ansible-collections/community.general/issues) - Also test if the latest release and main branch are affected too + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues - - type: textarea - attributes: - label: Summary - description: 'Explain the problem briefly below' - validations: - required: true - - type: dropdown - attributes: - # FIXME: Once GitHub allows defining the default choice, update this - label: Issue Type - options: - - Bug Report - validations: - required: true +- type: textarea + attributes: + label: Summary + description: Explain the problem briefly below. + placeholder: >- + When I try to do X with the collection from the main branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... 
+ validations: + required: true - - type: textarea - attributes: - # For smaller collections we could use a multi-select and hardcode the list - # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins - # Select from list, filter as you type (`mysql` would only show the 3 mysql components) - # OR freeform - doesn't seem to be supported in adaptivecards +- type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Bug Report + validations: + required: true - label: Component Name - description: 'List the component, ie `template`, `mysql_users`' - validations: - required: true +- type: textarea + attributes: + # For smaller collections we could use a multi-select and hardcode the list + # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins + # Select from list, filter as you type (`mysql` would only show the 3 mysql components) + # OR freeform - doesn't seem to be supported in adaptivecards - - type: textarea - attributes: - label: Ansible Version - description: | - Paste verbatim output from `ansible --version` between quotes - value: | - ```paste below + description: >- + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true - ``` - - type: textarea - attributes: - label: Configuration - description: | - If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. - This can be a piece of YAML from, e.g., an automation, script, scene or configuration. 
- Paste verbatim output from `ansible-config dump --only-changed` between quotes - value: | - ```paste below +- type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + triple backticks. + value: | + ```console (paste below) + $ ansible --version - ``` - - type: textarea - attributes: - label: Configuration - description: | - If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. - This can be a piece of YAML from, e.g., an automation, script, scene or configuration. - Paste verbatim output from `ansible-config dump --only-changed` between quotes - value: | - ```paste below + ``` + validations: + required: true - ``` - - type: textarea - attributes: - label: OS / Environment - description: 'Provide all relevant information below, e.g. target OS versions, network device firmware, etc' +- type: textarea + attributes: + label: Configuration + description: >- + If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. + This can be a piece of YAML from, e.g., an automation, script, scene or configuration. + Paste verbatim output from `ansible-config dump --only-changed` between quotes + value: | + ```console (paste below) + $ ansible-config dump --only-changed - ``` - - type: textarea - attributes: - label: Steps To Reproduce - description: 'Describe exactly how to reproduce the problem, using a minimal test-case' - value: | - ```paste below + ``` - ``` - - type: textarea - attributes: - label: Expected Results - description: | - Describe what you expected to happen when running the steps above + ``` + +- type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. target OS versions, + network device firmware, etc. + placeholder: RHEL 8, CentOS Stream etc. + validations: + required: false + + +- type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. 
It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: true + +- type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y, + but found that it did not. + validations: + required: true + +- type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output between quotes. + value: | + ```console (paste below) + + ``` From 5fdbe084e7eb48f68e8bdd4ebc48facffc2bd43c Mon Sep 17 00:00:00 2001 From: John R Barker Date: Thu, 4 Mar 2021 12:08:16 +0000 Subject: [PATCH 0091/3093] bug_report: missing label --- .github/ISSUE_TEMPLATE/bug_report.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 24061f6028..9d597bac22 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -41,7 +41,7 @@ body: # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins # Select from list, filter as you type (`mysql` would only show the 3 mysql components) # OR freeform - doesn't seem to be supported in adaptivecards - + label: Component Name description: >- Write the short name of the module, plugin, task or feature below, *use your best guess if unsure*. 
From f49cf2c22df485f547062370d9b75969e2983a9a Mon Sep 17 00:00:00 2001 From: John R Barker Date: Thu, 4 Mar 2021 18:34:04 +0000 Subject: [PATCH 0092/3093] Add other GH Issue forms (#1968) * Add other GH Issue forms * review comments --- .github/ISSUE_TEMPLATE/bug_report.yml | 10 ++ .github/ISSUE_TEMPLATE/config.yml | 27 +++++ .../ISSUE_TEMPLATE/documentation_report.yml | 113 ++++++++++++++++++ .github/ISSUE_TEMPLATE/feature_request.yml | 71 +++++++++++ 4 files changed, 221 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/ISSUE_TEMPLATE/documentation_report.yml create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yml diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 9d597bac22..cd9785d7cf 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -125,3 +125,13 @@ body: ```console (paste below) ``` +- type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct][CoC] first. + [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections + options: + - label: I agree to follow the Ansible Code of Conduct + required: true +... diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000..f90bd1ad86 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,27 @@ +--- +# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser +blank_issues_enabled: false # default: true +contact_links: +- name: Security bug report + url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: | + Please learn how to report security vulnerabilities here. 
+ + For all security related bugs, email security@ansible.com + instead of using this issue tracker and you will receive + a prompt response. + + For more information, see + https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html +- name: Ansible Code of Conduct + url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Be nice to other members of the community. +- name: Talks to the community + url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information + about: Please ask and answer usage questions here +- name: Working groups + url: https://github.com/ansible/community/wiki + about: Interested in improving a specific area? Become a part of a working group! +- name: For Enterprise + url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Red Hat offers support for the Ansible Automation Platform diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml new file mode 100644 index 0000000000..2ee94b85dd --- /dev/null +++ b/.github/ISSUE_TEMPLATE/documentation_report.yml @@ -0,0 +1,113 @@ +--- +name: Documentation Report +description: Ask us about docs +# NOTE: issue body is enabled to allow screenshots +issue_body: true # default: true, adds a classic WSYWIG textarea, if on + +body: +- type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. 
+ *Complete **all** sections as described, this form is processed automatically.* + + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + + +- type: textarea + attributes: + label: Summary + description: | + Explain the problem briefly below, add suggestions to wording or structure. + + **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page? + placeholder: >- + I was reading the Collection documentation of version X and I'm having + problems understanding Y. It would be very helpful if that got + rephrased as Z. + validations: + required: true + +- type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Documentation Report + validations: + required: true + +- type: input + attributes: + label: Component Name + description: >- + Write the short name of the rst file, module, plugin, task or + feature below, *use your best guess if unsure*. + placeholder: mysql_user + validations: + required: true + +- type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + tripple backticks. + value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: false + +- type: textarea + attributes: + label: Configuration + description: >- + Paste verbatim output from `ansible-config dump --only-changed` between quotes. + value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + validations: + required: false + +- type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. OS version, + browser, etc. + placeholder: Fedora 33, Firefox etc. + validations: + required: false + +- type: textarea + attributes: + label: Additional Information + description: | + Describe how this improves the documentation, e.g. 
before/after situation or screenshots. + + **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them. + + **HINT:** You can paste https://gist.github.com links for larger files. + placeholder: >- + When the improvement is applied, it makes it more straightforward + to understand X. + validations: + required: false + +- type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct][CoC] first. + [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections + options: + - label: I agree to follow the Ansible Code of Conduct + required: true +... diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000000..481e17e122 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,71 @@ +--- +name: Feature request +description: Suggest an idea for this project +issue_body: false # default: true, adds a classic WSYWIG textarea, if on + +body: +- type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* + + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + + +- type: textarea + attributes: + label: Summary + description: Describe the new feature/improvement briefly below. + placeholder: >- + I am trying to do X with the collection from the main branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of ansible-core because of Z. 
+ validations: + required: true + +- type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Feature Idea + validations: + required: true + +- type: input + attributes: + label: Component Name + description: >- + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + +- type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: false +- type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct][CoC] first. + [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections + options: + - label: I agree to follow the Ansible Code of Conduct + required: true +... From 1133e5c8653b0ef2d16e2e47cb800747687114e4 Mon Sep 17 00:00:00 2001 From: John Barker Date: Thu, 4 Mar 2021 19:56:29 +0000 Subject: [PATCH 0093/3093] Fix CoC links --- .github/ISSUE_TEMPLATE/bug_report.yml | 3 +-- .github/ISSUE_TEMPLATE/documentation_report.yml | 3 +-- .github/ISSUE_TEMPLATE/feature_request.yml | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index cd9785d7cf..4ebff09890 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -129,8 +129,7 @@ body: attributes: label: Code of Conduct description: | - Read the [Ansible Code of Conduct][CoC] first. 
- [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. options: - label: I agree to follow the Ansible Code of Conduct required: true diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml index 2ee94b85dd..2d30534840 100644 --- a/.github/ISSUE_TEMPLATE/documentation_report.yml +++ b/.github/ISSUE_TEMPLATE/documentation_report.yml @@ -105,8 +105,7 @@ body: attributes: label: Code of Conduct description: | - Read the [Ansible Code of Conduct][CoC] first. - [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. options: - label: I agree to follow the Ansible Code of Conduct required: true diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 481e17e122..0116d94d45 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -63,8 +63,7 @@ body: attributes: label: Code of Conduct description: | - Read the [Ansible Code of Conduct][CoC] first. - [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. 
options: - label: I agree to follow the Ansible Code of Conduct required: true From 7425e9840dcb1c39b2c2cd90eb023403402154ff Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 5 Mar 2021 20:17:36 +1300 Subject: [PATCH 0094/3093] Deprecation of parameters triggering the parameter-invalid sanity-check (#1927) * fixed validation-modules for plugins/modules/packaging/language/composer.py * fixed validation-modules for plugins/modules/packaging/os/apt_rpm.py * fixed validation-modules for plugins/modules/packaging/os/homebrew.py * fixed validation-modules for plugins/modules/packaging/os/homebrew_cask.py * fixed validation-modules for plugins/modules/packaging/os/opkg.py * fixed validation-modules for plugins/modules/packaging/os/pacman.py * fixed validation-modules for plugins/modules/packaging/os/slackpkg.py * fixed validation-modules for plugins/modules/packaging/os/urpmi.py * fixed validation-modules for plugins/modules/packaging/os/xbps.py * fixed validation-modules for plugins/modules/source_control/github/github_deploy_key.py * fixed validation-modules for plugins/modules/system/puppet.py * added changelog fragment * adjustments from PR + fixes in changelog frag * fixed deprecation of param "show_diff" in module "puppet" * Update changelogs/fragments/1927-removed-parameter-invalid.yml Co-authored-by: Felix Fontein * Update plugins/modules/system/puppet.py Co-authored-by: Felix Fontein * removed unnecessary ignore lines, adjustment in changelog frag * no need to explicitly call deprecate() when param marked for removal * Update changelogs/fragments/1927-removed-parameter-invalid.yml Co-authored-by: Felix Fontein * Adjustments in changelog fragment, per PR * bumping deprecation to 7.0.0 Co-authored-by: Felix Fontein --- .../1927-removed-parameter-invalid.yml | 12 ++++ .../modules/packaging/language/composer.py | 55 +++++++++++++++---- plugins/modules/packaging/os/apt_rpm.py | 5 +- 
plugins/modules/packaging/os/homebrew.py | 2 + plugins/modules/packaging/os/homebrew_cask.py | 2 + plugins/modules/packaging/os/opkg.py | 5 +- plugins/modules/packaging/os/pacman.py | 5 +- plugins/modules/packaging/os/slackpkg.py | 6 +- plugins/modules/packaging/os/urpmi.py | 10 +++- plugins/modules/packaging/os/xbps.py | 6 +- .../github/github_deploy_key.py | 5 +- plugins/modules/system/puppet.py | 11 ++-- tests/sanity/ignore-2.10.txt | 2 - tests/sanity/ignore-2.11.txt | 2 - tests/sanity/ignore-2.9.txt | 2 - 15 files changed, 97 insertions(+), 33 deletions(-) create mode 100644 changelogs/fragments/1927-removed-parameter-invalid.yml diff --git a/changelogs/fragments/1927-removed-parameter-invalid.yml b/changelogs/fragments/1927-removed-parameter-invalid.yml new file mode 100644 index 0000000000..6dbc2e187b --- /dev/null +++ b/changelogs/fragments/1927-removed-parameter-invalid.yml @@ -0,0 +1,12 @@ +deprecated_features: + - composer - deprecated invalid parameter aliases ``working-dir``, ``global-command``, ``prefer-source``, ``prefer-dist``, ``no-dev``, ``no-scripts``, ``no-plugins``, ``optimize-autoloader``, ``classmap-authoritative``, ``apcu-autoloader``, ``ignore-platform-reqs``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). + - apt_rpm - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). + - homebrew - deprecated invalid parameter alias ``update-brew``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). + - homebrew_cask - deprecated invalid parameter alias ``update-brew``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). + - opkg - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). 
+ - pacman - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). + - slackpkg - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). + - urmpi - deprecated invalid parameter aliases ``update-cache`` and ``no-recommends``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). + - xbps - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). + - github_deploy_key - deprecated invalid parameter alias ``2fa_token``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). + - puppet - deprecated undocumented parameter ``show_diff``, will be removed in 7.0.0. (https://github.com/ansible-collections/community.general/pull/1927). diff --git a/plugins/modules/packaging/language/composer.py b/plugins/modules/packaging/language/composer.py index 3bc09c2ddc..c792098b04 100644 --- a/plugins/modules/packaging/language/composer.py +++ b/plugins/modules/packaging/language/composer.py @@ -41,40 +41,47 @@ options: - Directory of your project (see --working-dir). This is required when the command is not run globally. - Will be ignored if C(global_command=true). + - Alias C(working-dir) has been deprecated and will be removed in community.general 5.0.0. aliases: [ working-dir ] global_command: description: - Runs the specified command globally. + - Alias C(global-command) has been deprecated and will be removed in community.general 5.0.0. type: bool default: false aliases: [ global-command ] prefer_source: description: - Forces installation from package sources when possible (see --prefer-source). + - Alias C(prefer-source) has been deprecated and will be removed in community.general 5.0.0. 
default: false type: bool aliases: [ prefer-source ] prefer_dist: description: - Forces installation from package dist even for dev versions (see --prefer-dist). + - Alias C(prefer-dist) has been deprecated and will be removed in community.general 5.0.0. default: false type: bool aliases: [ prefer-dist ] no_dev: description: - Disables installation of require-dev packages (see --no-dev). + - Alias C(no-dev) has been deprecated and will be removed in community.general 5.0.0. default: true type: bool aliases: [ no-dev ] no_scripts: description: - Skips the execution of all scripts defined in composer.json (see --no-scripts). + - Alias C(no-scripts) has been deprecated and will be removed in community.general 5.0.0. default: false type: bool aliases: [ no-scripts ] no_plugins: description: - Disables all plugins ( see --no-plugins ). + - Alias C(no-plugins) has been deprecated and will be removed in community.general 5.0.0. default: false type: bool aliases: [ no-plugins ] @@ -83,6 +90,7 @@ options: - Optimize autoloader during autoloader dump (see --optimize-autoloader). - Convert PSR-0/4 autoloading to classmap to get a faster autoloader. - Recommended especially for production, but can take a bit of time to run. + - Alias C(optimize-autoloader) has been deprecated and will be removed in community.general 5.0.0. default: true type: bool aliases: [ optimize-autoloader ] @@ -91,18 +99,21 @@ options: - Autoload classes from classmap only. - Implicitely enable optimize_autoloader. - Recommended especially for production, but can take a bit of time to run. + - Alias C(classmap-authoritative) has been deprecated and will be removed in community.general 5.0.0. default: false type: bool aliases: [ classmap-authoritative ] apcu_autoloader: description: - Uses APCu to cache found/not-found classes + - Alias C(apcu-autoloader) has been deprecated and will be removed in community.general 5.0.0. 
default: false type: bool aliases: [ apcu-autoloader ] ignore_platform_reqs: description: - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these. + - Alias C(ignore-platform-reqs) has been deprecated and will be removed in community.general 5.0.0. default: false type: bool aliases: [ ignore-platform-reqs ] @@ -187,17 +198,39 @@ def main(): command=dict(default="install", type="str"), arguments=dict(default="", type="str"), executable=dict(type="path", aliases=["php_path"]), - working_dir=dict(type="path", aliases=["working-dir"]), - global_command=dict(default=False, type="bool", aliases=["global-command"]), - prefer_source=dict(default=False, type="bool", aliases=["prefer-source"]), - prefer_dist=dict(default=False, type="bool", aliases=["prefer-dist"]), - no_dev=dict(default=True, type="bool", aliases=["no-dev"]), - no_scripts=dict(default=False, type="bool", aliases=["no-scripts"]), - no_plugins=dict(default=False, type="bool", aliases=["no-plugins"]), - apcu_autoloader=dict(default=False, type="bool", aliases=["apcu-autoloader"]), - optimize_autoloader=dict(default=True, type="bool", aliases=["optimize-autoloader"]), - classmap_authoritative=dict(default=False, type="bool", aliases=["classmap-authoritative"]), - ignore_platform_reqs=dict(default=False, type="bool", aliases=["ignore-platform-reqs"]), + working_dir=dict( + type="path", aliases=["working-dir"], + deprecated_aliases=[dict(name='working-dir', version='5.0.0', collection_name='community.general')]), + global_command=dict( + default=False, type="bool", aliases=["global-command"], + deprecated_aliases=[dict(name='global-command', version='5.0.0', collection_name='community.general')]), + prefer_source=dict( + default=False, type="bool", aliases=["prefer-source"], + deprecated_aliases=[dict(name='prefer-source', version='5.0.0', collection_name='community.general')]), + prefer_dist=dict( + default=False, type="bool", 
aliases=["prefer-dist"], + deprecated_aliases=[dict(name='prefer-dist', version='5.0.0', collection_name='community.general')]), + no_dev=dict( + default=True, type="bool", aliases=["no-dev"], + deprecated_aliases=[dict(name='no-dev', version='5.0.0', collection_name='community.general')]), + no_scripts=dict( + default=False, type="bool", aliases=["no-scripts"], + deprecated_aliases=[dict(name='no-scripts', version='5.0.0', collection_name='community.general')]), + no_plugins=dict( + default=False, type="bool", aliases=["no-plugins"], + deprecated_aliases=[dict(name='no-plugins', version='5.0.0', collection_name='community.general')]), + apcu_autoloader=dict( + default=False, type="bool", aliases=["apcu-autoloader"], + deprecated_aliases=[dict(name='apcu-autoloader', version='5.0.0', collection_name='community.general')]), + optimize_autoloader=dict( + default=True, type="bool", aliases=["optimize-autoloader"], + deprecated_aliases=[dict(name='optimize-autoloader', version='5.0.0', collection_name='community.general')]), + classmap_authoritative=dict( + default=False, type="bool", aliases=["classmap-authoritative"], + deprecated_aliases=[dict(name='classmap-authoritative', version='5.0.0', collection_name='community.general')]), + ignore_platform_reqs=dict( + default=False, type="bool", aliases=["ignore-platform-reqs"], + deprecated_aliases=[dict(name='ignore-platform-reqs', version='5.0.0', collection_name='community.general')]), ), required_if=[('global_command', False, ['working_dir'])], supports_check_mode=True diff --git a/plugins/modules/packaging/os/apt_rpm.py b/plugins/modules/packaging/os/apt_rpm.py index 6b6bb7ec26..3c200927ce 100644 --- a/plugins/modules/packaging/os/apt_rpm.py +++ b/plugins/modules/packaging/os/apt_rpm.py @@ -33,6 +33,7 @@ options: update_cache: description: - update the package database first C(apt-get update). + - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0. 
aliases: [ 'update-cache' ] type: bool default: no @@ -157,7 +158,9 @@ def main(): module = AnsibleModule( argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']), - update_cache=dict(type='bool', default=False, aliases=['update-cache']), + update_cache=dict( + type='bool', default=False, aliases=['update-cache'], + deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]), package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']), ), ) diff --git a/plugins/modules/packaging/os/homebrew.py b/plugins/modules/packaging/os/homebrew.py index 21dea647f5..9a41370c3d 100644 --- a/plugins/modules/packaging/os/homebrew.py +++ b/plugins/modules/packaging/os/homebrew.py @@ -49,6 +49,7 @@ options: update_homebrew: description: - update homebrew itself first. + - Alias C(update-brew) has been deprecated and will be removed in community.general 5.0.0. type: bool default: no aliases: ['update-brew'] @@ -888,6 +889,7 @@ def main(): default=False, aliases=["update-brew"], type='bool', + deprecated_aliases=[dict(name='update-brew', version='5.0.0', collection_name='community.general')], ), upgrade_all=dict( default=False, diff --git a/plugins/modules/packaging/os/homebrew_cask.py b/plugins/modules/packaging/os/homebrew_cask.py index feb1ba68fe..498d0b8771 100644 --- a/plugins/modules/packaging/os/homebrew_cask.py +++ b/plugins/modules/packaging/os/homebrew_cask.py @@ -49,6 +49,7 @@ options: description: - Update homebrew itself first. - Note that C(brew cask update) is a synonym for C(brew update). + - Alias C(update-brew) has been deprecated and will be removed in community.general 5.0.0. 
type: bool default: no aliases: [ 'update-brew' ] @@ -800,6 +801,7 @@ def main(): default=False, aliases=["update-brew"], type='bool', + deprecated_aliases=[dict(name='update-brew', version='5.0.0', collection_name='community.general')], ), install_options=dict( default=None, diff --git a/plugins/modules/packaging/os/opkg.py b/plugins/modules/packaging/os/opkg.py index 7da9a48755..07b99bf4f4 100644 --- a/plugins/modules/packaging/os/opkg.py +++ b/plugins/modules/packaging/os/opkg.py @@ -49,6 +49,7 @@ options: update_cache: description: - update the package db first + - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0. aliases: ['update-cache'] default: "no" type: bool @@ -173,7 +174,9 @@ def main(): state=dict(default="present", choices=["present", "installed", "absent", "removed"]), force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", "checksum", "removal-of-dependent-packages"]), - update_cache=dict(default="no", aliases=["update-cache"], type='bool') + update_cache=dict( + default="no", aliases=["update-cache"], type='bool', + deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]), ) ) diff --git a/plugins/modules/packaging/os/pacman.py b/plugins/modules/packaging/os/pacman.py index 0931ddc7e1..b19528ba9e 100644 --- a/plugins/modules/packaging/os/pacman.py +++ b/plugins/modules/packaging/os/pacman.py @@ -54,6 +54,7 @@ options: description: - Whether or not to refresh the master package lists. - This can be run as part of a package installation or as a separate step. + - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0. 
default: no type: bool aliases: [ update-cache ] @@ -421,7 +422,9 @@ def main(): extra_args=dict(type='str', default=''), upgrade=dict(type='bool', default=False), upgrade_extra_args=dict(type='str', default=''), - update_cache=dict(type='bool', default=False, aliases=['update-cache']), + update_cache=dict( + type='bool', default=False, aliases=['update-cache'], + deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]), update_cache_extra_args=dict(type='str', default=''), ), required_one_of=[['name', 'update_cache', 'upgrade']], diff --git a/plugins/modules/packaging/os/slackpkg.py b/plugins/modules/packaging/os/slackpkg.py index 424f5b1b4e..b556d8be3d 100644 --- a/plugins/modules/packaging/os/slackpkg.py +++ b/plugins/modules/packaging/os/slackpkg.py @@ -41,6 +41,7 @@ options: update_cache: description: - update the package database first + - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0. required: false default: false type: bool @@ -177,8 +178,9 @@ def main(): argument_spec=dict( state=dict(default="present", choices=['installed', 'removed', 'absent', 'present', 'latest']), name=dict(aliases=["pkg"], required=True, type='list', elements='str'), - update_cache=dict(default=False, aliases=["update-cache"], - type='bool'), + update_cache=dict( + default=False, aliases=["update-cache"], type='bool', + deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]), ), supports_check_mode=True) diff --git a/plugins/modules/packaging/os/urpmi.py b/plugins/modules/packaging/os/urpmi.py index 9d54fbcf1e..47c22ffb93 100644 --- a/plugins/modules/packaging/os/urpmi.py +++ b/plugins/modules/packaging/os/urpmi.py @@ -33,12 +33,14 @@ options: update_cache: description: - Update the package database first C(urpmi.update -a). + - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0. 
type: bool default: no aliases: ['update-cache'] no_recommends: description: - Corresponds to the C(--no-recommends) option for I(urpmi). + - Alias C(no-recommends) has been deprecated and will be removed in community.general 5.0.0. type: bool default: yes aliases: ['no-recommends'] @@ -195,9 +197,13 @@ def main(): argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']), - update_cache=dict(type='bool', default=False, aliases=['update-cache']), + update_cache=dict( + type='bool', default=False, aliases=['update-cache'], + deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]), force=dict(type='bool', default=True), - no_recommends=dict(type='bool', default=True, aliases=['no-recommends']), + no_recommends=dict( + type='bool', default=True, aliases=['no-recommends'], + deprecated_aliases=[dict(name='no-recommends', version='5.0.0', collection_name='community.general')]), name=dict(type='list', elements='str', required=True, aliases=['package', 'pkg']), root=dict(type='str', aliases=['installroot']), ), diff --git a/plugins/modules/packaging/os/xbps.py b/plugins/modules/packaging/os/xbps.py index 6f2f5dfaaa..69163a4744 100644 --- a/plugins/modules/packaging/os/xbps.py +++ b/plugins/modules/packaging/os/xbps.py @@ -42,6 +42,7 @@ options: description: - Whether or not to refresh the master package lists. This can be run as part of a package installation or as a separate step. + - Alias C(update-cache) has been deprecated and will be removed in community.general 5.0.0. 
aliases: ['update-cache'] type: bool default: yes @@ -290,8 +291,9 @@ def main(): recurse=dict(default=False, type='bool'), force=dict(default=False, type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'), upgrade=dict(default=False, type='bool'), - update_cache=dict(default=True, aliases=['update-cache'], - type='bool'), + update_cache=dict( + default=True, aliases=['update-cache'], type='bool', + deprecated_aliases=[dict(name='update-cache', version='5.0.0', collection_name='community.general')]), upgrade_xbps=dict(default=True, type='bool') ), required_one_of=[['name', 'update_cache', 'upgrade']], diff --git a/plugins/modules/source_control/github/github_deploy_key.py b/plugins/modules/source_control/github/github_deploy_key.py index 8954317b71..4d55cb0db3 100644 --- a/plugins/modules/source_control/github/github_deploy_key.py +++ b/plugins/modules/source_control/github/github_deploy_key.py @@ -78,6 +78,7 @@ options: otp: description: - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password). + - Alias C(2fa_token) has been deprecated and will be removed in community.general 5.0.0. 
aliases: ['2fa_token'] type: int notes: @@ -297,7 +298,9 @@ def main(): force=dict(required=False, type='bool', default=False), username=dict(required=False, type='str'), password=dict(required=False, type='str', no_log=True), - otp=dict(required=False, type='int', aliases=['2fa_token'], no_log=True), + otp=dict( + required=False, type='int', aliases=['2fa_token'], no_log=True, + deprecated_aliases=[dict(name='2fa_token', version='5.0.0', collection_name='community.general')]), token=dict(required=False, type='str', no_log=True) ), mutually_exclusive=[ diff --git a/plugins/modules/system/puppet.py b/plugins/modules/system/puppet.py index db8c0ec8ef..309da290d0 100644 --- a/plugins/modules/system/puppet.py +++ b/plugins/modules/system/puppet.py @@ -171,12 +171,11 @@ def main(): puppetmaster=dict(type='str'), modulepath=dict(type='str'), manifest=dict(type='str'), - noop=dict(required=False, type='bool'), - logdest=dict(type='str', default='stdout', choices=['all', - 'stdout', - 'syslog']), - # internal code to work with --diff, do not use - show_diff=dict(type='bool', default=False, aliases=['show-diff']), + noop=dict(type='bool'), + logdest=dict(type='str', default='stdout', choices=['all', 'stdout', 'syslog']), + show_diff=dict( + type='bool', default=False, aliases=['show-diff'], + removed_in_version='7.0.0', removed_from_collection='community.general'), facts=dict(type='dict'), facter_basename=dict(type='str', default='ansible'), environment=dict(type='str'), diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 54f5b279ea..59e250681c 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -205,9 +205,7 @@ plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid- plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is 
not documented -plugins/modules/system/puppet.py validate-modules:parameter-invalid plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/puppet.py validate-modules:undocumented-parameter plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 9da66ae9dc..ee35b15aea 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -204,9 +204,7 @@ plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid- plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented -plugins/modules/system/puppet.py validate-modules:parameter-invalid plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/puppet.py validate-modules:undocumented-parameter plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 1e2b56a684..200c7204ea 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -220,9 +220,7 @@ plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-synt plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/puppet.py 
use-argspec-type-path -plugins/modules/system/puppet.py validate-modules:parameter-invalid plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/puppet.py validate-modules:undocumented-parameter plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:return-syntax-error From 53c6b4967332c48048124b32fb976c7af9e50f82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Torres=20Cogollo?= Date: Sat, 6 Mar 2021 13:30:43 +0100 Subject: [PATCH 0095/3093] Added new module github_repo (#1683) * Added new module github_repo * Fixed sanity errors * Fixed sanity errors * Unit tests for github_repo module * Fixed import-before-documentation * Added PyGithub dependency for unit tests * Fixed errata * Require Python >= 2.7 * Support for check_mode and other improvements * Fixed import-before-documentation * Improved module parameter requirements, check mode and docs * Code improvements * Fixed version tag --- plugins/modules/github_repo.py | 1 + .../source_control/github/github_repo.py | 242 +++++++++++++++++ .../modules/source_control/github/__init__.py | 0 .../source_control/github/test_github_repo.py | 252 ++++++++++++++++++ tests/unit/requirements.txt | 3 +- 5 files changed, 497 insertions(+), 1 deletion(-) create mode 120000 plugins/modules/github_repo.py create mode 100644 plugins/modules/source_control/github/github_repo.py create mode 100644 tests/unit/plugins/modules/source_control/github/__init__.py create mode 100644 tests/unit/plugins/modules/source_control/github/test_github_repo.py diff --git a/plugins/modules/github_repo.py b/plugins/modules/github_repo.py new file mode 120000 index 0000000000..ef55c25c2f --- /dev/null +++ b/plugins/modules/github_repo.py @@ -0,0 +1 @@ +./source_control/github/github_repo.py \ No newline at end of 
file diff --git a/plugins/modules/source_control/github/github_repo.py b/plugins/modules/source_control/github/github_repo.py new file mode 100644 index 0000000000..41f57469e4 --- /dev/null +++ b/plugins/modules/source_control/github/github_repo.py @@ -0,0 +1,242 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Álvaro Torres Cogollo +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: github_repo +short_description: Manage your repositories on Github +version_added: 2.2.0 +description: +- Manages Github repositories using PyGithub library. +- Authentication can be done with I(access_token) or with I(username) and I(password). +options: + username: + description: + - Username used for authentication. + - This is only needed when not using I(access_token). + type: str + required: false + password: + description: + - Password used for authentication. + - This is only needed when not using I(access_token). + type: str + required: false + access_token: + description: + - Token parameter for authentication. + - This is only needed when not using I(username) and I(password). + type: str + required: false + name: + description: + - Repository name. + type: str + required: true + description: + description: + - Description for the repository. + - This is only used when I(state) is C(present). + type: str + default: '' + required: false + private: + description: + - Whether the new repository should be private or not. + - This is only used when I(state) is C(present). + type: bool + default: no + required: false + state: + description: + - Whether the repository should exist or not. + type: str + default: present + choices: [ absent, present ] + required: false + organization: + description: + - Organization for the repository. 
+ - When I(state) is C(present), the repository will be created in the current user profile. + type: str + required: false +requirements: +- PyGithub>=1.54 +notes: +- For Python 3, PyGithub>=1.54 should be used. +- "For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020)." +- "For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019)." +- Supports C(check_mode). +author: +- Álvaro Torres Cogollo (@atorrescogollo) +''' + +EXAMPLES = ''' +- name: Create a Github repository + community.general.github_repo: + access_token: mytoken + organization: MyOrganization + name: myrepo + description: "Just for fun" + private: yes + state: present + register: result + +- name: Delete the repository + community.general.github_repo: + username: octocat + password: password + organization: MyOrganization + name: myrepo + state: absent + register: result +''' + +RETURN = ''' +repo: + description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository). 
+ returned: success and I(state) is C(present) + type: dict +''' + +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +import sys + +GITHUB_IMP_ERR = None +try: + from github import Github, GithubException + from github.GithubException import UnknownObjectException + HAS_GITHUB_PACKAGE = True +except Exception: + GITHUB_IMP_ERR = traceback.format_exc() + HAS_GITHUB_PACKAGE = False + + +def authenticate(username=None, password=None, access_token=None): + if access_token: + return Github(base_url="https://api.github.com:443", login_or_token=access_token) + else: + return Github(base_url="https://api.github.com:443", login_or_token=username, password=password) + + +def create_repo(gh, name, organization=None, private=False, description='', check_mode=False): + result = dict( + changed=False, + repo=dict()) + if organization: + target = gh.get_organization(organization) + else: + target = gh.get_user() + + repo = None + try: + repo = target.get_repo(name=name) + result['repo'] = repo.raw_data + except UnknownObjectException: + if not check_mode: + repo = target.create_repo( + name=name, private=private, description=description) + result['repo'] = repo.raw_data + + result['changed'] = True + + changes = {} + if repo is None or repo.raw_data['private'] != private: + changes['private'] = private + if repo is None or repo.raw_data['description'] != description: + changes['description'] = description + + if changes: + if not check_mode: + repo.edit(**changes) + + result['repo'].update({ + 'private': repo._private.value if not check_mode else private, + 'description': repo._description.value if not check_mode else description, + }) + result['changed'] = True + + return result + + +def delete_repo(gh, name, organization=None, check_mode=False): + result = dict(changed=False) + if organization: + target = gh.get_organization(organization) + else: + target = gh.get_user() + try: + repo = target.get_repo(name=name) + if not check_mode: 
+ repo.delete() + result['changed'] = True + except UnknownObjectException: + pass + + return result + + +def run_module(params, check_mode=False): + gh = authenticate( + username=params['username'], password=params['password'], access_token=params['access_token']) + if params['state'] == "absent": + return delete_repo( + gh=gh, + name=params['name'], + organization=params['organization'], + check_mode=check_mode + ) + else: + return create_repo( + gh=gh, + name=params['name'], + organization=params['organization'], + private=params['private'], + description=params['description'], + check_mode=check_mode + ) + + +def main(): + module_args = dict( + username=dict(type='str', required=False, default=None), + password=dict(type='str', required=False, default=None, no_log=True), + access_token=dict(type='str', required=False, + default=None, no_log=True), + name=dict(type='str', required=True), + state=dict(type='str', required=False, default="present", + choices=["present", "absent"]), + organization=dict(type='str', required=False, default=None), + private=dict(type='bool', required=False, default=False), + description=dict(type='str', required=False, default=''), + ) + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_together=[('username', 'password')], + required_one_of=[('username', 'access_token')], + mutually_exclusive=[('username', 'access_token')] + ) + + if not HAS_GITHUB_PACKAGE: + module.fail_json(msg=missing_required_lib( + "PyGithub"), exception=GITHUB_IMP_ERR) + + try: + result = run_module(module.params, module.check_mode) + module.exit_json(**result) + except GithubException as e: + module.fail_json(msg="Github error. {0}".format(repr(e))) + except Exception as e: + module.fail_json(msg="Unexpected error. 
{0}".format(repr(e))) + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/source_control/github/__init__.py b/tests/unit/plugins/modules/source_control/github/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/plugins/modules/source_control/github/test_github_repo.py b/tests/unit/plugins/modules/source_control/github/test_github_repo.py new file mode 100644 index 0000000000..8d41c986b4 --- /dev/null +++ b/tests/unit/plugins/modules/source_control/github/test_github_repo.py @@ -0,0 +1,252 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import requests +import re +import json +import sys +from httmock import with_httmock, urlmatch, response +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.plugins.modules.source_control.github import github_repo + +GITHUB_MINIMUM_PYTHON_VERSION = (2, 7) + + +@urlmatch(netloc=r'.*') +def debug_mock(url, request): + print(request.original.__dict__) + + +@urlmatch(netloc=r'api\.github\.com:443$', path=r'/orgs/.*', method="get") +def get_orgs_mock(url, request): + match = re.search(r"api\.github\.com:443/orgs/(?P[^/]+)", request.url) + org = match.group("org") + + # https://docs.github.com/en/rest/reference/orgs#get-an-organization + headers = {'content-type': 'application/json'} + content = { + "login": org, + "url": "https://api.github.com:443/orgs/{0}".format(org) + } + content = json.dumps(content).encode("utf-8") + return response(200, content, headers, None, 5, request) + + +@urlmatch(netloc=r'api\.github\.com:443$', path=r'/user', method="get") +def get_user_mock(url, request): + # https://docs.github.com/en/rest/reference/users#get-the-authenticated-user + headers = {'content-type': 'application/json'} + content = { + "login": "octocat", + "url": "https://api.github.com:443/users/octocat" + } + content = json.dumps(content).encode("utf-8") + return 
response(200, content, headers, None, 5, request) + + +@urlmatch(netloc=r'api\.github\.com:443$', path=r'/repos/.*/.*', method="get") +def get_repo_notfound_mock(url, request): + return response(404, "{\"message\": \"Not Found\"}", "", "Not Found", 5, request) + + +@urlmatch(netloc=r'api\.github\.com:443$', path=r'/repos/.*/.*', method="get") +def get_repo_mock(url, request): + match = re.search( + r"api\.github\.com:443/repos/(?P[^/]+)/(?P[^/]+)", request.url) + org = match.group("org") + repo = match.group("repo") + + # https://docs.github.com/en/rest/reference/repos#get-a-repository + headers = {'content-type': 'application/json'} + content = { + "name": repo, + "full_name": "{0}/{1}".format(org, repo), + "url": "https://api.github.com:443/repos/{0}/{1}".format(org, repo), + "private": False, + "description": "This your first repo!", + "default_branch": "master", + "allow_rebase_merge": True + } + content = json.dumps(content).encode("utf-8") + return response(200, content, headers, None, 5, request) + + +@urlmatch(netloc=r'api\.github\.com:443$', path=r'/orgs/.*/repos', method="post") +def create_new_org_repo_mock(url, request): + match = re.search( + r"api\.github\.com:443/orgs/(?P[^/]+)/repos", request.url) + org = match.group("org") + repo = json.loads(request.body) + + headers = {'content-type': 'application/json'} + # https://docs.github.com/en/rest/reference/repos#create-an-organization-repository + content = { + "name": repo['name'], + "full_name": "{0}/{1}".format(org, repo['name']), + "private": repo['private'], + "description": repo['description'] + } + content = json.dumps(content).encode("utf-8") + return response(201, content, headers, None, 5, request) + + +@urlmatch(netloc=r'api\.github\.com:443$', path=r'/user/repos', method="post") +def create_new_user_repo_mock(url, request): + repo = json.loads(request.body) + + headers = {'content-type': 'application/json'} + # 
https://docs.github.com/en/rest/reference/repos#create-a-repository-for-the-authenticated-user + content = { + "name": repo['name'], + "full_name": "{0}/{1}".format("octocat", repo['name']), + "private": repo['private'], + "description": repo['description'] + } + content = json.dumps(content).encode("utf-8") + return response(201, content, headers, None, 5, request) + + +@urlmatch(netloc=r'api\.github\.com:443$', path=r'/repos/.*/.*', method="patch") +def patch_repo_mock(url, request): + match = re.search( + r"api\.github\.com:443/repos/(?P[^/]+)/(?P[^/]+)", request.url) + org = match.group("org") + repo = match.group("repo") + + body = json.loads(request.body) + headers = {'content-type': 'application/json'} + # https://docs.github.com/en/rest/reference/repos#update-a-repository + content = { + "name": repo, + "full_name": "{0}/{1}".format(org, repo), + "url": "https://api.github.com:443/repos/{0}/{1}".format(org, repo), + "private": body['private'], + "description": body['description'], + "default_branch": "master", + "allow_rebase_merge": True + } + content = json.dumps(content).encode("utf-8") + return response(200, content, headers, None, 5, request) + + +@urlmatch(netloc=r'api\.github\.com:443$', path=r'/repos/.*/.*', method="delete") +def delete_repo_mock(url, request): + # https://docs.github.com/en/rest/reference/repos#delete-a-repository + return response(204, None, None, None, 5, request) + + +@urlmatch(netloc=r'api\.github\.com:443$', path=r'/repos/.*/.*', method="delete") +def delete_repo_notfound_mock(url, request): + # https://docs.github.com/en/rest/reference/repos#delete-a-repository + return response(404, "{\"message\": \"Not Found\"}", "", "Not Found", 5, request) + + +class TestGithubRepo(unittest.TestCase): + def setUp(self): + if sys.version_info < GITHUB_MINIMUM_PYTHON_VERSION: + self.skipTest("Python %s+ is needed for PyGithub" % + ",".join(map(str, GITHUB_MINIMUM_PYTHON_VERSION))) + + @with_httmock(get_orgs_mock) + 
@with_httmock(get_repo_notfound_mock) + @with_httmock(create_new_org_repo_mock) + def test_create_new_org_repo(self): + result = github_repo.run_module({ + 'username': None, + 'password': None, + "access_token": "mytoken", + "organization": "MyOrganization", + "name": "myrepo", + "description": "Just for fun", + "private": False, + "state": "present" + }) + + self.assertEqual(result['changed'], True) + self.assertEqual(result['repo']['private'], False) + + @with_httmock(get_user_mock) + @with_httmock(get_repo_notfound_mock) + @with_httmock(create_new_user_repo_mock) + def test_create_new_user_repo(self): + result = github_repo.run_module({ + 'username': None, + 'password': None, + "access_token": "mytoken", + "organization": None, + "name": "myrepo", + "description": "Just for fun", + "private": True, + "state": "present" + }) + self.assertEqual(result['changed'], True) + self.assertEqual(result['repo']['private'], True) + + @with_httmock(get_orgs_mock) + @with_httmock(get_repo_mock) + @with_httmock(patch_repo_mock) + def test_patch_existing_org_repo(self): + result = github_repo.run_module({ + 'username': None, + 'password': None, + "access_token": "mytoken", + "organization": "MyOrganization", + "name": "myrepo", + "description": "Just for fun", + "private": True, + "state": "present" + }) + self.assertEqual(result['changed'], True) + self.assertEqual(result['repo']['private'], True) + + @with_httmock(get_orgs_mock) + @with_httmock(get_repo_mock) + @with_httmock(delete_repo_mock) + def test_delete_org_repo(self): + result = github_repo.run_module({ + 'username': None, + 'password': None, + "access_token": "mytoken", + "organization": "MyOrganization", + "name": "myrepo", + "description": "Just for fun", + "private": False, + "state": "absent" + }) + self.assertEqual(result['changed'], True) + + @with_httmock(get_user_mock) + @with_httmock(get_repo_mock) + @with_httmock(delete_repo_mock) + def test_delete_user_repo(self): + result = github_repo.run_module({ + 
'username': None, + 'password': None, + "access_token": "mytoken", + "organization": None, + "name": "myrepo", + "description": "Just for fun", + "private": False, + "state": "absent" + }) + self.assertEqual(result['changed'], True) + + @with_httmock(get_orgs_mock) + @with_httmock(get_repo_notfound_mock) + @with_httmock(delete_repo_notfound_mock) + def test_delete_org_repo_notfound(self): + result = github_repo.run_module({ + 'username': None, + 'password': None, + "access_token": "mytoken", + "organization": "MyOrganization", + "name": "myrepo", + "description": "Just for fun", + "private": True, + "state": "absent" + }) + self.assertEqual(result['changed'], False) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt index 968820807d..1d082cffb8 100644 --- a/tests/unit/requirements.txt +++ b/tests/unit/requirements.txt @@ -11,8 +11,9 @@ redis linode-python # APIv3 linode_api4 ; python_version > '2.6' # APIv4 -# requirement for the gitlab module +# requirement for the gitlab and github modules python-gitlab +PyGithub httmock # requirement for maven_artifact module From ff4e4c055c35071bc005c834b048fa091908df9d Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 6 Mar 2021 14:05:56 +0100 Subject: [PATCH 0096/3093] Remove part of shippable config that's not needed for bot. 
--- shippable.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/shippable.yml b/shippable.yml index 9961851e70..bb907d21b1 100644 --- a/shippable.yml +++ b/shippable.yml @@ -12,7 +12,6 @@ matrix: - env: T=devel/sanity/2 - env: T=devel/sanity/3 - env: T=devel/sanity/4 - - env: T=devel/sanity/extra - env: T=2.10/sanity/1 - env: T=2.10/sanity/2 From a013e69d67ed3bf96ecc7bcbe1e8d087424d993f Mon Sep 17 00:00:00 2001 From: phospi Date: Mon, 8 Mar 2021 06:56:34 +0100 Subject: [PATCH 0097/3093] Extending manageiq modules with parameter resource_id (#719) * Extending modules with resource_id * Added documentation * Fixed syntax Changed resource_type back to required true Added description identifier * Added changelog fragment. * fixed syntax * Improved changelog fragment content. * Updated description * Changed if statement * Changed changelog fragement filename * version bump * removed duplicate type * Apply suggestions from code review * Update plugins/modules/remote_management/manageiq/manageiq_tags.py Co-authored-by: Felix Fontein --- .../fragments/719-manageiq-resource_id.yml | 2 ++ .../manageiq/manageiq_policies.py | 21 ++++++++--- .../manageiq/manageiq_tags.py | 36 ++++++++++++++++--- 3 files changed, 49 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/719-manageiq-resource_id.yml diff --git a/changelogs/fragments/719-manageiq-resource_id.yml b/changelogs/fragments/719-manageiq-resource_id.yml new file mode 100644 index 0000000000..bbeef5ff82 --- /dev/null +++ b/changelogs/fragments/719-manageiq-resource_id.yml @@ -0,0 +1,2 @@ +minor_changes: + - manageiq_tags and manageiq_policies - added new parameter ``resource_id``. This parameter can be used instead of parameter ``resource_name`` (https://github.com/ansible-collections/community.general/pull/719). 
\ No newline at end of file diff --git a/plugins/modules/remote_management/manageiq/manageiq_policies.py b/plugins/modules/remote_management/manageiq/manageiq_policies.py index 600c0bfff6..8bbe05bc5e 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_policies.py +++ b/plugins/modules/remote_management/manageiq/manageiq_policies.py @@ -37,7 +37,7 @@ options: resource_type: type: str description: - - the type of the resource to which the profile should be [un]assigned + - The type of the resource to which the profile should be [un]assigned. required: true choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service', 'service template', @@ -45,8 +45,14 @@ options: resource_name: type: str description: - - the name of the resource to which the profile should be [un]assigned - required: true + - The name of the resource to which the profile should be [un]assigned. + - Must be specified if I(resource_id) is not set. Both options are mutually exclusive. + resource_id: + type: int + description: + - The ID of the resource to which the profile should be [un]assigned. + - Must be specified if I(resource_name) is not set. Both options are mutually exclusive. 
+ version_added: 2.2.0 ''' EXAMPLES = ''' @@ -296,7 +302,8 @@ def main(): actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'} argument_spec = dict( policy_profiles=dict(type='list'), - resource_name=dict(required=True, type='str'), + resource_id=dict(required=False, type='int'), + resource_name=dict(required=False, type='str'), resource_type=dict(required=True, type='str', choices=list(manageiq_entities().keys())), state=dict(required=False, type='str', @@ -307,6 +314,8 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, + mutually_exclusive=[["resource_id", "resource_name"]], + required_one_of=[["resource_id", "resource_name"]], required_if=[ ('state', 'present', ['policy_profiles']), ('state', 'absent', ['policy_profiles']) @@ -314,6 +323,7 @@ def main(): ) policy_profiles = module.params['policy_profiles'] + resource_id = module.params['resource_id'] resource_type_key = module.params['resource_type'] resource_name = module.params['resource_name'] state = module.params['state'] @@ -325,7 +335,8 @@ def main(): manageiq = ManageIQ(module) # query resource id, fail if resource does not exist - resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id'] + if resource_id is None: + resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id'] manageiq_policies = ManageIQPolicies(manageiq, resource_type, resource_id) diff --git a/plugins/modules/remote_management/manageiq/manageiq_tags.py b/plugins/modules/remote_management/manageiq/manageiq_tags.py index 68de232499..d1fa900079 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_tags.py +++ b/plugins/modules/remote_management/manageiq/manageiq_tags.py @@ -37,7 +37,7 @@ options: resource_type: type: str description: - - the relevant resource type in manageiq + - The relevant resource type in manageiq. 
required: true choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service', 'service template', @@ -45,8 +45,14 @@ options: resource_name: type: str description: - - the relevant resource name in manageiq - required: true + - The name of the resource at which tags will be controlled. + - Must be specified if I(resource_id) is not set. Both options are mutually exclusive. + resource_id: + description: + - The ID of the resource at which tags will be controlled. + - Must be specified if I(resource_name) is not set. Both options are mutually exclusive. + type: int + version_added: 2.2.0 ''' EXAMPLES = ''' @@ -65,6 +71,21 @@ EXAMPLES = ''' password: 'smartvm' validate_certs: False +- name: Create new tags for a provider in ManageIQ + community.general.manageiq_tags: + resource_id: 23000000790497 + resource_type: 'provider' + tags: + - category: environment + name: prod + - category: owner + name: prod_ops + manageiq_connection: + url: 'http://127.0.0.1:3000' + username: 'admin' + password: 'smartvm' + validate_certs: False + - name: Remove tags for a provider in ManageIQ community.general.manageiq_tags: state: absent @@ -241,7 +262,8 @@ def main(): actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'} argument_spec = dict( tags=dict(type='list'), - resource_name=dict(required=True, type='str'), + resource_id=dict(required=False, type='int'), + resource_name=dict(required=False, type='str'), resource_type=dict(required=True, type='str', choices=list(manageiq_entities().keys())), state=dict(required=False, type='str', @@ -252,6 +274,8 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, + mutually_exclusive=[["resource_id", "resource_name"]], + required_one_of=[["resource_id", "resource_name"]], required_if=[ ('state', 'present', ['tags']), ('state', 'absent', ['tags']) @@ -259,6 +283,7 @@ def main(): ) tags = module.params['tags'] + resource_id = module.params['resource_id'] 
resource_type_key = module.params['resource_type'] resource_name = module.params['resource_name'] state = module.params['state'] @@ -270,7 +295,8 @@ def main(): manageiq = ManageIQ(module) # query resource id, fail if resource does not exist - resource_id = query_resource_id(manageiq, resource_type, resource_name) + if resource_id is None: + resource_id = query_resource_id(manageiq, resource_type, resource_name) manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id) From 088743749b590d2886b351767830d931a3490fc2 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 8 Mar 2021 18:57:40 +1300 Subject: [PATCH 0098/3093] ini_file - allows adding empty string as a value (#1972) * Added integration test and fixed bug * added changelog fragment * Update changelogs/fragments/1972-ini_file-empty-str-value.yml Co-authored-by: Felix Fontein * Update tests/integration/targets/ini_file/tasks/main.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../1972-ini_file-empty-str-value.yml | 2 ++ plugins/modules/files/ini_file.py | 10 +++--- .../targets/ini_file/tasks/main.yml | 36 +++++++++++++++++++ 3 files changed, 42 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/1972-ini_file-empty-str-value.yml diff --git a/changelogs/fragments/1972-ini_file-empty-str-value.yml b/changelogs/fragments/1972-ini_file-empty-str-value.yml new file mode 100644 index 0000000000..7beba5ac4c --- /dev/null +++ b/changelogs/fragments/1972-ini_file-empty-str-value.yml @@ -0,0 +1,2 @@ +bugfixes: + - ini_file - allows an empty string as a value for an option (https://github.com/ansible-collections/community.general/pull/1972). 
diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py index e476c6e8d6..ac4c6d0cf3 100644 --- a/plugins/modules/files/ini_file.py +++ b/plugins/modules/files/ini_file.py @@ -114,9 +114,7 @@ from ansible.module_utils.basic import AnsibleModule def match_opt(option, line): option = re.escape(option) - return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) \ - or re.match('#( |\t)*%s( |\t)*(=|$)' % option, line) \ - or re.match(';( |\t)*%s( |\t)*(=|$)' % option, line) + return re.match('[#;]?( |\t)*%s( |\t)*(=|$)' % option, line) def match_active_opt(option, line): @@ -251,9 +249,9 @@ def do_ini(module, filename, section=None, option=None, value=None, if not within_section and state == 'present': ini_lines.append('[%s]\n' % section) msg = 'section and option added' - if option and value: + if option and value is not None: ini_lines.append(assignment_format % (option, value)) - elif option and not value and allow_no_value: + elif option and value is None and allow_no_value: ini_lines.append('%s\n' % option) else: msg = 'only section added' @@ -312,7 +310,7 @@ def main(): allow_no_value = module.params['allow_no_value'] create = module.params['create'] - if state == 'present' and not allow_no_value and not value: + if state == 'present' and not allow_no_value and value is None: module.fail_json("Parameter 'value' must not be empty if state=present and allow_no_value=False") (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value) diff --git a/tests/integration/targets/ini_file/tasks/main.yml b/tests/integration/targets/ini_file/tasks/main.yml index 83b850809a..2e84147c72 100644 --- a/tests/integration/targets/ini_file/tasks/main.yml +++ b/tests/integration/targets/ini_file/tasks/main.yml @@ -444,3 +444,39 @@ assert: that: - content14 == expected14 + +- name: Check add option with empty string value + block: + - name: Remove drinks + ini_file: + path: "{{ 
output_file }}" + section: drinks + state: absent + - name: Remove tea + ini_file: + path: "{{ output_file }}" + section: + option: like + value: tea + state: absent + - name: Test with empty string + ini_file: + path: "{{ output_file }}" + section: extensions + option: evolve + value: "" + +- name: read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: set expected content and get current ini file content + set_fact: + expected15: "\n[extensions]\nevolve = \n" + content15: "{{ output_content.content | b64decode }}" +- debug: var=content15 +- name: Verify content of ini file is as expected + assert: + that: + - content15 == expected15 From 2906591c08031b090e4210c56a0c76eff8c7cced Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 8 Mar 2021 20:35:09 +1300 Subject: [PATCH 0099/3093] Tidy up sanity checks ignore lines modules (batch 7) (#1970) * fixed validation-modules for plugins/modules/cloud/heroku/heroku_collaborator.py * fixed validation-modules for plugins/modules/cloud/linode/linode_v4.py * fixed validation-modules for plugins/modules/remote_management/manageiq/manageiq_provider.py * fixed validation-modules for plugins/modules/remote_management/manageiq/manageiq_policies.py * fixed validation-modules for plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py * fixed validation-modules for plugins/modules/remote_management/manageiq/manageiq_tags.py * fixed validation-modules for plugins/modules/cloud/opennebula/one_host.py * fixed validation-modules for plugins/modules/cloud/opennebula/one_image_info.py * fixed validation-modules for plugins/modules/cloud/opennebula/one_vm.py * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_lb.py * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_compute.py * fixed validation-modules for plugins/modules/remote_management/oneview/oneview_network_set_info.py * fixed 
validation-modules for plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py * fixed validation-modules for plugins/modules/remote_management/oneview/oneview_datacenter_info.py * fixed validation-modules for plugins/modules/remote_management/oneview/oneview_enclosure_info.py * Tidy up sanity checks ignore lines modules (batch 7) * added changelog fragment * Missed a couple of lines in ingnore-2.11.txt * fixed validation-modules for plugins/modules/cloud/packet/packet_volume_attachment.py * Adjusted ignore files and changelog for packet_volume_attachment.py * Rolled back ignore line for linode module * Update plugins/modules/cloud/opennebula/one_image_info.py Co-authored-by: Felix Fontein * fixes from the PR Co-authored-by: Felix Fontein --- changelogs/fragments/1970-valmod-batch7.yml | 18 ++++++++++ .../cloud/heroku/heroku_collaborator.py | 3 +- plugins/modules/cloud/linode/linode_v4.py | 6 ++-- plugins/modules/cloud/opennebula/one_host.py | 3 +- .../cloud/opennebula/one_image_info.py | 11 +++--- plugins/modules/cloud/opennebula/one_vm.py | 15 +++++--- .../cloud/packet/packet_volume_attachment.py | 1 - .../cloud/scaleway/scaleway_compute.py | 3 +- plugins/modules/cloud/scaleway/scaleway_lb.py | 3 +- .../manageiq/manageiq_alert_profiles.py | 3 +- .../manageiq/manageiq_policies.py | 3 +- .../manageiq/manageiq_provider.py | 2 +- .../manageiq/manageiq_tags.py | 3 +- .../oneview/oneview_datacenter_info.py | 4 ++- .../oneview/oneview_enclosure_info.py | 8 ++++- .../oneview/oneview_ethernet_network_info.py | 4 ++- .../oneview/oneview_network_set_info.py | 4 ++- tests/sanity/ignore-2.10.txt | 34 +++++-------------- tests/sanity/ignore-2.11.txt | 24 +++---------- tests/sanity/ignore-2.9.txt | 8 ++--- 20 files changed, 85 insertions(+), 75 deletions(-) create mode 100644 changelogs/fragments/1970-valmod-batch7.yml diff --git a/changelogs/fragments/1970-valmod-batch7.yml b/changelogs/fragments/1970-valmod-batch7.yml new file mode 100644 index 
0000000000..cd577d4578 --- /dev/null +++ b/changelogs/fragments/1970-valmod-batch7.yml @@ -0,0 +1,18 @@ +minor_changes: + - heroku_collaborator - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - linode_v4 - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - one_host - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - one_image_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - one_vm - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - scaleway_compute - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - scaleway_lb - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - manageiq_alert_profiles - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - manageiq_policies - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - manageiq_tags - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - oneview_datacenter_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - oneview_enclosure_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). + - oneview_ethernet_network_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). 
+ - oneview_network_set_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). +bugfixes: + - manageiq_provider - wrapped ``dict.keys()`` with ``list`` for use in ``choices`` setting (https://github.com/ansible-collections/community.general/pull/1970). + - packet_volume_attachment - removed extraneous ``print`` call - old debug? (https://github.com/ansible-collections/community.general/pull/1970). diff --git a/plugins/modules/cloud/heroku/heroku_collaborator.py b/plugins/modules/cloud/heroku/heroku_collaborator.py index 276b5b12be..a326894dce 100644 --- a/plugins/modules/cloud/heroku/heroku_collaborator.py +++ b/plugins/modules/cloud/heroku/heroku_collaborator.py @@ -26,6 +26,7 @@ options: - Heroku API key apps: type: list + elements: str description: - List of Heroku App names required: true @@ -109,7 +110,7 @@ def main(): argument_spec = HerokuHelper.heroku_argument_spec() argument_spec.update( user=dict(required=True, type='str'), - apps=dict(required=True, type='list'), + apps=dict(required=True, type='list', elements='str'), suppress_invitation=dict(default=False, type='bool'), state=dict(default='present', type='str', choices=['present', 'absent']), ) diff --git a/plugins/modules/cloud/linode/linode_v4.py b/plugins/modules/cloud/linode/linode_v4.py index 17a697b320..809621cfe1 100644 --- a/plugins/modules/cloud/linode/linode_v4.py +++ b/plugins/modules/cloud/linode/linode_v4.py @@ -63,6 +63,7 @@ options: U(https://www.linode.com/docs/api/tags/). required: false type: list + elements: str root_pass: description: - The password for the root user. If not specified, one will be @@ -75,6 +76,7 @@ options: - A list of SSH public key parts to deploy for the root user. required: false type: list + elements: str state: description: - The desired instance state. 
@@ -240,12 +242,12 @@ def initialise_module(): no_log=True, fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']), ), - authorized_keys=dict(type='list', required=False), + authorized_keys=dict(type='list', elements='str', required=False), group=dict(type='str', required=False), image=dict(type='str', required=False), region=dict(type='str', required=False), root_pass=dict(type='str', required=False, no_log=True), - tags=dict(type='list', required=False), + tags=dict(type='list', elements='str', required=False), type=dict(type='str', required=False), stackscript_id=dict(type='int', required=False), stackscript_data=dict(type='dict', required=False), diff --git a/plugins/modules/cloud/opennebula/one_host.py b/plugins/modules/cloud/opennebula/one_host.py index efe1ce2267..714d2d86a9 100644 --- a/plugins/modules/cloud/opennebula/one_host.py +++ b/plugins/modules/cloud/opennebula/one_host.py @@ -66,6 +66,7 @@ options: description: - The labels for this host. type: list + elements: str template: description: - The template or attribute changes to merge into the host template. @@ -130,7 +131,7 @@ class HostModule(OpenNebulaModule): vmm_mad_name=dict(type='str', default="kvm"), cluster_id=dict(type='int', default=0), cluster_name=dict(type='str'), - labels=dict(type='list'), + labels=dict(type='list', elements='str'), template=dict(type='dict', aliases=['attributes']), ) diff --git a/plugins/modules/cloud/opennebula/one_image_info.py b/plugins/modules/cloud/opennebula/one_image_info.py index 0d2bd07070..77c280d07b 100644 --- a/plugins/modules/cloud/opennebula/one_image_info.py +++ b/plugins/modules/cloud/opennebula/one_image_info.py @@ -56,6 +56,7 @@ options: - A list of images ids whose facts you want to gather. aliases: ['id'] type: list + elements: str name: description: - A C(name) of the image whose facts will be gathered. 
@@ -253,7 +254,7 @@ def main(): "api_url": {"required": False, "type": "str"}, "api_username": {"required": False, "type": "str"}, "api_password": {"required": False, "type": "str", "no_log": True}, - "ids": {"required": False, "aliases": ['id'], "type": "list"}, + "ids": {"required": False, "aliases": ['id'], "type": "list", "elements": "str"}, "name": {"required": False, "type": "str"}, } @@ -273,9 +274,6 @@ def main(): name = params.get('name') client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) - result = {'images': []} - images = [] - if ids: images = get_images_by_ids(module, client, ids) elif name: @@ -283,8 +281,9 @@ def main(): else: images = get_all_images(client).IMAGE - for image in images: - result['images'].append(get_image_info(image)) + result = { + 'images': [get_image_info(image) for image in images], + } module.exit_json(**result) diff --git a/plugins/modules/cloud/opennebula/one_vm.py b/plugins/modules/cloud/opennebula/one_vm.py index 286514bd13..425a1c464a 100644 --- a/plugins/modules/cloud/opennebula/one_vm.py +++ b/plugins/modules/cloud/opennebula/one_vm.py @@ -72,6 +72,7 @@ options: - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff) aliases: ['ids'] type: list + elements: int state: description: - C(present) - create instances from a template specified with C(template_id)/C(template_name). @@ -120,6 +121,7 @@ options: - C(state) of instances with these labels. default: [] type: list + elements: str count_attributes: description: - A dictionary of key/value attributes that can only be used with @@ -134,6 +136,7 @@ options: - This can be expressed in multiple ways and is shown in the EXAMPLES - section. type: list + elements: str count: description: - Number of instances to launch @@ -168,6 +171,7 @@ options: - NOTE':' If The Template hats Multiple Disks the Order of the Sizes is - matched against the order specified in C(template_id)/C(template_name). 
type: list + elements: str cpu: description: - Percentage of CPU divided by 100 required for the new instance. Half a @@ -182,6 +186,7 @@ options: - A list of dictionaries with network parameters. See examples for more details. default: [] type: list + elements: dict disk_saveas: description: - Creates an image from a VM disk. @@ -1349,7 +1354,7 @@ def main(): "api_url": {"required": False, "type": "str"}, "api_username": {"required": False, "type": "str"}, "api_password": {"required": False, "type": "str", "no_log": True}, - "instance_ids": {"required": False, "aliases": ['ids'], "type": "list"}, + "instance_ids": {"required": False, "aliases": ['ids'], "type": "list", "elements": "int"}, "template_name": {"required": False, "type": "str"}, "template_id": {"required": False, "type": "int"}, "vm_start_on_hold": {"default": False, "type": "bool"}, @@ -1367,16 +1372,16 @@ def main(): "memory": {"required": False, "type": "str"}, "cpu": {"required": False, "type": "float"}, "vcpu": {"required": False, "type": "int"}, - "disk_size": {"required": False, "type": "list"}, + "disk_size": {"required": False, "type": "list", "elements": "str"}, "datastore_name": {"required": False, "type": "str"}, "datastore_id": {"required": False, "type": "int"}, - "networks": {"default": [], "type": "list"}, + "networks": {"default": [], "type": "list", "elements": "dict"}, "count": {"default": 1, "type": "int"}, "exact_count": {"required": False, "type": "int"}, "attributes": {"default": {}, "type": "dict"}, "count_attributes": {"required": False, "type": "dict"}, - "labels": {"default": [], "type": "list"}, - "count_labels": {"required": False, "type": "list"}, + "labels": {"default": [], "type": "list", "elements": "str"}, + "count_labels": {"required": False, "type": "list", "elements": "str"}, "disk_saveas": {"type": "dict"}, "persistent": {"default": False, "type": "bool"} } diff --git a/plugins/modules/cloud/packet/packet_volume_attachment.py 
b/plugins/modules/cloud/packet/packet_volume_attachment.py index a1a38bb42c..7cda16ce86 100644 --- a/plugins/modules/cloud/packet/packet_volume_attachment.py +++ b/plugins/modules/cloud/packet/packet_volume_attachment.py @@ -181,7 +181,6 @@ def do_detach(packet_conn, vol, dev_id=None): return (dev_id is None) or (a['device']['id'] == dev_id) for a in vol['attachments']: if dev_match(a): - print(a['href']) packet_conn.call_api(a['href'], type="DELETE") diff --git a/plugins/modules/cloud/scaleway/scaleway_compute.py b/plugins/modules/cloud/scaleway/scaleway_compute.py index 8df9a5e6ff..421157a425 100644 --- a/plugins/modules/cloud/scaleway/scaleway_compute.py +++ b/plugins/modules/cloud/scaleway/scaleway_compute.py @@ -70,6 +70,7 @@ options: tags: type: list + elements: str description: - List of tags to apply to the instance (5 max) required: false @@ -652,7 +653,7 @@ def main(): enable_ipv6=dict(default=False, type="bool"), public_ip=dict(default="absent"), state=dict(choices=list(state_strategy.keys()), default='present'), - tags=dict(type="list", default=[]), + tags=dict(type="list", elements="str", default=[]), organization=dict(required=True), wait=dict(type="bool", default=False), wait_timeout=dict(type="int", default=300), diff --git a/plugins/modules/cloud/scaleway/scaleway_lb.py b/plugins/modules/cloud/scaleway/scaleway_lb.py index a9358188dd..f19c0a3c43 100644 --- a/plugins/modules/cloud/scaleway/scaleway_lb.py +++ b/plugins/modules/cloud/scaleway/scaleway_lb.py @@ -63,6 +63,7 @@ options: tags: type: list + elements: str description: - List of tags to apply to the load-balancer @@ -338,7 +339,7 @@ def main(): description=dict(required=True), region=dict(required=True, choices=SCALEWAY_REGIONS), state=dict(choices=list(state_strategy.keys()), default='present'), - tags=dict(type="list", default=[]), + tags=dict(type="list", elements="str", default=[]), organization_id=dict(required=True), wait=dict(type="bool", default=False), wait_timeout=dict(type="int", 
default=300), diff --git a/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py b/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py index d40a8ca01f..d76c334259 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py +++ b/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py @@ -40,6 +40,7 @@ options: 'ExtManagementSystem', 'MiddlewareServer'] alerts: type: list + elements: str description: - List of alert descriptions to assign to this profile. - Required if state is "present" @@ -257,7 +258,7 @@ def main(): 'EmsCluster', 'ExtManagementSystem', 'MiddlewareServer']), - alerts=dict(type='list'), + alerts=dict(type='list', elements='str'), notes=dict(type='str'), state=dict(default='present', choices=['present', 'absent']), ) diff --git a/plugins/modules/remote_management/manageiq/manageiq_policies.py b/plugins/modules/remote_management/manageiq/manageiq_policies.py index 8bbe05bc5e..567833d7cc 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_policies.py +++ b/plugins/modules/remote_management/manageiq/manageiq_policies.py @@ -31,6 +31,7 @@ options: default: 'present' policy_profiles: type: list + elements: dict description: - list of dictionaries, each includes the policy_profile 'name' key. - required if state is present or absent. 
@@ -301,7 +302,7 @@ class ManageIQPolicies(object): def main(): actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'} argument_spec = dict( - policy_profiles=dict(type='list'), + policy_profiles=dict(type='list', elements='dict'), resource_id=dict(required=False, type='int'), resource_name=dict(required=False, type='str'), resource_type=dict(required=True, type='str', diff --git a/plugins/modules/remote_management/manageiq/manageiq_provider.py b/plugins/modules/remote_management/manageiq/manageiq_provider.py index 7f55b55be1..8a3d96c745 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_provider.py +++ b/plugins/modules/remote_management/manageiq/manageiq_provider.py @@ -829,7 +829,7 @@ def main(): azure_tenant_id=dict(aliases=['keystone_v3_domain_id']), tenant_mapping_enabled=dict(default=False, type='bool'), api_version=dict(choices=['v2', 'v3']), - type=dict(choices=supported_providers().keys()), + type=dict(choices=list(supported_providers().keys())), ) # add the manageiq connection arguments to the arguments argument_spec.update(manageiq_argument_spec()) diff --git a/plugins/modules/remote_management/manageiq/manageiq_tags.py b/plugins/modules/remote_management/manageiq/manageiq_tags.py index d1fa900079..83ab60ac93 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_tags.py +++ b/plugins/modules/remote_management/manageiq/manageiq_tags.py @@ -31,6 +31,7 @@ options: default: 'present' tags: type: list + elements: dict description: - tags - list of dictionaries, each includes 'name' and 'category' keys. - required if state is present or absent. 
@@ -261,7 +262,7 @@ class ManageIQTags(object): def main(): actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'} argument_spec = dict( - tags=dict(type='list'), + tags=dict(type='list', elements='dict'), resource_id=dict(required=False, type='int'), resource_name=dict(required=False, type='str'), resource_type=dict(required=True, type='str', diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py index 19aa7a2708..a057503440 100644 --- a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py +++ b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py @@ -27,6 +27,8 @@ options: options: description: - "Retrieve additional information. Options available: 'visualContent'." + type: list + elements: str extends_documentation_fragment: - community.general.oneview @@ -108,7 +110,7 @@ from ansible_collections.community.general.plugins.module_utils.oneview import O class DatacenterInfoModule(OneViewModuleBase): argument_spec = dict( name=dict(type='str'), - options=dict(type='list'), + options=dict(type='list', elements='str'), params=dict(type='dict') ) diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py index 7963de74fa..1f2688d610 100644 --- a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py +++ b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py @@ -29,6 +29,8 @@ options: - "List with options to gather additional information about an Enclosure and related resources. Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization), you can provide specific parameters." 
+ type: list + elements: raw extends_documentation_fragment: - community.general.oneview @@ -153,7 +155,11 @@ from ansible_collections.community.general.plugins.module_utils.oneview import O class EnclosureInfoModule(OneViewModuleBase): - argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict')) + argument_spec = dict( + name=dict(type='str'), + options=dict(type='list', elements='raw'), + params=dict(type='dict') + ) def __init__(self): super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec) diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py index b1790932c1..a609bf772a 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py @@ -27,6 +27,8 @@ options: description: - "List with options to gather additional information about an Ethernet Network and related resources. Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)." + type: list + elements: str extends_documentation_fragment: - community.general.oneview - community.general.oneview.factsparams @@ -106,7 +108,7 @@ from ansible_collections.community.general.plugins.module_utils.oneview import O class EthernetNetworkInfoModule(OneViewModuleBase): argument_spec = dict( name=dict(type='str'), - options=dict(type='list'), + options=dict(type='list', elements='str'), params=dict(type='dict') ) diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/plugins/modules/remote_management/oneview/oneview_network_set_info.py index 68c18db924..e88a190796 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set_info.py +++ b/plugins/modules/remote_management/oneview/oneview_network_set_info.py @@ -29,6 +29,8 @@ options: - "List with options to gather information about Network Set. 
Option allowed: C(withoutEthernet). The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks." + type: list + elements: str extends_documentation_fragment: - community.general.oneview @@ -127,7 +129,7 @@ from ansible_collections.community.general.plugins.module_utils.oneview import O class NetworkSetInfoModule(OneViewModuleBase): argument_spec = dict( name=dict(type='str'), - options=dict(type='list'), + options=dict(type='list', elements='str'), params=dict(type='dict'), ) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 59e250681c..ffd03919ac 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -3,11 +3,9 @@ plugins/module_utils/compat/ipaddress.py no-assert plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/heroku/heroku_collaborator.py validate-modules:parameter-list-no-elements plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter -plugins/modules/cloud/linode/linode_v4.py validate-modules:parameter-list-no-elements plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice @@ -15,9 +13,6 @@ plugins/modules/cloud/online/online_server_facts.py validate-modules:return-synt plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error 
-plugins/modules/cloud/opennebula/one_host.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/opennebula/one_image_info.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/opennebula/one_vm.py validate-modules:parameter-list-no-elements plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:parameter-list-no-elements plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:parameter-list-no-elements @@ -68,7 +63,6 @@ plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/packet/packet_volume_attachment.py pylint:ansible-bad-function plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type plugins/modules/cloud/rackspace/rax.py validate-modules:parameter-list-no-elements @@ -79,12 +73,10 @@ plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_scaling_group.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/scaleway/scaleway_compute.py validate-modules:parameter-list-no-elements plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error 
plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_lb.py validate-modules:parameter-list-no-elements plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:return-syntax-error @@ -107,11 +99,11 @@ plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-required-mismatch -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter +plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions +plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-required-mismatch # missing docs on suboptions +plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs # missing docs on suboptions +plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions +plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/clustering/consul/consul.py 
validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice @@ -137,25 +129,18 @@ plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py validate-modules:parameter-list-no-elements -plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:invalid-ansiblemodule-schema -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter -plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-list-no-elements +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions 
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:doc-missing-type @@ -176,7 +161,6 @@ plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_inf plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:doc-missing-type plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:undocumented-parameter 
-plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:parameter-type-not-in-doc diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index ee35b15aea..6e386e91d9 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -2,11 +2,9 @@ plugins/module_utils/compat/ipaddress.py no-assert plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/heroku/heroku_collaborator.py validate-modules:parameter-list-no-elements plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter -plugins/modules/cloud/linode/linode_v4.py validate-modules:parameter-list-no-elements plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice @@ -14,9 +12,6 @@ plugins/modules/cloud/online/online_server_facts.py validate-modules:return-synt plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error -plugins/modules/cloud/opennebula/one_host.py validate-modules:parameter-list-no-elements 
-plugins/modules/cloud/opennebula/one_image_info.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/opennebula/one_vm.py validate-modules:parameter-list-no-elements plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:parameter-list-no-elements plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:parameter-list-no-elements @@ -67,7 +62,6 @@ plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/packet/packet_volume_attachment.py pylint:ansible-bad-function plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type plugins/modules/cloud/rackspace/rax.py validate-modules:parameter-list-no-elements @@ -78,12 +72,10 @@ plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_scaling_group.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/scaleway/scaleway_compute.py validate-modules:parameter-list-no-elements plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error 
-plugins/modules/cloud/scaleway/scaleway_lb.py validate-modules:parameter-list-no-elements plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:return-syntax-error @@ -136,25 +128,18 @@ plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py validate-modules:parameter-list-no-elements -plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:invalid-ansiblemodule-schema -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter -plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-list-no-elements +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type # missing 
docs on suboptions +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:doc-missing-type @@ -175,7 +160,6 @@ plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_inf plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:doc-missing-type 
plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-list-no-elements plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:parameter-type-not-in-doc diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 200c7204ea..06e83d3535 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -160,10 +160,10 @@ plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions +plugins/modules/remote_management/manageiq/manageiq_provider.py 
validate-modules:parameter-type-not-in-doc # missing docs on suboptions +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-type-not-in-doc From 1ca9229c66d5b612f6425038c452741fc056fdb7 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 8 Mar 2021 12:40:13 +0100 Subject: [PATCH 0100/3093] Next expected release is 2.3.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 2440118c6f..5e03c0e8dc 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 2.2.0 +version: 2.3.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 36daa7c48e84cb92f4b908cc043bf0edbc95f162 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 9 Mar 2021 21:23:20 +0100 Subject: [PATCH 0101/3093] Remove deprecated features scheduled for removal in 3.0.0 (#1926) * Remove deprecated features. * Remove ignore.txt entries. 
* Update changelogs/fragments/remove-deprecated-features.yml Co-authored-by: Joe Adams Co-authored-by: Joe Adams --- .../fragments/remove-deprecated-features.yml | 16 ++++ plugins/module_utils/redfish_utils.py | 18 ++-- .../cloud/centurylink/clc_aa_policy.py | 5 -- plugins/modules/files/iso_extract.py | 5 +- .../modules/monitoring/airbrake_deployment.py | 88 +++++-------------- plugins/modules/monitoring/bigpanda.py | 6 +- .../monitoring/datadog/datadog_monitor.py | 9 +- plugins/modules/notification/cisco_webex.py | 5 +- plugins/modules/packaging/os/pulp_repo.py | 23 +---- plugins/modules/packaging/os/xbps.py | 7 -- .../redfish/idrac_redfish_config.py | 25 ------ .../redfish/redfish_config.py | 26 +----- plugins/modules/system/syspatch.py | 13 +-- .../web_infrastructure/django_manage.py | 14 +-- tests/sanity/ignore-2.10.txt | 3 - tests/sanity/ignore-2.11.txt | 3 - 16 files changed, 58 insertions(+), 208 deletions(-) create mode 100644 changelogs/fragments/remove-deprecated-features.yml diff --git a/changelogs/fragments/remove-deprecated-features.yml b/changelogs/fragments/remove-deprecated-features.yml new file mode 100644 index 0000000000..9d202e9cec --- /dev/null +++ b/changelogs/fragments/remove-deprecated-features.yml @@ -0,0 +1,16 @@ +removed_features: +- "airbrake_deployment - removed deprecated ``token`` parameter. Use ``project_id`` and ``project_key`` instead." +- "bigpanda - the alias ``message`` has been removed. Use ``deployment_message`` instead." +- "cisco_spark, cisco_webex - the alias ``message`` has been removed. Use ``msg`` instead." +- "clc_aa_policy - the ``wait`` parameter has been removed. It did not have any effect." +- "datadog_monitor - the alias ``message`` has been removed. Use ``notification_message`` instead." +- "django_manage - the parameter ``liveserver`` has been removed." +- "idrac_redfish_config - the parameters ``manager_attribute_name`` and ``manager_attribute_value`` have been removed. Use ``manager_attributes`` instead." 
+- "iso_extract - the alias ``thirsty`` has been removed. Use ``force`` instead." +- "redfish_config - the parameters ``bios_attribute_name`` and ``bios_attribute_value`` have been removed. Use ``bios_attributes`` instead." +- "syspatch - the ``apply`` parameter has been removed. This is the default mode, so simply removing it will not change the behavior." +- "xbps - the ``force`` parameter has been removed. It did not have any effect." +- "redfish modules - issuing a data modification command without specifying the ID of the target System, Chassis or Manager resource when there is more than one is no longer allowed. Use the ``resource_id`` option to specify the target ID." +- "pulp_repo - the alias ``ca_cert`` has been removed. Use ``feed_ca_cert`` instead." +- "pulp_repo - the ``feed_client_cert`` parameter no longer defaults to the value of the ``client_cert`` parameter." +- "pulp_repo - the ``feed_client_key`` parameter no longer defaults to the value of the ``client_key`` parameter." diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 01b1f9a29f..0fd421a3e2 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -19,11 +19,10 @@ PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json 'OData-Version': '4.0'} DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} -DEPRECATE_MSG = 'Issuing a data modification command without specifying the '\ - 'ID of the target %(resource)s resource when there is more '\ - 'than one %(resource)s will use the first one in the '\ - 'collection. Use the `resource_id` option to specify the '\ - 'target %(resource)s ID' +FAIL_MSG = 'Issuing a data modification command without specifying the '\ + 'ID of the target %(resource)s resource when there is more '\ + 'than one %(resource)s is no longer allowed. Use the `resource_id` '\ + 'option to specify the target %(resource)s ID.' 
class RedfishUtils(object): @@ -245,8 +244,7 @@ class RedfishUtils(object): 'ret': False, 'msg': "System resource %s not found" % self.resource_id} elif len(self.systems_uris) > 1: - self.module.deprecate(DEPRECATE_MSG % {'resource': 'System'}, - version='3.0.0', collection_name='community.general') # was Ansible 2.14 + self.module.fail_json(msg=FAIL_MSG % {'resource': 'System'}) return {'ret': True} def _find_updateservice_resource(self): @@ -296,8 +294,7 @@ class RedfishUtils(object): 'ret': False, 'msg': "Chassis resource %s not found" % self.resource_id} elif len(self.chassis_uris) > 1: - self.module.deprecate(DEPRECATE_MSG % {'resource': 'Chassis'}, - version='3.0.0', collection_name='community.general') # was Ansible 2.14 + self.module.fail_json(msg=FAIL_MSG % {'resource': 'Chassis'}) return {'ret': True} def _find_managers_resource(self): @@ -326,8 +323,7 @@ class RedfishUtils(object): 'ret': False, 'msg': "Manager resource %s not found" % self.resource_id} elif len(self.manager_uris) > 1: - self.module.deprecate(DEPRECATE_MSG % {'resource': 'Manager'}, - version='3.0.0', collection_name='community.general') # was Ansible 2.14 + self.module.fail_json(msg=FAIL_MSG % {'resource': 'Manager'}) return {'ret': True} def _get_all_action_info_values(self, action): diff --git a/plugins/modules/cloud/centurylink/clc_aa_policy.py b/plugins/modules/cloud/centurylink/clc_aa_policy.py index a275093773..88c27e20f5 100644 --- a/plugins/modules/cloud/centurylink/clc_aa_policy.py +++ b/plugins/modules/cloud/centurylink/clc_aa_policy.py @@ -30,10 +30,6 @@ options: required: False default: present choices: ['present','absent'] - wait: - description: - - This option does nothing and will be removed in community.general 3.0.0. 
- type: bool requirements: - python = 2.7 - requests >= 2.5.0 @@ -185,7 +181,6 @@ class ClcAntiAffinityPolicy: argument_spec = dict( name=dict(required=True), location=dict(required=True), - wait=dict(type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'), # was Ansible 2.14 state=dict(default='present', choices=['present', 'absent']), ) return argument_spec diff --git a/plugins/modules/files/iso_extract.py b/plugins/modules/files/iso_extract.py index b84db39756..18c283cbf7 100644 --- a/plugins/modules/files/iso_extract.py +++ b/plugins/modules/files/iso_extract.py @@ -52,10 +52,8 @@ options: description: - If C(yes), which will replace the remote file when contents are different than the source. - If C(no), the file will only be extracted and copied if the destination does not already exist. - - Alias C(thirsty) has been deprecated and will be removed in community.general 3.0.0. type: bool default: yes - aliases: [ thirsty ] executable: description: - The path to the C(7z) executable to use for extracting files from the ISO. 
@@ -101,8 +99,7 @@ def main(): image=dict(type='path', required=True, aliases=['path', 'src']), dest=dict(type='path', required=True), files=dict(type='list', elements='str', required=True), - force=dict(type='bool', default=True, aliases=['thirsty'], - deprecated_aliases=[dict(name='thirsty', version='3.0.0', collection_name='community.general')]), + force=dict(type='bool', default=True), executable=dict(type='path'), # No default on purpose ), supports_check_mode=True, diff --git a/plugins/modules/monitoring/airbrake_deployment.py b/plugins/modules/monitoring/airbrake_deployment.py index 3e7938bfba..a7d7710a0a 100644 --- a/plugins/modules/monitoring/airbrake_deployment.py +++ b/plugins/modules/monitoring/airbrake_deployment.py @@ -17,18 +17,17 @@ author: short_description: Notify airbrake about app deployments description: - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)). - - Parameter I(token) has been deprecated for community.general 0.2.0. Please remove entry. options: project_id: description: - Airbrake PROJECT_ID - required: false + required: true type: str version_added: '0.2.0' project_key: description: - Airbrake PROJECT_KEY. - required: false + required: true type: str version_added: '0.2.0' environment: @@ -70,11 +69,6 @@ options: required: false default: 'yes' type: bool - token: - description: - - This parameter (API token) has been deprecated in community.general 0.2.0. Please remove it from your tasks. 
- required: false - type: str requirements: [] ''' @@ -111,9 +105,8 @@ def main(): module = AnsibleModule( argument_spec=dict( - token=dict(required=False, no_log=True, type='str'), - project_id=dict(required=False, no_log=True, type='str'), - project_key=dict(required=False, no_log=True, type='str'), + project_id=dict(required=True, no_log=True, type='str'), + project_key=dict(required=True, no_log=True, type='str'), environment=dict(required=True, type='str'), user=dict(required=False, type='str'), repo=dict(required=False, type='str'), @@ -123,8 +116,6 @@ def main(): validate_certs=dict(default=True, type='bool'), ), supports_check_mode=True, - required_together=[('project_id', 'project_key')], - mutually_exclusive=[('project_id', 'token')], ) # Build list of params @@ -134,65 +125,32 @@ def main(): if module.check_mode: module.exit_json(changed=True) - if module.params["token"]: - # v2 API documented at https://airbrake.io/docs/legacy-xml-api/#tracking-deploys - if module.params["environment"]: - params["deploy[rails_env]"] = module.params["environment"] + # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4 + if module.params["environment"]: + params["environment"] = module.params["environment"] - if module.params["user"]: - params["deploy[local_username]"] = module.params["user"] + if module.params["user"]: + params["username"] = module.params["user"] - if module.params["repo"]: - params["deploy[scm_repository]"] = module.params["repo"] + if module.params["repo"]: + params["repository"] = module.params["repo"] - if module.params["revision"]: - params["deploy[scm_revision]"] = module.params["revision"] + if module.params["revision"]: + params["revision"] = module.params["revision"] - # version not supported in v2 API; omit + if module.params["version"]: + params["version"] = module.params["version"] - module.deprecate("Parameter 'token' is deprecated since community.general 0.2.0. 
Please remove " - "it and use 'project_id' and 'project_key' instead", - version='3.0.0', collection_name='community.general') # was Ansible 2.14 + # Build deploy url + url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"] + json_body = module.jsonify(params) - params["api_key"] = module.params["token"] + # Build header + headers = {'Content-Type': 'application/json'} - # Allow sending to Airbrake compliant v2 APIs - if module.params["url"] == 'https://api.airbrake.io/api/v4/projects/': - url = 'https://api.airbrake.io/deploys.txt' - else: - url = module.params["url"] - - # Send the data to airbrake - data = urlencode(params) - response, info = fetch_url(module, url, data=data) - - if module.params["project_id"] and module.params["project_key"]: - # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4 - if module.params["environment"]: - params["environment"] = module.params["environment"] - - if module.params["user"]: - params["username"] = module.params["user"] - - if module.params["repo"]: - params["repository"] = module.params["repo"] - - if module.params["revision"]: - params["revision"] = module.params["revision"] - - if module.params["version"]: - params["version"] = module.params["version"] - - # Build deploy url - url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"] - json_body = module.jsonify(params) - - # Build header - headers = {'Content-Type': 'application/json'} - - # Notify Airbrake of deploy - response, info = fetch_url(module, url, data=json_body, - headers=headers, method='POST') + # Notify Airbrake of deploy + response, info = fetch_url(module, url, data=json_body, + headers=headers, method='POST') if info['status'] == 200 or info['status'] == 201: module.exit_json(changed=True) diff --git a/plugins/modules/monitoring/bigpanda.py b/plugins/modules/monitoring/bigpanda.py index 8faec5d030..8392c19536 100644 --- 
a/plugins/modules/monitoring/bigpanda.py +++ b/plugins/modules/monitoring/bigpanda.py @@ -76,8 +76,6 @@ options: type: str description: - Message about the deployment. - - C(message) alias is deprecated in community.general 0.2.0, since it is used internally by Ansible Core Engine. - aliases: ['message'] version_added: '0.2.0' source_system: type: str @@ -148,9 +146,7 @@ def main(): env=dict(required=False), owner=dict(required=False), description=dict(required=False), - deployment_message=dict(required=False, aliases=['message'], - deprecated_aliases=[dict(name='message', version='3.0.0', - collection_name='community.general')]), # was Ansible 2.14 + deployment_message=dict(required=False), source_system=dict(required=False, default='ansible'), validate_certs=dict(default=True, type='bool'), url=dict(required=False, default='https://api.bigpanda.io'), diff --git a/plugins/modules/monitoring/datadog/datadog_monitor.py b/plugins/modules/monitoring/datadog/datadog_monitor.py index a737dd2085..8be71297f4 100644 --- a/plugins/modules/monitoring/datadog/datadog_monitor.py +++ b/plugins/modules/monitoring/datadog/datadog_monitor.py @@ -68,9 +68,7 @@ options: - A message to include with notifications for this monitor. - Email notifications can be sent to specific users by using the same '@username' notation as events. - Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'. - - C(message) alias is deprecated in community.general 0.2.0, since it is used internally by Ansible Core Engine. 
type: str - aliases: [ 'message' ] silenced: type: dict description: @@ -214,9 +212,7 @@ def main(): 'log alert', 'query alert', 'trace-analytics alert', 'rum alert']), name=dict(required=True), query=dict(), - notification_message=dict(no_log=True, aliases=['message'], - deprecated_aliases=[dict(name='message', version='3.0.0', - collection_name='community.general')]), # was Ansible 2.14 + notification_message=dict(no_log=True), silenced=dict(type='dict'), notify_no_data=dict(default=False, type='bool'), no_data_timeframe=dict(), @@ -239,9 +235,6 @@ def main(): if not HAS_DATADOG: module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR) - if 'message' in module.params: - module.fail_json(msg="'message' is reserved keyword, please change this parameter to 'notification_message'") - options = { 'api_key': module.params['api_key'], 'api_host': module.params['api_host'], diff --git a/plugins/modules/notification/cisco_webex.py b/plugins/modules/notification/cisco_webex.py index 4015c1852c..8c1361fb14 100644 --- a/plugins/modules/notification/cisco_webex.py +++ b/plugins/modules/notification/cisco_webex.py @@ -55,7 +55,6 @@ options: - The message you would like to send. 
required: yes type: str - aliases: ['message'] ''' EXAMPLES = """ @@ -174,9 +173,7 @@ def main(): recipient_id=dict(required=True, no_log=True), msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']), personal_token=dict(required=True, no_log=True, aliases=['token']), - msg=dict(required=True, aliases=['message'], - deprecated_aliases=[dict(name='message', version='3.0.0', - collection_name='community.general')]), # was Ansible 2.14 + msg=dict(required=True), ), supports_check_mode=True diff --git a/plugins/modules/packaging/os/pulp_repo.py b/plugins/modules/packaging/os/pulp_repo.py index 8dbc6b9ac4..23d0b02eb4 100644 --- a/plugins/modules/packaging/os/pulp_repo.py +++ b/plugins/modules/packaging/os/pulp_repo.py @@ -46,9 +46,8 @@ options: description: - CA certificate string used to validate the feed source SSL certificate. This can be the file content or the path to the file. - The ca_cert alias will be removed in community.general 3.0.0. type: str - aliases: [ importer_ssl_ca_cert, ca_cert ] + aliases: [ importer_ssl_ca_cert ] feed_client_cert: description: - Certificate used as the client certificate when synchronizing the @@ -57,8 +56,6 @@ options: certificate. The specified file may be the certificate itself or a single file containing both the certificate and private key. This can be the file content or the path to the file. - - If not specified the default value will come from client_cert. Which will - change in community.general 3.0.0. type: str aliases: [ importer_ssl_client_cert ] feed_client_key: @@ -66,8 +63,6 @@ options: - Private key to the certificate specified in I(importer_ssl_client_cert), assuming it is not included in the certificate file itself. This can be the file content or the path to the file. - - If not specified the default value will come from client_key. Which will - change in community.general 3.0.0. 
type: str aliases: [ importer_ssl_client_key ] name: @@ -541,9 +536,7 @@ def main(): add_export_distributor=dict(default=False, type='bool'), feed=dict(), generate_sqlite=dict(default=False, type='bool'), - feed_ca_cert=dict(aliases=['importer_ssl_ca_cert', 'ca_cert'], - deprecated_aliases=[dict(name='ca_cert', version='3.0.0', - collection_name='community.general')]), # was Ansible 2.14 + feed_ca_cert=dict(aliases=['importer_ssl_ca_cert']), feed_client_cert=dict(aliases=['importer_ssl_client_cert']), feed_client_key=dict(aliases=['importer_ssl_client_key'], no_log=True), name=dict(required=True, aliases=['repo']), @@ -571,19 +564,7 @@ def main(): generate_sqlite = module.params['generate_sqlite'] importer_ssl_ca_cert = module.params['feed_ca_cert'] importer_ssl_client_cert = module.params['feed_client_cert'] - if importer_ssl_client_cert is None and module.params['client_cert'] is not None: - importer_ssl_client_cert = module.params['client_cert'] - module.deprecate("To specify client certificates to be used with the repo to sync, and not for communication with the " - "Pulp instance, use the new options `feed_client_cert` and `feed_client_key` (available since " - "Ansible 2.9.2). Until community.general 3.0.0, the default value for `feed_client_cert` will be " - "taken from `client_cert` if only the latter is specified", - version="3.0.0", collection_name='community.general') # was Ansible 2.14 importer_ssl_client_key = module.params['feed_client_key'] - if importer_ssl_client_key is None and module.params['client_key'] is not None: - importer_ssl_client_key = module.params['client_key'] - module.deprecate("In Ansible 2.9.2 `feed_client_key` option was added. 
Until community.general 3.0.0 the default " - "value will come from client_key option", - version="3.0.0", collection_name='community.general') # was Ansible 2.14 proxy_host = module.params['proxy_host'] proxy_port = module.params['proxy_port'] proxy_username = module.params['proxy_username'] diff --git a/plugins/modules/packaging/os/xbps.py b/plugins/modules/packaging/os/xbps.py index 69163a4744..8d314ea859 100644 --- a/plugins/modules/packaging/os/xbps.py +++ b/plugins/modules/packaging/os/xbps.py @@ -61,12 +61,6 @@ options: type: bool default: yes version_added: '0.2.0' - force: - description: - - This option doesn't have any effect and is deprecated, it will be - removed in 3.0.0. - type: bool - default: no ''' EXAMPLES = ''' @@ -289,7 +283,6 @@ def main(): 'latest', 'absent', 'removed']), recurse=dict(default=False, type='bool'), - force=dict(default=False, type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'), upgrade=dict(default=False, type='bool'), update_cache=dict( default=True, aliases=['update-cache'], type='bool', diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/plugins/modules/remote_management/redfish/idrac_redfish_config.py index b8b8cce10a..a3525fa9c8 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_config.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_config.py @@ -45,16 +45,6 @@ options: description: - Password for authentication with iDRAC type: str - manager_attribute_name: - required: false - description: - - (deprecated) name of iDRAC attribute to update - type: str - manager_attribute_value: - required: false - description: - - (deprecated) value of iDRAC attribute to update - type: str manager_attributes: required: false description: @@ -183,12 +173,6 @@ class IdracRedfishUtils(RedfishUtils): manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri) attributes = self.module.params['manager_attributes'] - 
manager_attr_name = self.module.params.get('manager_attribute_name') - manager_attr_value = self.module.params.get('manager_attribute_value') - - # manager attributes to update - if manager_attr_name: - attributes.update({manager_attr_name: manager_attr_value}) attrs_to_patch = {} attrs_skipped = {} @@ -250,8 +234,6 @@ def main(): baseuri=dict(required=True), username=dict(required=True), password=dict(required=True, no_log=True), - manager_attribute_name=dict(default=None), - manager_attribute_value=dict(default=None), manager_attributes=dict(type='dict', default={}), timeout=dict(type='int', default=10), resource_id=dict() @@ -310,13 +292,6 @@ def main(): if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]: result = rf_utils.set_manager_attributes(command) - if any((module.params['manager_attribute_name'], module.params['manager_attribute_value'])): - module.deprecate(msg='Arguments `manager_attribute_name` and ' - '`manager_attribute_value` are deprecated. 
' - 'Use `manager_attributes` instead for passing in ' - 'the manager attribute name and value pairs', - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - # Return data back or fail with proper message if result['ret'] is True: module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py index 1ca30bfa86..ecc170437b 100644 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ b/plugins/modules/remote_management/redfish/redfish_config.py @@ -43,18 +43,6 @@ options: description: - Password for authentication with OOB controller type: str - bios_attribute_name: - required: false - description: - - name of BIOS attr to update (deprecated - use bios_attributes instead) - default: 'null' - type: str - bios_attribute_value: - required: false - description: - - value of BIOS attr to update (deprecated - use bios_attributes instead) - default: 'null' - type: raw bios_attributes: required: false description: @@ -129,13 +117,13 @@ EXAMPLES = ''' username: "{{ username }}" password: "{{ password }}" - - name: Enable PXE Boot for NIC1 using deprecated options + - name: Enable PXE Boot for NIC1 community.general.redfish_config: category: Systems command: SetBiosAttributes resource_id: 437XR1138R2 - bios_attribute_name: PxeDev1EnDis - bios_attribute_value: Enabled + bios_attributes: + PxeDev1EnDis: Enabled baseuri: "{{ baseuri }}" username: "{{ username }}" password: "{{ password }}" @@ -233,8 +221,6 @@ def main(): baseuri=dict(required=True), username=dict(required=True), password=dict(required=True, no_log=True), - bios_attribute_name=dict(default='null'), - bios_attribute_value=dict(default='null', type='raw'), bios_attributes=dict(type='dict', default={}), timeout=dict(type='int', default=10), boot_order=dict(type='list', elements='str', default=[]), @@ -264,12 +250,6 @@ def main(): # BIOS 
attributes to update bios_attributes = module.params['bios_attributes'] - if module.params['bios_attribute_name'] != 'null': - bios_attributes[module.params['bios_attribute_name']] = module.params[ - 'bios_attribute_value'] - module.deprecate(msg='The bios_attribute_name/bios_attribute_value ' - 'options are deprecated. Use bios_attributes instead', - version='3.0.0', collection_name='community.general') # was Ansible 2.14 # boot order boot_order = module.params['boot_order'] diff --git a/plugins/modules/system/syspatch.py b/plugins/modules/system/syspatch.py index 2483fb365d..6fcfaea0f5 100644 --- a/plugins/modules/system/syspatch.py +++ b/plugins/modules/system/syspatch.py @@ -17,13 +17,6 @@ description: - "Manage OpenBSD system patches using syspatch." options: - apply: - type: bool - description: - - Apply all available system patches. - - By default, apply all patches. - - Deprecated. Will be removed in community.general 3.0.0. - default: yes revert: description: - Revert system patches. 
@@ -37,7 +30,6 @@ author: EXAMPLES = ''' - name: Apply all available system patches community.general.syspatch: - apply: true - name: Revert last patch community.general.syspatch: @@ -50,7 +42,6 @@ EXAMPLES = ''' # NOTE: You can reboot automatically if a patch requires it: - name: Apply all patches and store result community.general.syspatch: - apply: true register: syspatch - name: Reboot if patch requires it @@ -86,14 +77,12 @@ from ansible.module_utils.basic import AnsibleModule def run_module(): # define available arguments/parameters a user can pass to the module module_args = dict( - apply=dict(type='bool', default=True, removed_in_version='3.0.0', removed_from_collection='community.general'), revert=dict(type='str', choices=['all', 'one']) ) module = AnsibleModule( argument_spec=module_args, supports_check_mode=True, - required_one_of=[['apply', 'revert']] ) result = syspatch_run(module) @@ -117,7 +106,7 @@ def syspatch_run(module): run_flag = ['-R'] else: run_flag = ['-r'] - elif module.params['apply']: + else: check_flag = ['-c'] run_flag = [] diff --git a/plugins/modules/web_infrastructure/django_manage.py b/plugins/modules/web_infrastructure/django_manage.py index 10161c0426..ba38abd90e 100644 --- a/plugins/modules/web_infrastructure/django_manage.py +++ b/plugins/modules/web_infrastructure/django_manage.py @@ -101,14 +101,6 @@ options: C(collectstatic) command. required: false type: bool - liveserver: - description: - - This parameter was implemented a long time ago in a galaxy far way. It probably relates to the - django-liveserver package, which is no longer updated. - - Hence, it will be considered DEPRECATED and should be removed in a future release. - type: str - required: false - aliases: [live_server] testrunner: description: - "From the Django docs: Controls the test runner class that is used to execute tests." 
@@ -233,7 +225,7 @@ def main(): flush=('database', ), loaddata=('database', 'fixtures', ), syncdb=('database', ), - test=('failfast', 'testrunner', 'liveserver', 'apps', ), + test=('failfast', 'testrunner', 'apps', ), validate=(), migrate=('apps', 'skip', 'merge', 'database',), collectstatic=('clear', 'link', ), @@ -253,7 +245,7 @@ def main(): ) # These params are allowed for certain commands only - specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner') + specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'testrunner') # These params are automatically added to the command if present general_params = ('settings', 'pythonpath', 'database',) @@ -274,8 +266,6 @@ def main(): database=dict(default=None, required=False, type='str'), failfast=dict(default=False, required=False, type='bool', aliases=['fail_fast']), fixtures=dict(default=None, required=False, type='str'), - liveserver=dict(default=None, required=False, type='str', aliases=['live_server'], - removed_in_version='3.0.0', removed_from_collection='community.general'), testrunner=dict(default=None, required=False, type='str', aliases=['test_runner']), skip=dict(default=None, required=False, type='bool'), merge=dict(default=None, required=False, type='bool'), diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index ffd03919ac..6cc090eefd 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -107,14 +107,11 @@ plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/monitoring/bigpanda.py validate-modules:invalid-argument-name -plugins/modules/monitoring/datadog/datadog_monitor.py 
validate-modules:invalid-argument-name plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code -plugins/modules/notification/cisco_webex.py validate-modules:invalid-argument-name plugins/modules/notification/grove.py validate-modules:invalid-argument-name plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 6e386e91d9..b718639b77 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -106,14 +106,11 @@ plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/monitoring/bigpanda.py validate-modules:invalid-argument-name -plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:invalid-argument-name plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code 
plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code -plugins/modules/notification/cisco_webex.py validate-modules:invalid-argument-name plugins/modules/notification/grove.py validate-modules:invalid-argument-name plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid From 7452a536479394f4596cdefa06700cba1587d7e2 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 12 Mar 2021 10:06:57 +1300 Subject: [PATCH 0102/3093] jenkins_job - added validate_certs parameter, setting the PYTHONHTTPSVERIFY env var (#1977) * added validate_certs parameter, setting the PYTHONHTTPSVERIFY env var * added changelog fragment * Update plugins/modules/web_infrastructure/jenkins_job.py Co-authored-by: Felix Fontein * Update plugins/modules/web_infrastructure/jenkins_job.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../1977-jenkinsjob-validate-certs.yml | 2 ++ .../modules/web_infrastructure/jenkins_job.py | 30 ++++++++++++------- 2 files changed, 22 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/1977-jenkinsjob-validate-certs.yml diff --git a/changelogs/fragments/1977-jenkinsjob-validate-certs.yml b/changelogs/fragments/1977-jenkinsjob-validate-certs.yml new file mode 100644 index 0000000000..b4f7b2f938 --- /dev/null +++ b/changelogs/fragments/1977-jenkinsjob-validate-certs.yml @@ -0,0 +1,2 @@ +minor_changes: + - jenkins_job - add a ``validate_certs`` parameter that allows disabling TLS/SSL certificate validation (https://github.com/ansible-collections/community.general/issues/255). 
diff --git a/plugins/modules/web_infrastructure/jenkins_job.py b/plugins/modules/web_infrastructure/jenkins_job.py index 0e06b5ee62..6fb775d22a 100644 --- a/plugins/modules/web_infrastructure/jenkins_job.py +++ b/plugins/modules/web_infrastructure/jenkins_job.py @@ -6,7 +6,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type - DOCUMENTATION = ''' --- module: jenkins_job @@ -65,6 +64,15 @@ options: description: - User to authenticate with the Jenkins server. required: false + validate_certs: + type: bool + default: yes + description: + - If set to C(no), the SSL certificates will not be validated. + This should only set to C(no) used on personally controlled sites + using self-signed certificates as it avoids verifying the source site. + - The C(python-jenkins) library only handles this by using the environment variable C(PYTHONHTTPSVERIFY). + version_added: 2.3.0 ''' EXAMPLES = ''' @@ -146,6 +154,7 @@ url: sample: https://jenkins.mydomain.com ''' +import os import traceback import xml.etree.ElementTree as ET @@ -161,7 +170,7 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils._text import to_native -class JenkinsJob: +class JenkinsJob(object): def __init__(self, module): self.module = module @@ -189,14 +198,16 @@ class JenkinsJob: } self.EXCL_STATE = "excluded state" + if not module.params['validate_certs']: + os.environ['PYTHONHTTPSVERIFY'] = '0' def get_jenkins_connection(self): try: - if (self.user and self.password): + if self.user and self.password: return jenkins.Jenkins(self.jenkins_url, self.user, self.password) - elif (self.user and self.token): + elif self.user and self.token: return jenkins.Jenkins(self.jenkins_url, self.user, self.token) - elif (self.user and not (self.password or self.token)): + elif self.user and not (self.password or self.token): return jenkins.Jenkins(self.jenkins_url, self.user) else: return jenkins.Jenkins(self.jenkins_url) @@ -256,9 +267,7 
@@ class JenkinsJob: if self.enabled is None: return False - if ((self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled")): - return True - return False + return (self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled") def switch_state(self): if self.enabled is False: @@ -277,7 +286,7 @@ class JenkinsJob: self.server.reconfig_job(self.name, self.get_config()) # Handle job disable/enable - elif (status != self.EXCL_STATE and self.has_state_changed(status)): + elif status != self.EXCL_STATE and self.has_state_changed(status): self.result['changed'] = True if not self.module.check_mode: self.switch_state() @@ -342,7 +351,8 @@ def main(): enabled=dict(required=False, type='bool'), token=dict(type='str', required=False, no_log=True), url=dict(type='str', required=False, default="http://localhost:8080"), - user=dict(type='str', required=False) + user=dict(type='str', required=False), + validate_certs=dict(type='bool', default=True), ), mutually_exclusive=[ ['password', 'token'], From d0bb74a03b79b968a2ba6c3867e3f071dcbfbcc4 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Thu, 11 Mar 2021 16:08:11 -0500 Subject: [PATCH 0103/3093] Allow tags strings containing commas in proxmox inventory plug-in (#1949) * Included explicit parsing for proxmox guest tags and updated corresponding unit test with tags key * Including changelog fragment for PR 1949 * Removed ellipsis from test Proxmox only permits periods when surrounded by alphanumeric characters * Corrected punctuation for changelog entry Co-authored-by: Felix Fontein * Allowing tags string to contain commas * Incorporated new parsed tags fact with bugfix * Correcting whitespace issues * Update changelogs/fragments/1949-proxmox-inventory-tags.yml Co-authored-by: Felix Fontein * Update plugins/inventory/proxmox.py Co-authored-by: Felix Fontein * Update changelogs/fragments/1949-proxmox-inventory-tags.yml Co-authored-by: Felix Fontein 
Co-authored-by: Felix Fontein --- changelogs/fragments/1949-proxmox-inventory-tags.yml | 5 +++++ plugins/inventory/proxmox.py | 10 ++++++++++ tests/unit/plugins/inventory/test_proxmox.py | 6 ++++-- 3 files changed, 19 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/1949-proxmox-inventory-tags.yml diff --git a/changelogs/fragments/1949-proxmox-inventory-tags.yml b/changelogs/fragments/1949-proxmox-inventory-tags.yml new file mode 100644 index 0000000000..073428c2e6 --- /dev/null +++ b/changelogs/fragments/1949-proxmox-inventory-tags.yml @@ -0,0 +1,5 @@ +--- +bugfixes: +- proxmox inventory plugin - allowed proxmox tag string to contain commas when returned as fact (https://github.com/ansible-collections/community.general/pull/1949). +minor_changes: +- proxmox inventory plugin - added ``tags_parsed`` fact containing tags parsed as a list (https://github.com/ansible-collections/community.general/pull/1949). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index d21688c4d4..bc79d596cb 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -217,6 +217,10 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): vmtype_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmtype_key.lower())) self.inventory.set_variable(name, vmtype_key, vmtype) + plaintext_configs = [ + 'tags', + ] + for config in ret: key = config key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), key.lower())) @@ -226,6 +230,12 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')): value = ('disk_image=' + value) + # Additional field containing parsed tags as list + if config == 'tags': + parsed_key = self.to_safe('%s%s' % (key, "_parsed")) + parsed_value = [tag.strip() for tag in value.split(",")] + self.inventory.set_variable(name, parsed_key, parsed_value) + if not (isinstance(value, int) or ',' not in value): # split off strings
with commas to a dict # skip over any keys that cannot be processed diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index 14332e750e..036c8e5938 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ b/tests/unit/plugins/inventory/test_proxmox.py @@ -71,7 +71,8 @@ def get_json(url): "status": "running", "vmid": "100", "disk": "1000", - "uptime": 1000}] + "uptime": 1000, + "tags": "test, tags, here"}] elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu": # _get_qemu_per_node return [{"name": "test-qemu", @@ -105,7 +106,8 @@ def get_json(url): "vmid": "9001", "uptime": 0, "disk": 0, - "status": "stopped"}] + "status": "stopped", + "tags": "test, tags, here"}] elif url == "https://localhost:8006/api2/json/pools/test": # _get_members_per_pool return {"members": [{"uptime": 1000, From 178209be27ee78b82261d488db55125acd8ec140 Mon Sep 17 00:00:00 2001 From: Jeffrey van Pelt Date: Fri, 12 Mar 2021 08:00:03 +0100 Subject: [PATCH 0104/3093] Excluded qemu templates in pools (#1991) * Excluded qemu templates in pools * Added changelog fragment * Made check more robust --- .../fragments/1991-proxmox-inventory-fix-template-in-pool.yml | 3 +++ plugins/inventory/proxmox.py | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml diff --git a/changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml b/changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml new file mode 100644 index 0000000000..90a438dddf --- /dev/null +++ b/changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - proxmox inventory - exclude qemu templates from inclusion to the inventory via pools (https://github.com/ansible-collections/community.general/issues/1986, https://github.com/ansible-collections/community.general/pull/1991). 
diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index bc79d596cb..d69775baf6 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -349,7 +349,8 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): for member in self._get_members_per_pool(pool['poolid']): if member.get('name'): - self.inventory.add_child(pool_group, member['name']) + if not member.get('template'): + self.inventory.add_child(pool_group, member['name']) def parse(self, inventory, loader, path, cache=True): if not HAS_REQUESTS: From 1ea080762b3abf5783a4c4eb1f3c3c19fef67569 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 12 Mar 2021 08:51:47 +0100 Subject: [PATCH 0105/3093] Mark non-secret leaking module options with no_log=False (#2001) * Mark non-secret leaking module options with no_log=False. * Add changelog fragment. --- changelogs/fragments/2001-no_log-false.yml | 2 ++ plugins/module_utils/oracle/oci_utils.py | 2 +- plugins/modules/cloud/pubnub/pubnub_blocks.py | 2 +- plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py | 2 +- plugins/modules/cloud/xenserver/xenserver_guest.py | 2 +- plugins/modules/clustering/consul/consul_acl.py | 2 +- plugins/modules/clustering/consul/consul_kv.py | 2 +- plugins/modules/clustering/etcd3.py | 2 +- plugins/modules/files/read_csv.py | 2 +- plugins/modules/files/xattr.py | 2 +- plugins/modules/net_tools/cloudflare_dns.py | 2 +- .../modules/source_control/bitbucket/bitbucket_access_key.py | 2 +- .../source_control/bitbucket/bitbucket_pipeline_known_host.py | 2 +- plugins/modules/source_control/github/github_deploy_key.py | 2 +- plugins/modules/source_control/gitlab/gitlab_deploy_key.py | 2 +- plugins/modules/system/dconf.py | 2 +- plugins/modules/system/gconftool2.py | 2 +- plugins/modules/system/osx_defaults.py | 2 +- 18 files changed, 19 insertions(+), 17 deletions(-) create mode 100644 changelogs/fragments/2001-no_log-false.yml diff --git a/changelogs/fragments/2001-no_log-false.yml 
b/changelogs/fragments/2001-no_log-false.yml new file mode 100644 index 0000000000..82d9ba0bb0 --- /dev/null +++ b/changelogs/fragments/2001-no_log-false.yml @@ -0,0 +1,2 @@ +bugfixes: +- "Mark various module options with ``no_log=False`` which have a name that potentially could leak secrets, but which do not (https://github.com/ansible-collections/community.general/pull/2001)." diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py index 7252f68110..610366d9ba 100644 --- a/plugins/module_utils/oracle/oci_utils.py +++ b/plugins/module_utils/oracle/oci_utils.py @@ -104,7 +104,7 @@ def get_common_arg_spec(supports_create=False, supports_wait=False): if supports_create: common_args.update( - key_by=dict(type="list", elements="str"), + key_by=dict(type="list", elements="str", no_log=False), force_create=dict(type="bool", default=False), ) diff --git a/plugins/modules/cloud/pubnub/pubnub_blocks.py b/plugins/modules/cloud/pubnub/pubnub_blocks.py index 640f6d925e..1dbe416b9c 100644 --- a/plugins/modules/cloud/pubnub/pubnub_blocks.py +++ b/plugins/modules/cloud/pubnub/pubnub_blocks.py @@ -549,7 +549,7 @@ def main(): password=dict(default='', required=False, type='str', no_log=True), account=dict(default='', required=False, type='str'), application=dict(required=True, type='str'), - keyset=dict(required=True, type='str'), + keyset=dict(required=True, type='str', no_log=False), state=dict(default='present', type='str', choices=['started', 'stopped', 'present', 'absent']), name=dict(required=True, type='str'), description=dict(type='str'), diff --git a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py index a1dcd94efb..1a0ddb9fef 100644 --- a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py +++ b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py @@ -1448,7 +1448,7 @@ def main(): iam_role_arn=dict(type='str'), iam_role_name=dict(type='str'), 
image_id=dict(type='str', required=True), - key_pair=dict(type='str'), + key_pair=dict(type='str', no_log=False), kubernetes=dict(type='dict'), lifetime_period=dict(type='int'), load_balancers=dict(type='list'), diff --git a/plugins/modules/cloud/xenserver/xenserver_guest.py b/plugins/modules/cloud/xenserver/xenserver_guest.py index a9a5fb4c37..2316168e9d 100644 --- a/plugins/modules/cloud/xenserver/xenserver_guest.py +++ b/plugins/modules/cloud/xenserver/xenserver_guest.py @@ -1839,7 +1839,7 @@ def main(): type='list', elements='dict', options=dict( - key=dict(type='str', required=True), + key=dict(type='str', required=True, no_log=False), value=dict(type='raw', required=True), ), ), diff --git a/plugins/modules/clustering/consul/consul_acl.py b/plugins/modules/clustering/consul/consul_acl.py index c8d08f8e26..cb5395ed31 100644 --- a/plugins/modules/clustering/consul/consul_acl.py +++ b/plugins/modules/clustering/consul/consul_acl.py @@ -229,7 +229,7 @@ _ARGUMENT_SPEC = { PORT_PARAMETER_NAME: dict(default=8500, type='int'), RULES_PARAMETER_NAME: dict(type='list', elements='dict'), STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]), - TOKEN_PARAMETER_NAME: dict(), + TOKEN_PARAMETER_NAME: dict(no_log=False), TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE], default=CLIENT_TOKEN_TYPE_VALUE) } diff --git a/plugins/modules/clustering/consul/consul_kv.py b/plugins/modules/clustering/consul/consul_kv.py index bafa7fd6d9..01e9be2d05 100644 --- a/plugins/modules/clustering/consul/consul_kv.py +++ b/plugins/modules/clustering/consul/consul_kv.py @@ -297,7 +297,7 @@ def main(): argument_spec=dict( cas=dict(type='str'), flags=dict(type='str'), - key=dict(type='str', required=True), + key=dict(type='str', required=True, no_log=False), host=dict(type='str', default='localhost'), scheme=dict(type='str', default='http'), validate_certs=dict(type='bool', default=True), diff --git 
a/plugins/modules/clustering/etcd3.py b/plugins/modules/clustering/etcd3.py index df7319ecfe..0f87e32d13 100644 --- a/plugins/modules/clustering/etcd3.py +++ b/plugins/modules/clustering/etcd3.py @@ -134,7 +134,7 @@ def run_module(): # define the available arguments/parameters that a user can pass to # the module module_args = dict( - key=dict(type='str', required=True), + key=dict(type='str', required=True, no_log=False), value=dict(type='str', required=True), host=dict(type='str', default='localhost'), port=dict(type='int', default=2379), diff --git a/plugins/modules/files/read_csv.py b/plugins/modules/files/read_csv.py index 7100d3782d..24a77c0e28 100644 --- a/plugins/modules/files/read_csv.py +++ b/plugins/modules/files/read_csv.py @@ -164,7 +164,7 @@ def main(): argument_spec=dict( path=dict(type='path', required=True, aliases=['filename']), dialect=dict(type='str', default='excel'), - key=dict(type='str'), + key=dict(type='str', no_log=False), fieldnames=dict(type='list', elements='str'), unique=dict(type='bool', default=True), delimiter=dict(type='str'), diff --git a/plugins/modules/files/xattr.py b/plugins/modules/files/xattr.py index 8b1449be07..0d5f9f46f3 100644 --- a/plugins/modules/files/xattr.py +++ b/plugins/modules/files/xattr.py @@ -172,7 +172,7 @@ def main(): argument_spec=dict( path=dict(type='path', required=True, aliases=['name']), namespace=dict(type='str', default='user'), - key=dict(type='str'), + key=dict(type='str', no_log=False), value=dict(type='str'), state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']), follow=dict(type='bool', default=True), diff --git a/plugins/modules/net_tools/cloudflare_dns.py b/plugins/modules/net_tools/cloudflare_dns.py index c91df99556..ffa4e55745 100644 --- a/plugins/modules/net_tools/cloudflare_dns.py +++ b/plugins/modules/net_tools/cloudflare_dns.py @@ -800,7 +800,7 @@ def main(): algorithm=dict(type='int'), cert_usage=dict(type='int', choices=[0, 1, 2, 3]), 
hash_type=dict(type='int', choices=[1, 2]), - key_tag=dict(type='int'), + key_tag=dict(type='int', no_log=False), port=dict(type='int'), priority=dict(type='int', default=1), proto=dict(type='str'), diff --git a/plugins/modules/source_control/bitbucket/bitbucket_access_key.py b/plugins/modules/source_control/bitbucket/bitbucket_access_key.py index 80c1c49315..6e16b267ea 100644 --- a/plugins/modules/source_control/bitbucket/bitbucket_access_key.py +++ b/plugins/modules/source_control/bitbucket/bitbucket_access_key.py @@ -224,7 +224,7 @@ def main(): argument_spec.update( repository=dict(type='str', required=True), username=dict(type='str', required=True), - key=dict(type='str'), + key=dict(type='str', no_log=False), label=dict(type='str', required=True), state=dict(type='str', choices=['present', 'absent'], required=True), ) diff --git a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py index dba9f9aab6..356b09c035 100644 --- a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py +++ b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py @@ -263,7 +263,7 @@ def main(): repository=dict(type='str', required=True), username=dict(type='str', required=True), name=dict(type='str', required=True), - key=dict(type='str'), + key=dict(type='str', no_log=False), state=dict(type='str', choices=['present', 'absent'], required=True), ) module = AnsibleModule( diff --git a/plugins/modules/source_control/github/github_deploy_key.py b/plugins/modules/source_control/github/github_deploy_key.py index 4d55cb0db3..7a67a12334 100644 --- a/plugins/modules/source_control/github/github_deploy_key.py +++ b/plugins/modules/source_control/github/github_deploy_key.py @@ -292,7 +292,7 @@ def main(): owner=dict(required=True, type='str', aliases=['account', 'organization']), repo=dict(required=True, type='str', aliases=['repository']), 
name=dict(required=True, type='str', aliases=['title', 'label']), - key=dict(required=True, type='str'), + key=dict(required=True, type='str', no_log=False), read_only=dict(required=False, type='bool', default=True), state=dict(default='present', choices=['present', 'absent']), force=dict(required=False, type='bool', default=False), diff --git a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py b/plugins/modules/source_control/gitlab/gitlab_deploy_key.py index 20caf4292b..a75aef4e48 100644 --- a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py +++ b/plugins/modules/source_control/gitlab/gitlab_deploy_key.py @@ -241,7 +241,7 @@ def main(): api_token=dict(type='str', no_log=True), state=dict(type='str', default="present", choices=["absent", "present"]), project=dict(type='str', required=True), - key=dict(type='str', required=True), + key=dict(type='str', required=True, no_log=False), can_push=dict(type='bool', default=False), title=dict(type='str', required=True) )) diff --git a/plugins/modules/system/dconf.py b/plugins/modules/system/dconf.py index 50f4369f4f..f7776cde6e 100644 --- a/plugins/modules/system/dconf.py +++ b/plugins/modules/system/dconf.py @@ -352,7 +352,7 @@ def main(): module = AnsibleModule( argument_spec=dict( state=dict(default='present', choices=['present', 'absent', 'read']), - key=dict(required=True, type='str'), + key=dict(required=True, type='str', no_log=False), value=dict(required=False, default=None, type='str'), ), supports_check_mode=True diff --git a/plugins/modules/system/gconftool2.py b/plugins/modules/system/gconftool2.py index a4acad5580..6b9ce71213 100644 --- a/plugins/modules/system/gconftool2.py +++ b/plugins/modules/system/gconftool2.py @@ -151,7 +151,7 @@ def main(): # Setup the Ansible module module = AnsibleModule( argument_spec=dict( - key=dict(type='str', required=True), + key=dict(type='str', required=True, no_log=False), value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']), 
value=dict(type='str'), state=dict(type='str', required=True, choices=['absent', 'get', 'present']), diff --git a/plugins/modules/system/osx_defaults.py b/plugins/modules/system/osx_defaults.py index a036290879..45179dc7d2 100644 --- a/plugins/modules/system/osx_defaults.py +++ b/plugins/modules/system/osx_defaults.py @@ -369,7 +369,7 @@ def main(): argument_spec=dict( domain=dict(type='str', default='NSGlobalDomain'), host=dict(type='str'), - key=dict(type='str'), + key=dict(type='str', no_log=False), type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']), array_add=dict(type='bool', default=False), value=dict(type='raw'), From 4676ca584b0552d9695bacaa445a5fdd7194f2d8 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Fri, 12 Mar 2021 13:17:41 -0500 Subject: [PATCH 0106/3093] Remove password requirement when creating lxc containers (#1999) * Removed requirement for password * Updated documentation for password * Adding changelog fragment * Update changelogs/fragments/1999-proxmox-fix-issue-1955.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/1999-proxmox-fix-issue-1955.yml | 3 +++ plugins/modules/cloud/misc/proxmox.py | 3 +-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/1999-proxmox-fix-issue-1955.yml diff --git a/changelogs/fragments/1999-proxmox-fix-issue-1955.yml b/changelogs/fragments/1999-proxmox-fix-issue-1955.yml new file mode 100644 index 0000000000..274e70fb0f --- /dev/null +++ b/changelogs/fragments/1999-proxmox-fix-issue-1955.yml @@ -0,0 +1,3 @@ +--- +bugfixes: +- proxmox - removed requirement that root password is provided when container state is ``present`` (https://github.com/ansible-collections/community.general/pull/1999).
diff --git a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py index 4f495da34e..b5040bc659 100644 --- a/plugins/modules/cloud/misc/proxmox.py +++ b/plugins/modules/cloud/misc/proxmox.py @@ -17,7 +17,6 @@ options: password: description: - the instance root password - - required only for C(state=present) type: str hostname: description: @@ -514,7 +513,7 @@ def main(): hookscript=dict(type='str'), proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']), ), - required_if=[('state', 'present', ['node', 'hostname', 'password', 'ostemplate'])], + required_if=[('state', 'present', ['node', 'hostname', 'ostemplate'])], required_together=[('api_token_id', 'api_token_secret')], required_one_of=[('api_password', 'api_token_id')], ) From 49d9a257efb87a47b74c55175910df32f7884d5a Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 13 Mar 2021 13:24:26 +0100 Subject: [PATCH 0107/3093] More false-positives (not flagged by sanity tests yet). (#2010) --- plugins/module_utils/identity/keycloak/keycloak.py | 2 +- plugins/modules/cloud/linode/linode_v4.py | 2 +- plugins/modules/cloud/misc/proxmox_kvm.py | 2 +- plugins/modules/cloud/oneandone/oneandone_server.py | 2 +- plugins/modules/cloud/profitbricks/profitbricks.py | 2 +- plugins/modules/cloud/profitbricks/profitbricks_volume.py | 2 +- plugins/modules/cloud/softlayer/sl_vm.py | 2 +- plugins/modules/database/aerospike/aerospike_migrations.py | 4 ++-- plugins/modules/monitoring/datadog/datadog_event.py | 2 +- plugins/modules/monitoring/pagerduty_alert.py | 2 +- .../modules/remote_management/manageiq/manageiq_provider.py | 2 +- plugins/modules/source_control/gitlab/gitlab_user.py | 2 +- 12 files changed, 13 insertions(+), 13 deletions(-) diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 5c57e755da..58a39645e4 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ 
b/plugins/module_utils/identity/keycloak/keycloak.py @@ -55,7 +55,7 @@ def keycloak_argument_spec(): :return: argument_spec dict """ return dict( - auth_keycloak_url=dict(type='str', aliases=['url'], required=True), + auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False), auth_client_id=dict(type='str', default='admin-cli'), auth_realm=dict(type='str', required=True), auth_client_secret=dict(type='str', default=None, no_log=True), diff --git a/plugins/modules/cloud/linode/linode_v4.py b/plugins/modules/cloud/linode/linode_v4.py index 809621cfe1..aec7704c25 100644 --- a/plugins/modules/cloud/linode/linode_v4.py +++ b/plugins/modules/cloud/linode/linode_v4.py @@ -242,7 +242,7 @@ def initialise_module(): no_log=True, fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']), ), - authorized_keys=dict(type='list', elements='str', required=False), + authorized_keys=dict(type='list', elements='str', required=False, no_log=False), group=dict(type='str', required=False), image=dict(type='str', required=False), region=dict(type='str', required=False), diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 7fb997abc7..54de76944c 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -1057,7 +1057,7 @@ def main(): smbios=dict(type='str'), snapname=dict(type='str'), sockets=dict(type='int'), - sshkeys=dict(type='str'), + sshkeys=dict(type='str', no_log=False), startdate=dict(type='str'), startup=dict(), state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']), diff --git a/plugins/modules/cloud/oneandone/oneandone_server.py b/plugins/modules/cloud/oneandone/oneandone_server.py index a91fc55ac9..9eaf943be7 100644 --- a/plugins/modules/cloud/oneandone/oneandone_server.py +++ b/plugins/modules/cloud/oneandone/oneandone_server.py @@ -630,7 +630,7 @@ def main(): ram=dict(type='float'), hdds=dict(type='list', 
elements='dict'), count=dict(type='int', default=1), - ssh_key=dict(type='raw'), + ssh_key=dict(type='raw', no_log=False), auto_increment=dict(type='bool', default=True), server=dict(type='str'), datacenter=dict( diff --git a/plugins/modules/cloud/profitbricks/profitbricks.py b/plugins/modules/cloud/profitbricks/profitbricks.py index 507bea5339..c64151d68e 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks.py +++ b/plugins/modules/cloud/profitbricks/profitbricks.py @@ -583,7 +583,7 @@ def main(): volume_size=dict(type='int', default=10), disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), image_password=dict(default=None, no_log=True), - ssh_keys=dict(type='list', elements='str', default=[]), + ssh_keys=dict(type='list', elements='str', default=[], no_log=False), bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), lan=dict(type='int', default=1), count=dict(type='int', default=1), diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume.py b/plugins/modules/cloud/profitbricks/profitbricks_volume.py index 0719c025fa..0e9523c664 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_volume.py +++ b/plugins/modules/cloud/profitbricks/profitbricks_volume.py @@ -376,7 +376,7 @@ def main(): bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), image=dict(), image_password=dict(no_log=True), - ssh_keys=dict(type='list', elements='str', default=[]), + ssh_keys=dict(type='list', elements='str', default=[], no_log=False), disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), licence_type=dict(default='UNKNOWN'), count=dict(type='int', default=1), diff --git a/plugins/modules/cloud/softlayer/sl_vm.py b/plugins/modules/cloud/softlayer/sl_vm.py index 72d520ddeb..c8db13d815 100644 --- a/plugins/modules/cloud/softlayer/sl_vm.py +++ b/plugins/modules/cloud/softlayer/sl_vm.py @@ -404,7 +404,7 @@ def main(): nic_speed=dict(type='int', choices=NIC_SPEEDS), public_vlan=dict(type='str'), private_vlan=dict(type='str'), - ssh_keys=dict(type='list', 
elements='str', default=[]), + ssh_keys=dict(type='list', elements='str', default=[], no_log=False), post_uri=dict(type='str'), state=dict(type='str', default='present', choices=STATES), wait=dict(type='bool', default=True), diff --git a/plugins/modules/database/aerospike/aerospike_migrations.py b/plugins/modules/database/aerospike/aerospike_migrations.py index 95eda4775c..33f27cd381 100644 --- a/plugins/modules/database/aerospike/aerospike_migrations.py +++ b/plugins/modules/database/aerospike/aerospike_migrations.py @@ -190,9 +190,9 @@ def run_module(): min_cluster_size=dict(type='int', required=False, default=1), target_cluster_size=dict(type='int', required=False, default=None), fail_on_cluster_change=dict(type='bool', required=False, default=True), - migrate_tx_key=dict(type='str', required=False, + migrate_tx_key=dict(type='str', required=False, no_log=False, default="migrate_tx_partitions_remaining"), - migrate_rx_key=dict(type='str', required=False, + migrate_rx_key=dict(type='str', required=False, no_log=False, default="migrate_rx_partitions_remaining") ) diff --git a/plugins/modules/monitoring/datadog/datadog_event.py b/plugins/modules/monitoring/datadog/datadog_event.py index fd15eaf46c..c3a3920aee 100644 --- a/plugins/modules/monitoring/datadog/datadog_event.py +++ b/plugins/modules/monitoring/datadog/datadog_event.py @@ -120,7 +120,7 @@ def main(): host=dict(), tags=dict(type='list', elements='str'), alert_type=dict(default='info', choices=['error', 'warning', 'info', 'success']), - aggregation_key=dict(), + aggregation_key=dict(no_log=False), validate_certs=dict(default=True, type='bool'), ) ) diff --git a/plugins/modules/monitoring/pagerduty_alert.py b/plugins/modules/monitoring/pagerduty_alert.py index 736ada5e4a..58a1f260fb 100644 --- a/plugins/modules/monitoring/pagerduty_alert.py +++ b/plugins/modules/monitoring/pagerduty_alert.py @@ -205,7 +205,7 @@ def main(): client=dict(required=False, default=None), client_url=dict(required=False, 
default=None), desc=dict(required=False, default='Created via Ansible'), - incident_key=dict(required=False, default=None) + incident_key=dict(required=False, default=None, no_log=False) ), supports_check_mode=True ) diff --git a/plugins/modules/remote_management/manageiq/manageiq_provider.py b/plugins/modules/remote_management/manageiq/manageiq_provider.py index 8a3d96c745..f17cbec910 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_provider.py +++ b/plugins/modules/remote_management/manageiq/manageiq_provider.py @@ -569,7 +569,7 @@ def endpoint_list_spec(): provider=dict(type='dict', options=endpoint_argument_spec()), metrics=dict(type='dict', options=endpoint_argument_spec()), alerts=dict(type='dict', options=endpoint_argument_spec()), - ssh_keypair=dict(type='dict', options=endpoint_argument_spec()), + ssh_keypair=dict(type='dict', options=endpoint_argument_spec(), no_log=False), ) diff --git a/plugins/modules/source_control/gitlab/gitlab_user.py b/plugins/modules/source_control/gitlab/gitlab_user.py index 1e8ee65a67..9fefe1aff9 100644 --- a/plugins/modules/source_control/gitlab/gitlab_user.py +++ b/plugins/modules/source_control/gitlab/gitlab_user.py @@ -470,7 +470,7 @@ def main(): password=dict(type='str', no_log=True), email=dict(type='str'), sshkey_name=dict(type='str'), - sshkey_file=dict(type='str'), + sshkey_file=dict(type='str', no_log=False), group=dict(type='str'), access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]), confirm=dict(type='bool', default=True), From f8859af3774627ca304239da47725406e0e326e2 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 14 Mar 2021 22:53:49 +1300 Subject: [PATCH 0108/3093] Tidy up sanity checks ignore lines modules (batch 8) (#2006) * fixed validation-modules for plugins/modules/cloud/smartos/smartos_image_info.py * fixed validation-modules for 
plugins/modules/cloud/rackspace/rax_scaling_group.py * fixed validation-modules for plugins/modules/cloud/rackspace/rax_cdb_user.py * fixed validation-modules for plugins/modules/cloud/rackspace/rax.py * Tidy up sanity checks ignore lines modules (batch 8) * added changelog fragment * rolled back removal of parameter from rax.py --- changelogs/fragments/2006-valmod-batch8.yml | 4 ++++ plugins/modules/cloud/rackspace/rax.py | 6 ++++-- plugins/modules/cloud/rackspace/rax_cdb_user.py | 3 ++- plugins/modules/cloud/rackspace/rax_scaling_group.py | 6 ++++-- plugins/modules/cloud/smartos/smartos_image_info.py | 1 + tests/sanity/ignore-2.10.txt | 6 +----- tests/sanity/ignore-2.11.txt | 6 +----- tests/sanity/ignore-2.9.txt | 3 +-- 8 files changed, 18 insertions(+), 17 deletions(-) create mode 100644 changelogs/fragments/2006-valmod-batch8.yml diff --git a/changelogs/fragments/2006-valmod-batch8.yml b/changelogs/fragments/2006-valmod-batch8.yml new file mode 100644 index 0000000000..30be5e16b2 --- /dev/null +++ b/changelogs/fragments/2006-valmod-batch8.yml @@ -0,0 +1,4 @@ +minor_changes: + - rax - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006). + - rax_cdb_user - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006). + - rax_scaling_group - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006). 
diff --git a/plugins/modules/cloud/rackspace/rax.py b/plugins/modules/cloud/rackspace/rax.py index 9f7df5c45b..d2d9119b02 100644 --- a/plugins/modules/cloud/rackspace/rax.py +++ b/plugins/modules/cloud/rackspace/rax.py @@ -110,6 +110,7 @@ options: with this image instance_ids: type: list + elements: str description: - list of instance ids, currently only used when state='absent' to remove instances @@ -129,6 +130,7 @@ options: - Name to give the instance networks: type: list + elements: str description: - The network to attach to the instances. If specified, you must include ALL networks including the public and private interfaces. Can be C(id) @@ -810,11 +812,11 @@ def main(): flavor=dict(), group=dict(), image=dict(), - instance_ids=dict(type='list'), + instance_ids=dict(type='list', elements='str'), key_name=dict(aliases=['keypair']), meta=dict(type='dict', default={}), name=dict(), - networks=dict(type='list', default=['public', 'private']), + networks=dict(type='list', elements='str', default=['public', 'private']), service=dict(), state=dict(default='present', choices=['present', 'absent']), user_data=dict(no_log=True), diff --git a/plugins/modules/cloud/rackspace/rax_cdb_user.py b/plugins/modules/cloud/rackspace/rax_cdb_user.py index 34be49d862..2034170f42 100644 --- a/plugins/modules/cloud/rackspace/rax_cdb_user.py +++ b/plugins/modules/cloud/rackspace/rax_cdb_user.py @@ -30,6 +30,7 @@ options: required: yes databases: type: list + elements: str description: - Name of the databases that the user can access default: [] @@ -189,7 +190,7 @@ def main(): cdb_id=dict(type='str', required=True), db_username=dict(type='str', required=True), db_password=dict(type='str', required=True, no_log=True), - databases=dict(type='list', default=[]), + databases=dict(type='list', elements='str', default=[]), host=dict(type='str', default='%'), state=dict(default='present', choices=['present', 'absent']) ) diff --git a/plugins/modules/cloud/rackspace/rax_scaling_group.py 
b/plugins/modules/cloud/rackspace/rax_scaling_group.py index 7b2b6ace79..2f8fa0a2cc 100644 --- a/plugins/modules/cloud/rackspace/rax_scaling_group.py +++ b/plugins/modules/cloud/rackspace/rax_scaling_group.py @@ -53,6 +53,7 @@ options: - key pair to use on the instance loadbalancers: type: list + elements: dict description: - List of load balancer C(id) and C(port) hashes max_entities: @@ -78,6 +79,7 @@ options: required: true networks: type: list + elements: str description: - The network to attach to the instances. If specified, you must include ALL networks including the public and private interfaces. Can be C(id) @@ -376,12 +378,12 @@ def main(): flavor=dict(required=True), image=dict(required=True), key_name=dict(), - loadbalancers=dict(type='list'), + loadbalancers=dict(type='list', elements='dict'), meta=dict(type='dict', default={}), min_entities=dict(type='int', required=True), max_entities=dict(type='int', required=True), name=dict(required=True), - networks=dict(type='list', default=['public', 'private']), + networks=dict(type='list', elements='str', default=['public', 'private']), server_name=dict(required=True), state=dict(default='present', choices=['present', 'absent']), user_data=dict(no_log=True), diff --git a/plugins/modules/cloud/smartos/smartos_image_info.py b/plugins/modules/cloud/smartos/smartos_image_info.py index 17761af8a5..473d345ad8 100644 --- a/plugins/modules/cloud/smartos/smartos_image_info.py +++ b/plugins/modules/cloud/smartos/smartos_image_info.py @@ -24,6 +24,7 @@ options: manifest and 'published_date', 'published', 'source', 'clones', and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm) under 'imgadm list'. 
+ type: str ''' EXAMPLES = ''' diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 6cc090eefd..bb78528220 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -65,14 +65,11 @@ plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-t plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type -plugins/modules/cloud/rackspace/rax.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax.py validate-modules:undocumented-parameter -plugins/modules/cloud/rackspace/rax_cdb_user.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed -plugins/modules/cloud/rackspace/rax_scaling_group.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error @@ -87,7 +84,6 @@ plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:retur plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error 
plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error -plugins/modules/cloud/smartos/smartos_image_info.py validate-modules:doc-missing-type plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index b718639b77..c30978e247 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -64,14 +64,11 @@ plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-t plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type -plugins/modules/cloud/rackspace/rax.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax.py validate-modules:undocumented-parameter -plugins/modules/cloud/rackspace/rax_cdb_user.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed -plugins/modules/cloud/rackspace/rax_scaling_group.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error 
plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error @@ -86,7 +83,6 @@ plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:retur plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error -plugins/modules/cloud/smartos/smartos_image_info.py validate-modules:doc-missing-type plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 06e83d3535..0782b31593 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -97,7 +97,7 @@ plugins/modules/cloud/rackspace/rax.py use-argspec-type-path plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type plugins/modules/cloud/rackspace/rax.py validate-modules:undocumented-parameter plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path -plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path +plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:deprecation-mismatch plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:invalid-documentation plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error @@ -126,7 +126,6 @@ plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:depreca plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:invalid-documentation 
plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error -plugins/modules/cloud/smartos/smartos_image_info.py validate-modules:doc-missing-type plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc From 84b54ad6a2b8eaf46e57d860fa8ac519c3e985ed Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 15 Mar 2021 00:22:03 +1300 Subject: [PATCH 0109/3093] rax - removed service parameter (#2020) * removed service parameter * added changelog fragment --- changelogs/fragments/2020-remove-unused-param-in-rax.yml | 2 ++ plugins/modules/cloud/rackspace/rax.py | 8 -------- tests/sanity/ignore-2.10.txt | 2 -- tests/sanity/ignore-2.11.txt | 2 -- tests/sanity/ignore-2.9.txt | 2 -- 5 files changed, 2 insertions(+), 14 deletions(-) create mode 100644 changelogs/fragments/2020-remove-unused-param-in-rax.yml diff --git a/changelogs/fragments/2020-remove-unused-param-in-rax.yml b/changelogs/fragments/2020-remove-unused-param-in-rax.yml new file mode 100644 index 0000000000..333548f0b9 --- /dev/null +++ b/changelogs/fragments/2020-remove-unused-param-in-rax.yml @@ -0,0 +1,2 @@ +removed_features: + - rax - unused parameter ``service`` removed (https://github.com/ansible-collections/community.general/pull/2020). 
diff --git a/plugins/modules/cloud/rackspace/rax.py b/plugins/modules/cloud/rackspace/rax.py index d2d9119b02..cbaa0a57d2 100644 --- a/plugins/modules/cloud/rackspace/rax.py +++ b/plugins/modules/cloud/rackspace/rax.py @@ -817,7 +817,6 @@ def main(): meta=dict(type='dict', default={}), name=dict(), networks=dict(type='list', elements='str', default=['public', 'private']), - service=dict(), state=dict(default='present', choices=['present', 'absent']), user_data=dict(no_log=True), wait=dict(default=False, type='bool'), @@ -833,13 +832,6 @@ def main(): if not HAS_PYRAX: module.fail_json(msg='pyrax is required for this module') - service = module.params.get('service') - - if service is not None: - module.fail_json(msg='The "service" attribute has been deprecated, ' - 'please remove "service: cloudservers" from your ' - 'playbook pertaining to the "rax" module') - auto_increment = module.params.get('auto_increment') boot_from_volume = module.params.get('boot_from_volume') boot_volume = module.params.get('boot_volume') diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index bb78528220..2271765963 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -64,8 +64,6 @@ plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-no plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed -plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type -plugins/modules/cloud/rackspace/rax.py validate-modules:undocumented-parameter plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements diff --git 
a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index c30978e247..e01f0ee998 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -63,8 +63,6 @@ plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-no plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed -plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type -plugins/modules/cloud/rackspace/rax.py validate-modules:undocumented-parameter plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 0782b31593..40eef942b4 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -94,8 +94,6 @@ plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:deprecation-m plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:invalid-documentation plugins/modules/cloud/rackspace/rax.py use-argspec-type-path -plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type -plugins/modules/cloud/rackspace/rax.py validate-modules:undocumented-parameter plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:deprecation-mismatch From 3162ed67959a74aa811a87ba4f2c70d671d04bf9 Mon Sep 17 00:00:00 2001 From: dacodas Date: Mon, 15 
Mar 2021 05:35:34 -0700 Subject: [PATCH 0110/3093] allow passing the --allow-root flag to kibana_plugin module (#2014) * kibana_plugin module parameter force is a boolean * allow passing the --allow-root flag to kibana_plugin module * add changelog fragment for kibana_plugin --allow-root Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein --- .../2014-allow-root-for-kibana-plugin.yaml | 2 ++ .../modules/database/misc/kibana_plugin.py | 34 ++++++++++++++----- 2 files changed, 28 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml diff --git a/changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml b/changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml new file mode 100644 index 0000000000..6420203888 --- /dev/null +++ b/changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml @@ -0,0 +1,2 @@ +minor_changes: + - kibana_plugin - add parameter for passing ``--allow-root`` flag to kibana and kibana-plugin commands (https://github.com/ansible-collections/community.general/pull/2014). diff --git a/plugins/modules/database/misc/kibana_plugin.py b/plugins/modules/database/misc/kibana_plugin.py index 33bc86229b..e8daf2ff58 100644 --- a/plugins/modules/database/misc/kibana_plugin.py +++ b/plugins/modules/database/misc/kibana_plugin.py @@ -58,7 +58,13 @@ options: description: - Delete and re-install the plugin. Can be useful for plugins update. type: bool - default: 'no' + default: false + allow_root: + description: + - Whether to allow C(kibana) and C(kibana-plugin) to be run as root. Passes the C(--allow-root) flag to these commands. 
+ type: bool + default: false + version_added: 2.3.0 ''' EXAMPLES = ''' @@ -152,7 +158,7 @@ def parse_error(string): return string -def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version='4.6'): +def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, kibana_version='4.6'): if LooseVersion(kibana_version) > LooseVersion('4.6'): kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') cmd_args = [kibana_plugin_bin, "install"] @@ -169,6 +175,9 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version if timeout: cmd_args.append("--timeout %s" % timeout) + if allow_root: + cmd_args.append('--allow-root') + cmd = " ".join(cmd_args) if module.check_mode: @@ -182,13 +191,16 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version return True, cmd, out, err -def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'): +def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4.6'): if LooseVersion(kibana_version) > LooseVersion('4.6'): kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin') cmd_args = [kibana_plugin_bin, "remove", plugin_name] else: cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name] + if allow_root: + cmd_args.append('--allow-root') + cmd = " ".join(cmd_args) if module.check_mode: @@ -202,8 +214,12 @@ def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'): return True, cmd, out, err -def get_kibana_version(module, plugin_bin): +def get_kibana_version(module, plugin_bin, allow_root): cmd_args = [plugin_bin, '--version'] + + if allow_root: + cmd_args.append('--allow-root') + cmd = " ".join(cmd_args) rc, out, err = module.run_command(cmd) if rc != 0: @@ -222,7 +238,8 @@ def main(): plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"), plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"), 
version=dict(default=None), - force=dict(default="no", type="bool") + force=dict(default=False, type="bool"), + allow_root=dict(default=False, type="bool"), ), supports_check_mode=True, ) @@ -235,10 +252,11 @@ def main(): plugin_dir = module.params["plugin_dir"] version = module.params["version"] force = module.params["force"] + allow_root = module.params["allow_root"] changed, cmd, out, err = False, '', '', '' - kibana_version = get_kibana_version(module, plugin_bin) + kibana_version = get_kibana_version(module, plugin_bin, allow_root) present = is_plugin_present(parse_plugin_repo(name), plugin_dir) @@ -252,10 +270,10 @@ def main(): if state == "present": if force: remove_plugin(module, plugin_bin, name) - changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, kibana_version) + changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, allow_root, kibana_version) elif state == "absent": - changed, cmd, out, err = remove_plugin(module, plugin_bin, name, kibana_version) + changed, cmd, out, err = remove_plugin(module, plugin_bin, name, allow_root, kibana_version) module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) From 0f61ae4841a9837fd6318e5d0f24bdc202d78689 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Mon, 15 Mar 2021 08:39:23 -0400 Subject: [PATCH 0111/3093] Adding tags as module parameter to proxmox_kvm (#2000) * Adding tags as module parameter * Added changelog fragment * Correcting typo in changelog fragment * Correcting punctuation in docs * Including version to tags parameter description Co-authored-by: Felix Fontein * Correct tag validation and parsing logic condition Original test was for key and not value Co-authored-by: Felix Fontein * Improving usability with default null behavior * Removing default case and related unneccessary complexity * Display regex in tags description as code Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein 
--- .../2000-proxmox_kvm-tag-support.yml | 3 +++ plugins/modules/cloud/misc/proxmox_kvm.py | 19 ++++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2000-proxmox_kvm-tag-support.yml diff --git a/changelogs/fragments/2000-proxmox_kvm-tag-support.yml b/changelogs/fragments/2000-proxmox_kvm-tag-support.yml new file mode 100644 index 0000000000..d4084ecd67 --- /dev/null +++ b/changelogs/fragments/2000-proxmox_kvm-tag-support.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- proxmox_kvm - added new module parameter ``tags`` for use with PVE 6+ (https://github.com/ansible-collections/community.general/pull/2000). diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 54de76944c..2dcb1ab573 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -425,6 +425,14 @@ options: option has a default of C(no). Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0. type: bool + tags: + description: + - List of tags to apply to the VM instance. + - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]). + - Tags are only available in Proxmox 6+. + type: list + elements: str + version_added: 2.3.0 target: description: - Target node. Only allowed if the original VM is on shared storage. 
@@ -858,7 +866,7 @@ def wait_for_task(module, proxmox, node, taskid): def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs): # Available only in PVE 4 only_v4 = ['force', 'protection', 'skiplock'] - only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig'] + only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags'] # valide clone parameters valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target'] @@ -928,6 +936,13 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock if searchdomains: kwargs['searchdomain'] = ' '.join(searchdomains) + # VM tags are expected to be valid and presented as a comma/semi-colon delimited string + if 'tags' in kwargs: + for tag in kwargs['tags']: + if not re.match(r'^[a-z0-9_][a-z0-9_\-\+\.]*$', tag): + module.fail_json(msg='%s is not a valid tag' % tag) + kwargs['tags'] = ",".join(kwargs['tags']) + # -args and skiplock require root@pam user - but can not use api tokens if module.params['api_user'] == "root@pam" and module.params['args'] is None: if not update and module.params['proxmox_default_behavior'] == 'compatibility': @@ -1063,6 +1078,7 @@ def main(): state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']), storage=dict(type='str'), tablet=dict(type='bool'), + tags=dict(type='list', elements='str'), target=dict(type='str'), tdf=dict(type='bool'), template=dict(type='bool'), @@ -1267,6 +1283,7 @@ def main(): startdate=module.params['startdate'], startup=module.params['startup'], tablet=module.params['tablet'], + tags=module.params['tags'], target=module.params['target'], tdf=module.params['tdf'], template=module.params['template'], From 4fbef900e193f887329930816b822e2270f1d7c9 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 18 Mar 2021 01:39:49 +1300 Subject: [PATCH 0112/3093] Improvements and fixes to ModuleHelper, with 
(some) tests. (#2024) * Improvements and fixes to ModuleHelper, with (some) tests. * added changelog fragment * adjusted changelog frag - get_bin_path() handling is actually a bugfix --- .../fragments/2024-module-helper-fixes.yml | 4 ++++ plugins/module_utils/module_helper.py | 8 +++++++- .../plugins/module_utils/test_module_helper.py | 16 +++++++++++++++- 3 files changed, 26 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2024-module-helper-fixes.yml diff --git a/changelogs/fragments/2024-module-helper-fixes.yml b/changelogs/fragments/2024-module-helper-fixes.yml new file mode 100644 index 0000000000..3ce3cc71dc --- /dev/null +++ b/changelogs/fragments/2024-module-helper-fixes.yml @@ -0,0 +1,4 @@ +bugfixes: + - module_helper module utils - actually ignoring formatting of parameters with value ``None`` (https://github.com/ansible-collections/community.general/pull/2024). + - module_helper module utils - handling ``ModuleHelperException`` now properly calls ``fail_json()`` (https://github.com/ansible-collections/community.general/pull/2024). + - module_helper module utils - use the command name as-is in ``CmdMixin`` if it fails ``get_bin_path()`` - allowing full path names to be passed (https://github.com/ansible-collections/community.general/pull/2024). 
diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index 3d145e713e..caf915abbf 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -93,6 +93,8 @@ class ArgFormat(object): self.arg_format = (self.stars_deco(stars))(self.arg_format) def to_text(self, value): + if value is None: + return [] func = self.arg_format return [str(p) for p in func(value)] @@ -121,6 +123,7 @@ def module_fails_on_exception(func): except ModuleHelperException as e: if e.update_output: self.update_output(e.update_output) + self.module.fail_json(changed=False, msg=e.msg, exception=traceback.format_exc(), output=self.output, vars=self.vars) except Exception as e: self.vars.msg = "Module failed with exception: {0}".format(str(e).strip()) self.vars.exception = traceback.format_exc() @@ -292,7 +295,10 @@ class CmdMixin(object): extra_params = extra_params or dict() cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command) - cmd_args[0] = self.module.get_bin_path(cmd_args[0]) + try: + cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True) + except ValueError: + pass param_list = params if params else self.module.params.keys() for param in param_list: diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index fb7746a91a..82a8f2c144 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -22,24 +22,38 @@ ARG_FORMATS = dict( True, ["--superflag"]), simple_boolean_false=("--superflag", ArgFormat.BOOLEAN, 0, False, []), + simple_boolean_none=("--superflag", ArgFormat.BOOLEAN, 0, + None, []), single_printf=("--param=%s", ArgFormat.PRINTF, 0, "potatoes", ["--param=potatoes"]), single_printf_no_substitution=("--param", ArgFormat.PRINTF, 0, "potatoes", ["--param"]), + single_printf_none=("--param=%s", ArgFormat.PRINTF, 0, + None, []), 
multiple_printf=(["--param", "free-%s"], ArgFormat.PRINTF, 0, "potatoes", ["--param", "free-potatoes"]), single_format=("--param={0}", ArgFormat.FORMAT, 0, "potatoes", ["--param=potatoes"]), + single_format_none=("--param={0}", ArgFormat.FORMAT, 0, + None, []), single_format_no_substitution=("--param", ArgFormat.FORMAT, 0, "potatoes", ["--param"]), multiple_format=(["--param", "free-{0}"], ArgFormat.FORMAT, 0, "potatoes", ["--param", "free-potatoes"]), + multiple_format_none=(["--param", "free-{0}"], ArgFormat.FORMAT, 0, + None, []), single_lambda_0star=((lambda v: ["piggies=[{0},{1},{2}]".format(v[0], v[1], v[2])]), None, 0, ['a', 'b', 'c'], ["piggies=[a,b,c]"]), + single_lambda_0star_none=((lambda v: ["piggies=[{0},{1},{2}]".format(v[0], v[1], v[2])]), None, 0, + None, []), single_lambda_1star=((lambda a, b, c: ["piggies=[{0},{1},{2}]".format(a, b, c)]), None, 1, ['a', 'b', 'c'], ["piggies=[a,b,c]"]), + single_lambda_1star_none=((lambda a, b, c: ["piggies=[{0},{1},{2}]".format(a, b, c)]), None, 1, + None, []), single_lambda_2star=(single_lambda_2star, None, 2, - dict(z='c', x='a', y='b'), ["piggies=[a,b,c]"]) + dict(z='c', x='a', y='b'), ["piggies=[a,b,c]"]), + single_lambda_2star_none=(single_lambda_2star, None, 2, + None, []), ) ARG_FORMATS_IDS = sorted(ARG_FORMATS.keys()) From fe61be3e111c4d5111d359fe5f667530c55a858d Mon Sep 17 00:00:00 2001 From: abarbare Date: Thu, 18 Mar 2021 23:06:56 +0100 Subject: [PATCH 0113/3093] fix: scaleway inventory pagination (#2036) * fix: scaleway inventory pagination * add changelog * Update changelogs/fragments/2036-scaleway-inventory.yml Co-authored-by: Felix Fontein Co-authored-by: Antoine Barbare Co-authored-by: Felix Fontein --- changelogs/fragments/2036-scaleway-inventory.yml | 3 +++ plugins/module_utils/scaleway.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2036-scaleway-inventory.yml diff --git a/changelogs/fragments/2036-scaleway-inventory.yml 
b/changelogs/fragments/2036-scaleway-inventory.yml new file mode 100644 index 0000000000..44161306ac --- /dev/null +++ b/changelogs/fragments/2036-scaleway-inventory.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - scaleway inventory plugin - fix pagination on scaleway inventory plugin (https://github.com/ansible-collections/community.general/pull/2036). diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index f5107feda2..3c73e92bb8 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -39,7 +39,7 @@ class ScalewayException(Exception): R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)" (,<[^>]+>;\srel="(first|previous|next|last)")*''' # Specify a single relation, for iteration and string extraction purposes -R_RELATION = r'<(?P<target_IP>[^>]+)>; rel="(?P<relation>first|previous|next|last)"' +R_RELATION = r'<?(?P<target_IP>[^>]+)>; rel="(?P<relation>first|previous|next|last)"' def parse_pagination_link(header): From 8225b745f30c78ac271721405d2a274c0b1d6af4 Mon Sep 17 00:00:00 2001 From: Charlie Kenney Date: Thu, 18 Mar 2021 18:20:12 -0400 Subject: [PATCH 0114/3093] update linode team (#2039) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index c9a62d5d29..8f073e6c6f 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1007,7 +1007,7 @@ macros: team_ipa: Akasurde Nosmoht fxfitz team_jboss: Wolfant jairojunior wbrefvem team_keycloak: eikef ndclt - team_linode: InTheCloudDan decentral1se displague rmcintosh + team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1 From 0b2ebabd29a7ac1e6a4278318f4c8a0cc436132a Mon Sep 17 00:00:00 2001 From: Bill Dodd Date: Fri, 19 Mar 2021 13:06:57 -0500 Subject: [PATCH 0115/3093] Fix 
IndexError in SetManagerNic (#2040) * fix IndexError in SetManagerNic * add changelog fragment --- .../2040-fix-index-error-in-redfish-set-manager-nic.yml | 2 ++ plugins/module_utils/redfish_utils.py | 4 ++++ 2 files changed, 6 insertions(+) create mode 100644 changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml diff --git a/changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml b/changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml new file mode 100644 index 0000000000..04d9a11101 --- /dev/null +++ b/changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml @@ -0,0 +1,2 @@ +bugfixes: + - redfish_config module, redfish_utils module utils - fix IndexError in ``SetManagerNic`` command (https://github.com/ansible-collections/community.general/issues/1692). diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 0fd421a3e2..67a17bd8d1 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -2672,6 +2672,10 @@ class RedfishUtils(object): need_change = True # type is list if isinstance(set_value, list): + if len(set_value) != len(cur_value): + # if arrays are not the same len, no need to check each element + need_change = True + continue for i in range(len(set_value)): for subprop in payload[property][i].keys(): if subprop not in target_ethernet_current_setting[property][i]: From 79fb3e9852531f4476105031138c0f91fce8f9f3 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Fri, 19 Mar 2021 14:18:05 -0400 Subject: [PATCH 0116/3093] Adding purge parameter to proxmox for use with lxc delete requests (#2013) * added purge as optional module parameter * Adding changelog fragment * Adding version to documentation for purge Co-authored-by: Felix Fontein * Updating changelog Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2013-proxmox-purge-parameter.yml | 3 +++ plugins/modules/cloud/misc/proxmox.py | 
18 +++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2013-proxmox-purge-parameter.yml diff --git a/changelogs/fragments/2013-proxmox-purge-parameter.yml b/changelogs/fragments/2013-proxmox-purge-parameter.yml new file mode 100644 index 0000000000..6c681e5a19 --- /dev/null +++ b/changelogs/fragments/2013-proxmox-purge-parameter.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- proxmox - added ``purge`` module parameter for use when deleting lxc's with HA options (https://github.com/ansible-collections/community.general/pull/2013). diff --git a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py index b5040bc659..422c108c35 100644 --- a/plugins/modules/cloud/misc/proxmox.py +++ b/plugins/modules/cloud/misc/proxmox.py @@ -123,6 +123,15 @@ options: - with states C(stopped) , C(restarted) allow to force stop instance type: bool default: 'no' + purge: + description: + - Remove container from all related configurations. + - For example backup jobs, replication jobs, or HA. + - Related ACLs and Firewall entries will always be removed. + - Used with state C(absent). + type: bool + default: false + version_added: 2.3.0 state: description: - Indicate desired state of the instance @@ -506,6 +515,7 @@ def main(): searchdomain=dict(), timeout=dict(type='int', default=30), force=dict(type='bool', default=False), + purge=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']), pubkey=dict(type='str', default=None), unprivileged=dict(type='bool', default=False), @@ -686,7 +696,13 @@ def main(): if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." 
% vmid) - taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid) + delete_params = {} + + if module.params['purge']: + delete_params['purge'] = 1 + + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid, **delete_params) + while timeout: if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'): From efd441407f04630958d5cd684d5c837e0a33f2f4 Mon Sep 17 00:00:00 2001 From: Bill Dodd Date: Fri, 19 Mar 2021 15:14:33 -0500 Subject: [PATCH 0117/3093] Add support for Redfish session create, delete, and authenticate (#2027) * Add support for Redfish session create and delete * add changelog fragment * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...ish-session-create-delete-authenticate.yml | 2 + plugins/module_utils/redfish_utils.py | 112 ++++++++++++++---- .../redfish/idrac_redfish_command.py | 24 +++- .../redfish/idrac_redfish_config.py | 24 +++- .../redfish/idrac_redfish_info.py | 24 +++- .../redfish/redfish_command.py | 74 +++++++++--- .../redfish/redfish_config.py | 24 +++- .../remote_management/redfish/redfish_info.py | 24 +++- 8 files changed, 246 insertions(+), 62 deletions(-) create mode 100644 changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml diff --git a/changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml b/changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml new file mode 100644 index 0000000000..b5c22b9502 --- /dev/null +++ b/changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_* modules, redfish_utils module utils - add support for Redfish session create, delete, and authenticate (https://github.com/ansible-collections/community.general/issues/1975). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 67a17bd8d1..d8cc4061f8 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -38,13 +38,34 @@ class RedfishUtils(object): self.data_modification = data_modification self._init_session() + def _auth_params(self, headers): + """ + Return tuple of required authentication params based on the presence + of a token in the self.creds dict. If using a token, set the + X-Auth-Token header in the `headers` param. + + :param headers: dict containing headers to send in request + :return: tuple of username, password and force_basic_auth + """ + if self.creds.get('token'): + username = None + password = None + force_basic_auth = False + headers['X-Auth-Token'] = self.creds['token'] + else: + username = self.creds['user'] + password = self.creds['pswd'] + force_basic_auth = True + return username, password, force_basic_auth + # The following functions are to send GET/POST/PATCH/DELETE requests def get_request(self, uri): + req_headers = dict(GET_HEADERS) + username, password, basic_auth = self._auth_params(req_headers) try: - resp = open_url(uri, method="GET", headers=GET_HEADERS, - url_username=self.creds['user'], - url_password=self.creds['pswd'], - force_basic_auth=True, validate_certs=False, + resp = open_url(uri, method="GET", headers=req_headers, + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', use_proxy=True, timeout=self.timeout) data = json.loads(to_native(resp.read())) @@ -65,14 +86,16 @@ class RedfishUtils(object): return {'ret': True, 'data': data, 'headers': headers} def post_request(self, uri, pyld): + req_headers = dict(POST_HEADERS) + username, password, basic_auth = self._auth_params(req_headers) try: resp = open_url(uri, data=json.dumps(pyld), - headers=POST_HEADERS, method="POST", - url_username=self.creds['user'], - 
url_password=self.creds['pswd'], - force_basic_auth=True, validate_certs=False, + headers=req_headers, method="POST", + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', use_proxy=True, timeout=self.timeout) + headers = dict((k.lower(), v) for (k, v) in resp.info().items()) except HTTPError as e: msg = self._get_extended_message(e) return {'ret': False, @@ -86,10 +109,10 @@ class RedfishUtils(object): except Exception as e: return {'ret': False, 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))} - return {'ret': True, 'resp': resp} + return {'ret': True, 'headers': headers, 'resp': resp} def patch_request(self, uri, pyld): - headers = PATCH_HEADERS + req_headers = dict(PATCH_HEADERS) r = self.get_request(uri) if r['ret']: # Get etag from etag header or @odata.etag property @@ -97,15 +120,13 @@ class RedfishUtils(object): if not etag: etag = r['data'].get('@odata.etag') if etag: - # Make copy of headers and add If-Match header - headers = dict(headers) - headers['If-Match'] = etag + req_headers['If-Match'] = etag + username, password, basic_auth = self._auth_params(req_headers) try: resp = open_url(uri, data=json.dumps(pyld), - headers=headers, method="PATCH", - url_username=self.creds['user'], - url_password=self.creds['pswd'], - force_basic_auth=True, validate_certs=False, + headers=req_headers, method="PATCH", + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', use_proxy=True, timeout=self.timeout) except HTTPError as e: @@ -124,13 +145,14 @@ class RedfishUtils(object): return {'ret': True, 'resp': resp} def delete_request(self, uri, pyld=None): + req_headers = dict(DELETE_HEADERS) + username, password, basic_auth = self._auth_params(req_headers) try: data = json.dumps(pyld) if pyld else None resp = open_url(uri, data=data, - headers=DELETE_HEADERS, method="DELETE", - url_username=self.creds['user'], - 
url_password=self.creds['pswd'], - force_basic_auth=True, validate_certs=False, + headers=req_headers, method="DELETE", + url_username=username, url_password=password, + force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', use_proxy=True, timeout=self.timeout) except HTTPError as e: @@ -1192,6 +1214,54 @@ class RedfishUtils(object): return {'ret': True, 'changed': True, 'msg': "Clear all sessions successfully"} + def create_session(self): + if not self.creds.get('user') or not self.creds.get('pswd'): + return {'ret': False, 'msg': + 'Must provide the username and password parameters for ' + 'the CreateSession command'} + + payload = { + 'UserName': self.creds['user'], + 'Password': self.creds['pswd'] + } + response = self.post_request(self.root_uri + self.sessions_uri, payload) + if response['ret'] is False: + return response + + headers = response['headers'] + if 'x-auth-token' not in headers: + return {'ret': False, 'msg': + 'The service did not return the X-Auth-Token header in ' + 'the response from the Sessions collection POST'} + + if 'location' not in headers: + self.module.warn( + 'The service did not return the Location header for the ' + 'session URL in the response from the Sessions collection ' + 'POST') + session_uri = None + else: + session_uri = urlparse(headers.get('location')).path + + session = dict() + session['token'] = headers.get('x-auth-token') + session['uri'] = session_uri + return {'ret': True, 'changed': True, 'session': session, + 'msg': 'Session created successfully'} + + def delete_session(self, session_uri): + if not session_uri: + return {'ret': False, 'msg': + 'Must provide the session_uri parameter for the ' + 'DeleteSession command'} + + response = self.delete_request(self.root_uri + session_uri) + if response['ret'] is False: + return response + + return {'ret': True, 'changed': True, + 'msg': 'Session deleted successfully'} + def get_firmware_update_capabilities(self): result = {} response = 
self.get_request(self.root_uri + self.update_uri) diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_command.py b/plugins/modules/remote_management/redfish/idrac_redfish_command.py index 49e12e811a..a637d15631 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_command.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_command.py @@ -33,15 +33,18 @@ options: - Base URI of OOB controller type: str username: - required: true description: - User for authentication with OOB controller type: str password: - required: true description: - Password for authentication with OOB controller type: str + auth_token: + description: + - Security token for authentication with OOB controller + type: str + version_added: 2.3.0 timeout: description: - Timeout in seconds for URL requests to OOB controller @@ -137,11 +140,21 @@ def main(): category=dict(required=True), command=dict(required=True, type='list', elements='str'), baseuri=dict(required=True), - username=dict(required=True), - password=dict(required=True, no_log=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), timeout=dict(type='int', default=10), resource_id=dict() ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], supports_check_mode=False ) @@ -150,7 +163,8 @@ def main(): # admin credentials used for authentication creds = {'user': module.params['username'], - 'pswd': module.params['password']} + 'pswd': module.params['password'], + 'token': module.params['auth_token']} # timeout timeout = module.params['timeout'] diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/plugins/modules/remote_management/redfish/idrac_redfish_config.py index a3525fa9c8..e27ef6a2a6 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_config.py +++ 
b/plugins/modules/remote_management/redfish/idrac_redfish_config.py @@ -36,15 +36,18 @@ options: - Base URI of iDRAC type: str username: - required: true description: - User for authentication with iDRAC type: str password: - required: true description: - Password for authentication with iDRAC type: str + auth_token: + description: + - Security token for authentication with OOB controller + type: str + version_added: 2.3.0 manager_attributes: required: false description: @@ -232,12 +235,22 @@ def main(): category=dict(required=True), command=dict(required=True, type='list', elements='str'), baseuri=dict(required=True), - username=dict(required=True), - password=dict(required=True, no_log=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), manager_attributes=dict(type='dict', default={}), timeout=dict(type='int', default=10), resource_id=dict() ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], supports_check_mode=False ) @@ -246,7 +259,8 @@ def main(): # admin credentials used for authentication creds = {'user': module.params['username'], - 'pswd': module.params['password']} + 'pswd': module.params['password'], + 'token': module.params['auth_token']} # timeout timeout = module.params['timeout'] diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/plugins/modules/remote_management/redfish/idrac_redfish_info.py index 42a5efcba9..65fbd5a58b 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_info.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_info.py @@ -37,15 +37,18 @@ options: - Base URI of iDRAC controller type: str username: - required: true description: - User for authentication with iDRAC controller type: str password: - required: true description: - Password for authentication with iDRAC controller type: str + auth_token: + description: + - 
Security token for authentication with OOB controller + type: str + version_added: 2.3.0 timeout: description: - Timeout in seconds for URL requests to OOB controller @@ -174,10 +177,20 @@ def main(): category=dict(required=True), command=dict(required=True, type='list', elements='str'), baseuri=dict(required=True), - username=dict(required=True), - password=dict(required=True, no_log=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), timeout=dict(type='int', default=10) ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], supports_check_mode=False ) is_old_facts = module._name in ('idrac_redfish_facts', 'community.general.idrac_redfish_facts') @@ -191,7 +204,8 @@ def main(): # admin credentials used for authentication creds = {'user': module.params['username'], - 'pswd': module.params['password']} + 'pswd': module.params['password'], + 'token': module.params['auth_token']} # timeout timeout = module.params['timeout'] diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index 9e23fd4626..a2f290d16a 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -35,15 +35,23 @@ options: - Base URI of OOB controller type: str username: - required: true description: - Username for authentication with OOB controller type: str password: - required: true description: - Password for authentication with OOB controller type: str + auth_token: + description: + - Security token for authentication with OOB controller + type: str + version_added: 2.3.0 + session_uri: + description: + - URI of the session resource + type: str + version_added: 2.3.0 id: required: false aliases: [ account_id ] @@ -284,15 +292,6 @@ EXAMPLES = ''' category: Systems command: 
DisableBootOverride - - name: Set chassis indicator LED to blink - community.general.redfish_command: - category: Chassis - command: IndicatorLedBlink - resource_id: 1U - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - - name: Add user community.general.redfish_command: category: Accounts @@ -414,6 +413,31 @@ EXAMPLES = ''' password: "{{ password }}" timeout: 20 + - name: Create session + community.general.redfish_command: + category: Sessions + command: CreateSession + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + + - name: Set chassis indicator LED to blink using security token for auth + community.general.redfish_command: + category: Chassis + command: IndicatorLedBlink + resource_id: 1U + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + + - name: Delete session using security token created by CreateSesssion above + community.general.redfish_command: + category: Sessions + command: DeleteSession + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + session_uri: "{{ result.session.uri }}" + - name: Clear Sessions community.general.redfish_command: category: Sessions @@ -538,7 +562,7 @@ CATEGORY_COMMANDS_ALL = { "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser", "UpdateUserRole", "UpdateUserPassword", "UpdateUserName", "UpdateAccountServiceProperties"], - "Sessions": ["ClearSessions"], + "Sessions": ["ClearSessions", "CreateSession", "DeleteSession"], "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert", "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"], @@ -553,8 +577,10 @@ def main(): category=dict(required=True), command=dict(required=True, type='list', elements='str'), baseuri=dict(required=True), - username=dict(required=True), - password=dict(required=True, no_log=True), + username=dict(), + 
password=dict(no_log=True), + auth_token=dict(no_log=True), + session_uri=dict(), id=dict(aliases=["account_id"]), new_username=dict(aliases=["account_username"]), new_password=dict(aliases=["account_password"], no_log=True), @@ -590,6 +616,15 @@ def main(): ) ) ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], supports_check_mode=False ) @@ -598,7 +633,8 @@ def main(): # admin credentials used for authentication creds = {'user': module.params['username'], - 'pswd': module.params['password']} + 'pswd': module.params['password'], + 'token': module.params['auth_token']} # user to add/modify/delete user = {'account_id': module.params['id'], @@ -712,6 +748,10 @@ def main(): for command in command_list: if command == "ClearSessions": result = rf_utils.clear_sessions() + elif command == "CreateSession": + result = rf_utils.create_session() + elif command == "DeleteSession": + result = rf_utils.delete_session(module.params['session_uri']) elif category == "Manager": # execute only if we find a Manager service resource @@ -748,7 +788,9 @@ def main(): if result['ret'] is True: del result['ret'] changed = result.get('changed', True) - module.exit_json(changed=changed, msg='Action was successful') + session = result.get('session', dict()) + module.exit_json(changed=changed, session=session, + msg='Action was successful') else: module.fail_json(msg=to_native(result['msg'])) diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py index ecc170437b..5c1df16c4e 100644 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ b/plugins/modules/remote_management/redfish/redfish_config.py @@ -34,15 +34,18 @@ options: - Base URI of OOB controller type: str username: - required: true description: - User for authentication with OOB controller type: str password: - required: true 
description: - Password for authentication with OOB controller type: str + auth_token: + description: + - Security token for authentication with OOB controller + type: str + version_added: 2.3.0 bios_attributes: required: false description: @@ -219,8 +222,9 @@ def main(): category=dict(required=True), command=dict(required=True, type='list', elements='str'), baseuri=dict(required=True), - username=dict(required=True), - password=dict(required=True, no_log=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), bios_attributes=dict(type='dict', default={}), timeout=dict(type='int', default=10), boot_order=dict(type='list', elements='str', default=[]), @@ -235,6 +239,15 @@ def main(): default={} ) ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], supports_check_mode=False ) @@ -243,7 +256,8 @@ def main(): # admin credentials used for authentication creds = {'user': module.params['username'], - 'pswd': module.params['password']} + 'pswd': module.params['password'], + 'token': module.params['auth_token']} # timeout timeout = module.params['timeout'] diff --git a/plugins/modules/remote_management/redfish/redfish_info.py b/plugins/modules/remote_management/redfish/redfish_info.py index 7bf209b7f6..782115d464 100644 --- a/plugins/modules/remote_management/redfish/redfish_info.py +++ b/plugins/modules/remote_management/redfish/redfish_info.py @@ -37,15 +37,18 @@ options: - Base URI of OOB controller type: str username: - required: true description: - User for authentication with OOB controller type: str password: - required: true description: - Password for authentication with OOB controller type: str + auth_token: + description: + - Security token for authentication with OOB controller + type: str + version_added: 2.3.0 timeout: description: - Timeout in seconds for URL requests to OOB controller @@ -301,10 +304,20 @@ def 
main(): category=dict(type='list', elements='str', default=['Systems']), command=dict(type='list', elements='str'), baseuri=dict(required=True), - username=dict(required=True), - password=dict(required=True, no_log=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), timeout=dict(type='int', default=10) ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], supports_check_mode=False ) is_old_facts = module._name in ('redfish_facts', 'community.general.redfish_facts') @@ -315,7 +328,8 @@ def main(): # admin credentials used for authentication creds = {'user': module.params['username'], - 'pswd': module.params['password']} + 'pswd': module.params['password'], + 'token': module.params['auth_token']} # timeout timeout = module.params['timeout'] From a23fc67f1fe11c37f1acfdf10fb69b8231618430 Mon Sep 17 00:00:00 2001 From: Mike Raineri Date: Sat, 20 Mar 2021 04:57:38 -0400 Subject: [PATCH 0118/3093] Adding xmadsen and renxulei as Redfish maintainers (#2047) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 8f073e6c6f..ce12bb6885 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1015,7 +1015,7 @@ macros: team_opennebula: ilicmilan meerkampdvv rsmontero xorel team_oracle: manojmeda mross22 nalsaber team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16 - team_redfish: billdodd mraineri tomasg2012 + team_redfish: billdodd mraineri tomasg2012 xmadsen renxulei team_rhn: FlossWare alikins barnabycourt vritant team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l From 24f8be834af2c7f49a326821c22d58c30c8fa0d5 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 20 Mar 2021 13:45:42 +0100 
Subject: [PATCH 0119/3093] Fix nios modules to work with ansible-core 2.11 (#2057) * Fix nios modules to work with ansible-core 2.11. * Adjust tests. --- changelogs/fragments/2057-nios-devel.yml | 2 ++ plugins/module_utils/net_tools/nios/api.py | 15 ++++++++------- .../module_utils/net_tools/nios/test_api.py | 13 ++++++++----- .../modules/net_tools/nios/test_nios_a_record.py | 5 ++++- .../net_tools/nios/test_nios_aaaa_record.py | 5 ++++- .../net_tools/nios/test_nios_cname_record.py | 5 ++++- .../modules/net_tools/nios/test_nios_dns_view.py | 5 ++++- .../net_tools/nios/test_nios_host_record.py | 5 ++++- .../modules/net_tools/nios/test_nios_mx_record.py | 5 ++++- .../net_tools/nios/test_nios_naptr_record.py | 5 ++++- .../net_tools/nios/test_nios_network_view.py | 5 ++++- .../modules/net_tools/nios/test_nios_nsgroup.py | 6 +++++- .../net_tools/nios/test_nios_srv_record.py | 5 ++++- 13 files changed, 59 insertions(+), 22 deletions(-) create mode 100644 changelogs/fragments/2057-nios-devel.yml diff --git a/changelogs/fragments/2057-nios-devel.yml b/changelogs/fragments/2057-nios-devel.yml new file mode 100644 index 0000000000..be9f8a970f --- /dev/null +++ b/changelogs/fragments/2057-nios-devel.yml @@ -0,0 +1,2 @@ +bugfixes: +- "nios* modules - fix modules to work with ansible-core 2.11 (https://github.com/ansible-collections/community.general/pull/2057)." 
diff --git a/plugins/module_utils/net_tools/nios/api.py b/plugins/module_utils/net_tools/nios/api.py index eadc66fc37..4a771e49af 100644 --- a/plugins/module_utils/net_tools/nios/api.py +++ b/plugins/module_utils/net_tools/nios/api.py @@ -18,6 +18,7 @@ from ansible.module_utils._text import to_native from ansible.module_utils.six import iteritems from ansible.module_utils._text import to_text from ansible.module_utils.basic import env_fallback +from ansible.module_utils.common.validation import check_type_dict try: from infoblox_client.connector import Connector @@ -399,11 +400,11 @@ class WapiModule(WapiBase): if 'ipv4addrs' in proposed_object: if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']: - ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip'] + ip_range = check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip'] proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range elif 'ipv4addr' in proposed_object: if 'nios_next_ip' in proposed_object['ipv4addr']: - ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip'] + ip_range = check_type_dict(proposed_object['ipv4addr'])['nios_next_ip'] proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range return proposed_object @@ -485,7 +486,7 @@ class WapiModule(WapiBase): if ('name' in obj_filter): # gets and returns the current object based on name/old_name passed try: - name_obj = self.module._check_type_dict(obj_filter['name']) + name_obj = check_type_dict(obj_filter['name']) old_name = name_obj['old_name'] new_name = name_obj['new_name'] except TypeError: @@ -521,7 +522,7 @@ class WapiModule(WapiBase): test_obj_filter['name'] = test_obj_filter['name'].lower() # resolves issue where multiple a_records with same name and different IP address try: - ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr']) + ipaddr_obj = check_type_dict(obj_filter['ipv4addr']) 
ipaddr = ipaddr_obj['old_ipv4addr'] except TypeError: ipaddr = obj_filter['ipv4addr'] @@ -530,7 +531,7 @@ class WapiModule(WapiBase): # resolves issue where multiple txt_records with same name and different text test_obj_filter = obj_filter try: - text_obj = self.module._check_type_dict(obj_filter['text']) + text_obj = check_type_dict(obj_filter['text']) txt = text_obj['old_text'] except TypeError: txt = obj_filter['text'] @@ -543,7 +544,7 @@ class WapiModule(WapiBase): # resolves issue where multiple a_records with same name and different IP address test_obj_filter = obj_filter try: - ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr']) + ipaddr_obj = check_type_dict(obj_filter['ipv4addr']) ipaddr = ipaddr_obj['old_ipv4addr'] except TypeError: ipaddr = obj_filter['ipv4addr'] @@ -553,7 +554,7 @@ class WapiModule(WapiBase): # resolves issue where multiple txt_records with same name and different text test_obj_filter = obj_filter try: - text_obj = self.module._check_type_dict(obj_filter['text']) + text_obj = check_type_dict(obj_filter['text']) txt = text_obj['old_text'] except TypeError: txt = obj_filter['text'] diff --git a/tests/unit/plugins/module_utils/net_tools/nios/test_api.py b/tests/unit/plugins/module_utils/net_tools/nios/test_api.py index 89fccb0a03..09cb1deb79 100644 --- a/tests/unit/plugins/module_utils/net_tools/nios/test_api.py +++ b/tests/unit/plugins/module_utils/net_tools/nios/test_api.py @@ -22,11 +22,14 @@ class TestNiosApi(unittest.TestCase): self.mock_connector = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.get_connector') self.mock_connector.start() + self.mock_check_type_dict = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.check_type_dict') + self.mock_check_type_dict_obj = self.mock_check_type_dict.start() def tearDown(self): super(TestNiosApi, self).tearDown() self.mock_connector.stop() + self.mock_check_type_dict.stop() def 
test_get_provider_spec(self): provider_options = ['host', 'username', 'password', 'validate_certs', 'silent_ssl_warnings', @@ -55,7 +58,7 @@ class TestNiosApi(unittest.TestCase): { "comment": "test comment", "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true", - "name": self.module._check_type_dict().__getitem__(), + "name": self.mock_check_type_dict_obj().__getitem__(), "extattrs": {} } ] @@ -143,7 +146,7 @@ class TestNiosApi(unittest.TestCase): kwargs = copy.deepcopy(test_object[0]) kwargs['extattrs']['Site']['value'] = 'update' - kwargs['name'] = self.module._check_type_dict().__getitem__() + kwargs['name'] = self.mock_check_type_dict_obj().__getitem__() del kwargs['_ref'] wapi = self._get_wapi(test_object) @@ -159,7 +162,7 @@ class TestNiosApi(unittest.TestCase): test_object = [{ "comment": "test comment", "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true", - "name": self.module._check_type_dict().__getitem__(), + "name": self.mock_check_type_dict_obj().__getitem__(), "extattrs": {'Site': {'value': 'test'}} }] @@ -190,7 +193,7 @@ class TestNiosApi(unittest.TestCase): res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) - wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__()}) + wapi.create_object.assert_called_once_with('testobject', {'name': self.mock_check_type_dict_obj().__getitem__()}) def test_wapi_delete(self): self.module.params = {'provider': None, 'state': 'absent', 'name': 'ansible', @@ -240,7 +243,7 @@ class TestNiosApi(unittest.TestCase): kwargs = test_object[0].copy() ref = kwargs.pop('_ref') kwargs['comment'] = 'updated comment' - kwargs['name'] = self.module._check_type_dict().__getitem__() + kwargs['name'] = self.mock_check_type_dict_obj().__getitem__() del kwargs['network_view'] del kwargs['extattrs'] diff --git a/tests/unit/plugins/modules/net_tools/nios/test_nios_a_record.py b/tests/unit/plugins/modules/net_tools/nios/test_nios_a_record.py index 
4e51b89467..a1f9097854 100644 --- a/tests/unit/plugins/modules/net_tools/nios/test_nios_a_record.py +++ b/tests/unit/plugins/modules/net_tools/nios/test_nios_a_record.py @@ -40,11 +40,14 @@ class TestNiosARecordModule(TestNiosModule): self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_a_record.WapiModule.run') self.mock_wapi_run.start() self.load_config = self.mock_wapi_run.start() + self.mock_check_type_dict = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.check_type_dict') + self.mock_check_type_dict_obj = self.mock_check_type_dict.start() def tearDown(self): super(TestNiosARecordModule, self).tearDown() self.mock_wapi.stop() self.mock_wapi_run.stop() + self.mock_check_type_dict.stop() def _get_wapi(self, test_object): wapi = api.WapiModule(self.module) @@ -76,7 +79,7 @@ class TestNiosARecordModule(TestNiosModule): res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) - wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(), + wapi.create_object.assert_called_once_with('testobject', {'name': self.mock_check_type_dict_obj().__getitem__(), 'ipv4': '192.168.10.1'}) def test_nios_a_record_update_comment(self): diff --git a/tests/unit/plugins/modules/net_tools/nios/test_nios_aaaa_record.py b/tests/unit/plugins/modules/net_tools/nios/test_nios_aaaa_record.py index 83f1984593..efc67a2273 100644 --- a/tests/unit/plugins/modules/net_tools/nios/test_nios_aaaa_record.py +++ b/tests/unit/plugins/modules/net_tools/nios/test_nios_aaaa_record.py @@ -40,11 +40,14 @@ class TestNiosAAAARecordModule(TestNiosModule): self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_aaaa_record.WapiModule.run') self.mock_wapi_run.start() self.load_config = self.mock_wapi_run.start() + self.mock_check_type_dict = 
patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.check_type_dict') + self.mock_check_type_dict_obj = self.mock_check_type_dict.start() def tearDown(self): super(TestNiosAAAARecordModule, self).tearDown() self.mock_wapi.stop() self.mock_wapi_run.stop() + self.mock_check_type_dict.stop() def _get_wapi(self, test_object): wapi = api.WapiModule(self.module) @@ -76,7 +79,7 @@ class TestNiosAAAARecordModule(TestNiosModule): res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) - wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(), + wapi.create_object.assert_called_once_with('testobject', {'name': self.mock_check_type_dict_obj().__getitem__(), 'ipv6': '2001:0db8:85a3:0000:0000:8a2e:0370:7334'}) def test_nios_aaaa_record_update_comment(self): diff --git a/tests/unit/plugins/modules/net_tools/nios/test_nios_cname_record.py b/tests/unit/plugins/modules/net_tools/nios/test_nios_cname_record.py index 12f97243eb..b66520dd39 100644 --- a/tests/unit/plugins/modules/net_tools/nios/test_nios_cname_record.py +++ b/tests/unit/plugins/modules/net_tools/nios/test_nios_cname_record.py @@ -40,11 +40,14 @@ class TestNiosCNameRecordModule(TestNiosModule): self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_cname_record.WapiModule.run') self.mock_wapi_run.start() self.load_config = self.mock_wapi_run.start() + self.mock_check_type_dict = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.check_type_dict') + self.mock_check_type_dict_obj = self.mock_check_type_dict.start() def tearDown(self): super(TestNiosCNameRecordModule, self).tearDown() self.mock_wapi.stop() self.mock_wapi_run.stop() + self.mock_check_type_dict.stop() def _get_wapi(self, test_object): wapi = api.WapiModule(self.module) @@ -76,7 +79,7 @@ class TestNiosCNameRecordModule(TestNiosModule): res = wapi.run('testobject', test_spec) 
self.assertTrue(res['changed']) - wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(), + wapi.create_object.assert_called_once_with('testobject', {'name': self.mock_check_type_dict_obj().__getitem__(), 'canonical': 'realhost.ansible.com'}) def test_nios_a_record_update_comment(self): diff --git a/tests/unit/plugins/modules/net_tools/nios/test_nios_dns_view.py b/tests/unit/plugins/modules/net_tools/nios/test_nios_dns_view.py index 5d6fe90fd7..fa67d59440 100644 --- a/tests/unit/plugins/modules/net_tools/nios/test_nios_dns_view.py +++ b/tests/unit/plugins/modules/net_tools/nios/test_nios_dns_view.py @@ -40,11 +40,14 @@ class TestNiosDnsViewModule(TestNiosModule): self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_dns_view.WapiModule.run') self.mock_wapi_run.start() self.load_config = self.mock_wapi_run.start() + self.mock_check_type_dict = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.check_type_dict') + self.mock_check_type_dict_obj = self.mock_check_type_dict.start() def tearDown(self): super(TestNiosDnsViewModule, self).tearDown() self.mock_wapi.stop() self.mock_wapi_run.stop() + self.mock_check_type_dict.stop() def _get_wapi(self, test_object): wapi = api.WapiModule(self.module) @@ -75,7 +78,7 @@ class TestNiosDnsViewModule(TestNiosModule): res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) - wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__()}) + wapi.create_object.assert_called_once_with('testobject', {'name': self.mock_check_type_dict_obj().__getitem__()}) def test_nios_dns_view_update_comment(self): self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible-dns', diff --git a/tests/unit/plugins/modules/net_tools/nios/test_nios_host_record.py b/tests/unit/plugins/modules/net_tools/nios/test_nios_host_record.py index 
0f7dc58aed..05f29348e3 100644 --- a/tests/unit/plugins/modules/net_tools/nios/test_nios_host_record.py +++ b/tests/unit/plugins/modules/net_tools/nios/test_nios_host_record.py @@ -41,10 +41,13 @@ class TestNiosHostRecordModule(TestNiosModule): self.mock_wapi_run.start() self.load_config = self.mock_wapi_run.start() + self.mock_check_type_dict = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.check_type_dict') + self.mock_check_type_dict_obj = self.mock_check_type_dict.start() def tearDown(self): super(TestNiosHostRecordModule, self).tearDown() self.mock_wapi.stop() + self.mock_check_type_dict.stop() def _get_wapi(self, test_object): wapi = api.WapiModule(self.module) @@ -74,7 +77,7 @@ class TestNiosHostRecordModule(TestNiosModule): res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) - wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__()}) + wapi.create_object.assert_called_once_with('testobject', {'name': self.mock_check_type_dict_obj().__getitem__()}) def test_nios_host_record_remove(self): self.module.params = {'provider': None, 'state': 'absent', 'name': 'ansible', diff --git a/tests/unit/plugins/modules/net_tools/nios/test_nios_mx_record.py b/tests/unit/plugins/modules/net_tools/nios/test_nios_mx_record.py index 219e86bf5a..87f944ff4b 100644 --- a/tests/unit/plugins/modules/net_tools/nios/test_nios_mx_record.py +++ b/tests/unit/plugins/modules/net_tools/nios/test_nios_mx_record.py @@ -40,11 +40,14 @@ class TestNiosMXRecordModule(TestNiosModule): self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_mx_record.WapiModule.run') self.mock_wapi_run.start() self.load_config = self.mock_wapi_run.start() + self.mock_check_type_dict = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.check_type_dict') + self.mock_check_type_dict_obj = self.mock_check_type_dict.start() def 
tearDown(self): super(TestNiosMXRecordModule, self).tearDown() self.mock_wapi.stop() self.mock_wapi_run.stop() + self.mock_check_type_dict.stop() def _get_wapi(self, test_object): wapi = api.WapiModule(self.module) @@ -77,7 +80,7 @@ class TestNiosMXRecordModule(TestNiosModule): res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) - wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(), + wapi.create_object.assert_called_once_with('testobject', {'name': self.mock_check_type_dict_obj().__getitem__(), 'mx': 'mailhost.ansible.com', 'preference': 0}) def test_nios_mx_record_update_comment(self): diff --git a/tests/unit/plugins/modules/net_tools/nios/test_nios_naptr_record.py b/tests/unit/plugins/modules/net_tools/nios/test_nios_naptr_record.py index 510df69bbd..de2a6df5e5 100644 --- a/tests/unit/plugins/modules/net_tools/nios/test_nios_naptr_record.py +++ b/tests/unit/plugins/modules/net_tools/nios/test_nios_naptr_record.py @@ -40,11 +40,14 @@ class TestNiosNAPTRRecordModule(TestNiosModule): self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_naptr_record.WapiModule.run') self.mock_wapi_run.start() self.load_config = self.mock_wapi_run.start() + self.mock_check_type_dict = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.check_type_dict') + self.mock_check_type_dict_obj = self.mock_check_type_dict.start() def tearDown(self): super(TestNiosNAPTRRecordModule, self).tearDown() self.mock_wapi.stop() self.mock_wapi_run.stop() + self.mock_check_type_dict.stop() def _get_wapi(self, test_object): wapi = api.WapiModule(self.module) @@ -79,7 +82,7 @@ class TestNiosNAPTRRecordModule(TestNiosModule): res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) - wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(), + 
wapi.create_object.assert_called_once_with('testobject', {'name': self.mock_check_type_dict_obj().__getitem__(), 'order': '1000', 'preference': '10', 'replacement': 'replacement1.network.ansiblezone.com'}) diff --git a/tests/unit/plugins/modules/net_tools/nios/test_nios_network_view.py b/tests/unit/plugins/modules/net_tools/nios/test_nios_network_view.py index 9c38951b93..dc9b166bc7 100644 --- a/tests/unit/plugins/modules/net_tools/nios/test_nios_network_view.py +++ b/tests/unit/plugins/modules/net_tools/nios/test_nios_network_view.py @@ -40,11 +40,14 @@ class TestNiosNetworkViewModule(TestNiosModule): self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_network_view.WapiModule.run') self.mock_wapi_run.start() self.load_config = self.mock_wapi_run.start() + self.mock_check_type_dict = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.check_type_dict') + self.mock_check_type_dict_obj = self.mock_check_type_dict.start() def tearDown(self): super(TestNiosNetworkViewModule, self).tearDown() self.mock_wapi.stop() self.mock_wapi_run.stop() + self.mock_check_type_dict.stop() def _get_wapi(self, test_object): wapi = api.WapiModule(self.module) @@ -75,7 +78,7 @@ class TestNiosNetworkViewModule(TestNiosModule): res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) - wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__()}) + wapi.create_object.assert_called_once_with('testobject', {'name': self.mock_check_type_dict_obj().__getitem__()}) def test_nios_network_view_update_comment(self): self.module.params = {'provider': None, 'state': 'present', 'name': 'default', diff --git a/tests/unit/plugins/modules/net_tools/nios/test_nios_nsgroup.py b/tests/unit/plugins/modules/net_tools/nios/test_nios_nsgroup.py index 63f59bff29..6320f970c2 100644 --- a/tests/unit/plugins/modules/net_tools/nios/test_nios_nsgroup.py +++ 
b/tests/unit/plugins/modules/net_tools/nios/test_nios_nsgroup.py @@ -42,9 +42,13 @@ class TestNiosNSGroupModule(TestNiosModule): self.load_config = self.mock_wapi_run.start() + self.mock_check_type_dict = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.check_type_dict') + self.mock_check_type_dict_obj = self.mock_check_type_dict.start() + def tearDown(self): super(TestNiosNSGroupModule, self).tearDown() self.mock_wapi.stop() + self.mock_check_type_dict.stop() def _get_wapi(self, test_object): wapi = api.WapiModule(self.module) @@ -73,7 +77,7 @@ class TestNiosNSGroupModule(TestNiosModule): res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) - wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__()}) + wapi.create_object.assert_called_once_with('testobject', {'name': self.mock_check_type_dict_obj().__getitem__()}) def test_nios_nsgroup_remove(self): self.module.params = {'provider': None, 'state': 'absent', 'name': 'my-simple-group', diff --git a/tests/unit/plugins/modules/net_tools/nios/test_nios_srv_record.py b/tests/unit/plugins/modules/net_tools/nios/test_nios_srv_record.py index 39024657c7..48079d3a78 100644 --- a/tests/unit/plugins/modules/net_tools/nios/test_nios_srv_record.py +++ b/tests/unit/plugins/modules/net_tools/nios/test_nios_srv_record.py @@ -40,11 +40,14 @@ class TestNiosSRVRecordModule(TestNiosModule): self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_srv_record.WapiModule.run') self.mock_wapi_run.start() self.load_config = self.mock_wapi_run.start() + self.mock_check_type_dict = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.check_type_dict') + self.mock_check_type_dict_obj = self.mock_check_type_dict.start() def tearDown(self): super(TestNiosSRVRecordModule, self).tearDown() self.mock_wapi.stop() self.mock_wapi_run.stop() + 
self.mock_check_type_dict.stop() def _get_wapi(self, test_object): wapi = api.WapiModule(self.module) @@ -80,7 +83,7 @@ class TestNiosSRVRecordModule(TestNiosModule): res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) - wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(), + wapi.create_object.assert_called_once_with('testobject', {'name': self.mock_check_type_dict_obj().__getitem__(), 'port': 5080, 'target': 'service1.ansible.com', 'priority': 10, 'weight': 10}) def test_nios_srv_record_update_comment(self): From f5a9584ae64e6c1d3f6b9edb03214ca75ab63f61 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 21 Mar 2021 21:51:07 +1300 Subject: [PATCH 0120/3093] archive - created an integration test that archives broken links (#2063) * created an integration test that archives broken links * sanity fix --- .../targets/archive/tasks/broken-link.yml | 22 +++++++++++++++++++ .../targets/archive/tasks/main.yml | 3 +++ 2 files changed, 25 insertions(+) create mode 100644 tests/integration/targets/archive/tasks/broken-link.yml diff --git a/tests/integration/targets/archive/tasks/broken-link.yml b/tests/integration/targets/archive/tasks/broken-link.yml new file mode 100644 index 0000000000..b1e0fb752b --- /dev/null +++ b/tests/integration/targets/archive/tasks/broken-link.yml @@ -0,0 +1,22 @@ +--- +- name: Create broken link + file: + src: /nowhere + dest: "{{ output_dir }}/nowhere.txt" + state: link + force: yes + +- name: Archive broken link (tar.gz) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_broken_link.tar.gz" + +- name: Archive broken link (tar.bz2) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_broken_link.tar.bz2" + +- name: Archive broken link (zip) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_broken_link.zip" diff --git 
a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml index 51504e6bf8..19a1f6af0c 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -369,3 +369,6 @@ - name: import remove tests import_tasks: remove.yml + +- name: import broken-link tests + import_tasks: broken-link.yml From ff9f98795e1ec7fc5579e31135107239420dac3d Mon Sep 17 00:00:00 2001 From: quasd Date: Sun, 21 Mar 2021 12:09:49 +0200 Subject: [PATCH 0121/3093] ipa_sudorule add support for setting runasextusers (#2031) * Add support for setting runasextusers * fix formatting * add changelog fragment * Update plugins/modules/identity/ipa/ipa_sudorule.py Co-authored-by: Felix Fontein * Update changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml Co-authored-by: Felix Fontein Co-authored-by: quasd Co-authored-by: Felix Fontein --- .../2031-ipa_sudorule_add_runasextusers.yml | 3 ++ plugins/modules/identity/ipa/ipa_sudorule.py | 38 +++++++++++++++++-- 2 files changed, 37 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml diff --git a/changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml b/changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml new file mode 100644 index 0000000000..9e70a16d80 --- /dev/null +++ b/changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- ipa_sudorule - add support for setting sudo runasuser (https://github.com/ansible-collections/community.general/pull/2031). diff --git a/plugins/modules/identity/ipa/ipa_sudorule.py b/plugins/modules/identity/ipa/ipa_sudorule.py index 35c3327841..15abef8f17 100644 --- a/plugins/modules/identity/ipa/ipa_sudorule.py +++ b/plugins/modules/identity/ipa/ipa_sudorule.py @@ -68,6 +68,12 @@ options: - Option C(hostcategory) must be omitted to assign host groups. 
type: list elements: str + runasextusers: + description: + - List of external RunAs users + type: list + elements: str + version_added: 2.3.0 runasusercategory: description: - RunAs User category the rule applies to. @@ -143,13 +149,15 @@ EXAMPLES = r''' ipa_user: admin ipa_pass: topsecret -- name: Ensure user group operations can run any commands that is part of operations-cmdgroup on any host. +- name: Ensure user group operations can run any commands that is part of operations-cmdgroup on any host as user root. community.general.ipa_sudorule: name: sudo_operations_all - description: Allow operators to run any commands that is part of operations-cmdgroup on any host. + description: Allow operators to run any commands that is part of operations-cmdgroup on any host as user root. cmdgroup: - operations-cmdgroup hostcategory: all + runasextusers: + - root sudoopt: - '!authenticate' usergroup: @@ -183,6 +191,12 @@ class SudoRuleIPAClient(IPAClient): def sudorule_add(self, name, item): return self._post_json(method='sudorule_add', name=name, item=item) + def sudorule_add_runasuser(self, name, item): + return self._post_json(method='sudorule_add_runasuser', name=name, item={'user': item}) + + def sudorule_remove_runasuser(self, name, item): + return self._post_json(method='sudorule_remove_runasuser', name=name, item={'user': item}) + def sudorule_mod(self, name, item): return self._post_json(method='sudorule_mod', name=name, item=item) @@ -287,6 +301,7 @@ def ensure(module, client): hostgroup = module.params['hostgroup'] runasusercategory = module.params['runasusercategory'] runasgroupcategory = module.params['runasgroupcategory'] + runasextusers = module.params['runasextusers'] if state in ['present', 'enabled']: ipaenabledflag = 'TRUE' @@ -371,6 +386,21 @@ def ensure(module, client): for item in diff: client.sudorule_add_option_ipasudoopt(name, item) + if runasextusers is not None: + ipa_sudorule_run_as_user = ipa_sudorule.get('ipasudorunasextuser', []) + diff = 
list(set(ipa_sudorule_run_as_user) - set(runasextusers)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_remove_runasuser(name=name, item=item) + diff = list(set(runasextusers) - set(ipa_sudorule_run_as_user)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + client.sudorule_add_runasuser(name=name, item=item) + if user is not None: changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user, @@ -406,8 +436,8 @@ def main(): state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), user=dict(type='list', elements='str'), usercategory=dict(type='str', choices=['all']), - usergroup=dict(type='list', elements='str')) - + usergroup=dict(type='list', elements='str'), + runasextusers=dict(type='list', elements='str')) module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['cmdcategory', 'cmd'], ['cmdcategory', 'cmdgroup'], From 606eb0df156496196d907934c7d974eec207321d Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 21 Mar 2021 23:12:41 +1300 Subject: [PATCH 0122/3093] archive - a first refactoring (#2061) * a first refactoring on archive * added changelog fragment * suggestion from PR --- .../fragments/2061-archive-refactor1.yml | 2 + plugins/modules/files/archive.py | 115 +++++++++--------- 2 files changed, 58 insertions(+), 59 deletions(-) create mode 100644 changelogs/fragments/2061-archive-refactor1.yml diff --git a/changelogs/fragments/2061-archive-refactor1.yml b/changelogs/fragments/2061-archive-refactor1.yml new file mode 100644 index 0000000000..a7189a2f59 --- /dev/null +++ b/changelogs/fragments/2061-archive-refactor1.yml @@ -0,0 +1,2 @@ +minor_changes: + - archive - refactored some reused code out into a couple of functions 
(https://github.com/ansible-collections/community.general/pull/2061). diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index e63aa8cef9..98bcbf3605 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -153,7 +153,6 @@ expanded_exclude_paths: ''' import bz2 -import filecmp import glob import gzip import io @@ -186,6 +185,33 @@ else: HAS_LZMA = False +def to_b(s): + return to_bytes(s, errors='surrogate_or_strict') + + +def to_n(s): + return to_native(s, errors='surrogate_or_strict') + + +def to_na(s): + return to_native(s, errors='surrogate_or_strict', encoding='ascii') + + +def expand_paths(paths): + expanded_path = [] + is_globby = False + for path in paths: + b_path = to_b(path) + if b'*' in b_path or b'?' in b_path: + e_paths = glob.glob(b_path) + is_globby = True + + else: + e_paths = [b_path] + expanded_path.extend(e_paths) + return expanded_path, is_globby + + def main(): module = AnsibleModule( argument_spec=dict( @@ -204,21 +230,17 @@ def main(): check_mode = module.check_mode paths = params['path'] dest = params['dest'] - b_dest = None if not dest else to_bytes(dest, errors='surrogate_or_strict') + b_dest = None if not dest else to_b(dest) exclude_paths = params['exclude_path'] remove = params['remove'] - b_expanded_paths = [] - b_expanded_exclude_paths = [] fmt = params['format'] - b_fmt = to_bytes(fmt, errors='surrogate_or_strict') + b_fmt = to_b(fmt) force_archive = params['force_archive'] - globby = False changed = False state = 'absent' # Simple or archive file compression (inapplicable with 'zip' since it's always an archive) - archive = False b_successes = [] # Fail early @@ -227,35 +249,7 @@ def main(): exception=LZMA_IMP_ERR) module.fail_json(msg="lzma or backports.lzma is required when using xz format.") - for path in paths: - b_path = to_bytes(path, errors='surrogate_or_strict') - - # Expand any glob characters. 
If found, add the expanded glob to the - # list of expanded_paths, which might be empty. - if (b'*' in b_path or b'?' in b_path): - b_expanded_paths.extend(glob.glob(b_path)) - globby = True - - # If there are no glob characters the path is added to the expanded paths - # whether the path exists or not - else: - b_expanded_paths.append(b_path) - - # Only attempt to expand the exclude paths if it exists - if exclude_paths: - for exclude_path in exclude_paths: - b_exclude_path = to_bytes(exclude_path, errors='surrogate_or_strict') - - # Expand any glob characters. If found, add the expanded glob to the - # list of expanded_paths, which might be empty. - if (b'*' in b_exclude_path or b'?' in b_exclude_path): - b_expanded_exclude_paths.extend(glob.glob(b_exclude_path)) - - # If there are no glob character the exclude path is added to the expanded - # exclude paths whether the path exists or not. - else: - b_expanded_exclude_paths.append(b_exclude_path) - + b_expanded_paths, globby = expand_paths(paths) if not b_expanded_paths: return module.fail_json( path=', '.join(paths), @@ -263,6 +257,9 @@ def main(): msg='Error, no source paths were found' ) + # Only attempt to expand the exclude paths if it exists + b_expanded_exclude_paths = expand_paths(exclude_paths)[0] if exclude_paths else [] + # Only try to determine if we are working with an archive or not if we haven't set archive to true if not force_archive: # If we actually matched multiple files or TRIED to, then @@ -280,7 +277,7 @@ def main(): if archive and not b_dest: module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees') - b_sep = to_bytes(os.sep, errors='surrogate_or_strict') + b_sep = to_b(os.sep) b_archive_paths = [] b_missing = [] @@ -321,7 +318,7 @@ def main(): # No source files were found but the named archive exists: are we 'compress' or 'archive' now? 
if len(b_missing) == len(b_expanded_paths) and b_dest and os.path.exists(b_dest): # Just check the filename to know if it's an archive or simple compressed file - if re.search(br'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(b_dest), re.IGNORECASE): + if re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(b_dest), re.IGNORECASE): state = 'archive' else: state = 'compress' @@ -352,7 +349,7 @@ def main(): # Slightly more difficult (and less efficient!) compression using zipfile module if fmt == 'zip': arcfile = zipfile.ZipFile( - to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), + to_na(b_dest), 'w', zipfile.ZIP_DEFLATED, True @@ -360,7 +357,7 @@ def main(): # Easier compression using tarfile module elif fmt == 'gz' or fmt == 'bz2': - arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w|' + fmt) + arcfile = tarfile.open(to_na(b_dest), 'w|' + fmt) # python3 tarfile module allows xz format but for python2 we have to create the tarfile # in memory and then compress it with lzma. 
@@ -370,7 +367,7 @@ def main(): # Or plain tar archiving elif fmt == 'tar': - arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w') + arcfile = tarfile.open(to_na(b_dest), 'w') b_match_root = re.compile(br'^%s' % re.escape(b_arcroot)) for b_path in b_archive_paths: @@ -382,7 +379,7 @@ def main(): for b_dirname in b_dirnames: b_fullpath = b_dirpath + b_dirname - n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii') + n_fullpath = to_na(b_fullpath) n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict') try: @@ -396,8 +393,8 @@ def main(): for b_filename in b_filenames: b_fullpath = b_dirpath + b_filename - n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii') - n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict') + n_fullpath = to_na(b_fullpath) + n_arcname = to_n(b_match_root.sub(b'', b_fullpath)) try: if fmt == 'zip': @@ -409,8 +406,8 @@ def main(): except Exception as e: errors.append('Adding %s: %s' % (to_native(b_path), to_native(e))) else: - path = to_native(b_path, errors='surrogate_or_strict', encoding='ascii') - arcname = to_native(b_match_root.sub(b'', b_path), errors='surrogate_or_strict') + path = to_na(b_path) + arcname = to_n(b_match_root.sub(b'', b_path)) if fmt == 'zip': arcfile.write(path, arcname) else: @@ -444,14 +441,14 @@ def main(): shutil.rmtree(b_path) elif not check_mode: os.remove(b_path) - except OSError as e: + except OSError: errors.append(to_native(b_path)) for b_path in b_expanded_paths: try: if os.path.isdir(b_path): shutil.rmtree(b_path) - except OSError as e: + except OSError: errors.append(to_native(b_path)) if errors: @@ -490,25 +487,25 @@ def main(): try: if fmt == 'zip': arcfile = zipfile.ZipFile( - to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), + to_na(b_dest), 'w', zipfile.ZIP_DEFLATED, True ) arcfile.write( - to_native(b_path, 
errors='surrogate_or_strict', encoding='ascii'), - to_native(b_path[len(b_arcroot):], errors='surrogate_or_strict') + to_na(b_path), + to_n(b_path[len(b_arcroot):]) ) arcfile.close() state = 'archive' # because all zip files are archives elif fmt == 'tar': - arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w') - arcfile.add(to_native(b_path, errors='surrogate_or_strict', encoding='ascii')) + arcfile = tarfile.open(to_na(b_dest), 'w') + arcfile.add(to_na(b_path)) arcfile.close() else: f_in = open(b_path, 'rb') - n_dest = to_native(b_dest, errors='surrogate_or_strict', encoding='ascii') + n_dest = to_na(b_dest) if fmt == 'gz': f_out = gzip.open(n_dest, 'wb') elif fmt == 'bz2': @@ -564,14 +561,14 @@ def main(): changed = module.set_fs_attributes_if_different(file_args, changed) module.exit_json( - archived=[to_native(p, errors='surrogate_or_strict') for p in b_successes], + archived=[to_n(p) for p in b_successes], dest=dest, changed=changed, state=state, - arcroot=to_native(b_arcroot, errors='surrogate_or_strict'), - missing=[to_native(p, errors='surrogate_or_strict') for p in b_missing], - expanded_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_paths], - expanded_exclude_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_exclude_paths], + arcroot=to_n(b_arcroot), + missing=[to_n(p) for p in b_missing], + expanded_paths=[to_n(p) for p in b_expanded_paths], + expanded_exclude_paths=[to_n(p) for p in b_expanded_exclude_paths], ) From 81f3ad45c90611299d8ca350fd2b10b695630976 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge <44313235+gthiemonge@users.noreply.github.com> Date: Sun, 21 Mar 2021 11:25:24 +0100 Subject: [PATCH 0123/3093] Add gandi_livedns module (#328) * Add gandi_livedns module This module uses REST API to register, update and delete domain name entries in Gandi DNS service (https://www.gandi.net/en/domain). 
* Apply suggestions from code review * Update plugins/module_utils/gandi_livedns_api.py Co-authored-by: Gregory Thiemonge Co-authored-by: Felix Fontein --- plugins/module_utils/gandi_livedns_api.py | 234 ++++++++++++++++++ plugins/modules/gandi_livedns.py | 1 + plugins/modules/net_tools/gandi_livedns.py | 187 ++++++++++++++ .../integration/targets/gandi_livedns/aliases | 2 + .../targets/gandi_livedns/defaults/main.yml | 34 +++ .../gandi_livedns/tasks/create_record.yml | 67 +++++ .../targets/gandi_livedns/tasks/main.yml | 5 + .../targets/gandi_livedns/tasks/record.yml | 6 + .../gandi_livedns/tasks/remove_record.yml | 59 +++++ .../gandi_livedns/tasks/update_record.yml | 57 +++++ 10 files changed, 652 insertions(+) create mode 100644 plugins/module_utils/gandi_livedns_api.py create mode 120000 plugins/modules/gandi_livedns.py create mode 100644 plugins/modules/net_tools/gandi_livedns.py create mode 100644 tests/integration/targets/gandi_livedns/aliases create mode 100644 tests/integration/targets/gandi_livedns/defaults/main.yml create mode 100644 tests/integration/targets/gandi_livedns/tasks/create_record.yml create mode 100644 tests/integration/targets/gandi_livedns/tasks/main.yml create mode 100644 tests/integration/targets/gandi_livedns/tasks/record.yml create mode 100644 tests/integration/targets/gandi_livedns/tasks/remove_record.yml create mode 100644 tests/integration/targets/gandi_livedns/tasks/update_record.yml diff --git a/plugins/module_utils/gandi_livedns_api.py b/plugins/module_utils/gandi_livedns_api.py new file mode 100644 index 0000000000..60e0761d26 --- /dev/null +++ b/plugins/module_utils/gandi_livedns_api.py @@ -0,0 +1,234 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2019 Gregory Thiemonge +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import json + +from ansible.module_utils._text import 
to_native, to_text +from ansible.module_utils.urls import fetch_url + + +class GandiLiveDNSAPI(object): + + api_endpoint = 'https://api.gandi.net/v5/livedns' + changed = False + + error_strings = { + 400: 'Bad request', + 401: 'Permission denied', + 404: 'Resource not found', + } + + attribute_map = { + 'record': 'rrset_name', + 'type': 'rrset_type', + 'ttl': 'rrset_ttl', + 'values': 'rrset_values' + } + + def __init__(self, module): + self.module = module + self.api_key = module.params['api_key'] + + def _build_error_message(self, module, info): + s = '' + body = info.get('body') + if body: + errors = module.from_json(body).get('errors') + if errors: + error = errors[0] + name = error.get('name') + if name: + s += '{0} :'.format(name) + description = error.get('description') + if description: + s += description + return s + + def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True): + headers = {'Authorization': 'Apikey {0}'.format(self.api_key), + 'Content-Type': 'application/json'} + data = None + if payload: + try: + data = json.dumps(payload) + except Exception as e: + self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e)) + + resp, info = fetch_url(self.module, + self.api_endpoint + api_call, + headers=headers, + data=data, + method=method) + + error_msg = '' + if info['status'] >= 400 and (info['status'] != 404 or error_on_404): + err_s = self.error_strings.get(info['status'], '') + + error_msg = "API Error {0}: {1}".format(err_s, self._build_error_message(self.module, info)) + + result = None + try: + content = resp.read() + except AttributeError: + content = None + + if content: + try: + result = json.loads(to_text(content, errors='surrogate_or_strict')) + except (getattr(json, 'JSONDecodeError', ValueError)) as e: + error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content) + + if error_msg: + self.module.fail_json(msg=error_msg) + + return result, info['status'] + + 
def build_result(self, result, domain): + if result is None: + return None + + res = {} + for k in self.attribute_map: + v = result.get(self.attribute_map[k], None) + if v is not None: + if k == 'record' and v == '@': + v = '' + res[k] = v + + res['domain'] = domain + + return res + + def build_results(self, results, domain): + if results is None: + return [] + return [self.build_result(r, domain) for r in results] + + def get_records(self, record, type, domain): + url = '/domains/%s/records' % (domain) + if record: + url += '/%s' % (record) + if type: + url += '/%s' % (type) + + records, status = self._gandi_api_call(url, error_on_404=False) + + if status == 404: + return [] + + if not isinstance(records, list): + records = [records] + + # filter by type if record is not set + if not record and type: + records = [r + for r in records + if r['rrset_type'] == type] + + return records + + def create_record(self, record, type, values, ttl, domain): + url = '/domains/%s/records' % (domain) + new_record = { + 'rrset_name': record, + 'rrset_type': type, + 'rrset_values': values, + 'rrset_ttl': ttl, + } + record, status = self._gandi_api_call(url, method='POST', payload=new_record) + + if status in (200, 201,): + return new_record + + return None + + def update_record(self, record, type, values, ttl, domain): + url = '/domains/%s/records/%s/%s' % (domain, record, type) + new_record = { + 'rrset_values': values, + 'rrset_ttl': ttl, + } + record = self._gandi_api_call(url, method='PUT', payload=new_record)[0] + return record + + def delete_record(self, record, type, domain): + url = '/domains/%s/records/%s/%s' % (domain, record, type) + + self._gandi_api_call(url, method='DELETE') + + def delete_dns_record(self, record, type, values, domain): + if record == '': + record = '@' + + records = self.get_records(record, type, domain) + + if records: + cur_record = records[0] + + self.changed = True + + if values is not None and set(cur_record['rrset_values']) != set(values): + 
new_values = set(cur_record['rrset_values']) - set(values) + if new_values: + # Removing one or more values from a record, we update the record with the remaining values + self.update_record(record, type, list(new_values), cur_record['rrset_ttl'], domain) + records = self.get_records(record, type, domain) + return records[0], self.changed + + if not self.module.check_mode: + self.delete_record(record, type, domain) + else: + cur_record = None + + return None, self.changed + + def ensure_dns_record(self, record, type, ttl, values, domain): + if record == '': + record = '@' + + records = self.get_records(record, type, domain) + + if records: + cur_record = records[0] + + do_update = False + if ttl is not None and cur_record['rrset_ttl'] != ttl: + do_update = True + if values is not None and set(cur_record['rrset_values']) != set(values): + do_update = True + + if do_update: + if self.module.check_mode: + result = dict( + rrset_type=type, + rrset_name=record, + rrset_values=values, + rrset_ttl=ttl + ) + else: + self.update_record(record, type, values, ttl, domain) + + records = self.get_records(record, type, domain) + result = records[0] + self.changed = True + return result, self.changed + else: + return cur_record, self.changed + + if self.module.check_mode: + new_record = dict( + rrset_type=type, + rrset_name=record, + rrset_values=values, + rrset_ttl=ttl + ) + result = new_record + else: + result = self.create_record(record, type, values, ttl, domain) + + self.changed = True + return result, self.changed diff --git a/plugins/modules/gandi_livedns.py b/plugins/modules/gandi_livedns.py new file mode 120000 index 0000000000..6a8a82fab7 --- /dev/null +++ b/plugins/modules/gandi_livedns.py @@ -0,0 +1 @@ +net_tools/gandi_livedns.py \ No newline at end of file diff --git a/plugins/modules/net_tools/gandi_livedns.py b/plugins/modules/net_tools/gandi_livedns.py new file mode 100644 index 0000000000..6124288511 --- /dev/null +++ b/plugins/modules/net_tools/gandi_livedns.py 
@@ -0,0 +1,187 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019 Gregory Thiemonge +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: gandi_livedns +author: +- Gregory Thiemonge (@gthiemonge) +version_added: "2.3.0" +short_description: Manage Gandi LiveDNS records +description: +- "Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/)." +options: + api_key: + description: + - Account API token. + type: str + required: true + record: + description: + - Record to add. + type: str + required: true + state: + description: + - Whether the record(s) should exist or not. + type: str + choices: [ absent, present ] + default: present + ttl: + description: + - The TTL to give the new record. + - Required when I(state=present). + type: int + type: + description: + - The type of DNS record to create. + type: str + required: true + values: + description: + - The record values. + - Required when I(state=present). + type: list + elements: str + domain: + description: + - The name of the Domain to work with (for example, "example.com"). + required: true + type: str +notes: +- Supports C(check_mode). 
+''' + +EXAMPLES = r''' +- name: Create a test A record to point to 127.0.0.1 in the my.com domain + community.general.gandi_livedns: + domain: my.com + record: test + type: A + values: + - 127.0.0.1 + ttl: 7200 + api_key: dummyapitoken + register: record + +- name: Create a mail CNAME record to www.my.com domain + community.general.gandi_livedns: + domain: my.com + type: CNAME + record: mail + values: + - www + ttl: 7200 + api_key: dummyapitoken + state: present + +- name: Change its TTL + community.general.gandi_livedns: + domain: my.com + type: CNAME + record: mail + values: + - www + ttl: 10800 + api_key: dummyapitoken + state: present + +- name: Delete the record + community.general.gandi_livedns: + domain: my.com + type: CNAME + record: mail + api_key: dummyapitoken + state: absent +''' + +RETURN = r''' +record: + description: A dictionary containing the record data. + returned: success, except on record deletion + type: dict + contains: + values: + description: The record content (details depend on record type). + returned: success + type: list + elements: str + sample: + - 192.0.2.91 + - 192.0.2.92 + record: + description: The record name. + returned: success + type: str + sample: www + ttl: + description: The time-to-live for the record. + returned: success + type: int + sample: 300 + type: + description: The record type. + returned: success + type: str + sample: A + domain: + description: The domain associated with the record. 
+ returned: success + type: str + sample: my.com +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.gandi_livedns_api import GandiLiveDNSAPI + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(type='str', required=True, no_log=True), + record=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + ttl=dict(type='int'), + type=dict(type='str', required=True), + values=dict(type='list', elements='str'), + domain=dict(type='str', required=True), + ), + supports_check_mode=True, + required_if=[ + ('state', 'present', ['values', 'ttl']), + ], + ) + + gandi_api = GandiLiveDNSAPI(module) + + if module.params['state'] == 'present': + ret, changed = gandi_api.ensure_dns_record(module.params['record'], + module.params['type'], + module.params['ttl'], + module.params['values'], + module.params['domain']) + else: + ret, changed = gandi_api.delete_dns_record(module.params['record'], + module.params['type'], + module.params['values'], + module.params['domain']) + + result = dict( + changed=changed, + ) + if ret: + result['record'] = gandi_api.build_result(ret, + module.params['domain']) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/gandi_livedns/aliases b/tests/integration/targets/gandi_livedns/aliases new file mode 100644 index 0000000000..3ff69ca3a0 --- /dev/null +++ b/tests/integration/targets/gandi_livedns/aliases @@ -0,0 +1,2 @@ +cloud/gandi +unsupported diff --git a/tests/integration/targets/gandi_livedns/defaults/main.yml b/tests/integration/targets/gandi_livedns/defaults/main.yml new file mode 100644 index 0000000000..d27842ae0b --- /dev/null +++ b/tests/integration/targets/gandi_livedns/defaults/main.yml @@ -0,0 +1,34 @@ +# Copyright: (c) 2020 Gregory Thiemonge +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) +--- +gandi_livedns_domain_name: "ansible-tests.org" +gandi_livedns_record_items: +# Single A record +- record: test-www + type: A + values: + - 10.10.10.10 + ttl: 400 + update_values: + - 10.10.10.11 + update_ttl: 800 + +# Multiple A records +- record: test-www-multiple + type: A + ttl: 3600 + values: + - 10.10.11.10 + - 10.10.11.10 + update_values: + - 10.10.11.11 + - 10.10.11.13 + +# CNAME +- record: test-cname + type: CNAME + ttl: 10800 + values: + - test-www2 + update_values: + - test-www diff --git a/tests/integration/targets/gandi_livedns/tasks/create_record.yml b/tests/integration/targets/gandi_livedns/tasks/create_record.yml new file mode 100644 index 0000000000..bfaff81393 --- /dev/null +++ b/tests/integration/targets/gandi_livedns/tasks/create_record.yml @@ -0,0 +1,67 @@ +# Copyright: (c) 2020 Gregory Thiemonge +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +--- +- name: test absent dns record + community.general.gandi_livedns: + api_key: "{{ gandi_api_key }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + type: "{{ item.type }}" + ttl: "{{ item.ttl }}" + state: absent + register: result +- name: verify test absent dns record + assert: + that: + - result is successful + +- name: test create a dns record in check mode + community.general.gandi_livedns: + api_key: "{{ gandi_api_key }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + values: "{{ item['values'] }}" + ttl: "{{ item.ttl }}" + type: "{{ item.type }}" + check_mode: yes + register: result +- name: verify test create a dns record in check mode + assert: + that: + - result is changed + +- name: test create a dns record + community.general.gandi_livedns: + api_key: "{{ gandi_api_key }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + values: "{{ item['values'] }}" + ttl: "{{ item.ttl }}" + type: "{{ item.type }}" + register: 
result +- name: verify test create a dns record + assert: + that: + - result is changed + - result.record['values'] == {{ item['values'] }} + - result.record.record == "{{ item.record }}" + - result.record.type == "{{ item.type }}" + - result.record.ttl == {{ item.ttl }} + +- name: test create a dns record idempotence + community.general.gandi_livedns: + api_key: "{{ gandi_api_key }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + values: "{{ item['values'] }}" + ttl: "{{ item.ttl }}" + type: "{{ item.type }}" + register: result +- name: verify test create a dns record idempotence + assert: + that: + - result is not changed + - result.record['values'] == {{ item['values'] }} + - result.record.record == "{{ item.record }}" + - result.record.type == "{{ item.type }}" + - result.record.ttl == {{ item.ttl }} diff --git a/tests/integration/targets/gandi_livedns/tasks/main.yml b/tests/integration/targets/gandi_livedns/tasks/main.yml new file mode 100644 index 0000000000..5b11e8b9f3 --- /dev/null +++ b/tests/integration/targets/gandi_livedns/tasks/main.yml @@ -0,0 +1,5 @@ +# Copyright: (c) 2020 Gregory Thiemonge +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +--- +- include_tasks: record.yml + with_items: "{{ gandi_livedns_record_items }}" diff --git a/tests/integration/targets/gandi_livedns/tasks/record.yml b/tests/integration/targets/gandi_livedns/tasks/record.yml new file mode 100644 index 0000000000..1e5977e3f9 --- /dev/null +++ b/tests/integration/targets/gandi_livedns/tasks/record.yml @@ -0,0 +1,6 @@ +# Copyright: (c) 2020 Gregory Thiemonge +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +--- +- include_tasks: create_record.yml +- include_tasks: update_record.yml +- include_tasks: remove_record.yml diff --git a/tests/integration/targets/gandi_livedns/tasks/remove_record.yml b/tests/integration/targets/gandi_livedns/tasks/remove_record.yml new 
file mode 100644 index 0000000000..78a0d2f42b --- /dev/null +++ b/tests/integration/targets/gandi_livedns/tasks/remove_record.yml @@ -0,0 +1,59 @@ +# Copyright: (c) 2020 Gregory Thiemonge +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +--- +- name: test remove a dns record in check mode + community.general.gandi_livedns: + api_key: "{{ gandi_api_key }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + values: "{{ item.update_values | default(item['values']) }}" + type: "{{ item.type }}" + state: absent + check_mode: yes + register: result +- name: verify test remove a dns record in check mode + assert: + that: + - result is changed + +- name: test remove a dns record + community.general.gandi_livedns: + api_key: "{{ gandi_api_key }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + values: "{{ item.update_values | default(item['values']) }}" + type: "{{ item.type }}" + state: absent + register: result +- name: verify test remove a dns record + assert: + that: + - result is changed + +- name: test remove a dns record idempotence + community.general.gandi_livedns: + api_key: "{{ gandi_api_key }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + values: "{{ item.update_values | default(item['values']) }}" + type: "{{ item.type }}" + state: absent + register: result +- name: verify test remove a dns record idempotence + assert: + that: + - result is not changed + +- name: test remove second dns record idempotence + community.general.gandi_livedns: + api_key: "{{ gandi_api_key }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + values: "{{ item['values'] }}" + type: "{{ item.type }}" + state: absent + register: result +- name: verify test remove a dns record idempotence + assert: + that: + - result is not changed diff --git a/tests/integration/targets/gandi_livedns/tasks/update_record.yml 
b/tests/integration/targets/gandi_livedns/tasks/update_record.yml new file mode 100644 index 0000000000..fdb1dc1e23 --- /dev/null +++ b/tests/integration/targets/gandi_livedns/tasks/update_record.yml @@ -0,0 +1,57 @@ +# Copyright: (c) 2020 Gregory Thiemonge +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +--- +- name: test update or add another dns record in check mode + community.general.gandi_livedns: + api_key: "{{ gandi_api_key }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + values: "{{ item.update_values | default(item['values']) }}" + ttl: "{{ item.update_ttl | default(item.ttl) }}" + type: "{{ item.type }}" + check_mode: yes + register: result +- name: verify test update in check mode + assert: + that: + - result is changed + - result.record['values'] == {{ item.update_values | default(item['values']) }} + - result.record.record == "{{ item.record }}" + - result.record.type == "{{ item.type }}" + - result.record.ttl == {{ item.update_ttl | default(item.ttl) }} + +- name: test update or add another dns record + community.general.gandi_livedns: + api_key: "{{ gandi_api_key }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + values: "{{ item.update_values | default(item['values']) }}" + ttl: "{{ item.update_ttl | default(item.ttl) }}" + type: "{{ item.type }}" + register: result +- name: verify test update a dns record + assert: + that: + - result is changed + - result.record['values'] == {{ item.update_values | default(item['values']) }} + - result.record.record == "{{ item.record }}" + - result.record.ttl == {{ item.update_ttl | default(item.ttl) }} + - result.record.type == "{{ item.type }}" + +- name: test update or add another dns record idempotence + community.general.gandi_livedns: + api_key: "{{ gandi_api_key }}" + record: "{{ item.record }}" + domain: "{{ gandi_livedns_domain_name }}" + values: "{{ item.update_values | default(item['values']) }}" + 
ttl: "{{ item.update_ttl | default(item.ttl) }}" + type: "{{ item.type }}" + register: result +- name: verify test update a dns record idempotence + assert: + that: + - result is not changed + - result.record['values'] == {{ item.update_values | default(item['values']) }} + - result.record.record == "{{ item.record }}" + - result.record.ttl == {{ item.update_ttl | default(item.ttl) }} + - result.record.type == "{{ item.type }}" From 68fc48cd1f77945a4cdbad5f1984d062c8ee998f Mon Sep 17 00:00:00 2001 From: Florian Dambrine Date: Sun, 21 Mar 2021 03:30:16 -0700 Subject: [PATCH 0124/3093] New module: Add Pritunl VPN user module (net_tools/pritunl/) (#803) --- plugins/doc_fragments/pritunl.py | 43 ++ .../net_tools/pritunl/__init__.py | 0 plugins/module_utils/net_tools/pritunl/api.py | 300 ++++++++++ .../modules/net_tools/pritunl/pritunl_user.py | 343 +++++++++++ .../net_tools/pritunl/pritunl_user_info.py | 171 ++++++ plugins/modules/pritunl_user.py | 1 + plugins/modules/pritunl_user_info.py | 1 + .../net_tools/pritunl/__init__.py | 0 .../net_tools/pritunl/test_api.py | 541 ++++++++++++++++++ .../net_tools/pritunl/test_pritunl_user.py | 208 +++++++ .../pritunl/test_pritunl_user_info.py | 160 ++++++ 11 files changed, 1768 insertions(+) create mode 100644 plugins/doc_fragments/pritunl.py create mode 100644 plugins/module_utils/net_tools/pritunl/__init__.py create mode 100644 plugins/module_utils/net_tools/pritunl/api.py create mode 100644 plugins/modules/net_tools/pritunl/pritunl_user.py create mode 100644 plugins/modules/net_tools/pritunl/pritunl_user_info.py create mode 120000 plugins/modules/pritunl_user.py create mode 120000 plugins/modules/pritunl_user_info.py create mode 100644 tests/unit/plugins/module_utils/net_tools/pritunl/__init__.py create mode 100644 tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py create mode 100644 tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user.py create mode 100644 
tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user_info.py diff --git a/plugins/doc_fragments/pritunl.py b/plugins/doc_fragments/pritunl.py new file mode 100644 index 0000000000..e2eaff2889 --- /dev/null +++ b/plugins/doc_fragments/pritunl.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +class ModuleDocFragment(object): + + DOCUMENTATION = r""" +options: + pritunl_url: + type: str + required: true + description: + - URL and port of the Pritunl server on which the API is enabled. + + pritunl_api_token: + type: str + required: true + description: + - API Token of a Pritunl admin user. + - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication. + + pritunl_api_secret: + type: str + required: true + description: + - API Secret found in Administrators > USERNAME > API Secret. + + validate_certs: + type: bool + required: false + default: true + description: + - If certificates should be validated or not. + - This should never be set to C(false), except if you are very sure that + your connection to the server can not be subject to a Man In The Middle + attack. 
+""" diff --git a/plugins/module_utils/net_tools/pritunl/__init__.py b/plugins/module_utils/net_tools/pritunl/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/net_tools/pritunl/api.py b/plugins/module_utils/net_tools/pritunl/api.py new file mode 100644 index 0000000000..e78f1848eb --- /dev/null +++ b/plugins/module_utils/net_tools/pritunl/api.py @@ -0,0 +1,300 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Pritunl API that offers CRUD operations on Pritunl Organizations and Users +""" + +from __future__ import absolute_import, division, print_function + +import base64 +import hashlib +import hmac +import json +import time +import uuid + +from ansible.module_utils.six import iteritems +from ansible.module_utils.urls import open_url + +__metaclass__ = type + + +class PritunlException(Exception): + pass + + +def pritunl_argument_spec(): + return dict( + pritunl_url=dict(required=True, type="str"), + pritunl_api_token=dict(required=True, type="str", no_log=False), + pritunl_api_secret=dict(required=True, type="str", no_log=True), + validate_certs=dict(required=False, type="bool", default=True), + ) + + +def get_pritunl_settings(module): + """ + Helper function to set required Pritunl request params from module arguments. 
+ """ + return { + "api_token": module.params.get("pritunl_api_token"), + "api_secret": module.params.get("pritunl_api_secret"), + "base_url": module.params.get("pritunl_url"), + "validate_certs": module.params.get("validate_certs"), + } + + +def _get_pritunl_organizations(api_token, api_secret, base_url, validate_certs=True): + return pritunl_auth_request( + base_url=base_url, + api_token=api_token, + api_secret=api_secret, + method="GET", + path="/organization", + validate_certs=validate_certs, + ) + + +def _get_pritunl_users( + api_token, api_secret, base_url, organization_id, validate_certs=True +): + return pritunl_auth_request( + api_token=api_token, + api_secret=api_secret, + base_url=base_url, + method="GET", + path="/user/%s" % organization_id, + validate_certs=validate_certs, + ) + + +def _delete_pritunl_user( + api_token, api_secret, base_url, organization_id, user_id, validate_certs=True +): + return pritunl_auth_request( + api_token=api_token, + api_secret=api_secret, + base_url=base_url, + method="DELETE", + path="/user/%s/%s" % (organization_id, user_id), + validate_certs=validate_certs, + ) + + +def _post_pritunl_user( + api_token, api_secret, base_url, organization_id, user_data, validate_certs=True +): + return pritunl_auth_request( + api_token=api_token, + api_secret=api_secret, + base_url=base_url, + method="POST", + path="/user/%s" % organization_id, + headers={"Content-Type": "application/json"}, + data=json.dumps(user_data), + validate_certs=validate_certs, + ) + + +def _put_pritunl_user( + api_token, + api_secret, + base_url, + organization_id, + user_id, + user_data, + validate_certs=True, +): + return pritunl_auth_request( + api_token=api_token, + api_secret=api_secret, + base_url=base_url, + method="PUT", + path="/user/%s/%s" % (organization_id, user_id), + headers={"Content-Type": "application/json"}, + data=json.dumps(user_data), + validate_certs=validate_certs, + ) + + +def list_pritunl_organizations( + api_token, api_secret, base_url, 
validate_certs=True, filters=None +): + orgs = [] + + response = _get_pritunl_organizations( + api_token=api_token, + api_secret=api_secret, + base_url=base_url, + validate_certs=validate_certs, + ) + + if response.getcode() != 200: + raise PritunlException("Could not retrieve organizations from Pritunl") + else: + for org in json.loads(response.read()): + # No filtering + if filters is None: + orgs.append(org) + else: + if not any( + filter_val != org[filter_key] + for filter_key, filter_val in iteritems(filters) + ): + orgs.append(org) + + return orgs + + +def list_pritunl_users( + api_token, api_secret, base_url, organization_id, validate_certs=True, filters=None +): + users = [] + + response = _get_pritunl_users( + api_token=api_token, + api_secret=api_secret, + base_url=base_url, + validate_certs=validate_certs, + organization_id=organization_id, + ) + + if response.getcode() != 200: + raise PritunlException("Could not retrieve users from Pritunl") + else: + for user in json.loads(response.read()): + # No filtering + if filters is None: + users.append(user) + + else: + if not any( + filter_val != user[filter_key] + for filter_key, filter_val in iteritems(filters) + ): + users.append(user) + + return users + + +def post_pritunl_user( + api_token, + api_secret, + base_url, + organization_id, + user_data, + user_id=None, + validate_certs=True, +): + # If user_id is provided will do PUT otherwise will do POST + if user_id is None: + response = _post_pritunl_user( + api_token=api_token, + api_secret=api_secret, + base_url=base_url, + organization_id=organization_id, + user_data=user_data, + validate_certs=True, + ) + + if response.getcode() != 200: + raise PritunlException( + "Could not remove user %s from organization %s from Pritunl" + % (user_id, organization_id) + ) + # user POST request returns an array of a single item, + # so return this item instead of the list + return json.loads(response.read())[0] + else: + response = _put_pritunl_user( + 
api_token=api_token, + api_secret=api_secret, + base_url=base_url, + organization_id=organization_id, + user_data=user_data, + user_id=user_id, + validate_certs=True, + ) + + if response.getcode() != 200: + raise PritunlException( + "Could not update user %s from organization %s from Pritunl" + % (user_id, organization_id) + ) + # The user PUT request returns the updated user object + return json.loads(response.read()) + + +def delete_pritunl_user( + api_token, api_secret, base_url, organization_id, user_id, validate_certs=True +): + response = _delete_pritunl_user( + api_token=api_token, + api_secret=api_secret, + base_url=base_url, + organization_id=organization_id, + user_id=user_id, + validate_certs=True, + ) + + if response.getcode() != 200: + raise PritunlException( + "Could not remove user %s from organization %s from Pritunl" + % (user_id, organization_id) + ) + + return json.loads(response.read()) + + +def pritunl_auth_request( + api_token, + api_secret, + base_url, + method, + path, + validate_certs=True, + headers=None, + data=None, +): + """ + Send an API call to a Pritunl server. 
+    Taken from https://pritunl.com/api and adapted to work with Ansible open_url
If C(absent), removes the user + I(user_name) from the Pritunl I(organization). + + user_name: + type: str + required: true + default: null + description: + - Name of the user to create or delete from Pritunl. + + user_email: + type: str + required: false + default: null + description: + - Email address associated with the user I(user_name). + + user_type: + type: str + required: false + default: client + choices: + - client + - server + description: + - Type of the user I(user_name). + + user_groups: + type: list + elements: str + required: false + default: null + description: + - List of groups associated with the user I(user_name). + + user_disabled: + type: bool + required: false + default: null + description: + - Enable/Disable the user I(user_name). + + user_gravatar: + type: bool + required: false + default: null + description: + - Enable/Disable Gravatar usage for the user I(user_name). +""" + +EXAMPLES = """ +- name: Create the user Foo with email address foo@bar.com in MyOrg + community.general.pritunl_user: + state: present + name: MyOrg + user_name: Foo + user_email: foo@bar.com + +- name: Disable the user Foo but keep it in Pritunl + community.general.pritunl_user: + state: present + name: MyOrg + user_name: Foo + user_email: foo@bar.com + user_disabled: yes + +- name: Make sure the user Foo is not part of MyOrg anymore + community.general.pritunl_user: + state: absent + name: MyOrg + user_name: Foo +""" + +RETURN = """ +response: + description: JSON representation of Pritunl Users. 
+ returned: success + type: dict + sample: + { + "audit": false, + "auth_type": "google", + "bypass_secondary": false, + "client_to_client": false, + "disabled": false, + "dns_mapping": null, + "dns_servers": null, + "dns_suffix": null, + "email": "foo@bar.com", + "gravatar": true, + "groups": [ + "foo", "bar" + ], + "id": "5d070dafe63q3b2e6s472c3b", + "name": "foo@acme.com", + "network_links": [], + "organization": "58070daee6sf342e6e4s2c36", + "organization_name": "Acme", + "otp_auth": true, + "otp_secret": "35H5EJA3XB2$4CWG", + "pin": false, + "port_forwarding": [], + "servers": [], + } +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + delete_pritunl_user, + get_pritunl_settings, + list_pritunl_organizations, + list_pritunl_users, + post_pritunl_user, + pritunl_argument_spec, +) + + +def add_or_update_pritunl_user(module): + result = {} + + org_name = module.params.get("organization") + user_name = module.params.get("user_name") + + user_params = { + "name": user_name, + "email": module.params.get("user_email"), + "groups": module.params.get("user_groups"), + "disabled": module.params.get("user_disabled"), + "gravatar": module.params.get("user_gravatar"), + "type": module.params.get("user_type"), + } + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + {"filters": {"name": org_name}}, + ) + ) + + if len(org_obj_list) == 0: + module.fail_json( + msg="Can not add user to organization '%s' which does not exist" % org_name + ) + + org_id = org_obj_list[0]["id"] + + # Grab existing users from this org + users = list_pritunl_users( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "filters": {"name": user_name}, + }, + ) + ) + + # Check if the 
pritunl user already exists + if len(users) > 0: + # Compare remote user params with local user_params and trigger update if needed + user_params_changed = False + for key in user_params.keys(): + # When a param is not specified grab existing ones to prevent from changing it with the PUT request + if user_params[key] is None: + user_params[key] = users[0][key] + + # 'groups' is a list comparison + if key == "groups": + if set(users[0][key]) != set(user_params[key]): + user_params_changed = True + + # otherwise it is either a boolean or a string + else: + if users[0][key] != user_params[key]: + user_params_changed = True + + # Trigger a PUT on the API to update the current user if settings have changed + if user_params_changed: + response = post_pritunl_user( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "user_id": users[0]["id"], + "user_data": user_params, + }, + ) + ) + + result["changed"] = True + result["response"] = response + else: + result["changed"] = False + result["response"] = users + else: + response = post_pritunl_user( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "user_data": user_params, + }, + ) + ) + result["changed"] = True + result["response"] = response + + module.exit_json(**result) + + +def remove_pritunl_user(module): + result = {} + + org_name = module.params.get("organization") + user_name = module.params.get("user_name") + + org_obj_list = [] + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + { + "filters": {"name": org_name}, + }, + ) + ) + + if len(org_obj_list) == 0: + module.fail_json( + msg="Can not remove user '%s' from a non existing organization '%s'" + % (user_name, org_name) + ) + + org_id = org_obj_list[0]["id"] + + # Grab existing users from this org + users = list_pritunl_users( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "filters": {"name": user_name}, + }, + ) + ) + + 
# Check if the pritunl user exists, if not, do nothing + if len(users) == 0: + result["changed"] = False + result["response"] = {} + + # Otherwise remove the org from Pritunl + else: + response = delete_pritunl_user( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "user_id": users[0]["id"], + }, + ) + ) + result["changed"] = True + result["response"] = response + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + organization=dict(required=True, type="str", aliases=["org"]), + state=dict( + required=False, choices=["present", "absent"], default="present" + ), + user_name=dict(required=True, type="str"), + user_type=dict( + required=False, choices=["client", "server"], default="client" + ), + user_email=dict(required=False, type="str", default=None), + user_groups=dict(required=False, type="list", elements="str", default=None), + user_disabled=dict(required=False, type="bool", default=None), + user_gravatar=dict(required=False, type="bool", default=None), + ) + ), + + module = AnsibleModule(argument_spec=argument_spec) + + state = module.params.get("state") + + try: + if state == "present": + add_or_update_pritunl_user(module) + elif state == "absent": + remove_pritunl_user(module) + except PritunlException as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/net_tools/pritunl/pritunl_user_info.py b/plugins/modules/net_tools/pritunl/pritunl_user_info.py new file mode 100644 index 0000000000..c00da6dc23 --- /dev/null +++ b/plugins/modules/net_tools/pritunl/pritunl_user_info.py @@ -0,0 +1,171 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: 
pritunl_user_info +author: "Florian Dambrine (@Lowess)" +version_added: 2.3.0 +short_description: List Pritunl Users using the Pritunl API +description: + - A module to list Pritunl users using the Pritunl API. +extends_documentation_fragment: + - community.general.pritunl +options: + organization: + type: str + required: true + aliases: + - org + description: + - The name of the organization the user is part of. + + user_name: + type: str + required: false + description: + - Name of the user to filter on Pritunl. + + user_type: + type: str + required: false + default: client + choices: + - client + - server + description: + - Type of the user I(user_name). +""" + +EXAMPLES = """ +- name: List all existing users part of the organization MyOrg + community.general.pritunl_user_info: + state: list + organization: MyOrg + +- name: Search for the user named Florian part of the organization MyOrg + community.general.pritunl_user_info: + state: list + organization: MyOrg + user_name: Florian +""" + +RETURN = """ +users: + description: List of Pritunl users. 
+ returned: success + type: list + elements: dict + sample: + [ + { + "audit": false, + "auth_type": "google", + "bypass_secondary": false, + "client_to_client": false, + "disabled": false, + "dns_mapping": null, + "dns_servers": null, + "dns_suffix": null, + "email": "foo@bar.com", + "gravatar": true, + "groups": [ + "foo", "bar" + ], + "id": "5d070dafe63q3b2e6s472c3b", + "name": "foo@acme.com", + "network_links": [], + "organization": "58070daee6sf342e6e4s2c36", + "organization_name": "Acme", + "otp_auth": true, + "otp_secret": "35H5EJA3XB2$4CWG", + "pin": false, + "port_forwarding": [], + "servers": [], + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + get_pritunl_settings, + list_pritunl_organizations, + list_pritunl_users, + pritunl_argument_spec, +) + + +def get_pritunl_user(module): + user_name = module.params.get("user_name") + user_type = module.params.get("user_type") + org_name = module.params.get("organization") + + org_obj_list = [] + + org_obj_list = list_pritunl_organizations( + **dict_merge(get_pritunl_settings(module), {"filters": {"name": org_name}}) + ) + + if len(org_obj_list) == 0: + module.fail_json( + msg="Can not list users from the organization '%s' which does not exist" + % org_name + ) + + org_id = org_obj_list[0]["id"] + + users = list_pritunl_users( + **dict_merge( + get_pritunl_settings(module), + { + "organization_id": org_id, + "filters": ( + {"type": user_type} + if user_name is None + else {"name": user_name, "type": user_type} + ), + }, + ) + ) + + result = {} + result["changed"] = False + result["users"] = users + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + organization=dict(required=True, 
type="str", aliases=["org"]), + user_name=dict(required=False, type="str", default=None), + user_type=dict( + required=False, + choices=["client", "server"], + default="client", + ), + ) + ), + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + get_pritunl_user(module) + except PritunlException as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/pritunl_user.py b/plugins/modules/pritunl_user.py new file mode 120000 index 0000000000..25a91db66b --- /dev/null +++ b/plugins/modules/pritunl_user.py @@ -0,0 +1 @@ +./net_tools/pritunl/pritunl_user.py \ No newline at end of file diff --git a/plugins/modules/pritunl_user_info.py b/plugins/modules/pritunl_user_info.py new file mode 120000 index 0000000000..bfabbe0c8c --- /dev/null +++ b/plugins/modules/pritunl_user_info.py @@ -0,0 +1 @@ +net_tools/pritunl/pritunl_user_info.py \ No newline at end of file diff --git a/tests/unit/plugins/module_utils/net_tools/pritunl/__init__.py b/tests/unit/plugins/module_utils/net_tools/pritunl/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py b/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py new file mode 100644 index 0000000000..1d78a6b555 --- /dev/null +++ b/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py @@ -0,0 +1,541 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +import json + +import pytest +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible.module_utils.six import iteritems +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl import api +from mock import MagicMock + +__metaclass__ = type + + +# Pritunl Mocks + + +class 
PritunlListOrganizationMock(MagicMock): + """Pritunl API Mock for organization GET API calls.""" + + def getcode(self): + return 200 + + def read(self): + return json.dumps( + [ + { + "auth_api": False, + "name": "Foo", + "auth_token": None, + "user_count": 0, + "auth_secret": None, + "id": "csftwlu6uhralzi2dpmhekz3", + }, + { + "auth_api": False, + "name": "GumGum", + "auth_token": None, + "user_count": 3, + "auth_secret": None, + "id": "58070daee63f3b2e6e472c36", + }, + { + "auth_api": False, + "name": "Bar", + "auth_token": None, + "user_count": 0, + "auth_secret": None, + "id": "v1sncsxxybnsylc8gpqg85pg", + }, + ] + ) + + +class PritunlListUserMock(MagicMock): + """Pritunl API Mock for user GET API calls.""" + + def getcode(self): + return 200 + + def read(self): + return json.dumps( + [ + { + "auth_type": "google", + "dns_servers": None, + "pin": True, + "dns_suffix": None, + "servers": [ + { + "status": False, + "platform": None, + "server_id": "580711322bb66c1d59b9568f", + "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27", + "virt_address": "192.168.101.27", + "name": "vpn-A", + "real_address": None, + "connected_since": None, + "id": "580711322bb66c1d59b9568f", + "device_name": None, + }, + { + "status": False, + "platform": None, + "server_id": "5dad2cc6e63f3b3f4a6dfea5", + "virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37", + "virt_address": "192.168.201.37", + "name": "vpn-B", + "real_address": None, + "connected_since": None, + "id": "5dad2cc6e63f3b3f4a6dfea5", + "device_name": None, + }, + ], + "disabled": False, + "network_links": [], + "port_forwarding": [], + "id": "58070dafe63f3b2e6e472c3b", + "organization_name": "GumGum", + "type": "server", + "email": "bot@company.com", + "status": True, + "dns_mapping": None, + "otp_secret": "123456789ABCDEFG", + "client_to_client": False, + "sso": "google", + "bypass_secondary": False, + "groups": ["admin", "multiregion"], + "audit": False, + "name": "bot", + "gravatar": True, + "otp_auth": True, + 
"organization": "58070daee63f3b2e6e472c36", + }, + { + "auth_type": "google", + "dns_servers": None, + "pin": True, + "dns_suffix": None, + "servers": [ + { + "status": False, + "platform": None, + "server_id": "580711322bb66c1d59b9568f", + "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27", + "virt_address": "192.168.101.27", + "name": "vpn-A", + "real_address": None, + "connected_since": None, + "id": "580711322bb66c1d59b9568f", + "device_name": None, + }, + { + "status": False, + "platform": None, + "server_id": "5dad2cc6e63f3b3f4a6dfea5", + "virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37", + "virt_address": "192.168.201.37", + "name": "vpn-B", + "real_address": None, + "connected_since": None, + "id": "5dad2cc6e63f3b3f4a6dfea5", + "device_name": None, + }, + ], + "disabled": False, + "network_links": [], + "port_forwarding": [], + "id": "58070dafe63f3b2e6e472c3b", + "organization_name": "GumGum", + "type": "client", + "email": "florian@company.com", + "status": True, + "dns_mapping": None, + "otp_secret": "123456789ABCDEFG", + "client_to_client": False, + "sso": "google", + "bypass_secondary": False, + "groups": ["web", "database"], + "audit": False, + "name": "florian", + "gravatar": True, + "otp_auth": True, + "organization": "58070daee63f3b2e6e472c36", + }, + { + "auth_type": "google", + "dns_servers": None, + "pin": True, + "dns_suffix": None, + "servers": [ + { + "status": False, + "platform": None, + "server_id": "580711322bb66c1d59b9568f", + "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27", + "virt_address": "192.168.101.27", + "name": "vpn-A", + "real_address": None, + "connected_since": None, + "id": "580711322bb66c1d59b9568f", + "device_name": None, + }, + { + "status": False, + "platform": None, + "server_id": "5dad2cc6e63f3b3f4a6dfea5", + "virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37", + "virt_address": "192.168.201.37", + "name": "vpn-B", + "real_address": None, + "connected_since": None, + "id": 
"5dad2cc6e63f3b3f4a6dfea5", + "device_name": None, + }, + ], + "disabled": False, + "network_links": [], + "port_forwarding": [], + "id": "58070dafe63f3b2e6e472c3b", + "organization_name": "GumGum", + "type": "server", + "email": "ops@company.com", + "status": True, + "dns_mapping": None, + "otp_secret": "123456789ABCDEFG", + "client_to_client": False, + "sso": "google", + "bypass_secondary": False, + "groups": ["web", "database"], + "audit": False, + "name": "ops", + "gravatar": True, + "otp_auth": True, + "organization": "58070daee63f3b2e6e472c36", + }, + ] + ) + + +class PritunlErrorMock(MagicMock): + """Pritunl API Mock for API call failures.""" + + def getcode(self): + return 500 + + def read(self): + return "{}" + + +class PritunlPostUserMock(MagicMock): + """Pritunl API Mock for POST API calls.""" + + def getcode(self): + return 200 + + def read(self): + return json.dumps( + [ + { + "auth_type": "local", + "disabled": False, + "dns_servers": None, + "otp_secret": "6M4UWP2BCJBSYZAT", + "name": "alice", + "pin": False, + "dns_suffix": None, + "client_to_client": False, + "email": "alice@company.com", + "organization_name": "GumGum", + "bypass_secondary": False, + "groups": ["a", "b"], + "organization": "58070daee63f3b2e6e472c36", + "port_forwarding": [], + "type": "client", + "id": "590add71e63f3b72d8bb951a", + } + ] + ) + + +class PritunlPutUserMock(MagicMock): + """Pritunl API Mock for PUT API calls.""" + + def getcode(self): + return 200 + + def read(self): + return json.dumps( + { + "auth_type": "local", + "disabled": True, + "dns_servers": None, + "otp_secret": "WEJANJYMF3Q2QSLG", + "name": "bob", + "pin": False, + "dns_suffix": False, + "client_to_client": False, + "email": "bob@company.com", + "organization_name": "GumGum", + "bypass_secondary": False, + "groups": ["c", "d"], + "organization": "58070daee63f3b2e6e472c36", + "port_forwarding": [], + "type": "client", + "id": "590add71e63f3b72d8bb951a", + } + ) + + +class PritunlDeleteUserMock(MagicMock): 
+ """Pritunl API Mock for DELETE API calls.""" + + def getcode(self): + return 200 + + def read(self): + return "{}" + + +# Ansible Module Mock and Pytest mock fixtures + + +class ModuleFailException(Exception): + def __init__(self, msg, **kwargs): + super(ModuleFailException, self).__init__(msg) + self.fail_msg = msg + self.fail_kwargs = kwargs + + +@pytest.fixture +def pritunl_settings(): + return { + "api_token": "token", + "api_secret": "secret", + "base_url": "https://pritunl.domain.com", + "validate_certs": True, + } + + +@pytest.fixture +def pritunl_user_data(): + return { + "name": "alice", + "email": "alice@company.com", + "groups": ["a", "b"], + "disabled": False, + "type": "client", + } + + +@pytest.fixture +def get_pritunl_organization_mock(): + return PritunlListOrganizationMock() + + +@pytest.fixture +def get_pritunl_user_mock(): + return PritunlListUserMock() + + +@pytest.fixture +def get_pritunl_error_mock(): + return PritunlErrorMock() + + +@pytest.fixture +def post_pritunl_user_mock(): + return PritunlPostUserMock() + + +@pytest.fixture +def put_pritunl_user_mock(): + return PritunlPutUserMock() + + +@pytest.fixture +def delete_pritunl_user_mock(): + return PritunlDeleteUserMock() + + +class TestPritunlApi: + """ + Test class to validate CRUD operations on Pritunl. 
+ """ + + # Test for GET / list operation on Pritunl API + @pytest.mark.parametrize( + "org_id,org_user_count", + [ + ("58070daee63f3b2e6e472c36", 3), + ("v1sncsxxybnsylc8gpqg85pg", 0), + ], + ) + def test_list_all_pritunl_organization( + self, + pritunl_settings, + get_pritunl_organization_mock, + org_id, + org_user_count, + ): + api._get_pritunl_organizations = get_pritunl_organization_mock() + + response = api.list_pritunl_organizations(**pritunl_settings) + + assert len(response) == 3 + + for org in response: + if org["id"] == org_id: + org["user_count"] == org_user_count + + @pytest.mark.parametrize( + "org_filters,org_expected", + [ + ({"id": "58070daee63f3b2e6e472c36"}, "GumGum"), + ({"name": "GumGum"}, "GumGum"), + ], + ) + def test_list_filtered_pritunl_organization( + self, + pritunl_settings, + get_pritunl_organization_mock, + org_filters, + org_expected, + ): + api._get_pritunl_organizations = get_pritunl_organization_mock() + + response = api.list_pritunl_organizations( + **dict_merge(pritunl_settings, {"filters": org_filters}) + ) + + assert len(response) == 1 + assert response[0]["name"] == org_expected + + @pytest.mark.parametrize( + "org_id,org_user_count", + [("58070daee63f3b2e6e472c36", 3)], + ) + def test_list_all_pritunl_user( + self, pritunl_settings, get_pritunl_user_mock, org_id, org_user_count + ): + api._get_pritunl_users = get_pritunl_user_mock() + + response = api.list_pritunl_users( + **dict_merge(pritunl_settings, {"organization_id": org_id}) + ) + + assert len(response) == org_user_count + + @pytest.mark.parametrize( + "org_id,user_filters,user_expected", + [ + ("58070daee63f3b2e6e472c36", {"email": "bot@company.com"}, "bot"), + ("58070daee63f3b2e6e472c36", {"name": "florian"}, "florian"), + ], + ) + def test_list_filtered_pritunl_user( + self, + pritunl_settings, + get_pritunl_user_mock, + org_id, + user_filters, + user_expected, + ): + api._get_pritunl_users = get_pritunl_user_mock() + + response = api.list_pritunl_users( + 
**dict_merge( + pritunl_settings, {"organization_id": org_id, "filters": user_filters} + ) + ) + + assert len(response) > 0 + + for user in response: + assert user["organization"] == org_id + assert user["name"] == user_expected + + # Test for POST operation on Pritunl API + @pytest.mark.parametrize("org_id", [("58070daee63f3b2e6e472c36")]) + def test_add_and_update_pritunl_user( + self, + pritunl_settings, + pritunl_user_data, + post_pritunl_user_mock, + put_pritunl_user_mock, + org_id, + ): + api._post_pritunl_user = post_pritunl_user_mock() + api._put_pritunl_user = put_pritunl_user_mock() + + create_response = api.post_pritunl_user( + **dict_merge( + pritunl_settings, + { + "organization_id": org_id, + "user_data": pritunl_user_data, + }, + ) + ) + + # Ensure provided settings match with the ones returned by Pritunl + for k, v in iteritems(pritunl_user_data): + assert create_response[k] == v + + # Update the newly created user to ensure only certain settings are changed + + user_updates = { + "name": "bob", + "email": "bob@company.com", + "disabled": True, + } + + update_response = api.post_pritunl_user( + **dict_merge( + pritunl_settings, + { + "organization_id": org_id, + "user_id": create_response["id"], + "user_data": dict_merge(pritunl_user_data, user_updates), + }, + ) + ) + + # Ensure only certain settings changed and the rest remained untouched. 
+ for k, v in iteritems(update_response): + if k in update_response: + assert update_response[k] == v + else: + assert update_response[k] == create_response[k] + + # Test for DELETE operation on Pritunl API + @pytest.mark.parametrize( + "org_id,user_id", [("58070daee63f3b2e6e472c36", "590add71e63f3b72d8bb951a")] + ) + def test_delete_pritunl_user( + self, pritunl_settings, org_id, user_id, delete_pritunl_user_mock + ): + api._delete_pritunl_user = delete_pritunl_user_mock() + + response = api.delete_pritunl_user( + **dict_merge( + pritunl_settings, + { + "organization_id": org_id, + "user_id": user_id, + }, + ) + ) + + assert response == {} + + # Test API call errors + def test_pritunl_error(self, pritunl_settings, get_pritunl_error_mock): + api.pritunl_auth_request = get_pritunl_error_mock() + + with pytest.raises(api.PritunlException): + response = api.list_pritunl_organizations(**pritunl_settings) diff --git a/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user.py b/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user.py new file mode 100644 index 0000000000..114fe8a81a --- /dev/null +++ b/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user.py @@ -0,0 +1,208 @@ +# -*- coding: utf-8 -*- +# (c) 2021 Florian Dambrine +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +import sys + +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible.module_utils.six import iteritems +from ansible_collections.community.general.plugins.modules.net_tools.pritunl import ( + pritunl_user, +) +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api import ( + PritunlDeleteUserMock, + PritunlListOrganizationMock, + PritunlListUserMock, + PritunlPostUserMock, + PritunlPutUserMock, +) +from 
ansible_collections.community.general.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) + +__metaclass__ = type + + +def mock_pritunl_api(func, **kwargs): + def wrapped(self=None): + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ): + with self.patch_get_pritunl_users(side_effect=PritunlListUserMock): + with self.patch_add_pritunl_users(side_effect=PritunlPostUserMock): + with self.patch_delete_pritunl_users( + side_effect=PritunlDeleteUserMock + ): + func(self, **kwargs) + + return wrapped + + +class TestPritunlUser(ModuleTestCase): + def setUp(self): + super(TestPritunlUser, self).setUp() + self.module = pritunl_user + + # Add backward compatibility + if sys.version_info < (3, 2): + self.assertRegex = self.assertRegexpMatches + + def tearDown(self): + super(TestPritunlUser, self).tearDown() + + def patch_get_pritunl_users(self, **kwds): + return patch( + "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_users", + autospec=True, + **kwds + ) + + def patch_add_pritunl_users(self, **kwds): + return patch( + "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._post_pritunl_user", + autospec=True, + **kwds + ) + + def patch_update_pritunl_users(self, **kwds): + return patch( + "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._put_pritunl_user", + autospec=True, + **kwds + ) + + def patch_delete_pritunl_users(self, **kwds): + return patch( + "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._delete_pritunl_user", + autospec=True, + **kwds + ) + + def patch_get_pritunl_organizations(self, **kwds): + return patch( + "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations", + autospec=True, + **kwds + ) + + def test_without_parameters(self): + """Test without parameters""" + 
set_module_args({}) + with self.assertRaises(AnsibleFailJson): + self.module.main() + + @mock_pritunl_api + def test_present(self): + """Test Pritunl user creation and update.""" + user_params = { + "user_name": "alice", + "user_email": "alice@company.com", + } + set_module_args( + dict_merge( + { + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + "organization": "GumGum", + }, + user_params, + ) + ) + + with self.patch_update_pritunl_users( + side_effect=PritunlPostUserMock + ) as post_mock: + with self.assertRaises(AnsibleExitJson) as create_result: + self.module.main() + + create_exc = create_result.exception.args[0] + + self.assertTrue(create_exc["changed"]) + self.assertEqual(create_exc["response"]["name"], user_params["user_name"]) + self.assertEqual(create_exc["response"]["email"], user_params["user_email"]) + self.assertFalse(create_exc["response"]["disabled"]) + + # Changing user from alice to bob should update certain fields only + + new_user_params = { + "user_name": "bob", + "user_email": "bob@company.com", + "user_disabled": True, + } + set_module_args( + dict_merge( + { + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + "organization": "GumGum", + }, + new_user_params, + ) + ) + + with self.patch_update_pritunl_users( + side_effect=PritunlPutUserMock + ) as put_mock: + + with self.assertRaises(AnsibleExitJson) as update_result: + self.module.main() + + update_exc = update_result.exception.args[0] + + # Ensure only certain settings changed and the rest remained untouched. 
+ for k, v in iteritems(update_exc): + if k in new_user_params: + assert update_exc[k] == v + else: + assert update_exc[k] == create_exc[k] + + @mock_pritunl_api + def test_absent(self): + """Test user removal from Pritunl.""" + set_module_args( + { + "state": "absent", + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + "organization": "GumGum", + "user_name": "florian", + } + ) + + with self.assertRaises(AnsibleExitJson) as result: + self.module.main() + + exc = result.exception.args[0] + + self.assertTrue(exc["changed"]) + self.assertEqual(exc["response"], {}) + + @mock_pritunl_api + def test_absent_failure(self): + """Test user removal from a non existing organization.""" + set_module_args( + { + "state": "absent", + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + "organization": "Unknown", + "user_name": "floria@company.com", + } + ) + + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() + + exc = result.exception.args[0] + + self.assertRegex(exc["msg"], "Can not remove user") diff --git a/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user_info.py b/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user_info.py new file mode 100644 index 0000000000..b253dc27ec --- /dev/null +++ b/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user_info.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +import sys + +from ansible_collections.community.general.plugins.modules.net_tools.pritunl import ( + pritunl_user_info, +) +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api 
import ( + PritunlListOrganizationMock, + PritunlListUserMock, +) +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) + +__metaclass__ = type + + +class TestPritunlUserInfo(ModuleTestCase): + def setUp(self): + super(TestPritunlUserInfo, self).setUp() + self.module = pritunl_user_info + + # Add backward compatibility + if sys.version_info < (3, 2): + self.assertRegex = self.assertRegexpMatches + + def tearDown(self): + super(TestPritunlUserInfo, self).tearDown() + + def patch_get_pritunl_users(self, **kwds): + return patch( + "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_users", + autospec=True, + **kwds + ) + + def patch_get_pritunl_organizations(self, **kwds): + return patch( + "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations", + autospec=True, + **kwds + ) + + def test_without_parameters(self): + """Test without parameters""" + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as org_mock: + with self.patch_get_pritunl_users( + side_effect=PritunlListUserMock + ) as user_mock: + set_module_args({}) + with self.assertRaises(AnsibleFailJson): + self.module.main() + + self.assertEqual(org_mock.call_count, 0) + self.assertEqual(user_mock.call_count, 0) + + def test_missing_organization(self): + """Failure must occur when the requested organization is not found.""" + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as org_mock: + with self.patch_get_pritunl_users( + side_effect=PritunlListUserMock + ) as user_mock: + with self.assertRaises(AnsibleFailJson) as result: + set_module_args( + { + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + "organization": "Unknown", + } + ) + self.module.main() + + 
self.assertEqual(org_mock.call_count, 1) + self.assertEqual(user_mock.call_count, 0) + + exc = result.exception.args[0] + self.assertRegex(exc["msg"], "Can not list users from the organization") + + def test_get_all_client_users_from_organization(self): + """ + The list of all Pritunl client users from the organization must be returned when no user specified. + """ + expected_user_type = "client" + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as org_mock: + with self.patch_get_pritunl_users( + side_effect=PritunlListUserMock + ) as user_mock: + with self.assertRaises(AnsibleExitJson) as result: + set_module_args( + { + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + "organization": "GumGum", + } + ) + self.module.main() + + self.assertEqual(org_mock.call_count, 1) + self.assertEqual(user_mock.call_count, 1) + + exc = result.exception.args[0] + # module should not report changes + self.assertFalse(exc["changed"]) + # user_type when not provided is set client and should only return client user type + self.assertEqual(len(exc["users"]), 1) + for user in exc["users"]: + self.assertEqual(user["type"], expected_user_type) + + def test_get_specific_server_user_from_organization(self): + """ + Retrieving a specific user from the organization must return a single record. 
+ """ + expected_user_type = "server" + expected_user_name = "ops" + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as org_mock: + with self.patch_get_pritunl_users( + side_effect=PritunlListUserMock + ) as user_mock: + with self.assertRaises(AnsibleExitJson) as result: + set_module_args( + { + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + "organization": "GumGum", + "user_name": expected_user_name, + "user_type": expected_user_type, + } + ) + self.module.main() + + self.assertEqual(org_mock.call_count, 1) + self.assertEqual(user_mock.call_count, 1) + + exc = result.exception.args[0] + # module should not report changes + self.assertFalse(exc["changed"]) + self.assertEqual(len(exc["users"]), 1) + for user in exc["users"]: + self.assertEqual(user["type"], expected_user_type) + self.assertEqual(user["name"], expected_user_name) From c147d2fb98fd8443096667ae56d667a0f1541138 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 21 Mar 2021 23:37:12 +1300 Subject: [PATCH 0125/3093] snmp_facts - added timeout and retries params to module (#2065) * added timeout and retries params to module * added changelog fragment * Update plugins/modules/net_tools/snmp_facts.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/snmp_facts.py Co-authored-by: Felix Fontein * removed default for retries per suggestion in PR * Update plugins/modules/net_tools/snmp_facts.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2065-snmp-facts-timeout.yml | 2 ++ plugins/modules/net_tools/snmp_facts.py | 17 +++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2065-snmp-facts-timeout.yml diff --git a/changelogs/fragments/2065-snmp-facts-timeout.yml b/changelogs/fragments/2065-snmp-facts-timeout.yml new file mode 100644 index 0000000000..0e6a4e54fa 
--- /dev/null +++ b/changelogs/fragments/2065-snmp-facts-timeout.yml @@ -0,0 +1,2 @@ +minor_changes: + - snmp_facts - added parameters ``timeout`` and ``retries`` to module (https://github.com/ansible-collections/community.general/issues/980). diff --git a/plugins/modules/net_tools/snmp_facts.py b/plugins/modules/net_tools/snmp_facts.py index 661db46060..3918a3a1c0 100644 --- a/plugins/modules/net_tools/snmp_facts.py +++ b/plugins/modules/net_tools/snmp_facts.py @@ -67,6 +67,16 @@ options: - Encryption key. - Required if I(level) is C(authPriv). type: str + timeout: + description: + - Response timeout in seconds. + type: int + version_added: 2.3.0 + retries: + description: + - Maximum number of request retries, 0 retries means just a single request. + type: int + version_added: 2.3.0 ''' EXAMPLES = r''' @@ -271,6 +281,8 @@ def main(): privacy=dict(type='str', choices=['aes', 'des']), authkey=dict(type='str', no_log=True), privkey=dict(type='str', no_log=True), + timeout=dict(type='int'), + retries=dict(type='int'), ), required_together=( ['username', 'level', 'integrity', 'authkey'], @@ -285,6 +297,7 @@ def main(): module.fail_json(msg=missing_required_lib('pysnmp'), exception=PYSNMP_IMP_ERR) cmdGen = cmdgen.CommandGenerator() + transport_opts = dict((k, m_args[k]) for k in ('timeout', 'retries') if m_args[k] is not None) # Verify that we receive a community when using snmp v2 if m_args['version'] in ("v2", "v2c"): @@ -333,7 +346,7 @@ def main(): errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd( snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161)), + cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts), cmdgen.MibVariable(p.sysDescr,), cmdgen.MibVariable(p.sysObjectId,), cmdgen.MibVariable(p.sysUpTime,), @@ -364,7 +377,7 @@ def main(): errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161)), + cmdgen.UdpTransportTarget((m_args['host'], 161), 
**transport_opts), cmdgen.MibVariable(p.ifIndex,), cmdgen.MibVariable(p.ifDescr,), cmdgen.MibVariable(p.ifMtu,), From 652939090147b1389ce8270f5fdb136b21f637e7 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sun, 21 Mar 2021 08:21:54 -0400 Subject: [PATCH 0126/3093] New Filter plugin from_csv (#2037) * Added from_csv filter and integration tests * Cleaning up whitespace * Adding changelog fragment * Updated changelog fragment name * Removed temp fragment * Refactoring csv functions Part 1 * Syncing refactored csv modules/filters * Adding unit tests for csv Module_Util * Updating changelog fragment * Correcting whitespace in unit test * Improving changelog fragment Co-authored-by: Felix Fontein * Update changelogs/fragments/2037-add-from-csv-filter.yml Co-authored-by: Felix Fontein --- .../fragments/2037-add-from-csv-filter.yml | 7 + plugins/filter/from_csv.py | 49 ++++++ plugins/module_utils/csv.py | 67 +++++++ plugins/modules/files/read_csv.py | 66 ++----- .../targets/filter_from_csv/aliases | 2 + .../targets/filter_from_csv/tasks/main.yml | 49 ++++++ .../targets/filter_from_csv/vars/main.yml | 26 +++ tests/unit/plugins/module_utils/test_csv.py | 164 ++++++++++++++++++ 8 files changed, 383 insertions(+), 47 deletions(-) create mode 100644 changelogs/fragments/2037-add-from-csv-filter.yml create mode 100644 plugins/filter/from_csv.py create mode 100644 plugins/module_utils/csv.py create mode 100644 tests/integration/targets/filter_from_csv/aliases create mode 100644 tests/integration/targets/filter_from_csv/tasks/main.yml create mode 100644 tests/integration/targets/filter_from_csv/vars/main.yml create mode 100644 tests/unit/plugins/module_utils/test_csv.py diff --git a/changelogs/fragments/2037-add-from-csv-filter.yml b/changelogs/fragments/2037-add-from-csv-filter.yml new file mode 100644 index 0000000000..d99c4cd0a8 --- /dev/null +++ b/changelogs/fragments/2037-add-from-csv-filter.yml @@ -0,0 +1,7 @@ +--- +add plugin.filter: + - name: from_csv + description: 
Converts CSV text input into list of dicts +minor_changes: + - csv module utils - new module_utils for shared functions between ``from_csv`` filter and ``read_csv`` module (https://github.com/ansible-collections/community.general/pull/2037). + - read_csv - refactored read_csv module to use shared csv functions from csv module_utils (https://github.com/ansible-collections/community.general/pull/2037). diff --git a/plugins/filter/from_csv.py b/plugins/filter/from_csv.py new file mode 100644 index 0000000000..13a18aa88a --- /dev/null +++ b/plugins/filter/from_csv.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) +# Copyright: (c) 2018, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.errors import AnsibleFilterError +from ansible.module_utils._text import to_native + +from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, + DialectNotAvailableError, + CustomDialectFailureError) + + +def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitialspace=None, strict=None): + + dialect_params = { + "delimiter": delimiter, + "skipinitialspace": skipinitialspace, + "strict": strict, + } + + try: + dialect = initialize_dialect(dialect, **dialect_params) + except (CustomDialectFailureError, DialectNotAvailableError) as e: + raise AnsibleFilterError(to_native(e)) + + reader = read_csv(data, dialect, fieldnames) + + data_list = [] + + try: + for row in reader: + data_list.append(row) + except CSVError as e: + raise AnsibleFilterError("Unable to process file: %s" % to_native(e)) + + return data_list + + +class FilterModule(object): + + def filters(self): + return { + 'from_csv': from_csv + } diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py new file mode 100644 
index 0000000000..426e2eb279 --- /dev/null +++ b/plugins/module_utils/csv.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) +# Copyright: (c) 2018, Dag Wieers (@dagwieers) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import csv +from io import BytesIO, StringIO + +from ansible.module_utils._text import to_native +from ansible.module_utils.six import PY3 + + +class CustomDialectFailureError(Exception): + pass + + +class DialectNotAvailableError(Exception): + pass + + +CSVError = csv.Error + + +def initialize_dialect(dialect, **kwargs): + # Add Unix dialect from Python 3 + class unix_dialect(csv.Dialect): + """Describe the usual properties of Unix-generated CSV files.""" + delimiter = ',' + quotechar = '"' + doublequote = True + skipinitialspace = False + lineterminator = '\n' + quoting = csv.QUOTE_ALL + + csv.register_dialect("unix", unix_dialect) + + if dialect not in csv.list_dialects(): + raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." 
% dialect) + + # Create a dictionary from only set options + dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None) + if dialect_params: + try: + csv.register_dialect('custom', dialect, **dialect_params) + except TypeError as e: + raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e)) + dialect = 'custom' + + return dialect + + +def read_csv(data, dialect, fieldnames=None): + + data = to_native(data, errors='surrogate_or_strict') + + if PY3: + fake_fh = StringIO(data) + else: + fake_fh = BytesIO(data) + + reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect) + + return reader diff --git a/plugins/modules/files/read_csv.py b/plugins/modules/files/read_csv.py index 24a77c0e28..c48efc7440 100644 --- a/plugins/modules/files/read_csv.py +++ b/plugins/modules/files/read_csv.py @@ -137,26 +137,12 @@ list: gid: 500 ''' -import csv -from io import BytesIO, StringIO - from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_text -from ansible.module_utils.six import PY3 +from ansible.module_utils._text import to_native - -# Add Unix dialect from Python 3 -class unix_dialect(csv.Dialect): - """Describe the usual properties of Unix-generated CSV files.""" - delimiter = ',' - quotechar = '"' - doublequote = True - skipinitialspace = False - lineterminator = '\n' - quoting = csv.QUOTE_ALL - - -csv.register_dialect("unix", unix_dialect) +from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, + DialectNotAvailableError, + CustomDialectFailureError) def main(): @@ -180,38 +166,24 @@ def main(): fieldnames = module.params['fieldnames'] unique = module.params['unique'] - if dialect not in csv.list_dialects(): - module.fail_json(msg="Dialect '%s' is not supported by your version of python." 
% dialect) + dialect_params = { + "delimiter": module.params['delimiter'], + "skipinitialspace": module.params['skipinitialspace'], + "strict": module.params['strict'], + } - dialect_options = dict( - delimiter=module.params['delimiter'], - skipinitialspace=module.params['skipinitialspace'], - strict=module.params['strict'], - ) - - # Create a dictionary from only set options - dialect_params = dict((k, v) for k, v in dialect_options.items() if v is not None) - if dialect_params: - try: - csv.register_dialect('custom', dialect, **dialect_params) - except TypeError as e: - module.fail_json(msg="Unable to create custom dialect: %s" % to_text(e)) - dialect = 'custom' + try: + dialect = initialize_dialect(dialect, **dialect_params) + except (CustomDialectFailureError, DialectNotAvailableError) as e: + module.fail_json(msg=to_native(e)) try: with open(path, 'rb') as f: data = f.read() except (IOError, OSError) as e: - module.fail_json(msg="Unable to open file: %s" % to_text(e)) + module.fail_json(msg="Unable to open file: %s" % to_native(e)) - if PY3: - # Manually decode on Python3 so that we can use the surrogateescape error handler - data = to_text(data, errors='surrogate_or_strict') - fake_fh = StringIO(data) - else: - fake_fh = BytesIO(data) - - reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect) + reader = read_csv(data, dialect, fieldnames) if key and key not in reader.fieldnames: module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames))) @@ -223,16 +195,16 @@ def main(): try: for row in reader: data_list.append(row) - except csv.Error as e: - module.fail_json(msg="Unable to process file: %s" % to_text(e)) + except CSVError as e: + module.fail_json(msg="Unable to process file: %s" % to_native(e)) else: try: for row in reader: if unique and row[key] in data_dict: module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key])) data_dict[row[key]] = row - except csv.Error as 
e: - module.fail_json(msg="Unable to process file: %s" % to_text(e)) + except CSVError as e: + module.fail_json(msg="Unable to process file: %s" % to_native(e)) module.exit_json(dict=data_dict, list=data_list) diff --git a/tests/integration/targets/filter_from_csv/aliases b/tests/integration/targets/filter_from_csv/aliases new file mode 100644 index 0000000000..f04737b845 --- /dev/null +++ b/tests/integration/targets/filter_from_csv/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/filter_from_csv/tasks/main.yml b/tests/integration/targets/filter_from_csv/tasks/main.yml new file mode 100644 index 0000000000..aafb28fbb0 --- /dev/null +++ b/tests/integration/targets/filter_from_csv/tasks/main.yml @@ -0,0 +1,49 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Parse valid csv input + assert: + that: + - "valid_comma_separated | community.general.from_csv == expected_result" + +- name: Parse valid csv input containing spaces with/without skipinitialspace=True + assert: + that: + - "valid_comma_separated_spaces | community.general.from_csv(skipinitialspace=True) == expected_result" + - "valid_comma_separated_spaces | community.general.from_csv != expected_result" + +- name: Parse valid csv input with no headers with/without specifiying fieldnames + assert: + that: + - "valid_comma_separated_no_headers | community.general.from_csv(fieldnames=['id','name','role']) == expected_result" + - "valid_comma_separated_no_headers | community.general.from_csv != expected_result" + +- name: Parse valid pipe-delimited csv input with/without delimiter=| + assert: + that: + - "valid_pipe_separated | 
community.general.from_csv(delimiter='|') == expected_result" + - "valid_pipe_separated | community.general.from_csv != expected_result" + +- name: Register result of invalid csv input when strict=False + debug: + var: "invalid_comma_separated | community.general.from_csv" + register: _invalid_csv_strict_false + +- name: Test invalid csv input when strict=False is successful + assert: + that: + - _invalid_csv_strict_false is success + +- name: Register result of invalid csv input when strict=True + debug: + var: "invalid_comma_separated | community.general.from_csv(strict=True)" + register: _invalid_csv_strict_true + ignore_errors: True + +- name: Test invalid csv input when strict=True is failed + assert: + that: + - _invalid_csv_strict_true is failed + - _invalid_csv_strict_true.msg is match('Unable to process file:.*') diff --git a/tests/integration/targets/filter_from_csv/vars/main.yml b/tests/integration/targets/filter_from_csv/vars/main.yml new file mode 100644 index 0000000000..5801bc20dc --- /dev/null +++ b/tests/integration/targets/filter_from_csv/vars/main.yml @@ -0,0 +1,26 @@ +valid_comma_separated: | + id,name,role + 1,foo,bar + 2,bar,baz +valid_comma_separated_spaces: | + id,name,role + 1, foo, bar + 2, bar, baz +valid_comma_separated_no_headers: | + 1,foo,bar + 2,bar,baz +valid_pipe_separated: | + id|name|role + 1|foo|bar + 2|bar|baz +invalid_comma_separated: | + id,name,role + 1,foo,bar + 2,"b"ar",baz +expected_result: + - id: '1' + name: foo + role: bar + - id: '2' + name: bar + role: baz diff --git a/tests/unit/plugins/module_utils/test_csv.py b/tests/unit/plugins/module_utils/test_csv.py new file mode 100644 index 0000000000..b31915d66d --- /dev/null +++ b/tests/unit/plugins/module_utils/test_csv.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + 
+from ansible_collections.community.general.plugins.module_utils import csv + + +VALID_CSV = [ + ( + 'excel', + {}, + None, + "id,name,role\n1,foo,bar\n2,bar,baz", + [ + { + "id": "1", + "name": "foo", + "role": "bar", + }, + { + "id": "2", + "name": "bar", + "role": "baz", + }, + ] + ), + ( + 'excel', + {"skipinitialspace": True}, + None, + "id,name,role\n1, foo, bar\n2, bar, baz", + [ + { + "id": "1", + "name": "foo", + "role": "bar", + }, + { + "id": "2", + "name": "bar", + "role": "baz", + }, + ] + ), + ( + 'excel', + {"delimiter": '|'}, + None, + "id|name|role\n1|foo|bar\n2|bar|baz", + [ + { + "id": "1", + "name": "foo", + "role": "bar", + }, + { + "id": "2", + "name": "bar", + "role": "baz", + }, + ] + ), + ( + 'unix', + {}, + None, + "id,name,role\n1,foo,bar\n2,bar,baz", + [ + { + "id": "1", + "name": "foo", + "role": "bar", + }, + { + "id": "2", + "name": "bar", + "role": "baz", + }, + ] + ), + ( + 'excel', + {}, + ['id', 'name', 'role'], + "1,foo,bar\n2,bar,baz", + [ + { + "id": "1", + "name": "foo", + "role": "bar", + }, + { + "id": "2", + "name": "bar", + "role": "baz", + }, + ] + ), +] + +INVALID_CSV = [ + ( + 'excel', + {'strict': True}, + None, + 'id,name,role\n1,"f"oo",bar\n2,bar,baz', + ), +] + +INVALID_DIALECT = [ + ( + 'invalid', + {}, + None, + "id,name,role\n1,foo,bar\n2,bar,baz", + ), +] + + +@pytest.mark.parametrize("dialect,dialect_params,fieldnames,data,expected", VALID_CSV) +def test_valid_csv(data, dialect, dialect_params, fieldnames, expected): + dialect = csv.initialize_dialect(dialect, **dialect_params) + reader = csv.read_csv(data, dialect, fieldnames) + result = True + + for idx, row in enumerate(reader): + for k, v in row.items(): + if expected[idx][k] != v: + result = False + break + + assert result + + +@pytest.mark.parametrize("dialect,dialect_params,fieldnames,data", INVALID_CSV) +def test_invalid_csv(data, dialect, dialect_params, fieldnames): + dialect = csv.initialize_dialect(dialect, **dialect_params) + reader = 
csv.read_csv(data, dialect, fieldnames) + result = False + + try: + for row in reader: + continue + except csv.CSVError: + result = True + + assert result + + +@pytest.mark.parametrize("dialect,dialect_params,fieldnames,data", INVALID_DIALECT) +def test_invalid_dialect(data, dialect, dialect_params, fieldnames): + result = False + + try: + dialect = csv.initialize_dialect(dialect, **dialect_params) + except csv.DialectNotAvailableError: + result = True + + assert result From 5fc56676c2cb58791e9d222588b9c0da74d0068e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 22 Mar 2021 01:56:41 +1300 Subject: [PATCH 0127/3093] stacki_host - configured params to use fallback instead of default (#2072) * configuredd params to use fallback instead of default * added changelog fragment --- .../fragments/2072-stacki-host-params-fallback.yml | 2 ++ .../modules/remote_management/stacki/stacki_host.py | 10 +++++----- tests/sanity/ignore-2.10.txt | 1 - tests/sanity/ignore-2.11.txt | 1 - tests/sanity/ignore-2.9.txt | 1 - 5 files changed, 7 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2072-stacki-host-params-fallback.yml diff --git a/changelogs/fragments/2072-stacki-host-params-fallback.yml b/changelogs/fragments/2072-stacki-host-params-fallback.yml new file mode 100644 index 0000000000..f586a6eb0c --- /dev/null +++ b/changelogs/fragments/2072-stacki-host-params-fallback.yml @@ -0,0 +1,2 @@ +bugfixes: + - stacki_host - replaced ``default`` to environment variables with ``fallback`` to them (https://github.com/ansible-collections/community.general/pull/2072). 
diff --git a/plugins/modules/remote_management/stacki/stacki_host.py b/plugins/modules/remote_management/stacki/stacki_host.py index 372ba2df99..8bdc0f82f6 100644 --- a/plugins/modules/remote_management/stacki/stacki_host.py +++ b/plugins/modules/remote_management/stacki/stacki_host.py @@ -53,6 +53,7 @@ options: description: - Set value to True to force node into install state if it already exists in stacki. type: bool + default: no state: description: - Set value to the desired state for the specified host. @@ -103,9 +104,8 @@ stdout_lines: ''' import json -import os -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url @@ -235,9 +235,9 @@ def main(): prim_intf_ip=dict(type='str'), network=dict(type='str', default='private'), prim_intf_mac=dict(type='str'), - stacki_user=dict(type='str', required=True, default=os.environ.get('stacki_user')), - stacki_password=dict(type='str', required=True, default=os.environ.get('stacki_password'), no_log=True), - stacki_endpoint=dict(type='str', required=True, default=os.environ.get('stacki_endpoint')), + stacki_user=dict(type='str', required=True, fallback=(env_fallback, ['stacki_user'])), + stacki_password=dict(type='str', required=True, fallback=(env_fallback, ['stacki_password']), no_log=True), + stacki_endpoint=dict(type='str', required=True, fallback=(env_fallback, ['stacki_endpoint'])), force_install=dict(type='bool', default=False), ), supports_check_mode=False, diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 2271765963..57500abf97 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -159,7 +159,6 @@ plugins/modules/remote_management/oneview/oneview_san_manager.py validate-module plugins/modules/remote_management/oneview/oneview_san_manager_info.py 
validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index e01f0ee998..f471a31461 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -158,7 +158,6 @@ plugins/modules/remote_management/oneview/oneview_san_manager.py validate-module plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 40eef942b4..d92914cc1a 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -194,7 +194,6 @@ plugins/modules/remote_management/oneview/oneview_san_manager.py validate-module 
plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid From af441aecfcc59e38f6ea87a35c2a66adc64b8f6c Mon Sep 17 00:00:00 2001 From: Triantafyllos Date: Mon, 22 Mar 2021 19:27:55 +0200 Subject: [PATCH 0128/3093] improve force_archive parameter documentation of archive module (#2052) * improve documentation for force_archive parameter * add link to unarchive module Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- plugins/modules/files/archive.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 98bcbf3605..e071c1bced 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -44,8 +44,9 @@ options: elements: path force_archive: description: - - Allow you to force the module to treat this as an archive even if only a single file is specified. - - By default behaviour is maintained. i.e A when a single file is specified it is compressed only (not archived). + - Allows you to force the module to treat this as an archive even if only a single file is specified. + - By default when a single file is specified it is compressed only (not archived). + - Enable this if you want to use M(ansible.builtin.unarchive) on an archive of a single file created with this module. 
type: bool default: false remove: From 88994ef2b786cf29081456756594ee7ce053ff6c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 23 Mar 2021 06:51:27 +1300 Subject: [PATCH 0129/3093] Fixed documentation (#2062) --- plugins/modules/files/archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index e071c1bced..8b8088dae1 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -36,6 +36,7 @@ options: description: - The file name of the destination archive. The parent directory must exists on the remote host. - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list. + - If the destination archive already exists, it will be truncated and overwritten. type: path exclude_path: description: From 09351d90106f9e972d93b3c5a455c18402b87207 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 23 Mar 2021 06:50:50 +0100 Subject: [PATCH 0130/3093] Temporarily disable copr integration tests due to failures with remote repository. (#2083) --- tests/integration/targets/copr/aliases | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/targets/copr/aliases b/tests/integration/targets/copr/aliases index 0ad5e1c80c..fbe7da85db 100644 --- a/tests/integration/targets/copr/aliases +++ b/tests/integration/targets/copr/aliases @@ -3,3 +3,4 @@ needs/root skip/macos skip/osx skip/freebsd +disabled # FIXME From 7fe9dd7a60add2e488232916479f32993b6c9970 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 23 Mar 2021 07:08:04 +0100 Subject: [PATCH 0131/3093] Install collections in CI directly with git to work around the Galaxy CloudFlare PITA. 
(#2082) --- tests/utils/shippable/shippable.sh | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/utils/shippable/shippable.sh b/tests/utils/shippable/shippable.sh index 0b0b8316ac..f239e86975 100755 --- a/tests/utils/shippable/shippable.sh +++ b/tests/utils/shippable/shippable.sh @@ -77,13 +77,18 @@ fi if [ "${script}" != "sanity" ] || [ "${test}" == "sanity/extra" ]; then # Nothing further should be added to this list. # This is to prevent modules or plugins in this collection having a runtime dependency on other collections. - retry ansible-galaxy -vvv collection install community.internal_test_tools + retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/internal_test_tools" + # NOTE: we're installing with git to work around Galaxy being a huge PITA (https://github.com/ansible/galaxy/issues/2429) + # retry ansible-galaxy -vvv collection install community.internal_test_tools fi if [ "${script}" != "sanity" ] && [ "${script}" != "units" ]; then # To prevent Python dependencies on other collections only install other collections for integration tests - retry ansible-galaxy -vvv collection install ansible.posix - retry ansible-galaxy -vvv collection install community.crypto + retry git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/ansible/posix" + retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/crypto" + # NOTE: we're installing with git to work around Galaxy being a huge PITA (https://github.com/ansible/galaxy/issues/2429) + # retry ansible-galaxy -vvv collection install ansible.posix + # retry ansible-galaxy -vvv collection install community.crypto fi # END: HACK From be13f41b30e29854942eac547121b4479d867429 
Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 23 Mar 2021 13:30:44 +0100 Subject: [PATCH 0132/3093] Next expected release is 2.4.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 5e03c0e8dc..335b902da2 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 2.3.0 +version: 2.4.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 926c0a71d03a447580b6a034432608c632477059 Mon Sep 17 00:00:00 2001 From: Georg Gadinger Date: Wed, 24 Mar 2021 07:32:12 +0100 Subject: [PATCH 0133/3093] opennebula: port one_image to pyone (#2032) --- changelogs/fragments/2032-one_image-pyone.yml | 2 + plugins/modules/cloud/opennebula/one_image.py | 87 +++++++++---------- 2 files changed, 44 insertions(+), 45 deletions(-) create mode 100644 changelogs/fragments/2032-one_image-pyone.yml diff --git a/changelogs/fragments/2032-one_image-pyone.yml b/changelogs/fragments/2032-one_image-pyone.yml new file mode 100644 index 0000000000..4975cb73ad --- /dev/null +++ b/changelogs/fragments/2032-one_image-pyone.yml @@ -0,0 +1,2 @@ +breaking_changes: + - one_image - use pyone instead of python-oca (https://github.com/ansible-collections/community.general/pull/2032). 
diff --git a/plugins/modules/cloud/opennebula/one_image.py b/plugins/modules/cloud/opennebula/one_image.py index 867bab628a..5e62ee9ee4 100644 --- a/plugins/modules/cloud/opennebula/one_image.py +++ b/plugins/modules/cloud/opennebula/one_image.py @@ -31,7 +31,7 @@ short_description: Manages OpenNebula images description: - Manages OpenNebula images requirements: - - python-oca + - pyone options: api_url: description: @@ -88,7 +88,7 @@ EXAMPLES = ''' - name: Print the IMAGE properties ansible.builtin.debug: - msg: result + var: result - name: Rename existing IMAGE community.general.one_image: @@ -168,21 +168,20 @@ running_vms: ''' try: - import oca - HAS_OCA = True + import pyone + HAS_PYONE = True except ImportError: - HAS_OCA = False + HAS_PYONE = False from ansible.module_utils.basic import AnsibleModule import os def get_image(module, client, predicate): - pool = oca.ImagePool(client) # Filter -2 means fetch all images user can Use - pool.info(filter=-2) + pool = client.imagepool.info(-2, -1, -1, -1) - for image in pool: + for image in pool.IMAGE: if predicate(image): return image @@ -190,11 +189,11 @@ def get_image(module, client, predicate): def get_image_by_name(module, client, image_name): - return get_image(module, client, lambda image: (image.name == image_name)) + return get_image(module, client, lambda image: (image.NAME == image_name)) def get_image_by_id(module, client, image_id): - return get_image(module, client, lambda image: (image.id == image_id)) + return get_image(module, client, lambda image: (image.ID == image_id)) def get_image_instance(module, client, requested_id, requested_name): @@ -208,30 +207,28 @@ IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', def get_image_info(image): - image.info() - info = { - 'id': image.id, - 'name': image.name, - 'state': IMAGE_STATES[image.state], - 'running_vms': image.running_vms, - 'used': bool(image.running_vms), - 'user_name': image.uname, - 'user_id': image.uid, - 
'group_name': image.gname, - 'group_id': image.gid, + 'id': image.ID, + 'name': image.NAME, + 'state': IMAGE_STATES[image.STATE], + 'running_vms': image.RUNNING_VMS, + 'used': bool(image.RUNNING_VMS), + 'user_name': image.UNAME, + 'user_id': image.UID, + 'group_name': image.GNAME, + 'group_id': image.GID, } return info -def wait_for_state(module, image, wait_timeout, state_predicate): +def wait_for_state(module, client, image_id, wait_timeout, state_predicate): import time start_time = time.time() while (time.time() - start_time) < wait_timeout: - image.info() - state = image.state + image = client.image.info(image_id) + state = image.STATE if state_predicate(state): return image @@ -241,19 +238,19 @@ def wait_for_state(module, image, wait_timeout, state_predicate): module.fail_json(msg="Wait timeout has expired!") -def wait_for_ready(module, image, wait_timeout=60): - return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')])) +def wait_for_ready(module, client, image_id, wait_timeout=60): + return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')])) -def wait_for_delete(module, image, wait_timeout=60): - return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')])) +def wait_for_delete(module, client, image_id, wait_timeout=60): + return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')])) def enable_image(module, client, image, enable): - image.info() + image = client.image.info(image.ID) changed = False - state = image.state + state = image.STATE if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]: if enable: @@ -266,7 +263,7 @@ def enable_image(module, client, image, enable): changed = True if changed and not module.check_mode: - client.call('image.enable', image.id, enable) + 
client.image.enable(image.ID, enable) result = get_image_info(image) result['changed'] = changed @@ -276,7 +273,7 @@ def enable_image(module, client, image, enable): def clone_image(module, client, image, new_name): if new_name is None: - new_name = "Copy of " + image.name + new_name = "Copy of " + image.NAME tmp_image = get_image_by_name(module, client, new_name) if tmp_image: @@ -284,13 +281,13 @@ def clone_image(module, client, image, new_name): result['changed'] = False return result - if image.state == IMAGE_STATES.index('DISABLED'): + if image.STATE == IMAGE_STATES.index('DISABLED'): module.fail_json(msg="Cannot clone DISABLED image") if not module.check_mode: - new_id = client.call('image.clone', image.id, new_name) - image = get_image_by_id(module, client, new_id) - wait_for_ready(module, image) + new_id = client.image.clone(image.ID, new_name) + wait_for_ready(module, client, new_id) + image = client.image.info(new_id) result = get_image_info(image) result['changed'] = True @@ -302,7 +299,7 @@ def rename_image(module, client, image, new_name): if new_name is None: module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'") - if new_name == image.name: + if new_name == image.NAME: result = get_image_info(image) result['changed'] = False return result @@ -312,7 +309,7 @@ def rename_image(module, client, image, new_name): module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.id)) if not module.check_mode: - client.call('image.rename', image.id, new_name) + client.image.rename(image.ID, new_name) result = get_image_info(image) result['changed'] = True @@ -324,12 +321,12 @@ def delete_image(module, client, image): if not image: return {'changed': False} - if image.running_vms > 0: - module.fail_json(msg="Cannot delete image. There are " + str(image.running_vms) + " VMs using it.") + if image.RUNNING_VMS > 0: + module.fail_json(msg="Cannot delete image. 
There are " + str(image.RUNNING_VMS) + " VMs using it.") if not module.check_mode: - client.call('image.delete', image.id) - wait_for_delete(module, image) + client.image.delete(image.ID) + wait_for_delete(module, client, image.ID) return {'changed': True} @@ -378,8 +375,8 @@ def main(): mutually_exclusive=[['id', 'name']], supports_check_mode=True) - if not HAS_OCA: - module.fail_json(msg='This module requires python-oca to work!') + if not HAS_PYONE: + module.fail_json(msg='This module requires pyone to work!') auth = get_connection_info(module) params = module.params @@ -388,7 +385,7 @@ def main(): state = params.get('state') enabled = params.get('enabled') new_name = params.get('new_name') - client = oca.Client(auth.username + ':' + auth.password, auth.url) + client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) result = {} From 94cf07efbfec76ffa6e8d38b956ebf05b08551ab Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 25 Mar 2021 02:16:33 +1300 Subject: [PATCH 0134/3093] fixed documentation for oneview modules (#2092) --- plugins/doc_fragments/oneview.py | 24 ++++++++++++-- .../oneview/oneview_datacenter_info.py | 1 + .../oneview/oneview_enclosure_info.py | 1 + .../oneview/oneview_ethernet_network.py | 2 ++ .../oneview/oneview_ethernet_network_info.py | 1 + .../oneview/oneview_fc_network.py | 2 ++ .../oneview/oneview_fc_network_info.py | 1 + .../oneview/oneview_fcoe_network.py | 2 ++ .../oneview/oneview_fcoe_network_info.py | 1 + .../oneview_logical_interconnect_group.py | 2 ++ ...oneview_logical_interconnect_group_info.py | 1 + .../oneview/oneview_network_set.py | 2 ++ .../oneview/oneview_network_set_info.py | 1 + .../oneview/oneview_san_manager.py | 8 +++-- .../oneview/oneview_san_manager_info.py | 2 ++ tests/sanity/ignore-2.10.txt | 32 ------------------- tests/sanity/ignore-2.11.txt | 32 ------------------- tests/sanity/ignore-2.9.txt | 32 ------------------- 18 files changed, 46 
insertions(+), 101 deletions(-) diff --git a/plugins/doc_fragments/oneview.py b/plugins/doc_fragments/oneview.py index bbbcbeea45..0d385e99aa 100644 --- a/plugins/doc_fragments/oneview.py +++ b/plugins/doc_fragments/oneview.py @@ -13,12 +13,32 @@ class ModuleDocFragment(object): DOCUMENTATION = r''' options: config: - description: + description: - Path to a .json configuration file containing the OneView client configuration. The configuration file is optional and when used should be present in the host running the ansible commands. If the file path is not provided, the configuration will be loaded from environment variables. For links to example configuration files or how to use the environment variables verify the notes section. - type: path + type: path + api_version: + description: + - OneView API Version. + type: int + image_streamer_hostname: + description: + - IP address or hostname for the HPE Image Streamer REST API. + type: str + hostname: + description: + - IP address or hostname for the appliance. + type: str + username: + description: + - Username for API authentication. + type: str + password: + description: + - Password for API authentication. + type: str requirements: - python >= 2.7.9 diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py index a057503440..35c2e7acf4 100644 --- a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py +++ b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py @@ -24,6 +24,7 @@ options: name: description: - Data Center name. + type: str options: description: - "Retrieve additional information. Options available: 'visualContent'." 
diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py index 1f2688d610..8ee92c689f 100644 --- a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py +++ b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py @@ -24,6 +24,7 @@ options: name: description: - Enclosure name. + type: str options: description: - "List with options to gather additional information about an Enclosure and related resources. diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network.py index a81e144a68..c09f09c8f6 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network.py +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network.py @@ -24,11 +24,13 @@ options: - C(present) will ensure data properties are compliant with OneView. - C(absent) will remove the resource from OneView, if it exists. - C(default_bandwidth_reset) will reset the network connection template to the default. + type: str default: present choices: [present, absent, default_bandwidth_reset] data: description: - List with Ethernet Network properties. + type: dict required: true extends_documentation_fragment: - community.general.oneview diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py index a609bf772a..bc8765c352 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py @@ -23,6 +23,7 @@ options: name: description: - Ethernet Network name. + type: str options: description: - "List with options to gather additional information about an Ethernet Network and related resources. 
diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network.py b/plugins/modules/remote_management/oneview/oneview_fc_network.py index 45fa035ca2..009a54a89b 100644 --- a/plugins/modules/remote_management/oneview/oneview_fc_network.py +++ b/plugins/modules/remote_management/oneview/oneview_fc_network.py @@ -20,11 +20,13 @@ options: - Indicates the desired state for the Fibre Channel Network resource. C(present) will ensure data properties are compliant with OneView. C(absent) will remove the resource from OneView, if it exists. + type: str choices: ['present', 'absent'] required: true data: description: - List with the Fibre Channel Network properties. + type: dict required: true extends_documentation_fragment: diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py index 2fad241af6..db48f19f84 100644 --- a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py @@ -23,6 +23,7 @@ options: name: description: - Fibre Channel Network name. + type: str extends_documentation_fragment: - community.general.oneview diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network.py index 79d8ae2182..30e05677f8 100644 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network.py +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network.py @@ -21,11 +21,13 @@ options: - Indicates the desired state for the FCoE Network resource. C(present) will ensure data properties are compliant with OneView. C(absent) will remove the resource from OneView, if it exists. + type: str default: present choices: ['present', 'absent'] data: description: - List with FCoE Network properties. 
+ type: dict required: true extends_documentation_fragment: diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py index 8c1980df93..e5e1bc08e4 100644 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py @@ -23,6 +23,7 @@ options: name: description: - FCoE Network name. + type: str extends_documentation_fragment: - community.general.oneview - community.general.oneview.factsparams diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py index 8ca49e21ab..78735dc5e7 100644 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py @@ -24,11 +24,13 @@ options: - Indicates the desired state for the Logical Interconnect Group resource. C(absent) will remove the resource from OneView, if it exists. C(present) will ensure data properties are compliant with OneView. + type: str choices: [absent, present] default: present data: description: - List with the Logical Interconnect Group properties. + type: dict required: true extends_documentation_fragment: - community.general.oneview diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py index 16a78309f6..3488be92a6 100644 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py @@ -24,6 +24,7 @@ options: name: description: - Logical Interconnect Group name. 
+ type: str extends_documentation_fragment: - community.general.oneview - community.general.oneview.factsparams diff --git a/plugins/modules/remote_management/oneview/oneview_network_set.py b/plugins/modules/remote_management/oneview/oneview_network_set.py index cc70d5e57a..14efdabe70 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set.py +++ b/plugins/modules/remote_management/oneview/oneview_network_set.py @@ -23,11 +23,13 @@ options: - Indicates the desired state for the Network Set resource. - C(present) will ensure data properties are compliant with OneView. - C(absent) will remove the resource from OneView, if it exists. + type: str default: present choices: ['present', 'absent'] data: description: - List with the Network Set properties. + type: dict required: true extends_documentation_fragment: diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/plugins/modules/remote_management/oneview/oneview_network_set_info.py index e88a190796..bfc212d40c 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set_info.py +++ b/plugins/modules/remote_management/oneview/oneview_network_set_info.py @@ -23,6 +23,7 @@ options: name: description: - Network Set name. + type: str options: description: diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager.py b/plugins/modules/remote_management/oneview/oneview_san_manager.py index 57e9347550..858072826b 100644 --- a/plugins/modules/remote_management/oneview/oneview_san_manager.py +++ b/plugins/modules/remote_management/oneview/oneview_san_manager.py @@ -24,12 +24,14 @@ options: - C(present) ensures data properties are compliant with OneView. - C(absent) removes the resource from OneView, if it exists. - C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent. 
+ type: str default: present choices: [present, absent, connection_information_set] data: - description: - - List with SAN Manager properties. - required: true + description: + - List with SAN Manager properties. + type: dict + required: true extends_documentation_fragment: - community.general.oneview diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py index c4a6b7a86b..2e462b966d 100644 --- a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py +++ b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py @@ -23,6 +23,7 @@ options: provider_display_name: description: - Provider Display Name. + type: str params: description: - List of params to delimit, filter and sort the list of resources. @@ -31,6 +32,7 @@ options: - C(count): The number of resources to return. - C(query): A general query string to narrow the list of resources returned. - C(sort): The sort order of the returned data set." 
+ type: dict extends_documentation_fragment: - community.general.oneview diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 57500abf97..e3f5531991 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -126,38 +126,6 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:undocumented-parameter 
-plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_san_manager.py 
validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index f471a31461..547a788be0 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -125,38 +125,6 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_ethernet_network.py 
validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:undocumented-parameter 
-plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index d92914cc1a..f5bbfa704d 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -161,38 +161,6 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions 
-plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:undocumented-parameter 
-plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:doc-missing-type -plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:undocumented-parameter -plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:undocumented-parameter plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter From 04f46f04358bddc242e29d0d8dd1918149c94cdd Mon Sep 17 00:00:00 2001 From: panyy3 Date: Wed, 24 Mar 
2021 22:07:17 +0800 Subject: [PATCH 0135/3093] add new module xcc_redfish_command to manage Lenovo servers using Redfish APIs (#2007) * add new module xcc_redfish_command to manage Lenovo servers using Redfish APIs * Update plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py Co-authored-by: Felix Fontein * fix some errors detected by ansible-test sanity * end all descriptions (except short_description) with a period * fix return definition problem and other errors detected by ansible-test sanity * Always use true/false for booleans in YAML * It is usually a good idea to leave away required: false * fix errors detected by ansible-test sanity * fix elements of command is not defined * check whether resource_uri is specified for Raw commands * if no Members property, return false; if empty array, return true * get @odata.etag from patch body instead of getting again * add request_body checking * add unit test for the module * fix errors detected by ansible-test sanity --test pep8 * update class name xcc_RedfishUtils to XCCRedfishUtils to follow convention; import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args, exit_json, fail_json from ansible_collections.community.general.tests.unit.plugins.modules.utils instead of inline them * support using security token for auth * fix line too long error * As 2.3.0 got released yesterday, move to 2.4.0 * add maintainers for lenovoxcc * update to make sure that it's sorted alphabetically Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + .../lenovoxcc/xcc_redfish_command.py | 673 ++++++++++++++++++ plugins/modules/xcc_redfish_command.py | 1 + .../lenovoxcc/test_xcc_redfish_command.py | 626 ++++++++++++++++ 4 files changed, 1302 insertions(+) create mode 100644 plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py create mode 120000 plugins/modules/xcc_redfish_command.py create mode 100644 tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py diff 
--git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index ce12bb6885..32394e9e9f 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -701,6 +701,8 @@ files: labels: cisco $modules/remote_management/ipmi/: maintainers: bgaifullin cloudnull + $modules/remote_management/lenovoxcc/: + maintainers: panyy3 renxulei $modules/remote_management/lxca/: maintainers: navalkp prabhosa $modules/remote_management/manageiq/: diff --git a/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py b/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py new file mode 100644 index 0000000000..d8966c6d64 --- /dev/null +++ b/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py @@ -0,0 +1,673 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: xcc_redfish_command +short_description: Manages Lenovo Out-Of-Band controllers using Redfish APIs +version_added: 2.4.0 +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to + perform an action or get information back or update a configuration attribute. + - Manages virtual media. + - Supports getting information back via GET method. + - Supports updating a configuration attribute via PATCH method. + - Supports performing an action via POST method. +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + description: + - Username for authentication with OOB controller. + type: str + password: + description: + - Password for authentication with OOB controller. 
+ type: str + auth_token: + description: + - Security token for authentication with OOB controller. + type: str + timeout: + description: + - Timeout in seconds for URL requests to OOB controller. + default: 10 + type: int + resource_id: + required: false + description: + - The ID of the System, Manager or Chassis to modify. + type: str + virtual_media: + required: false + description: + - The options for VirtualMedia commands. + type: dict + suboptions: + media_types: + description: + - The list of media types appropriate for the image. + type: list + elements: str + image_url: + description: + - The URL of the image to insert or eject. + type: str + inserted: + description: + - Indicates if the image is treated as inserted on command completion. + type: bool + default: true + write_protected: + description: + - Indicates if the media is treated as write-protected. + type: bool + default: true + username: + description: + - The username for accessing the image URL. + type: str + password: + description: + - The password for accessing the image URL. + type: str + transfer_protocol_type: + description: + - The network protocol to use with the image. + type: str + transfer_method: + description: + - The transfer method to use with the image. + type: str + resource_uri: + required: false + description: + - The resource uri to get or patch or post. + type: str + request_body: + required: false + description: + - The request body to patch or post. 
+ type: dict + +author: "Yuyan Pan (@panyy3)" +''' + +EXAMPLES = ''' + - name: Insert Virtual Media + community.general.xcc_redfish_command: + category: Manager + command: VirtualMediaInsert + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: "http://example.com/images/SomeLinux-current.iso" + media_types: + - CD + - DVD + resource_id: "1" + + - name: Eject Virtual Media + community.general.xcc_redfish_command: + category: Manager + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + virtual_media: + image_url: "http://example.com/images/SomeLinux-current.iso" + resource_id: "1" + + - name: Eject all Virtual Media + community.general.xcc_redfish_command: + category: Manager + command: VirtualMediaEject + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_id: "1" + + - name: Get ComputeSystem Oem property SystemStatus via GetResource command + community.general.xcc_redfish_command: + category: Raw + command: GetResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Systems/1" + register: result + - ansible.builtin.debug: + msg: "{{ result.redfish_facts.data.Oem.Lenovo.SystemStatus }}" + + - name: Get Oem DNS setting via GetResource command + community.general.xcc_redfish_command: + category: Raw + command: GetResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS" + register: result + - ansible.builtin.debug: + msg: "{{ result.redfish_facts.data }}" + + - name: Get Lenovo FoD key collection resource via GetCollectionResource command + community.general.xcc_redfish_command: + category: Raw + command: GetCollectionResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: 
"/redfish/v1/Managers/1/Oem/Lenovo/FoD/Keys" + register: result + - ansible.builtin.debug: + msg: "{{ result.redfish_facts.data_list }}" + + - name: Update ComputeSystem property AssetTag via PatchResource command + community.general.xcc_redfish_command: + category: Raw + command: PatchResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Systems/1" + request_body: + AssetTag: "new_asset_tag" + + - name: Perform BootToBIOSSetup action via PostResource command + community.general.xcc_redfish_command: + category: Raw + command: PostResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Systems/1/Actions/Oem/LenovoComputerSystem.BootToBIOSSetup" + request_body: {} + + - name: Perform SecureBoot.ResetKeys action via PostResource command + community.general.xcc_redfish_command: + category: Raw + command: PostResource + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + resource_uri: "/redfish/v1/Systems/1/SecureBoot/Actions/SecureBoot.ResetKeys" + request_body: + ResetKeysType: DeleteAllKeys + + - name: Create session + community.general.redfish_command: + category: Sessions + command: CreateSession + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result + + - name: Update Manager DateTimeLocalOffset property using security token for auth + community.general.xcc_redfish_command: + category: Raw + command: PatchResource + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + resource_uri: "/redfish/v1/Managers/1" + request_body: + DateTimeLocalOffset: "+08:00" + + - name: Delete session using security token created by CreateSesssion above + community.general.redfish_command: + category: Sessions + command: DeleteSession + baseuri: "{{ baseuri }}" + auth_token: "{{ result.session.token }}" + session_uri: "{{ result.session.uri }}" +''' + 
+RETURN = ''' +msg: + description: A message related to the performed action(s). + returned: when failure or action/update success + type: str + sample: "Action was successful" +redfish_facts: + description: Resource content. + returned: when command == GetResource or command == GetCollectionResource + type: dict + sample: '{ + "redfish_facts": { + "data": { + "@odata.etag": "\"3179bf00d69f25a8b3c\"", + "@odata.id": "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS", + "@odata.type": "#LenovoDNS.v1_0_0.LenovoDNS", + "DDNS": [ + { + "DDNSEnable": true, + "DomainName": "", + "DomainNameSource": "DHCP" + } + ], + "DNSEnable": true, + "Description": "This resource is used to represent a DNS resource for a Redfish implementation.", + "IPv4Address1": "10.103.62.178", + "IPv4Address2": "0.0.0.0", + "IPv4Address3": "0.0.0.0", + "IPv6Address1": "::", + "IPv6Address2": "::", + "IPv6Address3": "::", + "Id": "LenovoDNS", + "PreferredAddresstype": "IPv4" + }, + "ret": true + } + }' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils + + +class XCCRedfishUtils(RedfishUtils): + @staticmethod + def _find_empty_virt_media_slot(resources, media_types, + media_match_strict=True): + for uri, data in resources.items(): + # check MediaTypes + if 'MediaTypes' in data and media_types: + if not set(media_types).intersection(set(data['MediaTypes'])): + continue + else: + if media_match_strict: + continue + if 'RDOC' in uri: + continue + # if ejected, 'Inserted' should be False and 'ImageName' cleared + if (not data.get('Inserted', False) and + not data.get('ImageName')): + return uri, data + return None, None + + def virtual_media_eject_one(self, image_url): + # locate and read the VirtualMedia resources + response = self.get_request(self.root_uri + self.manager_uri) + if response['ret'] is False: + return response + data = 
response['data'] + if 'VirtualMedia' not in data: + return {'ret': False, 'msg': "VirtualMedia resource not found"} + virt_media_uri = data["VirtualMedia"]["@odata.id"] + response = self.get_request(self.root_uri + virt_media_uri) + if response['ret'] is False: + return response + data = response['data'] + virt_media_list = [] + for member in data[u'Members']: + virt_media_list.append(member[u'@odata.id']) + resources, headers = self._read_virt_media_resources(virt_media_list) + + # find the VirtualMedia resource to eject + uri, data, eject = self._find_virt_media_to_eject(resources, image_url) + if uri and eject: + if ('Actions' not in data or + '#VirtualMedia.EjectMedia' not in data['Actions']): + # try to eject via PATCH if no EjectMedia action found + h = headers[uri] + if 'allow' in h: + methods = [m.strip() for m in h.get('allow').split(',')] + if 'PATCH' not in methods: + # if Allow header present and PATCH missing, return error + return {'ret': False, + 'msg': "%s action not found and PATCH not allowed" + % '#VirtualMedia.EjectMedia'} + return self.virtual_media_eject_via_patch(uri) + else: + # POST to the EjectMedia Action + action = data['Actions']['#VirtualMedia.EjectMedia'] + if 'target' not in action: + return {'ret': False, + 'msg': "target URI property missing from Action " + "#VirtualMedia.EjectMedia"} + action_uri = action['target'] + # empty payload for Eject action + payload = {} + # POST to action + response = self.post_request(self.root_uri + action_uri, + payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, + 'msg': "VirtualMedia ejected"} + elif uri and not eject: + # already ejected: return success but changed=False + return {'ret': True, 'changed': False, + 'msg': "VirtualMedia image '%s' already ejected" % + image_url} + else: + # return failure (no resources matching image_url found) + return {'ret': False, 'changed': False, + 'msg': "No VirtualMedia resource found with image '%s' " + 
"inserted" % image_url} + + def virtual_media_eject(self, options): + if options: + image_url = options.get('image_url') + if image_url: # eject specified one media + return self.virtual_media_eject_one(image_url) + + # eject all inserted media when no image_url specified + # read all the VirtualMedia resources + response = self.get_request(self.root_uri + self.manager_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'VirtualMedia' not in data: + return {'ret': False, 'msg': "VirtualMedia resource not found"} + virt_media_uri = data["VirtualMedia"]["@odata.id"] + response = self.get_request(self.root_uri + virt_media_uri) + if response['ret'] is False: + return response + data = response['data'] + virt_media_list = [] + for member in data[u'Members']: + virt_media_list.append(member[u'@odata.id']) + resources, headers = self._read_virt_media_resources(virt_media_list) + + # eject all inserted media one by one + ejected_media_list = [] + for uri, data in resources.items(): + if data.get('Image') and data.get('Inserted', True): + returndict = self.virtual_media_eject_one(data.get('Image')) + if not returndict['ret']: + return returndict + ejected_media_list.append(data.get('Image')) + + if len(ejected_media_list) == 0: + # no media inserted: return success but changed=False + return {'ret': True, 'changed': False, + 'msg': "No VirtualMedia image inserted"} + else: + return {'ret': True, 'changed': True, + 'msg': "VirtualMedia %s ejected" % str(ejected_media_list)} + + def raw_get_resource(self, resource_uri): + if resource_uri is None: + return {'ret': False, 'msg': "resource_uri is missing"} + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + data = response['data'] + return {'ret': True, 'data': data} + + def raw_get_collection_resource(self, resource_uri): + if resource_uri is None: + return {'ret': False, 'msg': "resource_uri is missing"} + response = 
self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + if 'Members' not in response['data']: + return {'ret': False, 'msg': "Specified resource_uri doesn't have Members property"} + member_list = [i['@odata.id'] for i in response['data'].get('Members', [])] + + # get member resource one by one + data_list = [] + for member_uri in member_list: + uri = self.root_uri + member_uri + response = self.get_request(uri) + if response['ret'] is False: + return response + data = response['data'] + data_list.append(data) + + return {'ret': True, 'data_list': data_list} + + def raw_patch_resource(self, resource_uri, request_body): + if resource_uri is None: + return {'ret': False, 'msg': "resource_uri is missing"} + if request_body is None: + return {'ret': False, 'msg': "request_body is missing"} + # check whether resource_uri existing or not + response = self.get_request(self.root_uri + resource_uri) + if response['ret'] is False: + return response + original_etag = response['data']['@odata.etag'] + + # check validity of keys in request_body + data = response['data'] + for key in request_body.keys(): + if key not in data: + return {'ret': False, 'msg': "Key %s not found. Supported key list: %s" % (key, str(data.keys()))} + + # perform patch + response = self.patch_request(self.root_uri + resource_uri, request_body) + if response['ret'] is False: + return response + + # check whether changed or not + current_etag = '' + if 'data' in response and '@odata.etag' in response['data']: + current_etag = response['data']['@odata.etag'] + if current_etag != original_etag: + return {'ret': True, 'changed': True} + else: + return {'ret': True, 'changed': False} + + def raw_post_resource(self, resource_uri, request_body): + if resource_uri is None: + return {'ret': False, 'msg': "resource_uri is missing"} + if '/Actions/' not in resource_uri: + return {'ret': False, 'msg': "Bad uri %s. 
Keyword /Actions/ should be included in uri" % resource_uri} + if request_body is None: + return {'ret': False, 'msg': "request_body is missing"} + # get action base uri data for further checking + action_base_uri = resource_uri.split('/Actions/')[0] + response = self.get_request(self.root_uri + action_base_uri) + if response['ret'] is False: + return response + if 'Actions' not in response['data']: + return {'ret': False, 'msg': "Actions property not found in %s" % action_base_uri} + + # check resource_uri with target uri found in action base uri data + action_found = False + action_info_uri = None + action_target_uri_list = [] + for key in response['data']['Actions'].keys(): + if action_found: + break + if not key.startswith('#'): + continue + if 'target' in response['data']['Actions'][key]: + if resource_uri == response['data']['Actions'][key]['target']: + action_found = True + if '@Redfish.ActionInfo' in response['data']['Actions'][key]: + action_info_uri = response['data']['Actions'][key]['@Redfish.ActionInfo'] + else: + action_target_uri_list.append(response['data']['Actions'][key]['target']) + if not action_found and 'Oem' in response['data']['Actions']: + for key in response['data']['Actions']['Oem'].keys(): + if action_found: + break + if not key.startswith('#'): + continue + if 'target' in response['data']['Actions']['Oem'][key]: + if resource_uri == response['data']['Actions']['Oem'][key]['target']: + action_found = True + if '@Redfish.ActionInfo' in response['data']['Actions']['Oem'][key]: + action_info_uri = response['data']['Actions']['Oem'][key]['@Redfish.ActionInfo'] + else: + action_target_uri_list.append(response['data']['Actions']['Oem'][key]['target']) + + if not action_found: + return {'ret': False, + 'msg': 'Specified resource_uri is not a supported action target uri, please specify a supported target uri instead. 
Supported uri: %s' + % (str(action_target_uri_list))} + + # check request_body with parameter name defined by @Redfish.ActionInfo + if action_info_uri is not None: + response = self.get_request(self.root_uri + action_info_uri) + if response['ret'] is False: + return response + for key in request_body.keys(): + key_found = False + for para in response['data']['Parameters']: + if key == para['Name']: + key_found = True + break + if not key_found: + return {'ret': False, + 'msg': 'Invalid property %s found in request_body. Please refer to @Redfish.ActionInfo Parameters: %s' + % (key, str(response['data']['Parameters']))} + + # perform post + response = self.post_request(self.root_uri + resource_uri, request_body) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True} + + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Manager": ["VirtualMediaInsert", + "VirtualMediaEject"], + "Raw": ["GetResource", + "GetCollectionResource", + "PatchResource", + "PostResource"] +} + + +def main(): + result = {} + module = AnsibleModule( + argument_spec=dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10), + resource_id=dict(), + virtual_media=dict( + type='dict', + options=dict( + media_types=dict(type='list', elements='str', default=[]), + image_url=dict(), + inserted=dict(type='bool', default=True), + write_protected=dict(type='bool', default=True), + username=dict(), + password=dict(no_log=True), + transfer_protocol_type=dict(), + transfer_method=dict(), + ) + ), + resource_uri=dict(), + request_body=dict( + type='dict', + ), + ), + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 'auth_token'), + ], + 
supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + # timeout + timeout = module.params['timeout'] + + # System, Manager or Chassis ID to modify + resource_id = module.params['resource_id'] + + # VirtualMedia options + virtual_media = module.params['virtual_media'] + + # resource_uri + resource_uri = module.params['resource_uri'] + + # request_body + request_body = module.params['request_body'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = XCCRedfishUtils(creds, root_uri, timeout, module, resource_id=resource_id, data_modification=True) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys()))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json(msg=to_native("Invalid Command '%s'. 
Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + # Organize by Categories / Commands + if category == "Manager": + # execute only if we find a Manager service resource + result = rf_utils._find_managers_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == 'VirtualMediaInsert': + result = rf_utils.virtual_media_insert(virtual_media) + elif command == 'VirtualMediaEject': + result = rf_utils.virtual_media_eject(virtual_media) + elif category == "Raw": + for command in command_list: + if command == 'GetResource': + result = rf_utils.raw_get_resource(resource_uri) + elif command == 'GetCollectionResource': + result = rf_utils.raw_get_collection_resource(resource_uri) + elif command == 'PatchResource': + result = rf_utils.raw_patch_resource(resource_uri, request_body) + elif command == 'PostResource': + result = rf_utils.raw_post_resource(resource_uri, request_body) + + # Return data back or fail with proper message + if result['ret'] is True: + if command == 'GetResource' or command == 'GetCollectionResource': + module.exit_json(redfish_facts=result) + else: + changed = result.get('changed', True) + msg = result.get('msg', 'Action was successful') + module.exit_json(changed=changed, msg=msg) + else: + module.fail_json(msg=to_native(result['msg'])) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/xcc_redfish_command.py b/plugins/modules/xcc_redfish_command.py new file mode 120000 index 0000000000..4fa967b410 --- /dev/null +++ b/plugins/modules/xcc_redfish_command.py @@ -0,0 +1 @@ +remote_management/lenovoxcc/xcc_redfish_command.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py b/tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py new file mode 100644 index 0000000000..38a6652fb1 --- /dev/null +++ 
b/tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py @@ -0,0 +1,626 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +from ansible_collections.community.general.tests.unit.compat import mock +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +import ansible_collections.community.general.plugins.modules.remote_management.lenovoxcc.xcc_redfish_command as module +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson +from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json + + +def get_bin_path(self, arg, required=False): + """Mock AnsibleModule.get_bin_path""" + return arg + + +class TestXCCRedfishCommand(unittest.TestCase): + + def setUp(self): + self.mock_module_helper = patch.multiple(basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json, + get_bin_path=get_bin_path) + self.mock_module_helper.start() + self.addCleanup(self.mock_module_helper.stop) + + def test_module_fail_when_required_args_missing(self): + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + module.main() + + def test_module_fail_when_unknown_category(self): + with self.assertRaises(AnsibleFailJson): + set_module_args({ + 'category': 'unknown', + 'command': 'VirtualMediaEject', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + }) + module.main() + + def test_module_fail_when_unknown_command(self): + with self.assertRaises(AnsibleFailJson): + set_module_args({ + 'category': 'Manager', + 'command': 'unknown', + 'baseuri': '10.245.39.251', + 
'username': 'USERID', + 'password': 'PASSW0RD=21', + }) + module.main() + + def test_module_command_VirtualMediaInsert_pass(self): + set_module_args({ + 'category': 'Manager', + 'command': 'VirtualMediaInsert', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'timeout': 30, + 'virtual_media': { + 'image_url': "nfs://10.245.52.18:/home/nfs/bootable-sr635-20210111-autorun.iso", + 'media_types': ['CD'], + 'inserted': True, + 'write_protected': True, + 'transfer_protocol_type': 'NFS' + } + }) + with patch.object(module.XCCRedfishUtils, '_find_managers_resource') as mock__find_managers_resource: + mock__find_managers_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + + with patch.object(module.XCCRedfishUtils, 'virtual_media_insert') as mock_virtual_media_insert: + mock_virtual_media_insert.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + + with self.assertRaises(AnsibleExitJson) as result: + module.main() + + def test_module_command_VirtualMediaEject_pass(self): + set_module_args({ + 'category': 'Manager', + 'command': 'VirtualMediaEject', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'timeout': 30, + 'virtual_media': { + 'image_url': "nfs://10.245.52.18:/home/nfs/bootable-sr635-20210111-autorun.iso", + } + }) + with patch.object(module.XCCRedfishUtils, '_find_managers_resource') as mock__find_managers_resource: + mock__find_managers_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + + with patch.object(module.XCCRedfishUtils, 'virtual_media_eject') as mock_virtual_media_eject: + mock_virtual_media_eject.return_value = {'ret': True, 'changed': True, 'msg': 'success'} + + with self.assertRaises(AnsibleExitJson) as result: + module.main() + + def test_module_command_VirtualMediaEject_fail_when_required_args_missing(self): + with self.assertRaises(AnsibleFailJson): + set_module_args({ + 'category': 'Manager', + 'command': 
'VirtualMediaEject', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + }) + module.main() + + def test_module_command_GetResource_fail_when_required_args_missing(self): + set_module_args({ + 'category': 'Raw', + 'command': 'GetResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_GetResource_fail_when_get_return_false(self): + set_module_args({ + 'category': 'Raw', + 'command': 'GetResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/testuri', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': False, 'msg': '404 error'} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_GetResource_pass(self): + set_module_args({ + 'category': 'Raw', + 'command': 'GetResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/testuri', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + + with self.assertRaises(AnsibleExitJson) as result: + module.main() + + def test_module_command_GetCollectionResource_fail_when_required_args_missing(self): + set_module_args({ + 'category': 'Raw', + 'command': 'GetCollectionResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + + with 
self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_GetCollectionResource_fail_when_get_return_false(self): + set_module_args({ + 'category': 'Raw', + 'command': 'GetCollectionResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/testuri', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': False, 'msg': '404 error'} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_GetCollectionResource_fail_when_get_not_colection(self): + set_module_args({ + 'category': 'Raw', + 'command': 'GetCollectionResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/testuri', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_GetCollectionResource_pass_when_get_empty_collection(self): + set_module_args({ + 'category': 'Raw', + 'command': 'GetCollectionResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/testuri', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'Members': [], 'Members@odata.count': 0}} + + with self.assertRaises(AnsibleExitJson) as result: + module.main() + + def test_module_command_GetCollectionResource_pass_when_get_collection(self): + set_module_args({ + 'category': 'Raw', + 'command': 'GetCollectionResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/testuri', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') 
as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'Members': [{'@odata.id': '/redfish/v1/testuri/1'}], 'Members@odata.count': 1}} + + with self.assertRaises(AnsibleExitJson) as result: + module.main() + + def test_module_command_PatchResource_fail_when_required_args_missing(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PatchResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} + + with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: + mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_PatchResource_fail_when_required_args_missing_no_requestbody(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PatchResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/testuri', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} + + with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: + mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_PatchResource_fail_when_noexisting_property_in_requestbody(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PatchResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/testuri', + 'request_body': {'teststr': 'yyyy', 'otherkey': 'unknownkey'} + }) + + with 
patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} + + with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: + mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_PatchResource_fail_when_get_return_false(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PatchResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/testuri', + 'request_body': {'teststr': 'yyyy'} + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} + + with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: + mock_patch_request.return_value = {'ret': False, 'msg': '500 internal error'} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_PatchResource_pass(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PatchResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/testuri', + 'request_body': {'teststr': 'yyyy'} + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}} + + with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request: + mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'yyyy', '@odata.etag': '322e0d45d9572723c98'}} + + with self.assertRaises(AnsibleExitJson) as result: + module.main() + + def 
test_module_command_PostResource_fail_when_required_args_missing(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PostResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } + }, + } + } + + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_PostResource_fail_when_invalid_resourceuri(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PostResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/testuri', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } + }, + } + } + + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + 
mock_post_request.return_value = {'ret': True} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_PostResource_fail_when_no_requestbody(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PostResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } + }, + } + } + + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_PostResource_fail_when_no_requestbody(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PostResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword', + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + 
'#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } + }, + } + } + + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_PostResource_fail_when_requestbody_mismatch_with_data_from_actioninfo_uri(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PostResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword', + 'request_body': {'PasswordName': 'UefiAdminPassword', 'NewPassword': 'PASSW0RD=='} + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Parameters': [], + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } + }, + } + } + + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_PostResource_fail_when_get_return_false(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PostResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword', + 'request_body': {'PasswordName': 'UefiAdminPassword', 'NewPassword': 'PASSW0RD=='} + }) + + with 
patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = {'ret': False, 'msg': '404 error'} + + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_PostResource_fail_when_post_return_false(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PostResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios', + 'request_body': {} + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } + }, + } + } + + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': False, 'msg': '500 internal error'} + + with self.assertRaises(AnsibleFailJson) as result: + module.main() + + def test_module_command_PostResource_pass(self): + set_module_args({ + 'category': 'Raw', + 'command': 'PostResource', + 'baseuri': '10.245.39.251', + 'username': 'USERID', + 'password': 'PASSW0RD=21', + 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios', + 'request_body': {} + }) + + with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request: + mock_get_request.return_value = { + 'ret': True, + 'data': { + 'Actions': { + '#Bios.ChangePassword': { + 
'@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword", + 'title': "ChangePassword", + 'PasswordName@Redfish.AllowableValues': [ + "UefiAdminPassword", + "UefiPowerOnPassword" + ] + }, + '#Bios.ResetBios': { + 'title': "ResetBios", + 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios" + } + }, + } + } + + with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request: + mock_post_request.return_value = {'ret': True, 'msg': 'post success'} + + with self.assertRaises(AnsibleExitJson) as result: + module.main() From db26514bf11ec64ca44d99d8b27d6b5518c51483 Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Wed, 24 Mar 2021 17:20:26 +0100 Subject: [PATCH 0136/3093] Add support for `sudo su -` using password auth (#2054) * Add support for `sudo su -` using password auth Allow users to run Ansible tasks through `sudo su -` using password auth - Feature Pull Request sudosu So I have been using this at various customers for bootstrapping Ansible mostly. Often you have an existing setup where there is a user that has root-access enabled through sudo, but only to run `su` to log using the user's password. In these specific cases the root password is unique to the system and therefore not an easy way to automate bootstrapping. Having a `sudo su -` become option **with password prompt** is not possible with the existing become methods (neither sudo nor su can be used) by abusing `become_exe` or `become_flags`. 
This fixes ansible/ansible#12686 * Fix all reported issues * Add unit tests * Apply suggestions from code review * Update plugins/become/sudosu.py Co-authored-by: Felix Fontein * Update tests/unit/plugins/become/test_sudosu.py Co-authored-by: Felix Fontein * Update tests/unit/plugins/become/test_sudosu.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- plugins/become/sudosu.py | 91 ++++++++++++++++++++++++ tests/unit/plugins/become/test_sudosu.py | 45 ++++++++++++ 2 files changed, 136 insertions(+) create mode 100644 plugins/become/sudosu.py create mode 100644 tests/unit/plugins/become/test_sudosu.py diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py new file mode 100644 index 0000000000..e9668e6522 --- /dev/null +++ b/plugins/become/sudosu.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ + become: sudosu + short_description: Run tasks using sudo su - + description: + - This become plugins allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined. + author: + - Dag Wieers (@dagwieers) + version_added: 2.4.0 + options: + become_user: + description: User you 'become' to execute the task. + default: root + ini: + - section: privilege_escalation + key: become_user + - section: sudo_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_sudo_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_SUDO_USER + become_flags: + description: Options to pass to C(sudo). 
+ default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: sudo_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_sudo_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_SUDO_FLAGS + become_pass: + description: Password to pass to C(sudo). + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_sudo_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_SUDO_PASS + ini: + - section: sudo_become_plugin + key: password +""" + + +from ansible.plugins.become import BecomeBase + + +class BecomeModule(BecomeBase): + + name = 'community.general.sudosu' + + # messages for detecting prompted password issues + fail = ('Sorry, try again.',) + missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required') + + def build_become_command(self, cmd, shell): + super(BecomeModule, self).build_become_command(cmd, shell) + + if not cmd: + return cmd + + becomecmd = 'sudo' + + flags = self.get_option('become_flags') or '' + prompt = '' + if self.get_option('become_pass'): + self.prompt = '[sudo via ansible, key=%s] password:' % self._id + if flags: # this could be simplified, but kept as is for now for backwards string matching + flags = flags.replace('-n', '') + prompt = '-p "%s"' % (self.prompt) + + user = self.get_option('become_user') or '' + if user: + user = '%s' % (user) + + return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)]) diff --git a/tests/unit/plugins/become/test_sudosu.py b/tests/unit/plugins/become/test_sudosu.py new file mode 100644 index 0000000000..4e5c998f09 --- /dev/null +++ b/tests/unit/plugins/become/test_sudosu.py @@ -0,0 +1,45 @@ +# (c) 2012-2014, Michael DeHaan +# (c) 2021 Ansible Project +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import 
(absolute_import, division, print_function) +__metaclass__ = type + +import re + +from ansible import context +from ansible.playbook.play_context import PlayContext +from ansible.plugins.loader import become_loader + + +def test_sudosu(mocker, parser, reset_cli_args): + options = parser.parse_args([]) + context._init_global_context(options) + play_context = PlayContext() + + default_cmd = "/bin/foo" + default_exe = "/bin/bash" + sudo_exe = 'sudo' + sudo_flags = '-H -s -n' + + cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) + assert cmd == default_cmd + + success = 'BECOME-SUCCESS-.+?' + + play_context.become = True + play_context.become_user = 'foo' + play_context.set_become_plugin(become_loader.get('community.general.sudosu')) + play_context.become_flags = sudo_flags + cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) + + assert (re.match("""%s %s su -l %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags, play_context.become_user, + default_exe, success, default_cmd), cmd) is not None) + + play_context.become_pass = 'testpass' + cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) + assert (re.match("""%s %s -p "%s" su -l %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags.replace('-n', ''), + r"\[sudo via ansible, key=.+?\] password:", play_context.become_user, + default_exe, success, default_cmd), cmd) is not None) From de8e2a83e2948eabb0b8b1d9c1a533a18fc65214 Mon Sep 17 00:00:00 2001 From: Matthias Dellweg <2500@gmx.de> Date: Wed, 24 Mar 2021 17:48:24 +0100 Subject: [PATCH 0137/3093] Add a Pulp 2 related note to pulp_repo (#2096) --- plugins/modules/packaging/os/pulp_repo.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/modules/packaging/os/pulp_repo.py b/plugins/modules/packaging/os/pulp_repo.py index 23d0b02eb4..d14d84451b 100644 --- a/plugins/modules/packaging/os/pulp_repo.py +++ b/plugins/modules/packaging/os/pulp_repo.py @@ -16,6 +16,7 @@ author: "Joe Adams (@sysadmind)" 
short_description: Add or remove Pulp repos from a remote host. description: - Add or remove Pulp repos from a remote host. + - Note, this is for Pulp 2 only. options: add_export_distributor: description: From 2558cd3f017568c844c31891bfe1f9c65faaa496 Mon Sep 17 00:00:00 2001 From: Alex Willmer Date: Thu, 25 Mar 2021 21:19:48 +0000 Subject: [PATCH 0138/3093] ipa_service - Correct pluralisation of "hosts" in example (#2103) --- plugins/modules/identity/ipa/ipa_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/identity/ipa/ipa_service.py b/plugins/modules/identity/ipa/ipa_service.py index c13f7ab68f..088127e0c3 100644 --- a/plugins/modules/identity/ipa/ipa_service.py +++ b/plugins/modules/identity/ipa/ipa_service.py @@ -63,7 +63,7 @@ EXAMPLES = r''' - name: Changing Managing hosts list community.general.ipa_service: name: http/host01.example.com - host: + hosts: - host01.example.com - host02.example.com ipa_host: ipa.example.com From 62cd38a9a0d025beee3a2e29a22d600fab48957f Mon Sep 17 00:00:00 2001 From: Andy Walsh <33293922+rhawalsh@users.noreply.github.com> Date: Thu, 25 Mar 2021 17:22:50 -0400 Subject: [PATCH 0139/3093] Updated vdo maintainer to rhawalsh. (#2102) bgurney-rh does not work with VDO projects anymore. This change re-points maintainer pings to rhawalsh instead. 
--- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 32394e9e9f..f3ba526661 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -914,7 +914,7 @@ files: maintainers: ahtik ovcharenko pyykkis labels: ufw $modules/system/vdo.py: - maintainers: bgurney-rh + maintainers: rhawalsh $modules/system/xfconf.py: maintainers: russoz jbenden labels: xfconf From e7a0a12c3fb6140210e3f9583aa48dd60378c988 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 25 Mar 2021 22:30:35 +0100 Subject: [PATCH 0140/3093] Bugfix: Respect PATH env variable in zypper modules (#2094) * Bugfix: Respect PATH env variable in zypper modules * Improve changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...pect-PATH-env-variable-in-zypper-modules.yaml | 2 ++ plugins/modules/packaging/os/zypper.py | 2 +- .../modules/packaging/os/zypper_repository.py | 16 ++++++++-------- 3 files changed, 11 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml diff --git a/changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml b/changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml new file mode 100644 index 0000000000..e0addce2fc --- /dev/null +++ b/changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml @@ -0,0 +1,2 @@ +bugfixes: + - zypper, zypper_repository - respect ``PATH`` environment variable when resolving zypper executable path (https://github.com/ansible-collections/community.general/pull/2094). 
diff --git a/plugins/modules/packaging/os/zypper.py b/plugins/modules/packaging/os/zypper.py index 9ad539caa8..9c9b12a1a5 100644 --- a/plugins/modules/packaging/os/zypper.py +++ b/plugins/modules/packaging/os/zypper.py @@ -336,7 +336,7 @@ def get_cmd(m, subcommand): "puts together the basic zypper command arguments with those passed to the module" is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade'] is_refresh = subcommand == 'refresh' - cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout'] + cmd = [m.get_bin_path('zypper', required=True), '--quiet', '--non-interactive', '--xmlout'] if m.params['extra_args_precommand']: args_list = m.params['extra_args_precommand'].split() cmd.extend(args_list) diff --git a/plugins/modules/packaging/os/zypper_repository.py b/plugins/modules/packaging/os/zypper_repository.py index 55738b58d8..f1d85376f5 100644 --- a/plugins/modules/packaging/os/zypper_repository.py +++ b/plugins/modules/packaging/os/zypper_repository.py @@ -141,9 +141,9 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck'] -def _get_cmd(*args): +def _get_cmd(module, *args): """Combines the non-interactive zypper command with arguments/subcommands""" - cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive'] + cmd = [module.get_bin_path('zypper', required=True), '--quiet', '--non-interactive'] cmd.extend(args) return cmd @@ -151,7 +151,7 @@ def _get_cmd(*args): def _parse_repos(module): """parses the output of zypper --xmlout repos and return a parse repo dictionary""" - cmd = _get_cmd('--xmlout', 'repos') + cmd = _get_cmd(module, '--xmlout', 'repos') if not HAS_XML: module.fail_json(msg=missing_required_lib("python-xml"), exception=XML_IMP_ERR) @@ -230,7 +230,7 @@ def repo_exists(module, repodata, overwrite_multiple): def addmodify_repo(module, repodata, old_repos, zypper_version, warnings): "Adds the repo, removes old repos 
before, that would conflict." repo = repodata['url'] - cmd = _get_cmd('addrepo', '--check') + cmd = _get_cmd(module, 'addrepo', '--check') if repodata['name']: cmd.extend(['--name', repodata['name']]) @@ -274,14 +274,14 @@ def addmodify_repo(module, repodata, old_repos, zypper_version, warnings): def remove_repo(module, repo): "Removes the repo." - cmd = _get_cmd('removerepo', repo) + cmd = _get_cmd(module, 'removerepo', repo) rc, stdout, stderr = module.run_command(cmd, check_rc=True) return rc, stdout, stderr def get_zypper_version(module): - rc, stdout, stderr = module.run_command(['/usr/bin/zypper', '--version']) + rc, stdout, stderr = module.run_command([module.get_bin_path('zypper', required=True), '--version']) if rc != 0 or not stdout.startswith('zypper '): return LooseVersion('1.0') return LooseVersion(stdout.split()[1]) @@ -290,9 +290,9 @@ def get_zypper_version(module): def runrefreshrepo(module, auto_import_keys=False, shortname=None): "Forces zypper to refresh repo metadata." if auto_import_keys: - cmd = _get_cmd('--gpg-auto-import-keys', 'refresh', '--force') + cmd = _get_cmd(module, '--gpg-auto-import-keys', 'refresh', '--force') else: - cmd = _get_cmd('refresh', '--force') + cmd = _get_cmd(module, 'refresh', '--force') if shortname is not None: cmd.extend(['-r', shortname]) From cdc415ea1fb3edbfeef57bbe00c55f859669758d Mon Sep 17 00:00:00 2001 From: Georg Gadinger Date: Fri, 26 Mar 2021 07:24:24 +0100 Subject: [PATCH 0141/3093] opennebula: add one_template module (#2046) * opennebula: add one_template module A basic module for maintaining VM templates which should be flexible enough for most needs ... * fixup! opennebula: add one_template module * fixup! fixup! 
opennebula: add one_template module --- plugins/module_utils/opennebula.py | 8 +- .../modules/cloud/opennebula/one_template.py | 276 ++++++++++++++++++ plugins/modules/one_template.py | 1 + .../integration/targets/one_template/aliases | 2 + .../testhost/tmp/opennebula-fixtures.json.gz | Bin 0 -> 1069 bytes .../targets/one_template/tasks/main.yml | 243 +++++++++++++++ 6 files changed, 527 insertions(+), 3 deletions(-) create mode 100644 plugins/modules/cloud/opennebula/one_template.py create mode 120000 plugins/modules/one_template.py create mode 100644 tests/integration/targets/one_template/aliases create mode 100644 tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz create mode 100644 tests/integration/targets/one_template/tasks/main.yml diff --git a/plugins/module_utils/opennebula.py b/plugins/module_utils/opennebula.py index 0b95c6185b..a0a8d1305b 100644 --- a/plugins/module_utils/opennebula.py +++ b/plugins/module_utils/opennebula.py @@ -39,14 +39,16 @@ class OpenNebulaModule: wait_timeout=dict(type='int', default=300), ) - def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None): + def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None, required_one_of=None, required_if=None): - module_args = OpenNebulaModule.common_args + module_args = OpenNebulaModule.common_args.copy() module_args.update(argument_spec) self.module = AnsibleModule(argument_spec=module_args, supports_check_mode=supports_check_mode, - mutually_exclusive=mutually_exclusive) + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + required_if=required_if) self.result = dict(changed=False, original_message='', message='') diff --git a/plugins/modules/cloud/opennebula/one_template.py b/plugins/modules/cloud/opennebula/one_template.py new file mode 100644 index 0000000000..b4c8a2fa83 --- /dev/null +++ b/plugins/modules/cloud/opennebula/one_template.py @@ -0,0 +1,276 @@ 
+#!/usr/bin/python +# +# Copyright: (c) 2021, Georg Gadinger +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: one_template + +short_description: Manages OpenNebula templates + +version_added: 2.4.0 + +requirements: + - pyone + +description: + - "Manages OpenNebula templates." + +options: + id: + description: + - A I(id) of the template you would like to manage. If not set then a + - new template will be created with the given I(name). + type: int + name: + description: + - A I(name) of the template you would like to manage. If a template with + - the given name does not exist it will be created, otherwise it will be + - managed by this module. + type: str + template: + description: + - A string containing the template contents. + type: str + state: + description: + - C(present) - state that is used to manage the template. + - C(absent) - delete the template. + choices: ["present", "absent"] + default: present + type: str + +notes: + - Supports C(check_mode). Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually change. 
+ +extends_documentation_fragment: + - community.general.opennebula + +author: + - "Georg Gadinger (@nilsding)" +''' + +EXAMPLES = ''' +- name: Fetch the TEMPLATE by id + community.general.one_template: + id: 6459 + register: result + +- name: Print the TEMPLATE properties + ansible.builtin.debug: + var: result + +- name: Fetch the TEMPLATE by name + community.general.one_template: + name: tf-prd-users-workerredis-p6379a + register: result + +- name: Create a new or update an existing TEMPLATE + community.general.one_template: + name: generic-opensuse + template: | + CONTEXT = [ + HOSTNAME = "generic-opensuse" + ] + CPU = "1" + CUSTOM_ATTRIBUTE = "" + DISK = [ + CACHE = "writeback", + DEV_PREFIX = "sd", + DISCARD = "unmap", + IMAGE = "opensuse-leap-15.2", + IMAGE_UNAME = "oneadmin", + IO = "threads", + SIZE = "" ] + MEMORY = "2048" + NIC = [ + MODEL = "virtio", + NETWORK = "testnet", + NETWORK_UNAME = "oneadmin" ] + OS = [ + ARCH = "x86_64", + BOOT = "disk0" ] + SCHED_REQUIREMENTS = "CLUSTER_ID=\\"100\\"" + VCPU = "2" + +- name: Delete the TEMPLATE by id + community.general.one_template: + id: 6459 + state: absent +''' + +RETURN = ''' +id: + description: template id + type: int + returned: when I(state=present) + sample: 153 +name: + description: template name + type: str + returned: when I(state=present) + sample: app1 +template: + description: the parsed template + type: dict + returned: when I(state=present) +group_id: + description: template's group id + type: int + returned: when I(state=present) + sample: 1 +group_name: + description: template's group name + type: str + returned: when I(state=present) + sample: one-users +owner_id: + description: template's owner id + type: int + returned: when I(state=present) + sample: 143 +owner_name: + description: template's owner name + type: str + returned: when I(state=present) + sample: ansible-test +''' + + +from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule + + +class 
TemplateModule(OpenNebulaModule): + def __init__(self): + argument_spec = dict( + id=dict(type='int', required=False), + name=dict(type='str', required=False), + state=dict(type='str', choices=['present', 'absent'], default='present'), + template=dict(type='str', required=False), + ) + + mutually_exclusive = [ + ['id', 'name'] + ] + + required_one_of = [('id', 'name')] + + required_if = [ + ['state', 'present', ['template']] + ] + + OpenNebulaModule.__init__(self, + argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_one_of=required_one_of, + required_if=required_if) + + def run(self, one, module, result): + params = module.params + id = params.get('id') + name = params.get('name') + desired_state = params.get('state') + template_data = params.get('template') + + self.result = {} + + template = self.get_template_instance(id, name) + needs_creation = False + if not template and desired_state != 'absent': + if id: + module.fail_json(msg="There is no template with id=" + str(id)) + else: + needs_creation = True + + if desired_state == 'absent': + self.result = self.delete_template(template) + else: + if needs_creation: + self.result = self.create_template(name, template_data) + else: + self.result = self.update_template(template, template_data) + + self.exit() + + def get_template(self, predicate): + # -3 means "Resources belonging to the user" + # the other two parameters are used for pagination, -1 for both essentially means "return all" + pool = self.one.templatepool.info(-3, -1, -1) + + for template in pool.VMTEMPLATE: + if predicate(template): + return template + + return None + + def get_template_by_id(self, template_id): + return self.get_template(lambda template: (template.ID == template_id)) + + def get_template_by_name(self, template_name): + return self.get_template(lambda template: (template.NAME == template_name)) + + def get_template_instance(self, requested_id, requested_name): + if requested_id: + return 
self.get_template_by_id(requested_id) + else: + return self.get_template_by_name(requested_name) + + def get_template_info(self, template): + info = { + 'id': template.ID, + 'name': template.NAME, + 'template': template.TEMPLATE, + 'user_name': template.UNAME, + 'user_id': template.UID, + 'group_name': template.GNAME, + 'group_id': template.GID, + } + + return info + + def create_template(self, name, template_data): + if not self.module.check_mode: + self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data) + + result = self.get_template_info(self.get_template_by_name(name)) + result['changed'] = True + + return result + + def update_template(self, template, template_data): + if not self.module.check_mode: + # 0 = replace the whole template + self.one.template.update(template.ID, template_data, 0) + + result = self.get_template_info(self.get_template_by_id(template.ID)) + if self.module.check_mode: + # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here. 
+ result['changed'] = True + else: + # if the previous parsed template data is not equal to the updated one, this has changed + result['changed'] = template.TEMPLATE != result['template'] + + return result + + def delete_template(self, template): + if not template: + return {'changed': False} + + if not self.module.check_mode: + self.one.template.delete(template.ID) + + return {'changed': True} + + +def main(): + TemplateModule().run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/one_template.py b/plugins/modules/one_template.py new file mode 120000 index 0000000000..78637e1843 --- /dev/null +++ b/plugins/modules/one_template.py @@ -0,0 +1 @@ +./cloud/opennebula/one_template.py \ No newline at end of file diff --git a/tests/integration/targets/one_template/aliases b/tests/integration/targets/one_template/aliases new file mode 100644 index 0000000000..1ff4e0b13e --- /dev/null +++ b/tests/integration/targets/one_template/aliases @@ -0,0 +1,2 @@ +cloud/opennebula +shippable/cloud/group1 diff --git a/tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz b/tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..169451a22d0c3956194a56742905497637dd905b GIT binary patch literal 1069 zcmV+|1k(E-iwFq)OIu(718;C;Zf<2_b!=fRW@&hIb#i5ME^2dcZUF6??{C^T7{}k= zU$NtLX?}+=<-jRiNo4;Rg~axMWqRXaHBWve?KQqYG7=-H`DD-1__++^R;6? 
zcJh$}pF3r?E#vtnOUf)Rv*-1S6gqD|cZxi_Y>(GPvASI5i=qmsGRBKUR77dMP*beT zWlfhCDmK(i!f7gY-rnDrn_YI%F=q|}jz6^!h<(qS4ou-x>0F2p7vHoqVPL}1K-Ue` z6iC60$cJbO)t5P@<#DaMmIDT)>j{2d=EP7Ro8uFi9-H-Nl5dykD(jMHJKS!S6H}lj z9|R5zL+H8T!1Ug`Ac&)ZWD0Gu(Hy2Zt!7pIYK!JL@FJg7sQTIz8_i*w(`pveNL>$2 z>XTLNHN{4AnC7&a#WYe^I2u6!#t4#eNjiC}nr0}b&}t^jw(YqH+@gUw@j^rs@$0=k zvXFIuCSeZ`P3pg~$jesT zzY<$oeHcNBbV#c-cx~N$LQfG{Zy zCmx~hg-7~p1@?s1*&BMSt91<;7FJ4)EsQiZQdL=q2F|qJTubFwTvc?b8zZgNP{i4aTo`dqn%*zBe7{V)VcLNbhvFl;k*0R@J(GX=0e~pf~_I z0673T0675p4vGVW1B3&F1B3(QzXy;XvV&^=6v2OQ;;`ee Date: Fri, 26 Mar 2021 12:33:11 +0100 Subject: [PATCH 0142/3093] AZP: update default container version (#2112) --- .azure-pipelines/azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 61569b0c2d..c6f546a485 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -36,7 +36,7 @@ variables: resources: containers: - container: default - image: quay.io/ansible/azure-pipelines-test-container:1.8.0 + image: quay.io/ansible/azure-pipelines-test-container:1.9.0 pool: Standard From 0de196413f91322c9bdb3b2e9c8c2f6e41764aab Mon Sep 17 00:00:00 2001 From: Bill Dodd Date: Fri, 26 Mar 2021 13:02:59 -0500 Subject: [PATCH 0143/3093] remove billdodd from team_redfish (#2118) --- .github/BOTMETA.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index f3ba526661..12fa40725a 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -723,7 +723,7 @@ files: $modules/remote_management/oneview/oneview_fcoe_network.py: maintainers: fgbulsoni $modules/remote_management/redfish/: - maintainers: $team_redfish billdodd + maintainers: $team_redfish ignore: jose-delarosa $modules/remote_management/stacki/stacki_host.py: maintainers: bsanders bbyhuy @@ -1017,7 +1017,7 @@ macros: team_opennebula: ilicmilan meerkampdvv rsmontero xorel 
team_oracle: manojmeda mross22 nalsaber team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16 - team_redfish: billdodd mraineri tomasg2012 xmadsen renxulei + team_redfish: mraineri tomasg2012 xmadsen renxulei team_rhn: FlossWare alikins barnabycourt vritant team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l From 73bb0f1900bd56269dcf79df102d5751d859a5c4 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Sat, 27 Mar 2021 18:43:53 +0430 Subject: [PATCH 0144/3093] vdo: add force option (#2110) * vdo: add force option * Add changelog * Improve the diff the next time something is added :) Co-authored-by: Felix Fontein * Add warning text placeholder by felixfontein Co-authored-by: Felix Fontein * Add warning text * Apply suggestion for warning text from rhawalsh Co-authored-by: Felix Fontein --- .../fragments/2110-vdo-add_force_option.yaml | 3 +++ plugins/modules/system/vdo.py | 18 +++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2110-vdo-add_force_option.yaml diff --git a/changelogs/fragments/2110-vdo-add_force_option.yaml b/changelogs/fragments/2110-vdo-add_force_option.yaml new file mode 100644 index 0000000000..9e93a919a2 --- /dev/null +++ b/changelogs/fragments/2110-vdo-add_force_option.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - vdo - add ``force`` option (https://github.com/ansible-collections/community.general/issues/2101). diff --git a/plugins/modules/system/vdo.py b/plugins/modules/system/vdo.py index 4049f82ed3..a27745510a 100644 --- a/plugins/modules/system/vdo.py +++ b/plugins/modules/system/vdo.py @@ -258,6 +258,18 @@ options: configured setting unless a different value is specified in the playbook. type: str + force: + description: + - When creating a volume, ignores any existing file system + or VDO signature already present in the storage device. 
+ When stopping or removing a VDO volume, first unmounts + the file system stored on the device if mounted. + - "B(Warning:) Since this parameter removes all safety + checks it is important to make sure that all parameters + provided are accurate and intentional." + type: bool + default: no + version_added: 2.4.0 notes: - In general, the default thread configuration should be used. requirements: @@ -409,6 +421,9 @@ def add_vdooptions(params): if ('indexmode' in params) and (params['indexmode'] == 'sparse'): options.append("--sparseIndex=enabled") + if ('force' in params) and (params['force']): + options.append("--force") + # Entering an invalid thread config results in a cryptic # 'Could not set up device mapper for %s' error from the 'vdo' # command execution. The dmsetup module on the system will @@ -465,7 +480,8 @@ def run_module(): biothreads=dict(type='str'), cputhreads=dict(type='str'), logicalthreads=dict(type='str'), - physicalthreads=dict(type='str') + physicalthreads=dict(type='str'), + force=dict(type='bool', default=False), ) # Seed the result dictionary in the object. 
There will be an From eb24e3366634422dc38d76303512d1d4d2c7867f Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 30 Mar 2021 19:24:08 +1300 Subject: [PATCH 0145/3093] git_config - fixed bug with scope file (#2125) * fixed bug - scope file was not working - added test to guarantee that behaviour - marked integration test as destructive, because it overwrites ~/.gitconfig * added changelog fragment * Update tests/integration/targets/git_config/tasks/setup_no_value.yml Co-authored-by: Felix Fontein * Update tests/integration/targets/git_config/tasks/get_set_state_present_file.yml Co-authored-by: Felix Fontein * Update tests/integration/targets/git_config/tasks/get_set_state_present_file.yml Co-authored-by: Felix Fontein * Update tests/integration/targets/git_config/aliases Co-authored-by: Felix Fontein * Update changelogs/fragments/2125-git-config-scope-file.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2125-git-config-scope-file.yml | 2 ++ plugins/modules/source_control/git_config.py | 9 +++--- tests/integration/targets/git_config/aliases | 1 + .../tasks/get_set_state_present_file.yml | 29 +++++++++++++++++++ .../targets/git_config/tasks/main.yml | 2 ++ .../git_config/tasks/setup_no_value.yml | 7 ++++- .../targets/git_config/tasks/setup_value.yml | 7 ++++- 7 files changed, 50 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/2125-git-config-scope-file.yml create mode 100644 tests/integration/targets/git_config/tasks/get_set_state_present_file.yml diff --git a/changelogs/fragments/2125-git-config-scope-file.yml b/changelogs/fragments/2125-git-config-scope-file.yml new file mode 100644 index 0000000000..75862e0333 --- /dev/null +++ b/changelogs/fragments/2125-git-config-scope-file.yml @@ -0,0 +1,2 @@ +bugfixes: + - git_config - fixed scope ``file`` behaviour and added integraton test for it (https://github.com/ansible-collections/community.general/issues/2117). 
diff --git a/plugins/modules/source_control/git_config.py b/plugins/modules/source_control/git_config.py index 16126b3bfa..ab71370115 100644 --- a/plugins/modules/source_control/git_config.py +++ b/plugins/modules/source_control/git_config.py @@ -216,14 +216,13 @@ def main(): args = [git_path, "config", "--includes"] if params['list_all']: args.append('-l') - if scope: - args.append("--" + scope) - if name: - args.append(name) - if scope == 'file': args.append('-f') args.append(params['file']) + elif scope: + args.append("--" + scope) + if name: + args.append(name) if scope == 'local': dir = params['repo'] diff --git a/tests/integration/targets/git_config/aliases b/tests/integration/targets/git_config/aliases index 757c99661d..114ac22bb1 100644 --- a/tests/integration/targets/git_config/aliases +++ b/tests/integration/targets/git_config/aliases @@ -1,2 +1,3 @@ shippable/posix/group3 skip/aix +destructive diff --git a/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml b/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml new file mode 100644 index 0000000000..20946ac393 --- /dev/null +++ b/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml @@ -0,0 +1,29 @@ +--- +- import_tasks: setup_no_value.yml + +- name: setting value with state=present + git_config: + name: "{{ option_name }}" + value: "{{ option_value }}" + scope: "file" + file: "{{ output_dir }}/gitconfig_file" + state: present + register: result + +- name: getting value with state=present + git_config: + name: "{{ option_name }}" + scope: "file" + file: "{{ output_dir }}/gitconfig_file" + state: present + register: get_result + +- name: assert set changed and value is correct with state=present + assert: + that: + - set_result is changed + - set_result.diff.before == "\n" + - set_result.diff.after == option_value + "\n" + - get_result is not changed + - get_result.config_value == option_value +... 
diff --git a/tests/integration/targets/git_config/tasks/main.yml b/tests/integration/targets/git_config/tasks/main.yml index 74127eb5c6..c88d52e27a 100644 --- a/tests/integration/targets/git_config/tasks/main.yml +++ b/tests/integration/targets/git_config/tasks/main.yml @@ -16,6 +16,8 @@ - import_tasks: get_set_no_state.yml # testing get/set option with state=present - import_tasks: get_set_state_present.yml + # testing get/set option with state=present and scope=file + - import_tasks: get_set_state_present_file.yml # testing state=absent without value to delete - import_tasks: unset_no_value.yml # testing state=absent with value to delete diff --git a/tests/integration/targets/git_config/tasks/setup_no_value.yml b/tests/integration/targets/git_config/tasks/setup_no_value.yml index 01a2c9735e..d5552450cf 100644 --- a/tests/integration/targets/git_config/tasks/setup_no_value.yml +++ b/tests/integration/targets/git_config/tasks/setup_no_value.yml @@ -5,4 +5,9 @@ file: path: ~/.gitconfig state: absent -... \ No newline at end of file + +- name: set up without value (file) + file: + path: "{{ output_dir }}/gitconfig_file" + state: absent +... diff --git a/tests/integration/targets/git_config/tasks/setup_value.yml b/tests/integration/targets/git_config/tasks/setup_value.yml index f5e0565441..3eff9c423a 100644 --- a/tests/integration/targets/git_config/tasks/setup_value.yml +++ b/tests/integration/targets/git_config/tasks/setup_value.yml @@ -5,4 +5,9 @@ copy: src: gitconfig dest: ~/.gitconfig -... \ No newline at end of file + +- name: set up with value (file) + copy: + src: gitconfig + dest: "{{ output_dir }}/gitconfig_file" +... 
From 19db6f24f7e939dd6f46a4cecf2930b3d9d6fd49 Mon Sep 17 00:00:00 2001 From: zhcli <49675498+zhcli@users.noreply.github.com> Date: Tue, 30 Mar 2021 21:12:26 +1100 Subject: [PATCH 0146/3093] Callback plugin: Azure Log Analytics (#2091) * adding plugins/callback/loganalytics.py * * fixed sanity check issues * adjusted documentation and license sections * added changelogs fragment * * added unit test * documentation updated * updated changelogs * further docuement update * minor fixes * updated unittest * suggested updates from community * remove AnsibleError section --- plugins/callback/loganalytics.py | 234 ++++++++++++++++++ .../plugins/callback/test_loganalytics.py | 64 +++++ 2 files changed, 298 insertions(+) create mode 100644 plugins/callback/loganalytics.py create mode 100644 tests/unit/plugins/callback/test_loganalytics.py diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py new file mode 100644 index 0000000000..507d6fccd9 --- /dev/null +++ b/plugins/callback/loganalytics.py @@ -0,0 +1,234 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + callback: loganalytics + type: aggregate + short_description: Posts task results to Azure Log Analytics + author: "Cyrus Li (@zhcli) " + description: + - This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace. + - Credits to authors of splunk callback plugin. + version_added: "2.4.0" + requirements: + - Whitelisting this callback plugin. + - An Azure log analytics work space has been established. + options: + workspace_id: + description: Workspace ID of the Azure log analytics workspace. + required: true + env: + - name: WORKSPACE_ID + ini: + - section: callback_loganalytics + key: workspace_id + shared_key: + description: Shared key to connect to Azure log analytics workspace. 
+ required: true + env: + - name: WORKSPACE_SHARED_KEY + ini: + - section: callback_loganalytics + key: shared_key +''' + +EXAMPLES = ''' +examples: | + Whitelist the plugin in ansible.cfg: + [defaults] + callback_whitelist = community.general.loganalytics + Set the environment variable: + export WORKSPACE_ID=01234567-0123-0123-0123-01234567890a + export WORKSPACE_SHARED_KEY=dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA== + Or configure the plugin in ansible.cfg in the callback_loganalytics block: + [callback_loganalytics] + workspace_id = 01234567-0123-0123-0123-01234567890a + shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA== +''' + +import hashlib +import hmac +import base64 +import logging +import json +import uuid +import socket +import getpass + +from datetime import datetime +from os.path import basename + +from ansible.module_utils.urls import open_url +from ansible.parsing.ajson import AnsibleJSONEncoder +from ansible.plugins.callback import CallbackBase + + +class AzureLogAnalyticsSource(object): + def __init__(self): + self.ansible_check_mode = False + self.ansible_playbook = "" + self.ansible_version = "" + self.session = str(uuid.uuid4()) + self.host = socket.gethostname() + self.user = getpass.getuser() + self.extra_vars = "" + + def __build_signature(self, date, workspace_id, shared_key, content_length): + # Build authorisation signature for Azure log analytics API call + sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format( + str(content_length), date) + utf8_sigs = sigs.encode('utf-8') + decoded_shared_key = base64.b64decode(shared_key) + hmac_sha256_sigs = hmac.new( + decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest() + encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8') + signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash) + return signature + + def __build_workspace_url(self, workspace_id): + 
return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id) + + def __rfc1123date(self): + return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + + def send_event(self, workspace_id, shared_key, state, result, runtime): + if result._task_fields['args'].get('_ansible_check_mode') is True: + self.ansible_check_mode = True + + if result._task_fields['args'].get('_ansible_version'): + self.ansible_version = \ + result._task_fields['args'].get('_ansible_version') + + if result._task._role: + ansible_role = str(result._task._role) + else: + ansible_role = None + + data = {} + data['uuid'] = result._task._uuid + data['session'] = self.session + data['status'] = state + data['timestamp'] = self.__rfc1123date() + data['host'] = self.host + data['user'] = self.user + data['runtime'] = runtime + data['ansible_version'] = self.ansible_version + data['ansible_check_mode'] = self.ansible_check_mode + data['ansible_host'] = result._host.name + data['ansible_playbook'] = self.ansible_playbook + data['ansible_role'] = ansible_role + data['ansible_task'] = result._task_fields + # Removing args since it can contain sensitive data + if 'args' in data['ansible_task']: + data['ansible_task'].pop('args') + data['ansible_result'] = result._result + if 'content' in data['ansible_result']: + data['ansible_result'].pop('content') + + # Adding extra vars info + data['extra_vars'] = self.extra_vars + + # Preparing the playbook logs as JSON format and send to Azure log analytics + jsondata = json.dumps({'event': data}, cls=AnsibleJSONEncoder, sort_keys=True) + content_length = len(jsondata) + rfc1123date = self.__rfc1123date() + signature = self.__build_signature(rfc1123date, workspace_id, shared_key, content_length) + workspace_url = self.__build_workspace_url(workspace_id) + + open_url( + workspace_url, + jsondata, + headers={ + 'content-type': 'application/json', + 'Authorization': signature, + 'Log-Type': 'ansible_playbook', + 'x-ms-date': 
rfc1123date + }, + method='POST' + ) + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'loganalytics' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display=display) + self.start_datetimes = {} # Collect task start times + self.workspace_id = None + self.shared_key = None + self.loganalytics = AzureLogAnalyticsSource() + + def _seconds_since_start(self, result): + return ( + datetime.utcnow() - + self.start_datetimes[result._task._uuid] + ).total_seconds() + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + self.workspace_id = self.get_option('workspace_id') + self.shared_key = self.get_option('shared_key') + + def v2_playbook_on_play_start(self, play): + vm = play.get_variable_manager() + extra_vars = vm.extra_vars + self.loganalytics.extra_vars = extra_vars + + def v2_playbook_on_start(self, playbook): + self.loganalytics.ansible_playbook = basename(playbook._file_name) + + def v2_playbook_on_task_start(self, task, is_conditional): + self.start_datetimes[task._uuid] = datetime.utcnow() + + def v2_playbook_on_handler_task_start(self, task): + self.start_datetimes[task._uuid] = datetime.utcnow() + + def v2_runner_on_ok(self, result, **kwargs): + self.loganalytics.send_event( + self.workspace_id, + self.shared_key, + 'OK', + result, + self._seconds_since_start(result) + ) + + def v2_runner_on_skipped(self, result, **kwargs): + self.loganalytics.send_event( + self.workspace_id, + self.shared_key, + 'SKIPPED', + result, + self._seconds_since_start(result) + ) + + def v2_runner_on_failed(self, result, **kwargs): + self.loganalytics.send_event( + self.workspace_id, + self.shared_key, + 'FAILED', + result, + self._seconds_since_start(result) + ) + + def runner_on_async_failed(self, result, **kwargs): + 
self.loganalytics.send_event( + self.workspace_id, + self.shared_key, + 'FAILED', + result, + self._seconds_since_start(result) + ) + + def v2_runner_on_unreachable(self, result, **kwargs): + self.loganalytics.send_event( + self.workspace_id, + self.shared_key, + 'UNREACHABLE', + result, + self._seconds_since_start(result) + ) diff --git a/tests/unit/plugins/callback/test_loganalytics.py b/tests/unit/plugins/callback/test_loganalytics.py new file mode 100644 index 0000000000..085e1163cb --- /dev/null +++ b/tests/unit/plugins/callback/test_loganalytics.py @@ -0,0 +1,64 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.executor.task_result import TaskResult +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import patch, call, MagicMock, Mock +from ansible_collections.community.general.plugins.callback.loganalytics import AzureLogAnalyticsSource +from datetime import datetime + +import json + + +class TestAzureLogAnalytics(unittest.TestCase): + @patch('ansible_collections.community.general.plugins.callback.loganalytics.socket') + def setUp(self, mock_socket): + mock_socket.gethostname.return_value = 'my-host' + mock_socket.gethostbyname.return_value = '1.2.3.4' + self.loganalytics = AzureLogAnalyticsSource() + self.mock_task = Mock('MockTask') + self.mock_task._role = 'myrole' + self.mock_task._uuid = 'myuuid' + self.task_fields = {'args': {}} + self.mock_host = Mock('MockHost') + self.mock_host.name = 'myhost' + + @patch('ansible_collections.community.general.plugins.callback.loganalytics.datetime') + @patch('ansible_collections.community.general.plugins.callback.loganalytics.open_url') + def test_overall(self, open_url_mock, mock_datetime): + mock_datetime.utcnow.return_value = datetime(2020, 12, 1) + result = 
TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) + + self.loganalytics.send_event(workspace_id='01234567-0123-0123-0123-01234567890a', + shared_key='dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==', + state='OK', + result=result, + runtime=100) + + args, kwargs = open_url_mock.call_args + sent_data = json.loads(args[1]) + + self.assertEqual(sent_data['event']['timestamp'], 'Tue, 01 Dec 2020 00:00:00 GMT') + self.assertEqual(sent_data['event']['host'], 'my-host') + self.assertEqual(sent_data['event']['uuid'], 'myuuid') + self.assertEqual(args[0], 'https://01234567-0123-0123-0123-01234567890a.ods.opinsights.azure.com/api/logs?api-version=2016-04-01') + + @patch('ansible_collections.community.general.plugins.callback.loganalytics.datetime') + @patch('ansible_collections.community.general.plugins.callback.loganalytics.open_url') + def test_auth_headers(self, open_url_mock, mock_datetime): + mock_datetime.utcnow.return_value = datetime(2020, 12, 1) + result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) + + self.loganalytics.send_event(workspace_id='01234567-0123-0123-0123-01234567890a', + shared_key='dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==', + state='OK', + result=result, + runtime=100) + + args, kwargs = open_url_mock.call_args + headers = kwargs['headers'] + + self.assertRegexpMatches(headers['Authorization'], r'^SharedKey 01234567-0123-0123-0123-01234567890a:.*=$') + self.assertEqual(headers['Log-Type'], 'ansible_playbook') From 3355e657815e40f72bfee1aa94e3211eb26e3cb2 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 30 Mar 2021 12:19:34 +0200 Subject: [PATCH 0147/3093] Next release will be 2.5.0. 
--- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 335b902da2..bb1eb75153 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 2.4.0 +version: 2.5.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 604a5dbf49c7d275e8d1d9ff2905ebdf5e1f14f7 Mon Sep 17 00:00:00 2001 From: Gaige B Paulsen Date: Wed, 31 Mar 2021 05:25:28 -0400 Subject: [PATCH 0148/3093] fix type information for vmadm.resolvers (#2136) * fix type information for vmadm.resolvers * Update changelogs/fragments/2135-vmadm-resolvers-type-fix.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/2135-vmadm-resolvers-type-fix.yml | 2 ++ plugins/modules/cloud/smartos/vmadm.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2135-vmadm-resolvers-type-fix.yml diff --git a/changelogs/fragments/2135-vmadm-resolvers-type-fix.yml b/changelogs/fragments/2135-vmadm-resolvers-type-fix.yml new file mode 100644 index 0000000000..fcce6e12e1 --- /dev/null +++ b/changelogs/fragments/2135-vmadm-resolvers-type-fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - vmadm - correct type of list elements in ``resolvers`` parameter (https://github.com/ansible-collections/community.general/issues/2135). diff --git a/plugins/modules/cloud/smartos/vmadm.py b/plugins/modules/cloud/smartos/vmadm.py index 45dc86cc14..63a4c21231 100644 --- a/plugins/modules/cloud/smartos/vmadm.py +++ b/plugins/modules/cloud/smartos/vmadm.py @@ -233,7 +233,7 @@ options: description: - List of resolvers to be put into C(/etc/resolv.conf). 
type: list - elements: dict + elements: str routes: required: false description: @@ -702,7 +702,7 @@ def main(): vnc_password=dict(type='str', no_log=True), disks=dict(type='list', elements='dict'), nics=dict(type='list', elements='dict'), - resolvers=dict(type='list', elements='dict'), + resolvers=dict(type='list', elements='str'), filesystems=dict(type='list', elements='dict'), ) From f1dbef414387ad50f7505625024cdbc3f8e1959f Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 1 Apr 2021 00:05:40 +1300 Subject: [PATCH 0149/3093] fixed str formatting (#2139) --- changelogs/fragments/2139-dimensiondata_network-str-format.yml | 2 ++ plugins/modules/cloud/dimensiondata/dimensiondata_network.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2139-dimensiondata_network-str-format.yml diff --git a/changelogs/fragments/2139-dimensiondata_network-str-format.yml b/changelogs/fragments/2139-dimensiondata_network-str-format.yml new file mode 100644 index 0000000000..115b04f045 --- /dev/null +++ b/changelogs/fragments/2139-dimensiondata_network-str-format.yml @@ -0,0 +1,2 @@ +bugfixes: + - dimensiondata_network - bug when formatting message, instead of % a simple comma was used (https://github.com/ansible-collections/community.general/pull/2139). 
diff --git a/plugins/modules/cloud/dimensiondata/dimensiondata_network.py b/plugins/modules/cloud/dimensiondata/dimensiondata_network.py index 2187ceaa45..246b486d06 100644 --- a/plugins/modules/cloud/dimensiondata/dimensiondata_network.py +++ b/plugins/modules/cloud/dimensiondata/dimensiondata_network.py @@ -260,7 +260,7 @@ class DimensionDataNetworkModule(DimensionDataModule): ) self.module.fail_json( - "Unexpected failure deleting network with id %s", network.id + "Unexpected failure deleting network with id %s" % network.id ) except DimensionDataAPIException as e: From 1d1cbc4f56b776cf490432fc6f3920a5602094d4 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 1 Apr 2021 18:50:24 +1300 Subject: [PATCH 0150/3093] apache2_mod_proxy - minor improvements/fixes (#2142) * minor improvements/fixes - moved imports from the bottom of the code to the top (ansible-style). - pythonified/simplified get_member_status()/set_member_status() - reduced clutter in Balancer.__init__() * added changelog fragment --- .../2142-apache2_mod_proxy-cleanup.yml | 2 ++ .../web_infrastructure/apache2_mod_proxy.py | 30 ++++++++----------- 2 files changed, 14 insertions(+), 18 deletions(-) create mode 100644 changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml diff --git a/changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml b/changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml new file mode 100644 index 0000000000..6a24f1afc3 --- /dev/null +++ b/changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml @@ -0,0 +1,2 @@ +minor_changes: + - apache2_mod_proxy - refactored/cleaned-up part of the code (https://github.com/ansible-collections/community.general/pull/2142). 
diff --git a/plugins/modules/web_infrastructure/apache2_mod_proxy.py b/plugins/modules/web_infrastructure/apache2_mod_proxy.py index dcf1656fcf..2ab679aaf6 100644 --- a/plugins/modules/web_infrastructure/apache2_mod_proxy.py +++ b/plugins/modules/web_infrastructure/apache2_mod_proxy.py @@ -198,6 +198,10 @@ members: import re import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six import iteritems + BEAUTIFUL_SOUP_IMP_ERR = None try: from BeautifulSoup import BeautifulSoup @@ -273,13 +277,8 @@ class BalancerMember(object): 'drained': 'Drn', 'hot_standby': 'Stby', 'ignore_errors': 'Ign'} - status = {} actual_status = str(self.attributes['Status']) - for mode in status_mapping.keys(): - if re.search(pattern=status_mapping[mode], string=actual_status): - status[mode] = True - else: - status[mode] = False + status = dict((mode, patt in actual_status) for mode, patt in iteritems(status_mapping)) return status def set_member_status(self, values): @@ -290,13 +289,10 @@ class BalancerMember(object): 'ignore_errors': '&w_status_I'} request_body = regexp_extraction(self.management_url, EXPRESSION, 1) - for k in values_mapping.keys(): - if values[str(k)]: - request_body = request_body + str(values_mapping[k]) + '=1' - else: - request_body = request_body + str(values_mapping[k]) + '=0' + values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for mode, url_param in iteritems(values_mapping)) + request_body = "{0}{1}".format(request_body, values_url) - response = fetch_url(self.module, self.management_url, data=str(request_body)) + response = fetch_url(self.module, self.management_url, data=request_body) if response[1]['status'] != 200: self.module.fail_json(msg="Could not set the member status! 
" + self.host + " " + response[1]['status']) @@ -309,11 +305,11 @@ class Balancer(object): def __init__(self, host, suffix, module, members=None, tls=False): if tls: - self.base_url = str(str('https://') + str(host)) - self.url = str(str('https://') + str(host) + str(suffix)) + self.base_url = 'https://' + str(host) + self.url = 'https://' + str(host) + str(suffix) else: - self.base_url = str(str('http://') + str(host)) - self.url = str(str('http://') + str(host) + str(suffix)) + self.base_url = 'http://' + str(host) + self.url = 'http://' + str(host) + str(suffix) self.module = module self.page = self.fetch_balancer_page() if members is None: @@ -444,7 +440,5 @@ def main(): module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!') -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.urls import fetch_url if __name__ == '__main__': main() From 3312ae08af00c64d16e34d940d1c3991a3a11b92 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 3 Apr 2021 09:05:28 +1300 Subject: [PATCH 0151/3093] kibana_plugin: fixed remove call + run_command with list instead of str (#2143) * fixed remove call + run_command with list instead of str * fixed the other calls to run_command() * added changelog fragment * adjustment on run_command params * Update changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...143-kibana_plugin-fixed-function-calls.yml | 2 ++ .../modules/database/misc/kibana_plugin.py | 25 ++++++++----------- 2 files changed, 12 insertions(+), 15 deletions(-) create mode 100644 changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml diff --git a/changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml b/changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml new file mode 100644 index 
0000000000..54a41cd237 --- /dev/null +++ b/changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml @@ -0,0 +1,2 @@ +bugfixes: + - kibana_plugin - added missing parameters to ``remove_plugin`` when using ``state=present force=true``, and fix potential quoting errors when invoking ``kibana`` (https://github.com/ansible-collections/community.general/pull/2143). diff --git a/plugins/modules/database/misc/kibana_plugin.py b/plugins/modules/database/misc/kibana_plugin.py index e8daf2ff58..25d7719353 100644 --- a/plugins/modules/database/misc/kibana_plugin.py +++ b/plugins/modules/database/misc/kibana_plugin.py @@ -170,25 +170,23 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, ki cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name] if url: - cmd_args.append("--url %s" % url) + cmd_args.extend(["--url", url]) if timeout: - cmd_args.append("--timeout %s" % timeout) + cmd_args.extend(["--timeout", timeout]) if allow_root: cmd_args.append('--allow-root') - cmd = " ".join(cmd_args) - if module.check_mode: - return True, cmd, "check mode", "" + return True, " ".join(cmd_args), "check mode", "" - rc, out, err = module.run_command(cmd) + rc, out, err = module.run_command(cmd_args) if rc != 0: reason = parse_error(out) module.fail_json(msg=reason) - return True, cmd, out, err + return True, " ".join(cmd_args), out, err def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4.6'): @@ -201,17 +199,15 @@ def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4 if allow_root: cmd_args.append('--allow-root') - cmd = " ".join(cmd_args) - if module.check_mode: - return True, cmd, "check mode", "" + return True, " ".join(cmd_args), "check mode", "" - rc, out, err = module.run_command(cmd) + rc, out, err = module.run_command(cmd_args) if rc != 0: reason = parse_error(out) module.fail_json(msg=reason) - return True, cmd, out, err + return True, " ".join(cmd_args), out, err def 
get_kibana_version(module, plugin_bin, allow_root): @@ -220,8 +216,7 @@ def get_kibana_version(module, plugin_bin, allow_root): if allow_root: cmd_args.append('--allow-root') - cmd = " ".join(cmd_args) - rc, out, err = module.run_command(cmd) + rc, out, err = module.run_command(cmd_args) if rc != 0: module.fail_json(msg="Failed to get Kibana version : %s" % err) @@ -269,7 +264,7 @@ def main(): if state == "present": if force: - remove_plugin(module, plugin_bin, name) + remove_plugin(module, plugin_bin, name, allow_root, kibana_version) changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, allow_root, kibana_version) elif state == "absent": From c8885fdfbdab6099e3be5347e908268063013066 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 3 Apr 2021 09:06:07 +1300 Subject: [PATCH 0152/3093] using get_bin_path() on atomic modules (#2144) * using get_bin_path() on atomic modules * added changelog fragment * Update changelogs/fragments/2144-atomic_get_bin_path.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2144-atomic_get_bin_path.yml | 4 ++++ .../modules/cloud/atomic/atomic_container.py | 22 +++++++++---------- plugins/modules/cloud/atomic/atomic_host.py | 10 +++------ plugins/modules/cloud/atomic/atomic_image.py | 20 ++++++++--------- 4 files changed, 28 insertions(+), 28 deletions(-) create mode 100644 changelogs/fragments/2144-atomic_get_bin_path.yml diff --git a/changelogs/fragments/2144-atomic_get_bin_path.yml b/changelogs/fragments/2144-atomic_get_bin_path.yml new file mode 100644 index 0000000000..eeb55114d2 --- /dev/null +++ b/changelogs/fragments/2144-atomic_get_bin_path.yml @@ -0,0 +1,4 @@ +minor_changes: + - atomic_container - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144). 
+ - atomic_host - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144). + - atomic_image - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144). diff --git a/plugins/modules/cloud/atomic/atomic_container.py b/plugins/modules/cloud/atomic/atomic_container.py index 1364a42c89..273cdc8931 100644 --- a/plugins/modules/cloud/atomic/atomic_container.py +++ b/plugins/modules/cloud/atomic/atomic_container.py @@ -102,7 +102,8 @@ def do_install(module, mode, rootfs, container, image, values_list, backend): system_list = ["--system"] if mode == 'system' else [] user_list = ["--user"] if mode == 'user' else [] rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else [] - args = ['atomic', 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image] + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image] rc, out, err = module.run_command(args, check_rc=False) if rc != 0: module.fail_json(rc=rc, msg=err) @@ -112,7 +113,8 @@ def do_install(module, mode, rootfs, container, image, values_list, backend): def do_update(module, container, image, values_list): - args = ['atomic', 'containers', 'update', "--rebase=%s" % image] + values_list + [container] + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container] rc, out, err = module.run_command(args, check_rc=False) if rc != 0: module.fail_json(rc=rc, msg=err) @@ -122,7 +124,8 @@ def do_update(module, container, image, values_list): def do_uninstall(module, name, backend): - args = ['atomic', 'uninstall', "--storage=%s" % backend, name] + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 
'uninstall', "--storage=%s" % backend, name] rc, out, err = module.run_command(args, check_rc=False) if rc != 0: module.fail_json(rc=rc, msg=err) @@ -130,7 +133,8 @@ def do_uninstall(module, name, backend): def do_rollback(module, name): - args = ['atomic', 'containers', 'rollback', name] + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'containers', 'rollback', name] rc, out, err = module.run_command(args, check_rc=False) if rc != 0: module.fail_json(rc=rc, msg=err) @@ -148,14 +152,12 @@ def core(module): backend = module.params['backend'] state = module.params['state'] + atomic_bin = module.get_bin_path('atomic') module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - out = {} - err = {} - rc = 0 values_list = ["--set=%s" % x for x in values] if values else [] - args = ['atomic', 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name] + args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name] rc, out, err = module.run_command(args, check_rc=False) if rc != 0: module.fail_json(rc=rc, msg=err) @@ -194,9 +196,7 @@ def main(): module.fail_json(msg="values is supported only with user or system mode") # Verify that the platform supports atomic command - rc, out, err = module.run_command('atomic -v', check_rc=False) - if rc != 0: - module.fail_json(msg="Error in running atomic command", err=err) + dummy = module.get_bin_path('atomic', required=True) try: core(module) diff --git a/plugins/modules/cloud/atomic/atomic_host.py b/plugins/modules/cloud/atomic/atomic_host.py index 993933e53c..d7164a9adb 100644 --- a/plugins/modules/cloud/atomic/atomic_host.py +++ b/plugins/modules/cloud/atomic/atomic_host.py @@ -57,18 +57,14 @@ from ansible.module_utils._text import to_native def core(module): revision = module.params['revision'] - args = [] + atomic_bin = module.get_bin_path('atomic', required=True) 
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') if revision == 'latest': - args = ['atomic', 'host', 'upgrade'] + args = [atomic_bin, 'host', 'upgrade'] else: - args = ['atomic', 'host', 'deploy', revision] - - out = {} - err = {} - rc = 0 + args = [atomic_bin, 'host', 'deploy', revision] rc, out, err = module.run_command(args, check_rc=False) diff --git a/plugins/modules/cloud/atomic/atomic_image.py b/plugins/modules/cloud/atomic/atomic_image.py index c915ed0b78..fd99bb3bf7 100644 --- a/plugins/modules/cloud/atomic/atomic_image.py +++ b/plugins/modules/cloud/atomic/atomic_image.py @@ -73,7 +73,8 @@ from ansible.module_utils._text import to_native def do_upgrade(module, image): - args = ['atomic', 'update', '--force', image] + atomic_bin = module.get_bin_path('atomic') + args = [atomic_bin, 'update', '--force', image] rc, out, err = module.run_command(args, check_rc=False) if rc != 0: # something went wrong emit the msg module.fail_json(rc=rc, msg=err) @@ -91,20 +92,21 @@ def core(module): is_upgraded = False module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + atomic_bin = module.get_bin_path('atomic') out = {} err = {} rc = 0 if backend: if state == 'present' or state == 'latest': - args = ['atomic', 'pull', "--storage=%s" % backend, image] + args = [atomic_bin, 'pull', "--storage=%s" % backend, image] rc, out, err = module.run_command(args, check_rc=False) if rc < 0: module.fail_json(rc=rc, msg=err) else: out_run = "" if started: - args = ['atomic', 'run', "--storage=%s" % backend, image] + args = [atomic_bin, 'run', "--storage=%s" % backend, image] rc, out_run, err = module.run_command(args, check_rc=False) if rc < 0: module.fail_json(rc=rc, msg=err) @@ -112,7 +114,7 @@ def core(module): changed = "Extracting" in out or "Copying blob" in out module.exit_json(msg=(out + out_run), changed=changed) elif state == 'absent': - args = ['atomic', 'images', 'delete', "--storage=%s" % backend, image] + args = 
[atomic_bin, 'images', 'delete', "--storage=%s" % backend, image] rc, out, err = module.run_command(args, check_rc=False) if rc < 0: module.fail_json(rc=rc, msg=err) @@ -126,11 +128,11 @@ def core(module): is_upgraded = do_upgrade(module, image) if started: - args = ['atomic', 'run', image] + args = [atomic_bin, 'run', image] else: - args = ['atomic', 'install', image] + args = [atomic_bin, 'install', image] elif state == 'absent': - args = ['atomic', 'uninstall', image] + args = [atomic_bin, 'uninstall', image] rc, out, err = module.run_command(args, check_rc=False) @@ -155,9 +157,7 @@ def main(): ) # Verify that the platform supports atomic command - rc, out, err = module.run_command('atomic -v', check_rc=False) - if rc != 0: - module.fail_json(msg="Error in running atomic command", err=err) + dummy = module.get_bin_path('atomic', required=True) try: core(module) From 95156a11a183f0bbb0d1949550f832eda2f6a416 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 3 Apr 2021 11:46:02 +0200 Subject: [PATCH 0153/3093] [WIP] Committer guidelines (#2077) * First idea for committer guidelines. * Update commit-rights.md Co-authored-by: Andrew Klychkov * Update commit-rights.md Co-authored-by: John R Barker * Apply suggestions from code review Co-authored-by: Andrew Klychkov * Improve 'do not' list. * Add improvements from ansible/ansible#73782. 
* Apply suggestions from code review Co-authored-by: Amin Vakil * Apply suggestions from code review * Update commit-rights.md * Update commit-rights.md Co-authored-by: Andrew Klychkov Co-authored-by: John R Barker Co-authored-by: Amin Vakil --- commit-rights.md | 72 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 commit-rights.md diff --git a/commit-rights.md b/commit-rights.md new file mode 100644 index 0000000000..d10bea9af7 --- /dev/null +++ b/commit-rights.md @@ -0,0 +1,72 @@ +Committers Guidelines for community.general +=========================================== + +This document is based on the [Ansible committer guidelines](https://github.com/ansible/ansible/blob/b57444af14062ec96e0af75fdfc2098c74fe2d9a/docs/docsite/rst/community/committer_guidelines.rst) ([latest version](https://docs.ansible.com/ansible/devel/community/committer_guidelines.html)). + +These are the guidelines for people with commit privileges on the Ansible Community General Collection GitHub repository. Please read the guidelines before you commit. + +These guidelines apply to everyone. At the same time, this is NOT a process document. So just use good judgment. You have been given commit access because we trust your judgment. + +That said, use the trust wisely. + +If you abuse the trust and break components and builds, and so on, the trust level falls and you may be asked not to commit or you may lose your commit privileges. + +Our workflow on GitHub +---------------------- + +As a committer, you may already know this, but our workflow forms a lot of our team policies. 
Please ensure you are aware of the following workflow steps: + +* Fork the repository upon which you want to do some work to your own personal repository +* Work on the specific branch upon which you need to commit +* Create a Pull Request back to the collection repository and await reviews +* Adjust code as necessary based on the Comments provided +* Ask someone from the other committers to do a final review and merge + +Sometimes, committers merge their own pull requests. This section is a set of guidelines. If you are changing a comma in a doc or making a very minor change, you can use your best judgement. This is another trust thing. The process is critical for any major change, but for little things or getting something done quickly, use your best judgement and make sure people on the team are aware of your work. + +Roles +----- +* Release managers: Merge pull requests to `stable-X` branches, create tags to do releases. +* Committers: Fine to do PRs for most things, but we should have a timebox. Hanging PRs may merge on the judgement of these devs. +* Module maintainers: Module maintainers own specific modules and have indirect commit access through the current module PR mechanisms. This is primary [ansibullbot](https://github.com/ansibullbot)'s `shipit` mechanism. + +General rules +------------- +Individuals with direct commit access to this collection repository are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than rules, treat these as general *guidelines*, individuals with this power are expected to use their best judgement. + +* Do NOTs: + + - Do not commit directly. + - Do not merge your own PRs. Someone else should have a chance to review and approve the PR merge. You have a small amount of leeway here for very minor changes. + - Do not forget about non-standard / alternate environments. Consider the alternatives. 
Yes, people have bad/unusual/strange environments (like binaries from multiple init systems installed), but they are the ones who need us the most. + - Do not drag your community team members down. Discuss the technical merits of any pull requests you review. Avoid negativity and personal comments. For more guidance on being a good community member, read the [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html). + - Do not forget about the maintenance burden. High-maintenance features may not be worth adding. + - Do not break playbooks. Always keep backwards compatibility in mind. + - Do not forget to keep it simple. Complexity breeds all kinds of problems. + - Do not merge to branches other than `main`, especially not to `stable-X`, if you do not have explicit permission to do so. + - Do not create tags. Tags are used in the release process, and should only be created by the people responsible for managing the stable branches. + +* Do: + + - Squash, avoid merges whenever possible, use GitHub's squash commits or cherry pick if needed (bisect thanks you). + - Be active. Committers who have no activity on the project (through merges, triage, commits, and so on) will have their permissions suspended. + - Consider backwards compatibility (goes back to "do not break existing playbooks"). + - Write tests. PRs with tests are looked at with more priority than PRs without tests that should have them included. While not all changes require tests, be sure to add them for bug fixes or functionality changes. + - Discuss with other committers, specially when you are unsure of something. + - Document! If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so. + - Consider scope, sometimes a fix can be generalized. + - Keep it simple, then things are maintainable, debuggable and intelligible. 
+ +Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community. + + +People +------ + +Individuals who have been asked to become a part of this group have generally been contributing in significant ways to the community.general collection for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, through a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act. + +| Name | GitHub ID | IRC Nick | Other | +| ------------------- | -------------------- | ------------------ | -------------------- | +| Andrew Klychkov | andersson007 | andersson007_ | | +| Felix Fontein | felixfontein | felixfontein | | +| John R Barker | gundalow | gundalow | | From d92d0632eb374ca1472d32c1473507a596cfa58b Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 5 Apr 2021 09:38:59 +1200 Subject: [PATCH 0154/3093] fixed calls to list.extend() (#2161) * fixed calls to list.extend() * added changelog fragment * Update changelogs/fragments/2161-pkgutil-list-extend.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/2161-pkgutil-list-extend.yml | 2 ++ plugins/modules/packaging/os/pkgutil.py | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2161-pkgutil-list-extend.yml diff --git a/changelogs/fragments/2161-pkgutil-list-extend.yml b/changelogs/fragments/2161-pkgutil-list-extend.yml new file mode 100644 index 0000000000..9af970afd8 --- /dev/null +++ b/changelogs/fragments/2161-pkgutil-list-extend.yml @@ -0,0 +1,2 @@ +bugfixes: + - pkgutil - fixed calls to ``list.extend()`` (https://github.com/ansible-collections/community.general/pull/2161). 
diff --git a/plugins/modules/packaging/os/pkgutil.py b/plugins/modules/packaging/os/pkgutil.py index 9ec0ebaad6..0f1daca4ef 100644 --- a/plugins/modules/packaging/os/pkgutil.py +++ b/plugins/modules/packaging/os/pkgutil.py @@ -130,7 +130,7 @@ def packages_not_latest(module, names, site, update_catalog): cmd.append('-U') cmd.append('-c') if site is not None: - cmd.extend('-t', site) + cmd.extend(['-t', site]) if names != ['*']: cmd.extend(names) rc, out, err = run_command(module, cmd) @@ -159,7 +159,7 @@ def package_install(module, state, pkgs, site, update_catalog, force): if update_catalog: cmd.append('-U') if site is not None: - cmd.extend('-t', site) + cmd.extend(['-t', site]) if force: cmd.append('-f') cmd.extend(pkgs) @@ -174,7 +174,7 @@ def package_upgrade(module, pkgs, site, update_catalog, force): if update_catalog: cmd.append('-U') if site is not None: - cmd.extend('-t', site) + cmd.extend(['-t', site]) if force: cmd.append('-f') cmd += pkgs From b97e31dd552a3c8957e046590b5172ebb93bbde9 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 5 Apr 2021 19:22:06 +1200 Subject: [PATCH 0155/3093] rewritten as list literals (#2160) * rewritten as list literals * added changelog fragment --- changelogs/fragments/2160-list-literals.yml | 11 +++++ plugins/lookup/hiera.py | 4 +- plugins/module_utils/known_hosts.py | 11 ++--- plugins/modules/cloud/smartos/nictagadm.py | 16 ++------ .../cloud/smartos/smartos_image_info.py | 5 +-- plugins/modules/files/xattr.py | 9 ++-- plugins/modules/net_tools/ipwcli_dns.py | 24 +++++++---- plugins/modules/packaging/os/svr4pkg.py | 3 +- plugins/modules/storage/zfs/zfs_facts.py | 15 ++----- plugins/modules/storage/zfs/zpool_facts.py | 10 +---- plugins/modules/system/beadm.py | 41 ++++--------------- 11 files changed, 55 insertions(+), 94 deletions(-) create mode 100644 changelogs/fragments/2160-list-literals.yml diff --git a/changelogs/fragments/2160-list-literals.yml 
b/changelogs/fragments/2160-list-literals.yml new file mode 100644 index 0000000000..661b1e322e --- /dev/null +++ b/changelogs/fragments/2160-list-literals.yml @@ -0,0 +1,11 @@ +minor_changes: + - hiera lookup - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). + - known_hosts module utils - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). + - nictagadm - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). + - smartos_image_info - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). + - xattr - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). + - ipwcli_dns - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). + - svr4pkg - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). + - zfs_facts - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). + - zpool_facts - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). + - beadm - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). 
diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index e757f41215..1ce82d7bd6 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -84,7 +84,5 @@ class Hiera(object): class LookupModule(LookupBase): def run(self, terms, variables=''): hiera = Hiera() - ret = [] - - ret.append(hiera.get(terms)) + ret = [hiera.get(terms)] return ret diff --git a/plugins/module_utils/known_hosts.py b/plugins/module_utils/known_hosts.py index 96f91ba80e..efd311eb51 100644 --- a/plugins/module_utils/known_hosts.py +++ b/plugins/module_utils/known_hosts.py @@ -87,11 +87,12 @@ def not_in_host_file(self, host): user_host_file = "~/.ssh/known_hosts" user_host_file = os.path.expanduser(user_host_file) - host_file_list = [] - host_file_list.append(user_host_file) - host_file_list.append("/etc/ssh/ssh_known_hosts") - host_file_list.append("/etc/ssh/ssh_known_hosts2") - host_file_list.append("/etc/openssh/ssh_known_hosts") + host_file_list = [ + user_host_file, + "/etc/ssh/ssh_known_hosts", + "/etc/ssh/ssh_known_hosts2", + "/etc/openssh/ssh_known_hosts", + ] hfiles_not_found = 0 for hf in host_file_list: diff --git a/plugins/modules/cloud/smartos/nictagadm.py b/plugins/modules/cloud/smartos/nictagadm.py index 7db7c5ea45..05aba6f188 100644 --- a/plugins/modules/cloud/smartos/nictagadm.py +++ b/plugins/modules/cloud/smartos/nictagadm.py @@ -119,20 +119,13 @@ class NicTag(object): return is_mac(self.mac.lower()) def nictag_exists(self): - cmd = [self.nictagadm_bin] - - cmd.append('exists') - cmd.append(self.name) - + cmd = [self.nictagadm_bin, 'exists', self.name] (rc, dummy, dummy) = self.module.run_command(cmd) return rc == 0 def add_nictag(self): - cmd = [self.nictagadm_bin] - - cmd.append('-v') - cmd.append('add') + cmd = [self.nictagadm_bin, '-v', 'add'] if self.etherstub: cmd.append('-l') @@ -150,10 +143,7 @@ class NicTag(object): return self.module.run_command(cmd) def delete_nictag(self): - cmd = [self.nictagadm_bin] - - cmd.append('-v') - 
cmd.append('delete') + cmd = [self.nictagadm_bin, '-v', 'delete'] if self.force: cmd.append('-f') diff --git a/plugins/modules/cloud/smartos/smartos_image_info.py b/plugins/modules/cloud/smartos/smartos_image_info.py index 473d345ad8..45d8e34085 100644 --- a/plugins/modules/cloud/smartos/smartos_image_info.py +++ b/plugins/modules/cloud/smartos/smartos_image_info.py @@ -72,10 +72,7 @@ class ImageFacts(object): self.filters = module.params['filters'] def return_all_installed_images(self): - cmd = [self.module.get_bin_path('imgadm')] - - cmd.append('list') - cmd.append('-j') + cmd = [self.module.get_bin_path('imgadm'), 'list', '-j'] if self.filters: cmd.append(self.filters) diff --git a/plugins/modules/files/xattr.py b/plugins/modules/files/xattr.py index 0d5f9f46f3..7691f30905 100644 --- a/plugins/modules/files/xattr.py +++ b/plugins/modules/files/xattr.py @@ -98,9 +98,8 @@ from ansible.module_utils._text import to_native def get_xattr_keys(module, path, follow): - cmd = [module.get_bin_path('getfattr', True)] - # prevents warning and not sure why it's not default - cmd.append('--absolute-names') + cmd = [module.get_bin_path('getfattr', True), '--absolute-names'] + if not follow: cmd.append('-h') cmd.append(path) @@ -109,10 +108,8 @@ def get_xattr_keys(module, path, follow): def get_xattr(module, path, key, follow): + cmd = [module.get_bin_path('getfattr', True), '--absolute-names'] - cmd = [module.get_bin_path('getfattr', True)] - # prevents warning and not sure why it's not default - cmd.append('--absolute-names') if not follow: cmd.append('-h') if key is None: diff --git a/plugins/modules/net_tools/ipwcli_dns.py b/plugins/modules/net_tools/ipwcli_dns.py index 355c70346f..284f3ad810 100644 --- a/plugins/modules/net_tools/ipwcli_dns.py +++ b/plugins/modules/net_tools/ipwcli_dns.py @@ -205,9 +205,11 @@ class ResourceRecord(object): def list_record(self, record): # check if the record exists via list on ipwcli search = 'list %s' % (record.replace(';', 
'&&').replace('set', 'where')) - cmd = [self.module.get_bin_path('ipwcli', True)] - cmd.append('-user=%s' % (self.user)) - cmd.append('-password=%s' % (self.password)) + cmd = [ + self.module.get_bin_path('ipwcli', True), + '-user=%s' % self.user, + '-password=%s' % self.password, + ] rc, out, err = self.module.run_command(cmd, data=search) if 'Invalid username or password' in out: @@ -222,9 +224,11 @@ class ResourceRecord(object): def deploy_record(self, record): # check what happens if create fails on ipworks stdin = 'create %s' % (record) - cmd = [self.module.get_bin_path('ipwcli', True)] - cmd.append('-user=%s' % (self.user)) - cmd.append('-password=%s' % (self.password)) + cmd = [ + self.module.get_bin_path('ipwcli', True), + '-user=%s' % self.user, + '-password=%s' % self.password, + ] rc, out, err = self.module.run_command(cmd, data=stdin) if 'Invalid username or password' in out: @@ -238,9 +242,11 @@ class ResourceRecord(object): def delete_record(self, record): # check what happens if create fails on ipworks stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where')) - cmd = [self.module.get_bin_path('ipwcli', True)] - cmd.append('-user=%s' % (self.user)) - cmd.append('-password=%s' % (self.password)) + cmd = [ + self.module.get_bin_path('ipwcli', True), + '-user=%s' % self.user, + '-password=%s' % self.password, + ] rc, out, err = self.module.run_command(cmd, data=stdin) if 'Invalid username or password' in out: diff --git a/plugins/modules/packaging/os/svr4pkg.py b/plugins/modules/packaging/os/svr4pkg.py index 21d17f4de2..ea3cd7d468 100644 --- a/plugins/modules/packaging/os/svr4pkg.py +++ b/plugins/modules/packaging/os/svr4pkg.py @@ -108,8 +108,7 @@ from ansible.module_utils.basic import AnsibleModule def package_installed(module, name, category): - cmd = [module.get_bin_path('pkginfo', True)] - cmd.append('-q') + cmd = [module.get_bin_path('pkginfo', True), '-q'] if category: cmd.append('-c') cmd.append(name) diff --git 
a/plugins/modules/storage/zfs/zfs_facts.py b/plugins/modules/storage/zfs/zfs_facts.py index 930214743a..cb106de111 100644 --- a/plugins/modules/storage/zfs/zfs_facts.py +++ b/plugins/modules/storage/zfs/zfs_facts.py @@ -175,10 +175,7 @@ class ZFSFacts(object): self.facts = [] def dataset_exists(self): - cmd = [self.module.get_bin_path('zfs')] - - cmd.append('list') - cmd.append(self.name) + cmd = [self.module.get_bin_path('zfs'), 'list', self.name] (rc, out, err) = self.module.run_command(cmd) @@ -188,10 +185,7 @@ class ZFSFacts(object): return False def get_facts(self): - cmd = [self.module.get_bin_path('zfs')] - - cmd.append('get') - cmd.append('-H') + cmd = [self.module.get_bin_path('zfs'), 'get', '-H'] if self.parsable: cmd.append('-p') if self.recurse: @@ -202,10 +196,7 @@ class ZFSFacts(object): if self.type: cmd.append('-t') cmd.append(self.type) - cmd.append('-o') - cmd.append('name,property,value') - cmd.append(self.properties) - cmd.append(self.name) + cmd.extend(['-o', 'name,property,value', self.properties, self.name]) (rc, out, err) = self.module.run_command(cmd) diff --git a/plugins/modules/storage/zfs/zpool_facts.py b/plugins/modules/storage/zfs/zpool_facts.py index eced85000e..ed3d6cf965 100644 --- a/plugins/modules/storage/zfs/zpool_facts.py +++ b/plugins/modules/storage/zfs/zpool_facts.py @@ -134,10 +134,7 @@ class ZPoolFacts(object): self.facts = [] def pool_exists(self): - cmd = [self.module.get_bin_path('zpool')] - - cmd.append('list') - cmd.append(self.name) + cmd = [self.module.get_bin_path('zpool'), 'list', self.name] (rc, out, err) = self.module.run_command(cmd) @@ -147,10 +144,7 @@ class ZPoolFacts(object): return False def get_facts(self): - cmd = [self.module.get_bin_path('zpool')] - - cmd.append('get') - cmd.append('-H') + cmd = [self.module.get_bin_path('zpool'), 'get', '-H'] if self.parsable: cmd.append('-p') cmd.append('-o') diff --git a/plugins/modules/system/beadm.py b/plugins/modules/system/beadm.py index d34c5e7d96..d89ca79af1 
100644 --- a/plugins/modules/system/beadm.py +++ b/plugins/modules/system/beadm.py @@ -154,9 +154,7 @@ class BE(object): self.is_freebsd = os.uname()[0] == 'FreeBSD' def _beadm_list(self): - cmd = [self.module.get_bin_path('beadm')] - cmd.append('list') - cmd.append('-H') + cmd = [self.module.get_bin_path('beadm'), 'list', '-H'] if '@' in self.name: cmd.append('-s') return self.module.run_command(cmd) @@ -218,42 +216,26 @@ class BE(object): return False def activate_be(self): - cmd = [self.module.get_bin_path('beadm')] - - cmd.append('activate') - cmd.append(self.name) - + cmd = [self.module.get_bin_path('beadm'), 'activate', self.name] return self.module.run_command(cmd) def create_be(self): - cmd = [self.module.get_bin_path('beadm')] - - cmd.append('create') + cmd = [self.module.get_bin_path('beadm'), 'create'] if self.snapshot: - cmd.append('-e') - cmd.append(self.snapshot) - + cmd.extend(['-e', self.snapshot]) if not self.is_freebsd: if self.description: - cmd.append('-d') - cmd.append(self.description) - + cmd.extend(['-d', self.description]) if self.options: - cmd.append('-o') - cmd.append(self.options) + cmd.extend(['-o', self.options]) cmd.append(self.name) return self.module.run_command(cmd) def destroy_be(self): - cmd = [self.module.get_bin_path('beadm')] - - cmd.append('destroy') - cmd.append('-F') - cmd.append(self.name) - + cmd = [self.module.get_bin_path('beadm'), 'destroy', '-F', self.name] return self.module.run_command(cmd) def is_mounted(self): @@ -276,10 +258,7 @@ class BE(object): return False def mount_be(self): - cmd = [self.module.get_bin_path('beadm')] - - cmd.append('mount') - cmd.append(self.name) + cmd = [self.module.get_bin_path('beadm'), 'mount', self.name] if self.mountpoint: cmd.append(self.mountpoint) @@ -287,9 +266,7 @@ class BE(object): return self.module.run_command(cmd) def unmount_be(self): - cmd = [self.module.get_bin_path('beadm')] - - cmd.append('unmount') + cmd = [self.module.get_bin_path('beadm'), 'unmount'] if self.force: 
cmd.append('-f') cmd.append(self.name) From b81a7cdd16bc1c5ab519e6338bff3d7058f00f8e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 5 Apr 2021 19:22:28 +1200 Subject: [PATCH 0156/3093] removed unreachable code (#2157) * removed unreachable code * added changelog fragment --- changelogs/fragments/2157-unreachable-code.yml | 4 ++++ plugins/modules/cloud/misc/rhevm.py | 18 ------------------ plugins/modules/cloud/ovh/ovh_ip_failover.py | 1 - .../bitbucket/bitbucket_pipeline_variable.py | 2 -- 4 files changed, 4 insertions(+), 21 deletions(-) create mode 100644 changelogs/fragments/2157-unreachable-code.yml diff --git a/changelogs/fragments/2157-unreachable-code.yml b/changelogs/fragments/2157-unreachable-code.yml new file mode 100644 index 0000000000..7cb84b4db9 --- /dev/null +++ b/changelogs/fragments/2157-unreachable-code.yml @@ -0,0 +1,4 @@ +minor_changes: + - rhevm - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157). + - ovh_ip_failover - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157). + - bitbucket_pipeline_variable - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157). 
diff --git a/plugins/modules/cloud/misc/rhevm.py b/plugins/modules/cloud/misc/rhevm.py index 2aebc3464d..cc6c1252bf 100644 --- a/plugins/modules/cloud/misc/rhevm.py +++ b/plugins/modules/cloud/misc/rhevm.py @@ -1229,24 +1229,6 @@ class RHEV(object): self.__get_conn() return self.conn.set_VM_Host(vmname, vmhost) - # pylint: disable=unreachable - VM = self.conn.get_VM(vmname) - HOST = self.conn.get_Host(vmhost) - - if VM.placement_policy.host is None: - self.conn.set_VM_Host(vmname, vmhost) - elif str(VM.placement_policy.host.id) != str(HOST.id): - self.conn.set_VM_Host(vmname, vmhost) - else: - setMsg("VM's startup host was already set to " + vmhost) - checkFail() - - if str(VM.status.state) == "up": - self.conn.migrate_VM(vmname, vmhost) - checkFail() - - return True - def setHost(self, hostname, cluster, ifaces): self.__get_conn() return self.conn.set_Host(hostname, cluster, ifaces) diff --git a/plugins/modules/cloud/ovh/ovh_ip_failover.py b/plugins/modules/cloud/ovh/ovh_ip_failover.py index 7ed3a5ee16..545c40fff7 100644 --- a/plugins/modules/cloud/ovh/ovh_ip_failover.py +++ b/plugins/modules/cloud/ovh/ovh_ip_failover.py @@ -162,7 +162,6 @@ def waitForTaskDone(client, name, taskId, timeout): currentTimeout -= 5 if currentTimeout < 0: return False - return True def main(): diff --git a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py index 33457fcab4..735db8b784 100644 --- a/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py +++ b/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py @@ -149,8 +149,6 @@ def get_existing_pipeline_variable(module, bitbucket): var['name'] = var.pop('key') return var - return None - def create_pipeline_variable(module, bitbucket): info, content = bitbucket.request( From 533e01a3f997a98d01fb8e3a35376e4aefefe8aa Mon Sep 17 00:00:00 2001 From: quidame Date: Mon, 5 Apr 2021 14:40:36 +0200 Subject: [PATCH 
0157/3093] java_keystore/fix 1667 improve temp files storage (#2163) * improve temporary files storage (naming/removal) * update unit tests * Update changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml Co-authored-by: Felix Fontein * add dedicated function to randomize PKCS#12 filename fix unit tests (also mock the new function) Co-authored-by: Felix Fontein --- ...ystore_1667_improve_temp_files_storage.yml | 5 +++ plugins/modules/system/java_keystore.py | 32 +++++++++------- .../modules/system/test_java_keystore.py | 38 +++++++++++++------ 3 files changed, 51 insertions(+), 24 deletions(-) create mode 100644 changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml diff --git a/changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml b/changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml new file mode 100644 index 0000000000..43d183707c --- /dev/null +++ b/changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml @@ -0,0 +1,5 @@ +--- +bugfixes: + - "java_keystore - use tempfile lib to create temporary files with randomized + names, and remove the temporary PKCS#12 keystore as well as other materials + (https://github.com/ansible-collections/community.general/issues/1667)." 
diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py index db37bdee91..82bd03678c 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/system/java_keystore.py @@ -114,13 +114,15 @@ cmd: description: Executed command to get action done returned: changed and failure type: str - sample: "openssl x509 -noout -in /tmp/cert.crt -fingerprint -sha256" + sample: "/usr/bin/openssl x509 -noout -in /tmp/user/1000/tmp8jd_lh23 -fingerprint -sha256" ''' -from ansible.module_utils.basic import AnsibleModule import os import re +import tempfile + +from ansible.module_utils.basic import AnsibleModule def read_certificate_fingerprint(module, openssl_bin, certificate_path): @@ -170,18 +172,25 @@ def run_commands(module, cmd, data=None, check_rc=True): return module.run_command(cmd, check_rc=check_rc, data=data) -def create_file(path, content): - with open(path, 'w') as f: +def create_path(): + tmpfd, tmpfile = tempfile.mkstemp() + os.remove(tmpfile) + return tmpfile + + +def create_file(content): + tmpfd, tmpfile = tempfile.mkstemp() + with os.fdopen(tmpfd, 'w') as f: f.write(content) - return path + return tmpfile def create_tmp_certificate(module): - return create_file("/tmp/%s.crt" % module.params['name'], module.params['certificate']) + return create_file(module.params['certificate']) def create_tmp_private_key(module): - return create_file("/tmp/%s.key" % module.params['name'], module.params['private_key']) + return create_file(module.params['private_key']) def cert_changed(module, openssl_bin, keytool_bin, keystore_path, keystore_pass, alias): @@ -200,17 +209,13 @@ def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, else: certificate_path = create_tmp_certificate(module) private_key_path = create_tmp_private_key(module) + keystore_p12_path = create_path() try: if os.path.exists(keystore_path): os.remove(keystore_path) - keystore_p12_path = "/tmp/keystore.p12" - if 
os.path.exists(keystore_p12_path): - os.remove(keystore_p12_path) - export_p12_cmd = [openssl_bin, "pkcs12", "-export", "-name", name, "-in", certificate_path, - "-inkey", private_key_path, "-out", - keystore_p12_path, "-passout", "stdin"] + "-inkey", private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] # when keypass is provided, add -passin cmd_stdin = "" @@ -249,6 +254,7 @@ def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, finally: os.remove(certificate_path) os.remove(private_key_path) + os.remove(keystore_p12_path) def update_jks_perm(module, keystore_path): diff --git a/tests/unit/plugins/modules/system/test_java_keystore.py b/tests/unit/plugins/modules/system/test_java_keystore.py index c2f3421c72..68863c149e 100644 --- a/tests/unit/plugins/modules/system/test_java_keystore.py +++ b/tests/unit/plugins/modules/system/test_java_keystore.py @@ -26,8 +26,8 @@ class TestCreateJavaKeystore(ModuleTestCase): orig_exists = os.path.exists self.spec = ArgumentSpec() - self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file', - side_effect=lambda path, content: path) + self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file') + self.mock_create_path = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_path') self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands') self.mock_os_path_exists = patch('os.path.exists', side_effect=lambda path: True if path == '/path/to/keystore.jks' else orig_exists(path)) @@ -37,6 +37,7 @@ class TestCreateJavaKeystore(ModuleTestCase): side_effect=lambda path: (False, None)) self.run_commands = self.mock_run_commands.start() self.create_file = self.mock_create_file.start() + self.create_path = self.mock_create_path.start() self.selinux_context = 
self.mock_selinux_context.start() self.is_special_selinux_path = self.mock_is_special_selinux_path.start() self.os_path_exists = self.mock_os_path_exists.start() @@ -45,6 +46,7 @@ class TestCreateJavaKeystore(ModuleTestCase): """Teardown.""" super(TestCreateJavaKeystore, self).tearDown() self.mock_create_file.stop() + self.mock_create_path.stop() self.mock_run_commands.stop() self.mock_selinux_context.stop() self.mock_is_special_selinux_path.stop() @@ -67,13 +69,15 @@ class TestCreateJavaKeystore(ModuleTestCase): module.exit_json = Mock() with patch('os.remove', return_value=True): + self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] + self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp'] self.run_commands.side_effect = lambda module, cmd, data: (0, '', '') create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "") module.exit_json.assert_called_once_with( changed=True, cmd=["keytool", "-importkeystore", "-destkeystore", "/path/to/keystore.jks", - "-srckeystore", "/tmp/keystore.p12", "-srcstoretype", "pkcs12", "-alias", "test", + "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", "-deststorepass", "changeit", "-srcstorepass", "changeit", "-noprompt"], msg='', rc=0, @@ -98,12 +102,15 @@ class TestCreateJavaKeystore(ModuleTestCase): module.fail_json = Mock() with patch('os.remove', return_value=True): + self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] + self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c'] self.run_commands.side_effect = [(1, '', ''), (0, '', '')] create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "passphrase-foo") module.fail_json.assert_called_once_with( cmd=["openssl", "pkcs12", "-export", "-name", "test", - "-in", "/tmp/foo.crt", "-inkey", "/tmp/foo.key", - "-out", "/tmp/keystore.p12", + "-in", "/tmp/tmpvalcrt32", + "-inkey", "/tmp/tmpwh4key0c", + "-out", "/tmp/tmp1cyp12xa", "-passout", "stdin", "-passin", 
"stdin"], msg='', @@ -127,12 +134,15 @@ class TestCreateJavaKeystore(ModuleTestCase): module.fail_json = Mock() with patch('os.remove', return_value=True): + self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] + self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c'] self.run_commands.side_effect = [(1, '', ''), (0, '', '')] create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "") module.fail_json.assert_called_once_with( cmd=["openssl", "pkcs12", "-export", "-name", "test", - "-in", "/tmp/foo.crt", "-inkey", "/tmp/foo.key", - "-out", "/tmp/keystore.p12", + "-in", "/tmp/tmpvalcrt32", + "-inkey", "/tmp/tmpwh4key0c", + "-out", "/tmp/tmp1cyp12xa", "-passout", "stdin"], msg='', rc=1 @@ -155,12 +165,14 @@ class TestCreateJavaKeystore(ModuleTestCase): module.fail_json = Mock() with patch('os.remove', return_value=True): + self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] + self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp'] self.run_commands.side_effect = [(0, '', ''), (1, '', '')] create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "") module.fail_json.assert_called_once_with( cmd=["keytool", "-importkeystore", "-destkeystore", "/path/to/keystore.jks", - "-srckeystore", "/tmp/keystore.p12", "-srcstoretype", "pkcs12", "-alias", "test", + "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", "-deststorepass", "changeit", "-srcstorepass", "changeit", "-noprompt"], msg='', rc=1 @@ -174,8 +186,7 @@ class TestCertChanged(ModuleTestCase): """Setup.""" super(TestCertChanged, self).setUp() self.spec = ArgumentSpec() - self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file', - side_effect=lambda path, content: path) + self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file') self.mock_run_commands = 
patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands') self.run_commands = self.mock_run_commands.start() self.create_file = self.mock_create_file.start() @@ -201,6 +212,7 @@ class TestCertChanged(ModuleTestCase): ) with patch('os.remove', return_value=True): + self.create_file.side_effect = ['/tmp/placeholder'] self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')] result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') self.assertFalse(result, 'Fingerprint is identical') @@ -220,6 +232,7 @@ class TestCertChanged(ModuleTestCase): ) with patch('os.remove', return_value=True): + self.create_file.side_effect = ['/tmp/placeholder'] self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')] result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') self.assertTrue(result, 'Fingerprint mismatch') @@ -239,6 +252,7 @@ class TestCertChanged(ModuleTestCase): ) with patch('os.remove', return_value=True): + self.create_file.side_effect = ['/tmp/placeholder'] self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (1, 'keytool error: java.lang.Exception: Alias does not exist', '')] result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') @@ -261,10 +275,11 @@ class TestCertChanged(ModuleTestCase): module.fail_json = Mock() with patch('os.remove', return_value=True): + self.create_file.side_effect = ['/tmp/tmpdj6bvvme'] self.run_commands.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')] cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') module.fail_json.assert_called_once_with( - cmd=["openssl", "x509", "-noout", "-in", "/tmp/foo.crt", "-fingerprint", "-sha256"], + cmd=["openssl", "x509", "-noout", "-in", "/tmp/tmpdj6bvvme", "-fingerprint", "-sha256"], msg='', err='Oops', rc=1 @@ 
-287,6 +302,7 @@ class TestCertChanged(ModuleTestCase): module.fail_json = Mock(return_value=True) with patch('os.remove', return_value=True): + self.create_file.side_effect = ['/tmp/placeholder'] self.run_commands.side_effect = [(0, 'foo: wxyz:9876:stuv', ''), (1, '', 'Oops')] cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') module.fail_json.assert_called_with( From d2070277e86274821306d1957a0178599b616c28 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 6 Apr 2021 01:19:13 +1200 Subject: [PATCH 0158/3093] ModuleHelper variables management (#2162) * added metadata for variables in module helper * adjustments * added separate support for tracking changes * rewrote the diff code * added integration test for module_helper * using ansible.module_utils.common.dict_transformations.dict_merge * improved dependency management * restore ModuleHelper to base classes of CmdStateModuleHelper * added assertions to ensure the failing module name appears in the error messages * added test code for state-based modules * fixed test name * renamed class to VarMeta * small fixes * fixes from the PR * fixed VarDict.__set_attr__ * added VarDict.__getitem__() * added changelog fragment * adjustments per PR * ModuleHelper.output is now aware of conflicting variable names * Update plugins/module_utils/module_helper.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2162-modhelper-variables.yml | 2 + plugins/module_utils/module_helper.py | 175 +++++++++++++++--- .../integration/targets/module_helper/aliases | 1 + .../targets/module_helper/library/mdepfail.py | 69 +++++++ .../targets/module_helper/library/msimple.py | 65 +++++++ .../targets/module_helper/library/mstate.py | 77 ++++++++ .../targets/module_helper/tasks/main.yml | 3 + .../targets/module_helper/tasks/mdepfail.yml | 14 ++ .../targets/module_helper/tasks/msimple.yml | 54 ++++++ 
.../targets/module_helper/tasks/mstate.yml | 79 ++++++++ .../module_utils/test_module_helper.py | 57 +++++- 11 files changed, 572 insertions(+), 24 deletions(-) create mode 100644 changelogs/fragments/2162-modhelper-variables.yml create mode 100644 tests/integration/targets/module_helper/aliases create mode 100644 tests/integration/targets/module_helper/library/mdepfail.py create mode 100644 tests/integration/targets/module_helper/library/msimple.py create mode 100644 tests/integration/targets/module_helper/library/mstate.py create mode 100644 tests/integration/targets/module_helper/tasks/main.yml create mode 100644 tests/integration/targets/module_helper/tasks/mdepfail.yml create mode 100644 tests/integration/targets/module_helper/tasks/msimple.yml create mode 100644 tests/integration/targets/module_helper/tasks/mstate.yml diff --git a/changelogs/fragments/2162-modhelper-variables.yml b/changelogs/fragments/2162-modhelper-variables.yml new file mode 100644 index 0000000000..68b0edc37e --- /dev/null +++ b/changelogs/fragments/2162-modhelper-variables.yml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper module utils - added mechanism to manage variables, providing automatic output of variables, change status and diff information (https://github.com/ansible-collections/community.general/pull/2162). 
diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index caf915abbf..44758c8733 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -10,6 +10,7 @@ from functools import partial, wraps import traceback from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.dict_transformations import dict_merge class ModuleHelperException(Exception): @@ -24,12 +25,12 @@ class ModuleHelperException(Exception): def __init__(self, *args, **kwargs): self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self) self.update_output = self._get_remove('update_output', kwargs) or {} - super(ModuleHelperException, self).__init__(*args, **kwargs) + super(ModuleHelperException, self).__init__(*args) class ArgFormat(object): """ - Argument formatter + Argument formatter for use as a command line parameter. Used in CmdMixin. """ BOOLEAN = 0 PRINTF = 1 @@ -50,7 +51,8 @@ class ArgFormat(object): def __init__(self, name, fmt=None, style=FORMAT, stars=0): """ - Creates a new formatter + Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for + the CLI command execution. :param name: Name of the argument to be formatted :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that :param style: Whether arg_format (as str) should use printf-style formatting. 
@@ -106,7 +108,7 @@ def cause_changes(func, on_success=True, on_failure=False): func(*args, **kwargs) if on_success: self.changed = True - except Exception as e: + except Exception: if on_failure: self.changed = True raise @@ -123,11 +125,12 @@ def module_fails_on_exception(func): except ModuleHelperException as e: if e.update_output: self.update_output(e.update_output) - self.module.fail_json(changed=False, msg=e.msg, exception=traceback.format_exc(), output=self.output, vars=self.vars) + self.module.fail_json(msg=e.msg, exception=traceback.format_exc(), + output=self.output, vars=self.vars.output(), **self.output) except Exception as e: - self.vars.msg = "Module failed with exception: {0}".format(str(e).strip()) - self.vars.exception = traceback.format_exc() - self.module.fail_json(changed=False, msg=self.vars.msg, exception=self.vars.exception, output=self.output, vars=self.vars) + msg = "Module failed with exception: {0}".format(str(e).strip()) + self.module.fail_json(msg=msg, exception=traceback.format_exc(), + output=self.output, vars=self.vars.output(), **self.output) return wrapper @@ -141,7 +144,7 @@ class DependencyCtxMgr(object): self.exc_tb = None def __enter__(self): - pass + return self def __exit__(self, exc_type, exc_val, exc_tb): self.has_it = exc_type is None @@ -155,17 +158,120 @@ class DependencyCtxMgr(object): return self.msg or str(self.exc_val) -class ModuleHelper(object): - _dependencies = [] - module = {} - facts_name = None +class VarMeta(object): + def __init__(self, diff=False, output=False, change=None): + self.init = False + self.initial_value = None + self.value = None + + self.diff = diff + self.change = diff if change is None else change + self.output = output + + def set(self, diff=None, output=None, change=None): + if diff is not None: + self.diff = diff + if output is not None: + self.output = output + if change is not None: + self.change = change + + def set_value(self, value): + if not self.init: + self.initial_value = value + 
self.init = True + self.value = value + return self + + @property + def has_changed(self): + return self.change and (self.initial_value != self.value) + + @property + def diff_result(self): + return None if not (self.diff and self.has_changed) else { + 'before': self.initial_value, + 'after': self.value, + } + + def __str__(self): + return "".format( + self.value, self.initial_value, self.diff, self.output, self.change + ) + + +class ModuleHelper(object): + _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed') + _dependencies = [] + module = None + facts_name = None + output_params = () + diff_params = () + change_params = () + + class VarDict(object): + def __init__(self): + self._data = dict() + self._meta = dict() + + def __getitem__(self, item): + return self._data[item] + + def __setitem__(self, key, value): + self.set(key, value) - class AttrDict(dict): def __getattr__(self, item): - return self[item] + try: + return self._data[item] + except KeyError: + return getattr(self._data, item) + + def __setattr__(self, key, value): + if key in ('_data', '_meta'): + super(ModuleHelper.VarDict, self).__setattr__(key, value) + else: + self.set(key, value) + + def meta(self, name): + return self._meta[name] + + def set_meta(self, name, **kwargs): + self.meta(name).set(**kwargs) + + def set(self, name, value, **kwargs): + if name in ('_data', '_meta'): + raise ValueError("Names _data and _meta are reserved for use by ModuleHelper") + self._data[name] = value + if name in self._meta: + meta = self.meta(name) + else: + if 'output' not in kwargs: + kwargs['output'] = True + meta = VarMeta(**kwargs) + meta.set_value(value) + self._meta[name] = meta + + def output(self): + return dict((k, v) for k, v in self._data.items() if self.meta(k).output) + + def diff(self): + diff_results = [(k, self.meta(k).diff_result) for k in self._data] + diff_results = [dr for dr in diff_results if dr[1] is not None] + if diff_results: + before = dict((dr[0], dr[1]['before']) 
for dr in diff_results) + after = dict((dr[0], dr[1]['after']) for dr in diff_results) + return {'before': before, 'after': after} + + return None + + def change_vars(self): + return [v for v in self._data if self.meta(v).change] + + def has_changed(self, v): + return self._meta[v].has_changed def __init__(self, module=None): - self.vars = ModuleHelper.AttrDict() + self.vars = ModuleHelper.VarDict() self.output_dict = dict() self.facts_dict = dict() self._changed = False @@ -173,9 +279,17 @@ class ModuleHelper(object): if module: self.module = module - if isinstance(self.module, dict): + if not isinstance(self.module, AnsibleModule): self.module = AnsibleModule(**self.module) + for name, value in self.module.params.items(): + self.vars.set( + name, value, + diff=name in self.diff_params, + output=name in self.output_params, + change=None if not self.change_params else name in self.change_params, + ) + def update_output(self, **kwargs): self.output_dict.update(kwargs) @@ -191,6 +305,9 @@ class ModuleHelper(object): def __quit_module__(self): pass + def _vars_changed(self): + return any(self.vars.has_changed(v) for v in self.vars.change_vars()) + @property def changed(self): return self._changed @@ -199,12 +316,24 @@ class ModuleHelper(object): def changed(self, value): self._changed = value + def has_changed(self): + return self.changed or self._vars_changed() + @property def output(self): - result = dict(self.vars) + result = dict(self.vars.output()) result.update(self.output_dict) if self.facts_name: result['ansible_facts'] = {self.facts_name: self.facts_dict} + if self.module._diff: + diff = result.get('diff', {}) + vars_diff = self.vars.diff() or {} + result['diff'] = dict_merge(dict(diff), vars_diff) + + for varname in result: + if varname in self._output_conflict_list: + result["_" + varname] = result[varname] + del result[varname] return result @module_fails_on_exception @@ -213,7 +342,7 @@ class ModuleHelper(object): self.__init_module__() self.__run__() 
self.__quit_module__() - self.module.exit_json(changed=self.changed, **self.output_dict) + self.module.exit_json(changed=self.has_changed(), **self.output) @classmethod def dependency(cls, name, msg): @@ -224,9 +353,9 @@ class ModuleHelper(object): for d in self._dependencies: if not d.has_it: self.module.fail_json(changed=False, - exception=d.exc_val.__traceback__.format_exc(), + exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)), msg=d.text, - **self.output_dict) + **self.output) class StateMixin(object): @@ -332,7 +461,7 @@ class CmdMixin(object): return rc, out, err def run_command(self, extra_params=None, params=None, *args, **kwargs): - self.vars['cmd_args'] = self._calculate_args(extra_params, params) + self.vars.cmd_args = self._calculate_args(extra_params, params) options = dict(self.run_command_fixed_options) env_update = dict(options.get('environ_update', {})) options['check_rc'] = options.get('check_rc', self.check_rc) @@ -341,7 +470,7 @@ class CmdMixin(object): self.update_output(force_lang=self.force_lang) options['environ_update'] = env_update options.update(kwargs) - rc, out, err = self.module.run_command(self.vars['cmd_args'], *args, **options) + rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options) self.update_output(rc=rc, stdout=out, stderr=err) return self.process_command_output(rc, out, err) diff --git a/tests/integration/targets/module_helper/aliases b/tests/integration/targets/module_helper/aliases new file mode 100644 index 0000000000..3005e4b26d --- /dev/null +++ b/tests/integration/targets/module_helper/aliases @@ -0,0 +1 @@ +shippable/posix/group4 diff --git a/tests/integration/targets/module_helper/library/mdepfail.py b/tests/integration/targets/module_helper/library/mdepfail.py new file mode 100644 index 0000000000..614c50dbf8 --- /dev/null +++ b/tests/integration/targets/module_helper/library/mdepfail.py @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2021, 
Alexei Znamensky +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: mdepfail +author: "Alexei Znamensky (@russoz)" +short_description: Simple module for testing +description: + - Simple module test description. +options: + a: + description: aaaa + type: int + b: + description: bbbb + type: str + c: + description: cccc + type: str +''' + +EXAMPLES = "" + +RETURN = "" + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible.module_utils.basic import missing_required_lib + +with ModuleHelper.dependency("nopackagewiththisname", missing_required_lib("nopackagewiththisname")): + import nopackagewiththisname + + +class MSimple(ModuleHelper): + output_params = ('a', 'b', 'c') + module = dict( + argument_spec=dict( + a=dict(type='int'), + b=dict(type='str'), + c=dict(type='str'), + ), + ) + + def __init_module__(self): + self.vars.set('value', None) + self.vars.set('abc', "abc", diff=True) + + def __run__(self): + if (0 if self.vars.a is None else self.vars.a) >= 100: + raise Exception("a >= 100") + if self.vars.c == "abc change": + self.vars['abc'] = "changed abc" + if self.vars.get('a', 0) == 2: + self.vars['b'] = str(self.vars.b) * 2 + self.vars['c'] = str(self.vars.c) * 2 + + +def main(): + msimple = MSimple() + msimple.run() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/module_helper/library/msimple.py b/tests/integration/targets/module_helper/library/msimple.py new file mode 100644 index 0000000000..da43eca777 --- /dev/null +++ b/tests/integration/targets/module_helper/library/msimple.py @@ -0,0 +1,65 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2021, Alexei Znamensky +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import 
absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: msimple +author: "Alexei Znamensky (@russoz)" +short_description: Simple module for testing +description: + - Simple module test description. +options: + a: + description: aaaa + type: int + b: + description: bbbb + type: str + c: + description: cccc + type: str +''' + +EXAMPLES = "" + +RETURN = "" + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + +class MSimple(ModuleHelper): + output_params = ('a', 'b', 'c') + module = dict( + argument_spec=dict( + a=dict(type='int'), + b=dict(type='str'), + c=dict(type='str'), + ), + ) + + def __init_module__(self): + self.vars.set('value', None) + self.vars.set('abc', "abc", diff=True) + + def __run__(self): + if (0 if self.vars.a is None else self.vars.a) >= 100: + raise Exception("a >= 100") + if self.vars.c == "abc change": + self.vars['abc'] = "changed abc" + if self.vars.get('a', 0) == 2: + self.vars['b'] = str(self.vars.b) * 2 + self.vars['c'] = str(self.vars.c) * 2 + + +def main(): + msimple = MSimple() + msimple.run() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/module_helper/library/mstate.py b/tests/integration/targets/module_helper/library/mstate.py new file mode 100644 index 0000000000..b8cf674505 --- /dev/null +++ b/tests/integration/targets/module_helper/library/mstate.py @@ -0,0 +1,77 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2021, Alexei Znamensky +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: mstate +author: "Alexei Znamensky (@russoz)" +short_description: State-based module for testing +description: + - State-based module test description. 
+options: + a: + description: aaaa + type: int + required: yes + b: + description: bbbb + type: str + c: + description: cccc + type: str + state: + description: test states + type: str + choices: [join, b_x_a, c_x_a, both_x_a] + default: join +''' + +EXAMPLES = "" + +RETURN = "" + +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper + + +class MState(StateModuleHelper): + output_params = ('a', 'b', 'c', 'state') + module = dict( + argument_spec=dict( + a=dict(type='int', required=True), + b=dict(type='str'), + c=dict(type='str'), + state=dict(type='str', choices=['join', 'b_x_a', 'c_x_a', 'both_x_a', 'nop'], default='join'), + ), + ) + + def __init_module__(self): + self.vars.set('result', "abc", diff=True) + + def state_join(self): + self.vars['result'] = "".join([str(self.vars.a), str(self.vars.b), str(self.vars.c)]) + + def state_b_x_a(self): + self.vars['result'] = str(self.vars.b) * self.vars.a + + def state_c_x_a(self): + self.vars['result'] = str(self.vars.c) * self.vars.a + + def state_both_x_a(self): + self.vars['result'] = (str(self.vars.b) + str(self.vars.c)) * self.vars.a + + def state_nop(self): + pass + + +def main(): + mstate = MState() + mstate.run() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/module_helper/tasks/main.yml b/tests/integration/targets/module_helper/tasks/main.yml new file mode 100644 index 0000000000..05c41c2a38 --- /dev/null +++ b/tests/integration/targets/module_helper/tasks/main.yml @@ -0,0 +1,3 @@ +- include_tasks: msimple.yml +- include_tasks: mdepfail.yml +- include_tasks: mstate.yml diff --git a/tests/integration/targets/module_helper/tasks/mdepfail.yml b/tests/integration/targets/module_helper/tasks/mdepfail.yml new file mode 100644 index 0000000000..d22738a778 --- /dev/null +++ b/tests/integration/targets/module_helper/tasks/mdepfail.yml @@ -0,0 +1,14 @@ +- name: test failing dependency + mdepfail: + a: 123 + ignore_errors: yes + register: 
result + +- name: assert failing dependency + assert: + that: + - result.failed is true + - '"Failed to import" in result.msg' + - '"nopackagewiththisname" in result.msg' + - '"ModuleNotFoundError:" in result.exception' + - '"nopackagewiththisname" in result.exception' diff --git a/tests/integration/targets/module_helper/tasks/msimple.yml b/tests/integration/targets/module_helper/tasks/msimple.yml new file mode 100644 index 0000000000..deb386f2b5 --- /dev/null +++ b/tests/integration/targets/module_helper/tasks/msimple.yml @@ -0,0 +1,54 @@ +- name: test msimple 1 + msimple: + a: 80 + register: simple1 + +- name: assert simple1 + assert: + that: + - simple1.a == 80 + - simple1.abc == "abc" + - simple1.changed is false + - simple1.value is none + +- name: test msimple 2 + msimple: + a: 101 + ignore_errors: yes + register: simple2 + +- name: assert simple2 + assert: + that: + - simple2.a == 101 + - 'simple2.msg == "Module failed with exception: a >= 100"' + - simple2.abc == "abc" + - simple2.failed is true + - simple2.changed is false + - simple2.value is none + +- name: test msimple 3 + msimple: + a: 2 + b: potatoes + register: simple3 + +- name: assert simple3 + assert: + that: + - simple3.a == 2 + - simple3.b == "potatoespotatoes" + - simple3.c == "NoneNone" + - simple3.changed is false + +- name: test msimple 4 + msimple: + c: abc change + register: simple4 + +- name: assert simple4 + assert: + that: + - simple4.c == "abc change" + - simple4.abc == "changed abc" + - simple4.changed is true diff --git a/tests/integration/targets/module_helper/tasks/mstate.yml b/tests/integration/targets/module_helper/tasks/mstate.yml new file mode 100644 index 0000000000..53329a3c70 --- /dev/null +++ b/tests/integration/targets/module_helper/tasks/mstate.yml @@ -0,0 +1,79 @@ +- name: test mstate 1 + mstate: + a: 80 + b: banana + c: cashew + state: nop + register: state1 + +- name: assert state1 + assert: + that: + - state1.a == 80 + - state1.b == "banana" + - state1.c == "cashew" + 
- state1.result == "abc" + - state1.changed is false + +- name: test mstate 2 + mstate: + a: 80 + b: banana + c: cashew + register: state2 + +- name: assert state2 + assert: + that: + - state2.a == 80 + - state2.b == "banana" + - state2.c == "cashew" + - state2.result == "80bananacashew" + - state2.changed is true + +- name: test mstate 3 + mstate: + a: 3 + b: banana + state: b_x_a + register: state3 + +- name: assert state3 + assert: + that: + - state3.a == 3 + - state3.b == "banana" + - state3.result == "bananabananabanana" + - state3.changed is true + +- name: test mstate 4 + mstate: + a: 4 + c: cashew + state: c_x_a + register: state4 + +- name: assert state4 + assert: + that: + - state4.a == 4 + - state4.c == "cashew" + - state4.result == "cashewcashewcashewcashew" + - state4.changed is true + +- name: test mstate 5 + mstate: + a: 5 + b: foo + c: bar + state: both_x_a + register: state5 + +- name: assert state5 + assert: + that: + - state5.a == 5 + - state5.b == "foo" + - state5.c == "bar" + - state5.result == "foobarfoobarfoobarfoobarfoobar" + - state5.changed is true diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index 82a8f2c144..1402fa07d6 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -9,7 +9,7 @@ __metaclass__ = type import pytest from ansible_collections.community.general.plugins.module_utils.module_helper import ( - ArgFormat, DependencyCtxMgr, ModuleHelper + ArgFormat, DependencyCtxMgr, ModuleHelper, VarMeta ) @@ -105,3 +105,58 @@ def test_dependency_ctxmgr(): with ctx: import sys assert ctx.has_it + + +def test_variable_meta(): + meta = VarMeta() + assert meta.output is False + assert meta.diff is False + assert meta.value is None + meta.set_value("abc") + assert meta.initial_value == "abc" + assert meta.value == "abc" + assert meta.diff_result is None + meta.set_value("def") + assert 
meta.initial_value == "abc" + assert meta.value == "def" + assert meta.diff_result is None + + +def test_variable_meta_diff(): + meta = VarMeta(diff=True) + assert meta.output is False + assert meta.diff is True + assert meta.value is None + meta.set_value("abc") + assert meta.initial_value == "abc" + assert meta.value == "abc" + assert meta.diff_result is None + meta.set_value("def") + assert meta.initial_value == "abc" + assert meta.value == "def" + assert meta.diff_result == {"before": "abc", "after": "def"} + meta.set_value("ghi") + assert meta.initial_value == "abc" + assert meta.value == "ghi" + assert meta.diff_result == {"before": "abc", "after": "ghi"} + + +def test_vardict(): + vd = ModuleHelper.VarDict() + vd.set('a', 123) + assert vd['a'] == 123 + assert vd.a == 123 + assert 'a' in vd._meta + assert vd.meta('a').output is True + assert vd.meta('a').diff is False + assert vd.meta('a').change is False + vd['b'] = 456 + vd.set_meta('a', diff=True, change=True) + vd.set_meta('b', diff=True, output=False) + vd['c'] = 789 + vd['a'] = 'new_a' + vd['c'] = 'new_c' + assert vd.a == 'new_a' + assert vd.c == 'new_c' + assert vd.output() == {'a': 'new_a', 'c': 'new_c'} + assert vd.diff() == {'before': {'a': 123}, 'after': {'a': 'new_a'}}, "diff={0}".format(vd.diff()) From eb851d420857981f2753869234e2c6fefa2a46f4 Mon Sep 17 00:00:00 2001 From: quidame Date: Mon, 5 Apr 2021 16:37:13 +0200 Subject: [PATCH 0159/3093] replace inline clear password by environment variable (#2177) * replace inline clear password by environment variable on a per-command basis. 
* add changelog fragment * update related unit tests * Update changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml Co-authored-by: Felix Fontein * fix unit test: force result without lambda Co-authored-by: Felix Fontein --- ...tore_1668_dont_expose_secrets_on_cmdline.yml | 4 ++++ plugins/modules/system/java_keystore.py | 17 ++++++++++------- .../modules/system/test_java_keystore.py | 8 ++++---- 3 files changed, 18 insertions(+), 11 deletions(-) create mode 100644 changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml diff --git a/changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml b/changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml new file mode 100644 index 0000000000..0d961a53ac --- /dev/null +++ b/changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml @@ -0,0 +1,4 @@ +--- +security_fixes: + - "java_keystore - pass secret to keytool through an environment variable to not expose it as a + commandline argument (https://github.com/ansible-collections/community.general/issues/1668)." 
diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py index 82bd03678c..feab757f58 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/system/java_keystore.py @@ -146,8 +146,9 @@ def read_certificate_fingerprint(module, openssl_bin, certificate_path): def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_password): - stored_certificate_fingerprint_cmd = [keytool_bin, "-list", "-alias", alias, "-keystore", keystore_path, "-storepass", keystore_password, "-v"] - (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = run_commands(module, stored_certificate_fingerprint_cmd) + stored_certificate_fingerprint_cmd = [keytool_bin, "-list", "-alias", alias, "-keystore", keystore_path, "-storepass:env", "STOREPASS", "-v"] + (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = run_commands( + module, stored_certificate_fingerprint_cmd, environ_update=dict(STOREPASS=keystore_password)) if rc != 0: if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias not in stored_certificate_fingerprint_out: return module.fail_json(msg=stored_certificate_fingerprint_out, @@ -168,8 +169,8 @@ def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_pat return stored_certificate_match.group(1) -def run_commands(module, cmd, data=None, check_rc=True): - return module.run_command(cmd, check_rc=check_rc, data=data) +def run_commands(module, cmd, data=None, environ_update=None, check_rc=True): + return module.run_command(cmd, check_rc=check_rc, data=data, environ_update=environ_update) def create_path(): @@ -236,10 +237,12 @@ def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, "-srckeystore", keystore_p12_path, "-srcstoretype", "pkcs12", "-alias", name, - "-deststorepass", password, - "-srcstorepass", password, + "-deststorepass:env", "STOREPASS", + "-srcstorepass:env", "STOREPASS", 
"-noprompt"] - (rc, import_keystore_out, import_keystore_err) = run_commands(module, import_keystore_cmd, data=None) + + (rc, import_keystore_out, import_keystore_err) = run_commands(module, import_keystore_cmd, data=None, + environ_update=dict(STOREPASS=password)) if rc == 0: update_jks_perm(module, keystore_path) return module.exit_json(changed=True, diff --git a/tests/unit/plugins/modules/system/test_java_keystore.py b/tests/unit/plugins/modules/system/test_java_keystore.py index 68863c149e..409e956799 100644 --- a/tests/unit/plugins/modules/system/test_java_keystore.py +++ b/tests/unit/plugins/modules/system/test_java_keystore.py @@ -71,14 +71,14 @@ class TestCreateJavaKeystore(ModuleTestCase): with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp'] - self.run_commands.side_effect = lambda module, cmd, data: (0, '', '') + self.run_commands.side_effect = [(0, '', ''), (0, '', '')] create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "") module.exit_json.assert_called_once_with( changed=True, cmd=["keytool", "-importkeystore", "-destkeystore", "/path/to/keystore.jks", "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", - "-deststorepass", "changeit", "-srcstorepass", "changeit", "-noprompt"], + "-deststorepass:env", "STOREPASS", "-srcstorepass:env", "STOREPASS", "-noprompt"], msg='', rc=0, stdout_lines='' @@ -173,7 +173,7 @@ class TestCreateJavaKeystore(ModuleTestCase): cmd=["keytool", "-importkeystore", "-destkeystore", "/path/to/keystore.jks", "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", - "-deststorepass", "changeit", "-srcstorepass", "changeit", "-noprompt"], + "-deststorepass:env", "STOREPASS", "-srcstorepass:env", "STOREPASS", "-noprompt"], msg='', rc=1 ) @@ -306,7 +306,7 @@ class TestCertChanged(ModuleTestCase): self.run_commands.side_effect = [(0, 
'foo: wxyz:9876:stuv', ''), (1, '', 'Oops')] cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') module.fail_json.assert_called_with( - cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-storepass", "changeit", "-v"], + cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-storepass:env", "STOREPASS", "-v"], msg='', err='Oops', rc=1 From 6bea8215c9dfe2e98826379a16d611d14c42a111 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 6 Apr 2021 07:32:59 +0200 Subject: [PATCH 0160/3093] ansible/ansible's stable-2.11 branch has been created. (#2130) --- .azure-pipelines/azure-pipelines.yml | 82 ++++++++++++++ README.md | 2 +- shippable.yml | 5 + tests/sanity/ignore-2.12.txt | 158 +++++++++++++++++++++++++++ 4 files changed, 246 insertions(+), 1 deletion(-) create mode 100644 tests/sanity/ignore-2.12.txt diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index c6f546a485..2aa559a03f 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -56,6 +56,19 @@ stages: - test: 3 - test: 4 - test: extra + - stage: Sanity_2_11 + displayName: Sanity 2.11 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Test {0} + testFormat: 2.11/sanity/{0} + targets: + - test: 1 + - test: 2 + - test: 3 + - test: 4 - stage: Sanity_2_10 displayName: Sanity 2.10 dependsOn: [] @@ -99,6 +112,22 @@ stages: - test: 3.7 - test: 3.8 - test: 3.9 + - stage: Units_2_11 + displayName: Units 2.11 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: 2.11/units/{0}/1 + targets: + - test: 2.6 + - test: 2.7 + - test: 3.5 + - test: 3.6 + - test: 3.7 + - test: 3.8 + - test: 3.9 - stage: Units_2_10 displayName: Units 2.10 dependsOn: [] @@ -154,6 +183,25 @@ stages: - 1 - 2 - 3 + - stage: Remote_2_11 + displayName: Remote 2.11 + dependsOn: [] + jobs: + - 
template: templates/matrix.yml + parameters: + testFormat: 2.11/{0} + targets: + - name: macOS 11.1 + test: macos/11.1 + - name: RHEL 7.9 + test: rhel/7.9 + - name: RHEL 8.3 + test: rhel/8.3 + - name: FreeBSD 12.2 + test: freebsd/12.2 + groups: + - 1 + - 2 - stage: Remote_2_10 displayName: Remote 2.10 dependsOn: [] @@ -224,6 +272,25 @@ stages: - 1 - 2 - 3 + - stage: Docker_2_11 + displayName: Docker 2.11 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + testFormat: 2.11/linux/{0} + targets: + - name: CentOS 8 + test: centos8 + - name: Fedora 32 + test: fedora33 + - name: openSUSE 15 py3 + test: opensuse15 + - name: Ubuntu 20.04 + test: ubuntu2004 + groups: + - 2 + - 3 - stage: Docker_2_10 displayName: Docker 2.10 dependsOn: [] @@ -270,6 +337,16 @@ stages: parameters: nameFormat: Python {0} testFormat: devel/cloud/{0}/1 + targets: + - test: 3.8 + - stage: Cloud_2_11 + displayName: Cloud 2.11 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: 2.11/cloud/{0}/1 targets: - test: 2.7 - test: 3.6 @@ -299,17 +376,22 @@ stages: - Sanity_devel - Sanity_2_9 - Sanity_2_10 + - Sanity_2_11 - Units_devel - Units_2_9 - Units_2_10 + - Units_2_11 - Remote_devel - Remote_2_9 - Remote_2_10 + - Remote_2_11 - Docker_devel - Docker_2_9 - Docker_2_10 + - Docker_2_11 - Cloud_devel - Cloud_2_9 - Cloud_2_10 + - Cloud_2_11 jobs: - template: templates/coverage.yml diff --git a/README.md b/README.md index 0f35d7d753..935f0ecabd 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ You can find [documentation for this collection on the Ansible docs site](https: ## Tested with Ansible -Tested with the current Ansible 2.9 and 2.10 releases and the current development version of Ansible. Ansible versions before 2.9.10 are not supported. +Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. 
Ansible versions before 2.9.10 are not supported. ## External requirements diff --git a/shippable.yml b/shippable.yml index bb907d21b1..7cbbdc24e7 100644 --- a/shippable.yml +++ b/shippable.yml @@ -13,6 +13,11 @@ matrix: - env: T=devel/sanity/3 - env: T=devel/sanity/4 + - env: T=2.11/sanity/1 + - env: T=2.11/sanity/2 + - env: T=2.11/sanity/3 + - env: T=2.11/sanity/4 + - env: T=2.10/sanity/1 - env: T=2.10/sanity/2 - env: T=2.10/sanity/3 diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt new file mode 100644 index 0000000000..547a788be0 --- /dev/null +++ b/tests/sanity/ignore-2.12.txt @@ -0,0 +1,158 @@ +plugins/module_utils/compat/ipaddress.py no-assert +plugins/module_utils/compat/ipaddress.py no-unicode-literals +plugins/module_utils/_mount.py future-import-boilerplate +plugins/module_utils/_mount.py metaclass-boilerplate +plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc +plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter +plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path +plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen +plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice +plugins/modules/cloud/online/online_server_facts.py validate-modules:return-syntax-error +plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error +plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error +plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error +plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:parameter-list-no-elements 
+plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_event_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:undocumented-parameter +plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-type-not-in-doc +plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_permission_facts.py 
validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-type-not-in-doc +plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-type-not-in-doc +plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_vm_facts.py 
validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc +plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type +plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed +plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice +plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path +plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values +plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_snapshot_info.py 
validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error +plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error +plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc +plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter +plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter +plugins/modules/cloud/univention/udm_dns_record.py validate-modules:parameter-type-not-in-doc +plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type-not-in-doc +plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter +plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements +plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-required-mismatch +plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs +plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc +plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter +plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type +plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter +plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice 
+plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented +plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code +plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type +plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc +plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code +plugins/modules/notification/grove.py validate-modules:invalid-argument-name +plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid +plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid +plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid +plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid +plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid +plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid +plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error +plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid +plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid +plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid +plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc +plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc +plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs 
on suboptions +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions +plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions +plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice +plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec +plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc +plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter +plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid +plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:parameter-list-no-elements +plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-list-no-elements +plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-type-not-in-doc +plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-state-invalid-choice +plugins/modules/storage/purestorage/purefa_facts.py validate-modules:doc-required-mismatch +plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-list-no-elements +plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error +plugins/modules/storage/purestorage/purefb_facts.py validate-modules:parameter-list-no-elements +plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error +plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice 
+plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter +plugins/modules/system/launchd.py use-argspec-type-path # False positive +plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice +plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice +plugins/modules/system/puppet.py use-argspec-type-path +plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented +plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc +plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc +plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path +plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice +plugins/modules/system/xfconf.py validate-modules:return-syntax-error +plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path +tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code +tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code +tests/utils/shippable/check_matrix.py replace-urlopen +tests/utils/shippable/timing.py shebang From 9a5191d1f934560f82525af0f93df366c937c54d Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 6 Apr 2021 22:49:50 +1200 Subject: [PATCH 0161/3093] xfconf - state absent was not honoring check_mode (#2185) * state absent was not honoring check_mode * added changelog fragment --- changelogs/fragments/2185-xfconf-absent-check-mode.yml | 2 ++ plugins/modules/system/xfconf.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2185-xfconf-absent-check-mode.yml diff --git a/changelogs/fragments/2185-xfconf-absent-check-mode.yml 
b/changelogs/fragments/2185-xfconf-absent-check-mode.yml new file mode 100644 index 0000000000..059f4acd9a --- /dev/null +++ b/changelogs/fragments/2185-xfconf-absent-check-mode.yml @@ -0,0 +1,2 @@ +bugfixes: + - xfconf - module was not honoring check mode when ``state`` was ``absent`` (https://github.com/ansible-collections/community.general/pull/2185). diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index b6e6110e87..459991747c 100644 --- a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -237,8 +237,9 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): self.update_xfconf_output(value=self.vars.value) def state_absent(self): + if not self.module.check_mode: + self.run_command(params=('channel', 'property', {'reset': True})) self.vars.value = None - self.run_command(params=('channel', 'property', {'reset': True})) self.update_xfconf_output(previous_value=self.vars.previous_value, value=None) From 9aec9b502e69cf9badce76f1d6f57626da7ffe8c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 7 Apr 2021 07:21:05 +1200 Subject: [PATCH 0162/3093] Applying ModuleHelper variable mgmt to xfconf -> improvements on MH (#2188) * applying MH variable mgmt to xfconf - improvements on MH * added changelog fragment --- .../2188-xfconf-modhelper-variables.yml | 3 ++ plugins/module_utils/module_helper.py | 37 +++++++++----- plugins/modules/system/xfconf.py | 48 +++++++------------ .../module_utils/test_module_helper.py | 4 +- 4 files changed, 48 insertions(+), 44 deletions(-) create mode 100644 changelogs/fragments/2188-xfconf-modhelper-variables.yml diff --git a/changelogs/fragments/2188-xfconf-modhelper-variables.yml b/changelogs/fragments/2188-xfconf-modhelper-variables.yml new file mode 100644 index 0000000000..19e94254bd --- /dev/null +++ b/changelogs/fragments/2188-xfconf-modhelper-variables.yml @@ -0,0 +1,3 @@ +minor_changes: + - module_helper module utils - 
added management of facts and adhoc setting of the initial value for variables (https://github.com/ansible-collections/community.general/pull/2188). + - xfconf - changed implementation to use ``ModuleHelper`` new features (https://github.com/ansible-collections/community.general/pull/2188). diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index 44758c8733..6357eae25c 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -159,7 +159,9 @@ class DependencyCtxMgr(object): class VarMeta(object): - def __init__(self, diff=False, output=False, change=None): + NOTHING = object() + + def __init__(self, diff=False, output=True, change=None, fact=False): self.init = False self.initial_value = None self.value = None @@ -167,14 +169,19 @@ class VarMeta(object): self.diff = diff self.change = diff if change is None else change self.output = output + self.fact = fact - def set(self, diff=None, output=None, change=None): + def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING): if diff is not None: self.diff = diff if output is not None: self.output = output if change is not None: self.change = change + if fact is not None: + self.fact = fact + if initial_value is not self.NOTHING: + self.initial_value = initial_value def set_value(self, value): if not self.init: @@ -208,6 +215,7 @@ class ModuleHelper(object): output_params = () diff_params = () change_params = () + facts_params = () class VarDict(object): def __init__(self): @@ -245,8 +253,6 @@ class ModuleHelper(object): if name in self._meta: meta = self.meta(name) else: - if 'output' not in kwargs: - kwargs['output'] = True meta = VarMeta(**kwargs) meta.set_value(value) self._meta[name] = meta @@ -261,9 +267,12 @@ class ModuleHelper(object): before = dict((dr[0], dr[1]['before']) for dr in diff_results) after = dict((dr[0], dr[1]['after']) for dr in diff_results) return {'before': before, 'after': after} - return 
None + def facts(self): + facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact) + return facts_result if facts_result else None + def change_vars(self): return [v for v in self._data if self.meta(v).change] @@ -272,8 +281,6 @@ class ModuleHelper(object): def __init__(self, module=None): self.vars = ModuleHelper.VarDict() - self.output_dict = dict() - self.facts_dict = dict() self._changed = False if module: @@ -288,13 +295,20 @@ class ModuleHelper(object): diff=name in self.diff_params, output=name in self.output_params, change=None if not self.change_params else name in self.change_params, + fact=name in self.facts_params, ) + def update_vars(self, meta=None, **kwargs): + if meta is None: + meta = {} + for k, v in kwargs.items(): + self.vars.set(k, v, **meta) + def update_output(self, **kwargs): - self.output_dict.update(kwargs) + self.update_vars(meta={"output": True}, **kwargs) def update_facts(self, **kwargs): - self.facts_dict.update(kwargs) + self.update_vars(meta={"fact": True}, **kwargs) def __init_module__(self): pass @@ -322,9 +336,10 @@ class ModuleHelper(object): @property def output(self): result = dict(self.vars.output()) - result.update(self.output_dict) if self.facts_name: - result['ansible_facts'] = {self.facts_name: self.facts_dict} + facts = self.vars.facts() + if facts is not None: + result['ansible_facts'] = {self.facts_name: facts} if self.module._diff: diff = result.get('diff', {}) vars_diff = self.vars.diff() or {} diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index 459991747c..f2975df050 100644 --- a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -143,10 +143,7 @@ def values_fmt(values, value_types): for value, value_type in zip(values, value_types): if value_type == 'bool': value = fix_bool(value) - result.append('--type') - result.append('{0}'.format(value_type)) - result.append('--set') - result.append('{0}'.format(value)) + result.extend(['--type', 
'{0}'.format(value_type), '--set', '{0}'.format(value)]) return result @@ -155,6 +152,10 @@ class XFConfException(Exception): class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): + change_params = 'value', + diff_params = 'value', + output_params = ('property', 'channel', 'value') + facts_params = ('property', 'channel', 'value') module = dict( argument_spec=dict( state=dict(default="present", @@ -185,17 +186,15 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): ) def update_xfconf_output(self, **kwargs): - self.update_output(**kwargs) - if not self.module.params['disable_facts']: - self.update_facts(**kwargs) + self.update_vars(meta={"output": True, "fact": True}, **kwargs) def __init_module__(self): self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.module.params['property'], self.module.params['channel']) - self.vars.previous_value = self._get() - self.update_xfconf_output(property=self.module.params['property'], - channel=self.module.params['channel'], - previous_value=None) + self.vars.set('previous_value', self._get(), fact=True) + self.vars.set('type', self.vars.value_type, fact=True) + self.vars.meta('value').set(initial_value=self.vars.previous_value) + if not self.module.params['disable_facts']: self.facts_name = "xfconf" self.module.deprecate( @@ -220,34 +219,23 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): return result - @property - def changed(self): - if self.vars.previous_value is None: - return self.vars.value is not None - elif self.vars.value is None: - return self.vars.previous_value is not None - else: - return set(self.vars.previous_value) != set(self.vars.value) - def _get(self): return self.run_command(params=('channel', 'property')) def state_get(self): self.vars.value = self.vars.previous_value - self.update_xfconf_output(value=self.vars.value) + self.vars.previous_value = None def state_absent(self): if not self.module.check_mode: self.run_command(params=('channel', 'property', 
{'reset': True})) self.vars.value = None - self.update_xfconf_output(previous_value=self.vars.previous_value, - value=None) def state_present(self): # stringify all values - in the CLI they will all be happy strings anyway # and by doing this here the rest of the code can be agnostic to it - self.vars.value = [str(v) for v in self.module.params['value']] - value_type = self.module.params['value_type'] + self.vars.value = [str(v) for v in self.vars.value] + value_type = self.vars.value_type values_len = len(self.vars.value) types_len = len(value_type) @@ -264,7 +252,7 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): # calculates if it is an array self.vars.is_array = \ - bool(self.module.params['force_array']) or \ + bool(self.vars.force_array) or \ isinstance(self.vars.previous_value, list) or \ values_len > 1 @@ -278,11 +266,9 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): if not self.vars.is_array: self.vars.value = self.vars.value[0] - value_type = value_type[0] - - self.update_xfconf_output(previous_value=self.vars.previous_value, - value=self.vars.value, - type=value_type) + self.vars.type = value_type[0] + else: + self.vars.type = value_type def main(): diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index 1402fa07d6..b8ea36501c 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -109,7 +109,7 @@ def test_dependency_ctxmgr(): def test_variable_meta(): meta = VarMeta() - assert meta.output is False + assert meta.output is True assert meta.diff is False assert meta.value is None meta.set_value("abc") @@ -124,7 +124,7 @@ def test_variable_meta(): def test_variable_meta_diff(): meta = VarMeta(diff=True) - assert meta.output is False + assert meta.output is True assert meta.diff is True assert meta.value is None meta.set_value("abc") From beb3b85a4fdb5c94f55771857adc7da4a99a916c Mon Sep 
17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 7 Apr 2021 18:14:03 +1200 Subject: [PATCH 0163/3093] jira - changing the logic for transition (#1978) * attempt at fixing the issue * Update plugins/modules/web_infrastructure/jira.py * Fixed setting of "fields" element in the payload * added changelog fragment * added accountId parameter + minor fixes in docs * added integration test for jira * adjustments per PR * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein * adjustments per PR Co-authored-by: Felix Fontein --- .../fragments/1978-jira-transition-logic.yml | 4 ++ plugins/modules/web_infrastructure/jira.py | 52 +++++++++++++---- tests/integration/targets/jira/aliases | 2 + tests/integration/targets/jira/tasks/main.yml | 58 +++++++++++++++++++ tests/integration/targets/jira/vars/main.yml | 7 +++ 5 files changed, 112 insertions(+), 11 deletions(-) create mode 100644 changelogs/fragments/1978-jira-transition-logic.yml create mode 100644 tests/integration/targets/jira/aliases create mode 100644 tests/integration/targets/jira/tasks/main.yml create mode 100644 tests/integration/targets/jira/vars/main.yml diff --git a/changelogs/fragments/1978-jira-transition-logic.yml b/changelogs/fragments/1978-jira-transition-logic.yml new file mode 100644 index 0000000000..12b4adc56d --- /dev/null +++ b/changelogs/fragments/1978-jira-transition-logic.yml @@ -0,0 +1,4 @@ +bugfixes: + - jira - fixed fields' update in ticket transitions (https://github.com/ansible-collections/community.general/issues/818). 
+minor_changes: + - jira - added parameter ``account_id`` for compatibility with recent versions of JIRA (https://github.com/ansible-collections/community.general/issues/818, https://github.com/ansible-collections/community.general/pull/1978). diff --git a/plugins/modules/web_infrastructure/jira.py b/plugins/modules/web_infrastructure/jira.py index d10be9eafc..51810f6b97 100644 --- a/plugins/modules/web_infrastructure/jira.py +++ b/plugins/modules/web_infrastructure/jira.py @@ -56,12 +56,14 @@ options: required: false description: - The issue summary, where appropriate. + - Note that JIRA may not allow changing field values on specific transitions or states. description: type: str required: false description: - The issue description, where appropriate. + - Note that JIRA may not allow changing field values on specific transitions or states. issuetype: type: str @@ -81,18 +83,28 @@ options: required: false description: - The comment text to add. + - Note that JIRA may not allow changing field values on specific transitions or states. status: type: str required: false description: - - The desired status; only relevant for the transition operation. + - Only used when I(operation) is C(transition), and a bit of a misnomer, it actually refers to the transition name. assignee: type: str required: false description: - - Sets the assignee on create or transition operations. Note not all transitions will allow this. + - Sets the the assignee when I(operation) is C(create), C(transition) or C(edit). + - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use I(account_id) instead. + - Note that JIRA may not allow changing field values on specific transitions or states. + + account_id: + type: str + description: + - Sets the account identifier for the assignee when I(operation) is C(create), C(transition) or C(edit). + - Note that JIRA may not allow changing field values on specific transitions or states. 
+ version_added: 2.5.0 linktype: type: str @@ -119,6 +131,7 @@ options: - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly after merging with other required data, as when passed to create). See examples for more information, and the JIRA REST API for the structure required for various fields. + - Note that JIRA may not allow changing field values on specific transitions or states. jql: required: false @@ -151,6 +164,7 @@ options: notes: - "Currently this only works with basic-auth." + - "To use with JIRA Cloud, pass the login e-mail as the I(username) and the API token as I(password)." author: - "Steve Smith (@tarka)" @@ -172,7 +186,7 @@ EXAMPLES = r""" args: fields: customfield_13225: "test" - customfield_12931: '{"value": "Test"}' + customfield_12931: {"value": "Test"} register: issue - name: Comment on issue @@ -282,20 +296,20 @@ EXAMPLES = r""" inwardissue: HSP-1 outwardissue: MKY-1 -# Transition an issue by target status -- name: Close the issue +# Transition an issue +- name: Resolve the issue community.general.jira: uri: '{{ server }}' username: '{{ user }}' password: '{{ pass }}' issue: '{{ issue.meta.key }}' operation: transition - status: Done - args: + status: Resolve Issue + account_id: 112233445566778899aabbcc fields: - customfield_14321: [ {'set': {'value': 'Value of Select' }} ] - comment: [ { 'add': { 'body' : 'Test' } }] - + resolution: + name: Done + description: I am done! This is the last description I will ever give you. 
""" import base64 @@ -440,10 +454,22 @@ def transition(restbase, user, passwd, params): if not tid: raise ValueError("Failed find valid transition for '%s'" % target) + fields = dict(params['fields']) + if params['summary'] is not None: + fields.update({'summary': params['summary']}) + if params['description'] is not None: + fields.update({'description': params['description']}) + # Perform it url = restbase + '/issue/' + params['issue'] + "/transitions" data = {'transition': {"id": tid}, - 'update': params['fields']} + 'fields': fields} + if params['comment'] is not None: + data.update({"update": { + "comment": [{ + "add": {"body": params['comment']} + }], + }}) return True, post(url, user, passwd, params['timeout'], data) @@ -486,6 +512,7 @@ def main(): maxresults=dict(type='int'), timeout=dict(type='float', default=10), validate_certs=dict(default=True, type='bool'), + account_id=dict(type='str'), ), required_if=( ('operation', 'create', ['project', 'issuetype', 'summary']), @@ -495,6 +522,7 @@ def main(): ('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']), ('operation', 'search', ['jql']), ), + mutually_exclusive=[('assignee', 'account_id')], supports_check_mode=False ) @@ -506,6 +534,8 @@ def main(): passwd = module.params['password'] if module.params['assignee']: module.params['fields']['assignee'] = {'name': module.params['assignee']} + if module.params['account_id']: + module.params['fields']['assignee'] = {'accountId': module.params['account_id']} if not uri.endswith('/'): uri = uri + '/' diff --git a/tests/integration/targets/jira/aliases b/tests/integration/targets/jira/aliases new file mode 100644 index 0000000000..c368f6e800 --- /dev/null +++ b/tests/integration/targets/jira/aliases @@ -0,0 +1,2 @@ +unsupported +shippable/posix/group3 diff --git a/tests/integration/targets/jira/tasks/main.yml b/tests/integration/targets/jira/tasks/main.yml new file mode 100644 index 0000000000..824de09a89 --- /dev/null +++ 
b/tests/integration/targets/jira/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- community.general.jira: + uri: "{{ uri }}" + username: "{{ user }}" + password: "{{ pasw }}" + project: "{{ proj }}" + operation: create + summary: test ticket + description: bla bla bla + issuetype: Task + register: issue + +- debug: + msg: Issue={{ issue }} +- name: Add comment bleep bleep + community.general.jira: + uri: "{{ uri }}" + username: "{{ user }}" + password: "{{ pasw }}" + issue: "{{ issue.meta.key }}" + operation: comment + comment: bleep bleep! +- name: Transition -> In Progress with comment + community.general.jira: + uri: "{{ uri }}" + username: "{{ user }}" + password: "{{ pasw }}" + issue: "{{ issue.meta.key }}" + operation: transition + status: Start Progress + comment: -> in progress +- name: Change assignee + community.general.jira: + uri: "{{ uri }}" + username: "{{ user }}" + password: "{{ pasw }}" + issue: "{{ issue.meta.key }}" + operation: edit + accountId: "{{ user2 }}" +- name: Transition -> Resolved with comment + community.general.jira: + uri: "{{ uri }}" + username: "{{ user }}" + password: "{{ pasw }}" + issue: "{{ issue.meta.key }}" + operation: transition + status: Resolve Issue + comment: -> resolved + accountId: "{{ user1 }}" + fields: + resolution: + name: Done + description: wakawakawakawaka + +- debug: + msg: + - Issue = {{ issue.meta.key }} + - URL = {{ issue.meta.self }} diff --git a/tests/integration/targets/jira/vars/main.yml b/tests/integration/targets/jira/vars/main.yml new file mode 100644 index 0000000000..f170cefcc9 --- /dev/null +++ b/tests/integration/targets/jira/vars/main.yml @@ -0,0 +1,7 @@ +--- +uri: https://xxxx.atlassian.net/ +user: xxx@xxxx.xxx +pasw: supersecret +proj: ABC +user1: 6574474636373822y7338 +user2: 6574474636373822y73959696 From 7145204594a30bfd9dcb087aa72a019baaa22224 Mon Sep 17 00:00:00 2001 From: Norman Ziegner Date: Wed, 7 Apr 2021 19:29:10 +0200 Subject: [PATCH 0164/3093] Fix HAProxy draining (#1993) * Fix HAProxy 
draining by manually entering the 'MAINT' state Inspired by rldleblanc: https://github.com/ansible/ansible/issues/37591#issuecomment-610130611 Signed-off-by: Norman Ziegner * Add changelog fragment Signed-off-by: Norman Ziegner * Fix drain function docstring Signed-off-by: Norman Ziegner * Fix typos Signed-off-by: Norman Ziegner * Update changelog fragment Signed-off-by: Norman Ziegner --- .../fragments/1993-haproxy-fix-draining.yml | 3 +++ plugins/modules/net_tools/haproxy.py | 15 ++++++++------- 2 files changed, 11 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/1993-haproxy-fix-draining.yml diff --git a/changelogs/fragments/1993-haproxy-fix-draining.yml b/changelogs/fragments/1993-haproxy-fix-draining.yml new file mode 100644 index 0000000000..fd5c77f573 --- /dev/null +++ b/changelogs/fragments/1993-haproxy-fix-draining.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - haproxy - fix a bug preventing haproxy from properly entering ``DRAIN`` mode (https://github.com/ansible-collections/community.general/issues/1913). diff --git a/plugins/modules/net_tools/haproxy.py b/plugins/modules/net_tools/haproxy.py index 848cc1faeb..8efb59ed2e 100644 --- a/plugins/modules/net_tools/haproxy.py +++ b/plugins/modules/net_tools/haproxy.py @@ -367,10 +367,9 @@ class HAProxy(object): # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching if status in state[0]['status']: - if not self._drain or (state[0]['scur'] == '0' and 'MAINT' in state): + if not self._drain or state[0]['scur'] == '0': return True - else: - time.sleep(self.wait_interval) + time.sleep(self.wait_interval) self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." 
% (pxname, svname, status, self.wait_retries)) @@ -409,15 +408,17 @@ class HAProxy(object): def drain(self, host, backend, status='DRAIN'): """ Drain action, sets the server to DRAIN mode. - In this mode mode, the server will not accept any new connections + In this mode, the server will not accept any new connections other than those that are accepted via persistence. """ haproxy_version = self.discover_version() - # check if haproxy version suppots DRAIN state (starting with 1.5) + # check if haproxy version supports DRAIN state (starting with 1.5) if haproxy_version and (1, 5) <= haproxy_version: cmd = "set server $pxname/$svname state drain" - self.execute_for_backends(cmd, backend, host, status) + self.execute_for_backends(cmd, backend, host, "DRAIN") + if status == "MAINT": + self.disabled(host, backend, self.shutdown_sessions) def act(self): """ @@ -426,7 +427,7 @@ class HAProxy(object): # Get the state before the run self.command_results['state_before'] = self.get_state_for(self.backend, self.host) - # toggle enable/disbale server + # toggle enable/disable server if self.state == 'enabled': self.enabled(self.host, self.backend, self.weight) elif self.state == 'disabled' and self._drain: From 40ce0f995b8c968781c096cc49eda3e34662acb3 Mon Sep 17 00:00:00 2001 From: absynth76 <58172580+absynth76@users.noreply.github.com> Date: Wed, 7 Apr 2021 19:31:58 +0200 Subject: [PATCH 0165/3093] Update java_cert module (#2008) * porting https://github.com/ansible/ansible/pull/56778 as requested in https://github.com/ansible-collections/community.general/issues/821 * fix imports, add back trust_cacerts option * try to fix import, ansible-lint fixes * modify import to use ansible.module_utils.six instead * cleanup indentation for tests/integration/targets/java_cert/tasks/main.yml file * remove external crypto dependency - switch to openssl, work on password obfuscation, using files compare to reduce logic * java_cert - remove latest run_command using password in arguments * 
fix sanity check * rename changelog fragment file - wrong extension * add openssl dependency * fix openssl_bin parameter missing on _get_digest_from_x509_file function call * remove useless close files, fix paragraph, fix changelog, clean import re * fix missing dots at end-of-line in changelogs fragments * fix reminder case * fix changelog * restore .gitignore * fix indentation on integration test files, delete useless json file * fix typo importing tasks in tests/integration/targets/java_cert/tasks/main.yml * Update changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml Co-authored-by: Felix Fontein * Update tests/integration/targets/java_cert/tasks/state_change.yml Co-authored-by: Felix Fontein * Update plugins/modules/system/java_cert.py Co-authored-by: Felix Fontein * Update plugins/modules/system/java_cert.py Co-authored-by: Felix Fontein * Update plugins/modules/system/java_cert.py Co-authored-by: Felix Fontein * Update plugins/modules/system/java_cert.py Co-authored-by: Felix Fontein * Update plugins/modules/system/java_cert.py Co-authored-by: Felix Fontein * Update plugins/modules/system/java_cert.py Co-authored-by: Felix Fontein * Update plugins/modules/system/java_cert.py Co-authored-by: Felix Fontein * fix hardcoded executable keytool, use re.sub instead of import, add required cert_url or cert_alias parameter when absent, fix python script and cert_url test * fix pylint issue with setupSSLServeR.py Co-authored-by: Felix Fontein --- ...te-java-cert-replace-cert-when-changed.yml | 7 + plugins/modules/system/java_cert.py | 305 +++++++++++++----- .../targets/java_cert/defaults/main.yml | 12 +- .../targets/java_cert/files/setupSSLServer.py | 20 ++ .../targets/java_cert/meta/main.yml | 1 + .../targets/java_cert/tasks/main.yml | 93 ++++-- .../targets/java_cert/tasks/state_change.yml | 169 ++++++++++ 7 files changed, 496 insertions(+), 111 deletions(-) create mode 100644 changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml 
create mode 100644 tests/integration/targets/java_cert/files/setupSSLServer.py create mode 100644 tests/integration/targets/java_cert/tasks/state_change.yml diff --git a/changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml b/changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml new file mode 100644 index 0000000000..8cfda91016 --- /dev/null +++ b/changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml @@ -0,0 +1,7 @@ +minor_changes: + - "java_cert - change ``state: present`` to check certificates by hash, not just alias name (https://github.com/ansible/ansible/issues/43249)." +bugfixes: + - "java_cert - allow setting ``state: absent`` by providing just the ``cert_alias`` (https://github.com/ansible/ansible/issues/27982)." + - "java_cert - properly handle proxy arguments when the scheme is provided (https://github.com/ansible/ansible/issues/54481)." +security_fixes: + - "java_cert - remove password from ``run_command`` arguments (https://github.com/ansible-collections/community.general/pull/2008)." diff --git a/plugins/modules/system/java_cert.py b/plugins/modules/system/java_cert.py index 6594ed235b..ad56358034 100644 --- a/plugins/modules/system/java_cert.py +++ b/plugins/modules/system/java_cert.py @@ -10,6 +10,7 @@ __metaclass__ = type DOCUMENTATION = r''' --- module: java_cert + short_description: Uses keytool to import/remove key from java keystore (cacerts) description: - This is a wrapper module around keytool, which can be used to import/remove @@ -81,9 +82,12 @@ options: state: description: - Defines action which can be either certificate import or removal. + - When state is present, the certificate will always idempotently be inserted + into the keystore, even if there already exists a cert alias that is different. 
type: str choices: [ absent, present ] default: present +requirements: [openssl, keytool] author: - Adam Hamsik (@haad) ''' @@ -166,41 +170,143 @@ cmd: ''' import os +import tempfile +import random +import string import re + # import module snippets from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.six.moves.urllib.request import getproxies -def get_keystore_type(keystore_type): +def _get_keystore_type_keytool_parameters(keystore_type): ''' Check that custom keystore is presented in parameters ''' if keystore_type: - return " -storetype '%s'" % keystore_type - return '' + return ["-storetype", keystore_type] + return [] -def check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type): +def _check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type): ''' Check if certificate with alias is present in keystore located at keystore_path ''' - test_cmd = ("%s -noprompt -list -keystore '%s' -storepass '%s' " - "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type)) + test_cmd = [ + executable, + "-list", + "-keystore", + keystore_path, + "-alias", + alias, + "-rfc" + ] + test_cmd += _get_keystore_type_keytool_parameters(keystore_type) - check_rc, dummy, dummy = module.run_command(test_cmd) + (check_rc, stdout, dummy) = module.run_command(test_cmd, data=keystore_pass, check_rc=False) if check_rc == 0: - return True - return False + return (True, stdout) + return (False, '') -def import_cert_url(module, executable, url, port, keystore_path, keystore_pass, alias, keystore_type, trust_cacert): - ''' Import certificate from URL into keystore located at keystore_path ''' +def _get_certificate_from_url(module, executable, url, port, pem_certificate_output): + remote_cert_pem_chain = _download_cert_url(module, executable, url, port) + with open(pem_certificate_output, 'w') as 
f: + f.write(remote_cert_pem_chain) - https_proxy = os.getenv("https_proxy") + +def _get_first_certificate_from_x509_file(module, pem_certificate_file, pem_certificate_output, openssl_bin): + """ Read a X509 certificate chain file and output the first certificate in the list """ + extract_cmd = [ + openssl_bin, + "x509", + "-in", + pem_certificate_file, + "-out", + pem_certificate_output + ] + (extract_rc, dummy, extract_stderr) = module.run_command(extract_cmd, check_rc=False) + + if extract_rc != 0: + # trying der encoded file + extract_cmd += ["-inform", "der"] + (extract_rc, dummy, extract_stderr) = module.run_command(extract_cmd, check_rc=False) + + if extract_rc != 0: + # this time it's a real failure + module.fail_json(msg="Internal module failure, cannot extract certificate, error: %s" % extract_stderr, + rc=extract_rc, cmd=extract_cmd) + + return extract_rc + + +def _get_digest_from_x509_file(module, pem_certificate_file, openssl_bin): + """ Read a X509 certificate file and output sha256 digest using openssl """ + # cleanup file before to compare + (dummy, tmp_certificate) = tempfile.mkstemp() + module.add_cleanup_file(tmp_certificate) + _get_first_certificate_from_x509_file(module, pem_certificate_file, tmp_certificate, openssl_bin) + dgst_cmd = [ + openssl_bin, + "dgst", + "-r", + "-sha256", + tmp_certificate + ] + (dgst_rc, dgst_stdout, dgst_stderr) = module.run_command(dgst_cmd, check_rc=False) + + if dgst_rc != 0: + module.fail_json(msg="Internal module failure, cannot compute digest for certificate, error: %s" % dgst_stderr, + rc=dgst_rc, cmd=dgst_cmd) + + return dgst_stdout.split(' ')[0] + + +def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, password, dest): + """ Runs keytools to extract the public cert from a PKCS12 archive and write it to a file. 
""" + export_cmd = [ + executable, + "-list", + "-keystore", + pkcs_file, + "-alias", + alias, + "-storetype", + "pkcs12", + "-rfc" + ] + (export_rc, export_stdout, export_err) = module.run_command(export_cmd, data=password, check_rc=False) + + if export_rc != 0: + module.fail_json(msg="Internal module failure, cannot extract public certificate from pkcs12, error: %s" % export_err, + rc=export_rc) + + with open(dest, 'w') as f: + f.write(export_stdout) + + +def get_proxy_settings(scheme='https'): + """ Returns a tuple containing (proxy_host, proxy_port). (False, False) if no proxy is found """ + proxy_url = getproxies().get(scheme, '') + if not proxy_url: + return (False, False) + else: + parsed_url = urlparse(proxy_url) + if parsed_url.scheme: + (proxy_host, proxy_port) = parsed_url.netloc.split(':') + else: + (proxy_host, proxy_port) = parsed_url.path.split(':') + return (proxy_host, proxy_port) + + +def build_proxy_options(): + """ Returns list of valid proxy options for keytool """ + (proxy_host, proxy_port) = get_proxy_settings() no_proxy = os.getenv("no_proxy") - proxy_opts = '' - if https_proxy is not None: - (proxy_host, proxy_port) = https_proxy.split(':') - proxy_opts = "-J-Dhttps.proxyHost=%s -J-Dhttps.proxyPort=%s" % (proxy_host, proxy_port) + proxy_opts = [] + if proxy_host: + proxy_opts.extend(["-J-Dhttps.proxyHost=%s" % proxy_host, "-J-Dhttps.proxyPort=%s" % proxy_port]) if no_proxy is not None: # For Java's nonProxyHosts property, items are separated by '|', @@ -210,46 +316,48 @@ def import_cert_url(module, executable, url, port, keystore_path, keystore_pass, # The property name is http.nonProxyHosts, there is no # separate setting for HTTPS. 
- proxy_opts += " -J-Dhttp.nonProxyHosts='%s'" % non_proxy_hosts + proxy_opts.extend(["-J-Dhttp.nonProxyHosts=%s" % non_proxy_hosts]) + return proxy_opts - fetch_cmd = "%s -printcert -rfc -sslserver %s %s:%d" % (executable, proxy_opts, url, port) - import_cmd = ("%s -importcert -noprompt -keystore '%s' " - "-storepass '%s' -alias '%s' %s") % (executable, keystore_path, - keystore_pass, alias, - get_keystore_type(keystore_type)) - if trust_cacert: - import_cmd = import_cmd + " -trustcacerts" + +def _download_cert_url(module, executable, url, port): + """ Fetches the certificate from the remote URL using `keytool -printcert...` + The PEM formatted string is returned """ + proxy_opts = build_proxy_options() + fetch_cmd = [executable, "-printcert", "-rfc", "-sslserver"] + proxy_opts + ["%s:%d" % (url, port)] # Fetch SSL certificate from remote host. - dummy, fetch_out, dummy = module.run_command(fetch_cmd, check_rc=True) + (fetch_rc, fetch_out, fetch_err) = module.run_command(fetch_cmd, check_rc=False) - # Use remote certificate from remote host and import it to a java keystore - (import_rc, import_out, import_err) = module.run_command(import_cmd, - data=fetch_out, - check_rc=False) - diff = {'before': '\n', 'after': '%s\n' % alias} - if import_rc == 0: - module.exit_json(changed=True, msg=import_out, - rc=import_rc, cmd=import_cmd, stdout=import_out, - diff=diff) - else: - module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, - error=import_err) + if fetch_rc != 0: + module.fail_json(msg="Internal module failure, cannot download certificate, error: %s" % fetch_err, + rc=fetch_rc, cmd=fetch_cmd) + + return fetch_out def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert): ''' Import certificate from path into keystore located on keystore_path as alias ''' - import_cmd = ("%s -importcert -noprompt -keystore '%s' " - "-storepass '%s' -file '%s' -alias '%s' %s") % (executable, keystore_path, - 
keystore_pass, path, alias, - get_keystore_type(keystore_type)) + import_cmd = [ + executable, + "-importcert", + "-noprompt", + "-keystore", + keystore_path, + "-file", + path, + "-alias", + alias + ] + import_cmd += _get_keystore_type_keytool_parameters(keystore_type) if trust_cacert: - import_cmd = import_cmd + " -trustcacerts" + import_cmd.extend(["-trustcacerts"]) # Use local certificate from local path and import it to a java keystore (import_rc, import_out, import_err) = module.run_command(import_cmd, + data="%s\n%s" % (keystore_pass, keystore_pass), check_rc=False) diff = {'before': '\n', 'after': '%s\n' % alias} @@ -261,41 +369,29 @@ def import_cert_path(module, executable, path, keystore_path, keystore_pass, ali module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd) -def import_pkcs12_path(module, executable, path, keystore_path, keystore_pass, pkcs12_pass, pkcs12_alias, alias, keystore_type): - ''' Import pkcs12 from path into keystore located on - keystore_path as alias ''' - import_cmd = ("%s -importkeystore -noprompt -destkeystore '%s' -srcstoretype PKCS12 " - "-deststorepass '%s' -destkeypass '%s' -srckeystore '%s' -srcstorepass '%s' " - "-srcalias '%s' -destalias '%s' %s") % (executable, keystore_path, keystore_pass, - keystore_pass, path, pkcs12_pass, pkcs12_alias, - alias, get_keystore_type(keystore_type)) - - # Use local certificate from local path and import it to a java keystore - (import_rc, import_out, import_err) = module.run_command(import_cmd, - check_rc=False) - - diff = {'before': '\n', 'after': '%s\n' % alias} - if import_rc == 0: - module.exit_json(changed=True, msg=import_out, - rc=import_rc, cmd=import_cmd, stdout=import_out, - error=import_err, diff=diff) - else: - module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd) - - -def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type): +def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type, exit_after=True): 
''' Delete certificate identified with alias from keystore on keystore_path ''' - del_cmd = ("%s -delete -keystore '%s' -storepass '%s' " - "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type)) + del_cmd = [ + executable, + "-delete", + "-noprompt", + "-keystore", + keystore_path, + "-alias", + alias + ] + + del_cmd += _get_keystore_type_keytool_parameters(keystore_type) # Delete SSL certificate from keystore - (del_rc, del_out, del_err) = module.run_command(del_cmd, check_rc=True) + (del_rc, del_out, del_err) = module.run_command(del_cmd, data=keystore_pass, check_rc=True) - diff = {'before': '%s\n' % alias, 'after': None} + if exit_after: + diff = {'before': '%s\n' % alias, 'after': None} - module.exit_json(changed=True, msg=del_out, - rc=del_rc, cmd=del_cmd, stdout=del_out, - error=del_err, diff=diff) + module.exit_json(changed=True, msg=del_out, + rc=del_rc, cmd=del_cmd, stdout=del_out, + error=del_err, diff=diff) def test_keytool(module, executable): @@ -333,7 +429,8 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[['cert_path', 'cert_url', 'pkcs12_path']], + required_if=[['state', 'present', ('cert_path', 'cert_url', 'pkcs12_path'), True], + ['state', 'absent', ('cert_url', 'cert_alias'), True]], required_together=[['keystore_path', 'keystore_pass']], mutually_exclusive=[ ['cert_url', 'cert_path', 'pkcs12_path'] @@ -359,6 +456,9 @@ def main(): executable = module.params.get('executable') state = module.params.get('state') + # openssl dependency resolution + openssl_bin = module.get_bin_path('openssl', True) + if path and not cert_alias: module.fail_json(changed=False, msg="Using local path import from %s requires alias argument." 
@@ -369,31 +469,62 @@ def main(): if not keystore_create: test_keystore(module, keystore_path) - cert_present = check_cert_present(module, executable, keystore_path, - keystore_pass, cert_alias, keystore_type) + alias_exists, alias_exists_output = _check_cert_present( + module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) - if state == 'absent' and cert_present: + (dummy, new_certificate) = tempfile.mkstemp() + (dummy, old_certificate) = tempfile.mkstemp() + module.add_cleanup_file(new_certificate) + module.add_cleanup_file(old_certificate) + + if state == 'absent' and alias_exists: if module.check_mode: module.exit_json(changed=True) + # delete and exit delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type) - elif state == 'present' and not cert_present: - if module.check_mode: - module.exit_json(changed=True) + # dump certificate to enroll in the keystore on disk and compute digest + if state == 'present': + # The alias exists in the keystore so we must now compare the SHA256 hash of the + # public certificate already in the keystore, and the certificate we are wanting to add + if alias_exists: + with open(old_certificate, "w") as f: + f.write(alias_exists_output) + keystore_cert_digest = _get_digest_from_x509_file(module, old_certificate, openssl_bin) + + else: + keystore_cert_digest = '' if pkcs12_path: - import_pkcs12_path(module, executable, pkcs12_path, keystore_path, - keystore_pass, pkcs12_pass, pkcs12_alias, cert_alias, keystore_type) + # Extracting certificate with openssl + _export_public_cert_from_pkcs12(module, executable, pkcs12_path, cert_alias, pkcs12_pass, new_certificate) - if path: - import_cert_path(module, executable, path, keystore_path, + elif path: + # Extracting the X509 digest is a bit easier. Keytool will print the PEM + # certificate to stdout so we don't need to do any transformations. 
+ new_certificate = path + + elif url: + # Getting the X509 digest from a URL is the same as from a path, we just have + # to download the cert first + _get_certificate_from_url(module, executable, url, port, new_certificate) + + new_cert_digest = _get_digest_from_x509_file(module, new_certificate, openssl_bin) + + if keystore_cert_digest != new_cert_digest: + + if module.check_mode: + module.exit_json(changed=True) + + if alias_exists: + # The certificate in the keystore does not match with the one we want to be present + # The existing certificate must first be deleted before we insert the correct one + delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type, exit_after=False) + + import_cert_path(module, executable, new_certificate, keystore_path, keystore_pass, cert_alias, keystore_type, trust_cacert) - if url: - import_cert_url(module, executable, url, port, keystore_path, - keystore_pass, cert_alias, keystore_type, trust_cacert) - module.exit_json(changed=False) diff --git a/tests/integration/targets/java_cert/defaults/main.yml b/tests/integration/targets/java_cert/defaults/main.yml index 22723ff177..6416f306af 100644 --- a/tests/integration/targets/java_cert/defaults/main.yml +++ b/tests/integration/targets/java_cert/defaults/main.yml @@ -1,3 +1,13 @@ --- test_pkcs12_path: testpkcs.p12 -test_keystore_path: keystore.jks \ No newline at end of file +test_keystore_path: keystore.jks +test_keystore2_path: "{{ output_dir }}/keystore2.jks" +test_keystore2_password: changeit +test_cert_path: "{{ output_dir }}/cert.pem" +test_key_path: "{{ output_dir }}/key.pem" +test_cert2_path: "{{ output_dir }}/cert2.pem" +test_key2_path: "{{ output_dir }}/key2.pem" +test_pkcs_path: "{{ output_dir }}/cert.p12" +test_pkcs2_path: "{{ output_dir }}/cert2.p12" +test_ssl: setupSSLServer.py +test_ssl_port: 21500 \ No newline at end of file diff --git a/tests/integration/targets/java_cert/files/setupSSLServer.py 
b/tests/integration/targets/java_cert/files/setupSSLServer.py new file mode 100644 index 0000000000..9227eefd81 --- /dev/null +++ b/tests/integration/targets/java_cert/files/setupSSLServer.py @@ -0,0 +1,20 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import ssl +import os +import sys + +root_dir = sys.argv[1] +port = int(sys.argv[2]) + +try: + from BaseHTTPServer import HTTPServer + from SimpleHTTPServer import SimpleHTTPRequestHandler +except ModuleNotFoundError: + from http.server import HTTPServer, SimpleHTTPRequestHandler + +httpd = HTTPServer(('localhost', port), SimpleHTTPRequestHandler) +httpd.socket = ssl.wrap_socket(httpd.socket, server_side=True, + certfile=os.path.join(root_dir, 'cert.pem'), + keyfile=os.path.join(root_dir, 'key.pem')) +httpd.handle_request() diff --git a/tests/integration/targets/java_cert/meta/main.yml b/tests/integration/targets/java_cert/meta/main.yml index 1d18287ada..9bc23ac67f 100644 --- a/tests/integration/targets/java_cert/meta/main.yml +++ b/tests/integration/targets/java_cert/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_java_keytool + - setup_openssl diff --git a/tests/integration/targets/java_cert/tasks/main.yml b/tests/integration/targets/java_cert/tasks/main.yml index e701836e5d..8172db5c15 100644 --- a/tests/integration/targets/java_cert/tasks/main.yml +++ b/tests/integration/targets/java_cert/tasks/main.yml @@ -11,15 +11,16 @@ - name: import pkcs12 java_cert: - pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" - pkcs12_password: changeit - pkcs12_alias: default - cert_alias: default - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" - keystore_pass: changeme_keystore - keystore_create: yes - state: present + pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" + pkcs12_password: changeit + pkcs12_alias: default + cert_alias: default + keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + keystore_pass: changeme_keystore + keystore_create: yes + state: 
present register: result_success + - name: verify success assert: that: @@ -27,14 +28,14 @@ - name: import pkcs12 with wrong password java_cert: - pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" - pkcs12_password: wrong_pass - pkcs12_alias: default - cert_alias: default_new - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" - keystore_pass: changeme_keystore - keystore_create: yes - state: present + pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" + pkcs12_password: wrong_pass + pkcs12_alias: default + cert_alias: default_new + keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + keystore_pass: changeme_keystore + keystore_create: yes + state: present ignore_errors: true register: result_wrong_pass @@ -45,16 +46,62 @@ - name: test fail on mutually exclusive params java_cert: - cert_path: ca.crt - pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" - cert_alias: default - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" - keystore_pass: changeme_keystore - keystore_create: yes - state: present + cert_path: ca.crt + pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" + cert_alias: default + keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + keystore_pass: changeme_keystore + keystore_create: yes + state: present ignore_errors: true register: result_excl_params + - name: verify failed exclusive params assert: that: - result_excl_params is failed + + - name: test fail on missing required params + java_cert: + keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + keystore_pass: changeme_keystore + state: absent + ignore_errors: true + register: result_missing_required_param + + - name: verify failed missing required params + assert: + that: + - result_missing_required_param is failed + + - name: delete object based on cert_alias parameter + java_cert: + keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + keystore_pass: changeme_keystore + cert_alias: default + state: absent + ignore_errors: true + register: 
result_alias_deleted + + - name: verify object successfully deleted + assert: + that: + - result_alias_deleted is successful + + - name: include extended test suite + import_tasks: state_change.yml + + - name: cleanup environment + file: + path: "{{ item }}" + state: absent + loop: + - "{{ output_dir }}/{{ test_pkcs12_path }}" + - "{{ output_dir }}/{{ test_keystore_path }}" + - "{{ test_keystore2_path }}" + - "{{ test_cert_path }}" + - "{{ test_key_path }}" + - "{{ test_cert2_path }}" + - "{{ test_key2_path }}" + - "{{ test_pkcs_path }}" + - "{{ test_pkcs2_path }}" \ No newline at end of file diff --git a/tests/integration/targets/java_cert/tasks/state_change.yml b/tests/integration/targets/java_cert/tasks/state_change.yml new file mode 100644 index 0000000000..3c37fc6727 --- /dev/null +++ b/tests/integration/targets/java_cert/tasks/state_change.yml @@ -0,0 +1,169 @@ +--- +- name: Generate the self signed cert used as a place holder to create the java keystore + command: openssl req -x509 -newkey rsa:4096 -keyout {{ test_key_path }} -out {{ test_cert_path }} -days 365 -nodes -subj '/CN=localhost' + args: + creates: "{{ test_key_path }}" + +- name: Create the test keystore + java_keystore: + name: placeholder + dest: "{{ test_keystore2_path }}" + password: "{{ test_keystore2_password }}" + private_key: "{{ lookup('file', '{{ test_key_path }}') }}" + certificate: "{{ lookup('file', '{{ test_cert_path }}') }}" + +- name: Generate the self signed cert we will use for testing + command: openssl req -x509 -newkey rsa:4096 -keyout '{{ test_key2_path }}' -out '{{ test_cert2_path }}' -days 365 -nodes -subj '/CN=localhost' + args: + creates: "{{ test_key2_path }}" + +- name: | + Import the newly created certificate. This is our main test. 
+ If the java_cert has been updated properly, then this task will report changed each time + since the module will be comparing the hash of the certificate instead of validating that the alias + simply exists + java_cert: + cert_alias: test_cert + cert_path: "{{ test_cert2_path }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + state: present + register: result_x509_changed + +- name: Verify the x509 status has changed + assert: + that: + - result_x509_changed is changed + +- name: | + We also want to make sure that the status doesnt change if we import the same cert + java_cert: + cert_alias: test_cert + cert_path: "{{ test_cert2_path }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + state: present + register: result_x509_succeeded + +- name: Verify the x509 status is ok + assert: + that: + - result_x509_succeeded is succeeded + +- name: Create the pkcs12 archive from the test x509 cert + command: > + openssl pkcs12 + -in {{ test_cert_path }} + -inkey {{ test_key_path }} + -export + -name test_pkcs12_cert + -out {{ test_pkcs_path }} + -passout pass:"{{ test_keystore2_password }}" + +- name: Create the pkcs12 archive from the certificate we will be trying to add to the keystore + command: > + openssl pkcs12 + -in {{ test_cert2_path }} + -inkey {{ test_key2_path }} + -export + -name test_pkcs12_cert + -out {{ test_pkcs2_path }} + -passout pass:"{{ test_keystore2_password }}" + +- name: > + Ensure the original pkcs12 cert is in the keystore + java_cert: + cert_alias: test_pkcs12_cert + pkcs12_alias: test_pkcs12_cert + pkcs12_path: "{{ test_pkcs_path }}" + pkcs12_password: "{{ test_keystore2_password }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + state: present + +- name: | + Perform the same test, but we will now be testing the pkcs12 functionality + If we add a different pkcs12 cert with the same alias, we should 
have a chnaged result, NOT the same + java_cert: + cert_alias: test_pkcs12_cert + pkcs12_alias: test_pkcs12_cert + pkcs12_path: "{{ test_pkcs2_path }}" + pkcs12_password: "{{ test_keystore2_password }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + state: present + register: result_pkcs12_changed + +- name: Verify the pkcs12 status has changed + assert: + that: + - result_pkcs12_changed is changed + +- name: | + We are requesting the same cert now, so the status should show OK + java_cert: + cert_alias: test_pkcs12_cert + pkcs12_alias: test_pkcs12_cert + pkcs12_path: "{{ test_pkcs2_path }}" + pkcs12_password: "{{ test_keystore2_password }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + register: result_pkcs12_succeeded + +- name: Verify the pkcs12 status is ok + assert: + that: + - result_pkcs12_succeeded is succeeded + +- name: Copy the ssl server script + copy: + src: "setupSSLServer.py" + dest: "{{ output_dir }}" + +- name: Create an SSL server that we will use for testing URL imports + command: python {{ output_dir }}/setupSSLServer.py {{ output_dir }} {{ test_ssl_port }} + async: 10 + poll: 0 + +- name: | + Download the original cert.pem from our temporary server. The current cert should contain + cert2.pem. 
Importing this cert should return a status of changed + java_cert: + cert_alias: test_cert_localhost + cert_url: localhost + cert_port: "{{ test_ssl_port }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + state: present + register: result_url_changed + +- name: Verify that the url status is changed + assert: + that: + - result_url_changed is changed + +- name: Ensure we can remove the x509 cert + java_cert: + cert_alias: test_cert + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + state: absent + register: result_x509_absent + +- name: Verify the x509 cert is absent + assert: + that: + - result_x509_absent is changed + +- name: Ensure we can remove the pkcs12 archive + java_cert: + cert_alias: test_pkcs12_cert + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + state: absent + register: result_pkcs12_absent + +- name: Verify the pkcs12 archive is absent + assert: + that: + - result_pkcs12_absent is changed From 7f91821bcc13b63413e4c2aeb77d8f21f86c13f1 Mon Sep 17 00:00:00 2001 From: justchris1 <30219018+justchris1@users.noreply.github.com> Date: Wed, 7 Apr 2021 13:32:45 -0400 Subject: [PATCH 0166/3093] ipa_user sshpubkey can now support multi word comments in the key (#2159) * ipa_user sshpubkey can now support multi word comments in the key * Add documentation fragment for pull request * Update changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml Co-authored-by: Felix Fontein * Cleaner implementation of multi word comments Co-authored-by: Chris Costa Co-authored-by: Felix Fontein --- .../2159-ipa-user-sshpubkey-multi-word-comments.yaml | 2 ++ plugins/modules/identity/ipa/ipa_user.py | 10 ++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml diff --git 
a/changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml b/changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml new file mode 100644 index 0000000000..10547bb71b --- /dev/null +++ b/changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml @@ -0,0 +1,2 @@ +bugfixes: + - ipa_user - allow ``sshpubkey`` to permit multiple word comments (https://github.com/ansible-collections/community.general/pull/2159). diff --git a/plugins/modules/identity/ipa/ipa_user.py b/plugins/modules/identity/ipa/ipa_user.py index fa7b3abbda..1a0c885cfb 100644 --- a/plugins/modules/identity/ipa/ipa_user.py +++ b/plugins/modules/identity/ipa/ipa_user.py @@ -269,16 +269,18 @@ def get_user_diff(client, ipa_user, module_user): def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'): """ Return the public key fingerprint of a given public SSH key - in format "[fp] [user@host] (ssh-rsa)" where fp is of the format: + in format "[fp] [comment] (ssh-rsa)" where fp is of the format: FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7 for md5 or SHA256:[base64] for sha256 + Comments are assumed to be all characters past the second + whitespace character in the sshpubkey string. 
:param ssh_key: :param hash_algo: :return: """ - parts = ssh_key.strip().split() + parts = ssh_key.strip().split(None, 2) if len(parts) == 0: return None key_type = parts[0] @@ -293,8 +295,8 @@ def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'): if len(parts) < 3: return "%s (%s)" % (key_fp, key_type) else: - user_host = parts[2] - return "%s %s (%s)" % (key_fp, user_host, key_type) + comment = parts[2] + return "%s %s (%s)" % (key_fp, comment, key_type) def ensure(module, client): From 595d590862f5fc88f37d5353332ab37ec4cbc225 Mon Sep 17 00:00:00 2001 From: justchris1 <30219018+justchris1@users.noreply.github.com> Date: Wed, 7 Apr 2021 14:57:04 -0400 Subject: [PATCH 0167/3093] Fix issue where multiselect field in userauthtype did not allow multiple values (#2174) * Fix issue where multiselect field in userauthtype did not allow multiple values * Add changelogfragment for change * Update changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml Co-authored-by: Felix Fontein * Update plugins/modules/identity/ipa/ipa_user.py Co-authored-by: Felix Fontein * Update changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml Co-authored-by: Felix Fontein Co-authored-by: Chris Costa Co-authored-by: Felix Fontein --- .../2174-ipa-user-userauthtype-multiselect.yml | 2 ++ plugins/modules/identity/ipa/ipa_user.py | 11 +++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml diff --git a/changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml b/changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml new file mode 100644 index 0000000000..d162f19b7a --- /dev/null +++ b/changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml @@ -0,0 +1,2 @@ +minor_changes: + - ipa_user - fix ``userauthtype`` option to take in list of strings for the multi-select field instead of single string (https://github.com/ansible-collections/community.general/pull/2174). 
diff --git a/plugins/modules/identity/ipa/ipa_user.py b/plugins/modules/identity/ipa/ipa_user.py index 1a0c885cfb..847749f15e 100644 --- a/plugins/modules/identity/ipa/ipa_user.py +++ b/plugins/modules/identity/ipa/ipa_user.py @@ -94,7 +94,8 @@ options: description: - The authentication type to use for the user. choices: ["password", "radius", "otp", "pkinit", "hardened"] - type: str + type: list + elements: str version_added: '1.2.0' extends_documentation_fragment: - community.general.ipa.documentation @@ -146,11 +147,13 @@ EXAMPLES = r''' ipa_pass: topsecret update_password: on_create -- name: Ensure pinky is present and using one time password authentication +- name: Ensure pinky is present and using one time password and RADIUS authentication community.general.ipa_user: name: pinky state: present - userauthtype: otp + userauthtype: + - otp + - radius ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret @@ -363,7 +366,7 @@ def main(): telephonenumber=dict(type='list', elements='str'), title=dict(type='str'), homedirectory=dict(type='str'), - userauthtype=dict(type='str', + userauthtype=dict(type='list', elements='str', choices=['password', 'radius', 'otp', 'pkinit', 'hardened'])) module = AnsibleModule(argument_spec=argument_spec, From 4b6722d93844a2ca0347a8b8a97b29ce0b8ec9b6 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 8 Apr 2021 07:49:57 +0200 Subject: [PATCH 0168/3093] Add path_join compatibility shim (#2172) * Add path_join compatibility shim. * Add myself as maintainer. 
--- .github/BOTMETA.yml | 2 ++ .../fragments/path_join-shim-filter.yml | 3 ++ meta/runtime.yml | 7 +++++ plugins/filter/path_join_shim.py | 28 +++++++++++++++++++ .../targets/filter_path_join_shim/aliases | 2 ++ .../filter_path_join_shim/tasks/main.yml | 7 +++++ 6 files changed, 49 insertions(+) create mode 100644 changelogs/fragments/path_join-shim-filter.yml create mode 100644 plugins/filter/path_join_shim.py create mode 100644 tests/integration/targets/filter_path_join_shim/aliases create mode 100644 tests/integration/targets/filter_path_join_shim/tasks/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 12fa40725a..24e2061469 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -59,6 +59,8 @@ files: maintainers: kellyjonbrazil $filters/list.py: maintainers: vbotka + $filters/path_join_shim.py: + maintainers: felixfontein $filters/time.py: maintainers: resmo $httpapis/: diff --git a/changelogs/fragments/path_join-shim-filter.yml b/changelogs/fragments/path_join-shim-filter.yml new file mode 100644 index 0000000000..f96922203f --- /dev/null +++ b/changelogs/fragments/path_join-shim-filter.yml @@ -0,0 +1,3 @@ +add plugin.filter: + - name: path_join + description: Redirects to ansible.builtin.path_join for ansible-base 2.10 or newer, and provides a compatible implementation for Ansible 2.9 diff --git a/meta/runtime.yml b/meta/runtime.yml index b13cbc549c..00eed0fa84 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -611,3 +611,10 @@ plugin_routing: redirect: community.docker.docker_swarm kubevirt: redirect: community.kubevirt.kubevirt + filter: + path_join: + # The ansible.builtin.path_join filter has been added in ansible-base 2.10. + # Since plugin routing is only available since ansible-base 2.10, this + # redirect will be used for ansible-base 2.10 or later, and the included + # path_join filter will be used for Ansible 2.9 or earlier. 
+ redirect: ansible.builtin.path_join diff --git a/plugins/filter/path_join_shim.py b/plugins/filter/path_join_shim.py new file mode 100644 index 0000000000..9734298a15 --- /dev/null +++ b/plugins/filter/path_join_shim.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020-2021, Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +import os.path + + +def path_join(list): + '''Join list of paths. + + This is a minimal shim for ansible.builtin.path_join included in ansible-base 2.10. + This should only be called by Ansible 2.9 or earlier. See meta/runtime.yml for details. + ''' + return os.path.join(*list) + + +class FilterModule(object): + '''Ansible jinja2 filters''' + + def filters(self): + return { + 'path_join': path_join, + } diff --git a/tests/integration/targets/filter_path_join_shim/aliases b/tests/integration/targets/filter_path_join_shim/aliases new file mode 100644 index 0000000000..b167317cfe --- /dev/null +++ b/tests/integration/targets/filter_path_join_shim/aliases @@ -0,0 +1,2 @@ +shippable/posix/group1 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/filter_path_join_shim/tasks/main.yml b/tests/integration/targets/filter_path_join_shim/tasks/main.yml new file mode 100644 index 0000000000..5f9eff3d4b --- /dev/null +++ b/tests/integration/targets/filter_path_join_shim/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- name: "Test path_join filter" + assert: + that: + - "['a', 'b'] | community.general.path_join == 'a/b'" + - "['a', '/b'] | community.general.path_join == '/b'" + - "[''] | community.general.path_join == ''" From b6ae47c4553e31f6f7ac7a869fceaf9ecb7b9ffc Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 8 Apr 2021 07:50:28 +0200 Subject: [PATCH 0169/3093] Add dict filter (#2171) * Add dict and 
list_to_dict filters. * Remove list_to_dict filter. * Add myself as maintainer. --- .github/BOTMETA.yml | 2 ++ changelogs/fragments/dict-filter.yml | 3 +++ plugins/filter/dict.py | 24 +++++++++++++++++++ tests/integration/targets/filter_dict/aliases | 2 ++ .../targets/filter_dict/tasks/main.yml | 7 ++++++ 5 files changed, 38 insertions(+) create mode 100644 changelogs/fragments/dict-filter.yml create mode 100644 plugins/filter/dict.py create mode 100644 tests/integration/targets/filter_dict/aliases create mode 100644 tests/integration/targets/filter_dict/tasks/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 24e2061469..fbe43bbd3e 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -53,6 +53,8 @@ files: $doc_fragments/xenserver.py: maintainers: bvitnik labels: xenserver + $filters/dict.py: + maintainers: felixfontein $filters/dict_kv.py: maintainers: giner $filters/jc.py: diff --git a/changelogs/fragments/dict-filter.yml b/changelogs/fragments/dict-filter.yml new file mode 100644 index 0000000000..1e9923e796 --- /dev/null +++ b/changelogs/fragments/dict-filter.yml @@ -0,0 +1,3 @@ +add plugin.filter: + - name: dict + description: "The ``dict`` function as a filter: converts a list of tuples to a dictionary" diff --git a/plugins/filter/dict.py b/plugins/filter/dict.py new file mode 100644 index 0000000000..3d20e752b1 --- /dev/null +++ b/plugins/filter/dict.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +def dict_filter(sequence): + '''Convert a list of tuples to a dictionary. 
+ + Example: ``[[1, 2], ['a', 'b']] | community.general.dict`` results in ``{1: 2, 'a': 'b'}`` + ''' + return dict(sequence) + + +class FilterModule(object): + '''Ansible jinja2 filters''' + + def filters(self): + return { + 'dict': dict_filter, + } diff --git a/tests/integration/targets/filter_dict/aliases b/tests/integration/targets/filter_dict/aliases new file mode 100644 index 0000000000..3e81d77f98 --- /dev/null +++ b/tests/integration/targets/filter_dict/aliases @@ -0,0 +1,2 @@ +shippable/posix/group3 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/filter_dict/tasks/main.yml b/tests/integration/targets/filter_dict/tasks/main.yml new file mode 100644 index 0000000000..ab88d3ff3f --- /dev/null +++ b/tests/integration/targets/filter_dict/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- name: "Test dict filter" + assert: + that: + - "[['a', 'b']] | community.general.dict == dict([['a', 'b']])" + - "[['a', 'b'], [1, 2]] | community.general.dict == dict([['a', 'b'], [1, 2]])" + - "[] | community.general.dict == dict([])" From 0cd0f0eaf670d8da8afc6137ee19a53439e06e48 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 8 Apr 2021 17:58:39 +1200 Subject: [PATCH 0170/3093] module_helper - fixed decorator cause_changes (#2203) * fixed decorator cause_changes * added changelog fragment * typo --- .../2203-modhelper-cause-changes-deco.yml | 2 + plugins/module_utils/module_helper.py | 33 ++++++++----- .../module_utils/test_module_helper.py | 46 ++++++++++++++++++- 3 files changed, 68 insertions(+), 13 deletions(-) create mode 100644 changelogs/fragments/2203-modhelper-cause-changes-deco.yml diff --git a/changelogs/fragments/2203-modhelper-cause-changes-deco.yml b/changelogs/fragments/2203-modhelper-cause-changes-deco.yml new file mode 100644 index 0000000000..b61f97d6b8 --- /dev/null +++ 
b/changelogs/fragments/2203-modhelper-cause-changes-deco.yml @@ -0,0 +1,2 @@ +bugfixes: + - module_helper module utils - fixed decorator ``cause_changes`` (https://github.com/ansible-collections/community.general/pull/2203). diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index 6357eae25c..d241eba5af 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -101,18 +101,27 @@ class ArgFormat(object): return [str(p) for p in func(value)] -def cause_changes(func, on_success=True, on_failure=False): - @wraps(func) - def wrapper(self, *args, **kwargs): - try: - func(*args, **kwargs) - if on_success: - self.changed = True - except Exception: - if on_failure: - self.changed = True - raise - return wrapper +def cause_changes(on_success=None, on_failure=None): + + def deco(func): + if on_success is None and on_failure is None: + return func + + @wraps(func) + def wrapper(*args, **kwargs): + try: + self = args[0] + func(*args, **kwargs) + if on_success is not None: + self.changed = on_success + except Exception: + if on_failure is not None: + self.changed = on_failure + raise + + return wrapper + + return deco def module_fails_on_exception(func): diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index b8ea36501c..6f77ca7662 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -6,10 +6,12 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from collections import namedtuple + import pytest from ansible_collections.community.general.plugins.module_utils.module_helper import ( - ArgFormat, DependencyCtxMgr, ModuleHelper, VarMeta + ArgFormat, DependencyCtxMgr, ModuleHelper, VarMeta, cause_changes ) @@ -160,3 +162,45 @@ def test_vardict(): assert vd.c == 'new_c' assert vd.output() == {'a': 'new_a', 'c': 
'new_c'} assert vd.diff() == {'before': {'a': 123}, 'after': {'a': 'new_a'}}, "diff={0}".format(vd.diff()) + + +class MockMH(object): + changed = None + + def _div(self, x, y): + return x / y + + func_none = cause_changes()(_div) + func_onsucc = cause_changes(on_success=True)(_div) + func_onfail = cause_changes(on_failure=True)(_div) + func_onboth = cause_changes(on_success=True, on_failure=True)(_div) + + +CAUSE_CHG_DECO_PARAMS = ['method', 'expect_exception', 'expect_changed'] +CAUSE_CHG_DECO = dict( + none_succ=dict(method='func_none', expect_exception=False, expect_changed=None), + none_fail=dict(method='func_none', expect_exception=True, expect_changed=None), + onsucc_succ=dict(method='func_onsucc', expect_exception=False, expect_changed=True), + onsucc_fail=dict(method='func_onsucc', expect_exception=True, expect_changed=None), + onfail_succ=dict(method='func_onfail', expect_exception=False, expect_changed=None), + onfail_fail=dict(method='func_onfail', expect_exception=True, expect_changed=True), + onboth_succ=dict(method='func_onboth', expect_exception=False, expect_changed=True), + onboth_fail=dict(method='func_onboth', expect_exception=True, expect_changed=True), +) +CAUSE_CHG_DECO_IDS = sorted(CAUSE_CHG_DECO.keys()) + + +@pytest.mark.parametrize(CAUSE_CHG_DECO_PARAMS, + [[CAUSE_CHG_DECO[tc][param] + for param in CAUSE_CHG_DECO_PARAMS] + for tc in CAUSE_CHG_DECO_IDS], + ids=CAUSE_CHG_DECO_IDS) +def test_cause_changes_deco(method, expect_exception, expect_changed): + mh = MockMH() + if expect_exception: + with pytest.raises(Exception): + getattr(mh, method)(1, 0) + else: + getattr(mh, method)(9, 3) + + assert mh.changed == expect_changed From 4b71e088c77795a54e2604a8f3816f1296113f8a Mon Sep 17 00:00:00 2001 From: Ilija Matoski Date: Thu, 8 Apr 2021 22:32:18 +0200 Subject: [PATCH 0171/3093] inventory/proxmox: added constructable and added keyed_groups, groups and compose (#2180) * added constructable and added keyed_groups, groups and compose * Update 
changelogs/fragments/2162-proxmox-constructable.yml Co-authored-by: Felix Fontein * added constructed to extends_documentation_fragment and version_added to all the items * renamed _apply_rules to _apply_constructable for more clarity * Update changelogs/fragments/2162-proxmox-constructable.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2162-proxmox-constructable.yml | 3 ++ plugins/inventory/proxmox.py | 36 +++++++++++++++++-- 2 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2162-proxmox-constructable.yml diff --git a/changelogs/fragments/2162-proxmox-constructable.yml b/changelogs/fragments/2162-proxmox-constructable.yml new file mode 100644 index 0000000000..dfcb1e3495 --- /dev/null +++ b/changelogs/fragments/2162-proxmox-constructable.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- proxmox inventory plugin - added ``Constructable`` class to the inventory to provide options ``strict``, ``keyed_groups``, ``groups``, and ``compose`` (https://github.com/ansible-collections/community.general/pull/2180). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index d69775baf6..3e44dd1ddd 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -19,6 +19,7 @@ DOCUMENTATION = ''' - Will retrieve the first network interface with an IP for Proxmox nodes. - Can retrieve LXC/QEMU configuration as facts. extends_documentation_fragment: + - constructed - inventory_cache options: plugin: @@ -69,6 +70,14 @@ DOCUMENTATION = ''' description: Gather LXC/QEMU configuration facts. 
default: no type: bool + strict: + version_added: 2.5.0 + compose: + version_added: 2.5.0 + groups: + version_added: 2.5.0 + keyed_groups: + version_added: 2.5.0 ''' EXAMPLES = ''' @@ -78,6 +87,15 @@ url: http://localhost:8006 user: ansible@pve password: secure validate_certs: no +keyed_groups: + - key: proxmox_tags_parsed + separator: "" + prefix: group +groups: + webservers: "'web' in (proxmox_tags_parsed|list)" + mailservers: "'mail' in (proxmox_tags_parsed|list)" +compose: + ansible_port: 2222 ''' import re @@ -86,7 +104,7 @@ from ansible.module_utils.common._collections_compat import MutableMapping from distutils.version import LooseVersion from ansible.errors import AnsibleError -from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.six.moves.urllib.parse import urlencode # 3rd party imports @@ -99,7 +117,7 @@ except ImportError: HAS_REQUESTS = False -class InventoryModule(BaseInventoryPlugin, Cacheable): +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): ''' Host inventory parser for ansible using Proxmox as source. 
''' NAME = 'community.general.proxmox' @@ -209,6 +227,10 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): def _get_vm_config(self, node, vmid, vmtype, name): ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid)) + node_key = 'node' + node_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), node_key.lower())) + self.inventory.set_variable(name, node_key, node) + vmid_key = 'vmid' vmid_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmid_key.lower())) self.inventory.set_variable(name, vmid_key, vmid) @@ -264,6 +286,12 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): regex = r"[^A-Za-z0-9\_]" return re.sub(regex, "_", word.replace(" ", "")) + def _apply_constructable(self, name, variables): + strict = self.get_option('strict') + self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict) + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict) + self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict) + def _populate(self): self._get_auth() @@ -318,6 +346,8 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): if self.get_option('want_facts'): self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name']) + self._apply_constructable(lxc["name"], self.inventory.get_host(lxc['name']).get_vars()) + # get QEMU vm's for this node node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower())) self.inventory.add_group(node_qemu_group) @@ -340,6 +370,8 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): if self.get_option('want_facts'): self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name']) + self._apply_constructable(qemu["name"], self.inventory.get_host(qemu['name']).get_vars()) + # gather vm's in pools for pool in self._get_pools(): if pool.get('poolid'): From f0b7c6351ecc60ac4cd0441cd2fe13ccf2ac06cc Mon Sep 17 
00:00:00 2001 From: Florian Dambrine Date: Thu, 8 Apr 2021 13:37:06 -0700 Subject: [PATCH 0172/3093] New module: Add Pritunl VPN organization module (net_tools/pritunl/) (#804) --- plugins/module_utils/net_tools/pritunl/api.py | 70 +++ plugins/modules/net_tools/pritunl/__init__.py | 0 .../modules/net_tools/pritunl/pritunl_org.py | 199 +++++++ .../net_tools/pritunl/pritunl_org_info.py | 129 ++++ plugins/modules/pritunl_org.py | 1 + plugins/modules/pritunl_org_info.py | 1 + .../net_tools/pritunl/test_api.py | 556 ++++++++++-------- .../net_tools/pritunl/test_pritunl_org.py | 204 +++++++ .../pritunl/test_pritunl_org_info.py | 137 +++++ 9 files changed, 1064 insertions(+), 233 deletions(-) create mode 100644 plugins/modules/net_tools/pritunl/__init__.py create mode 100644 plugins/modules/net_tools/pritunl/pritunl_org.py create mode 100644 plugins/modules/net_tools/pritunl/pritunl_org_info.py create mode 120000 plugins/modules/pritunl_org.py create mode 120000 plugins/modules/pritunl_org_info.py create mode 100644 tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org.py create mode 100644 tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org_info.py diff --git a/plugins/module_utils/net_tools/pritunl/api.py b/plugins/module_utils/net_tools/pritunl/api.py index e78f1848eb..4dffe2b626 100644 --- a/plugins/module_utils/net_tools/pritunl/api.py +++ b/plugins/module_utils/net_tools/pritunl/api.py @@ -57,6 +57,34 @@ def _get_pritunl_organizations(api_token, api_secret, base_url, validate_certs=T ) +def _delete_pritunl_organization( + api_token, api_secret, base_url, organization_id, validate_certs=True +): + return pritunl_auth_request( + base_url=base_url, + api_token=api_token, + api_secret=api_secret, + method="DELETE", + path="/organization/%s" % (organization_id), + validate_certs=validate_certs, + ) + + +def _post_pritunl_organization( + api_token, api_secret, base_url, organization_data, validate_certs=True +): + return pritunl_auth_request( + 
api_token=api_token, + api_secret=api_secret, + base_url=base_url, + method="POST", + path="/organization/%s", + headers={"Content-Type": "application/json"}, + data=json.dumps(organization_data), + validate_certs=validate_certs, + ) + + def _get_pritunl_users( api_token, api_secret, base_url, organization_id, validate_certs=True ): @@ -179,6 +207,29 @@ def list_pritunl_users( return users +def post_pritunl_organization( + api_token, + api_secret, + base_url, + organization_name, + validate_certs=True, +): + response = _post_pritunl_organization( + api_token=api_token, + api_secret=api_secret, + base_url=base_url, + organization_data={"name": organization_name}, + validate_certs=True, + ) + + if response.getcode() != 200: + raise PritunlException( + "Could not add organization %s to Pritunl" % (organization_name) + ) + # The user PUT request returns the updated user object + return json.loads(response.read()) + + def post_pritunl_user( api_token, api_secret, @@ -227,6 +278,25 @@ def post_pritunl_user( return json.loads(response.read()) +def delete_pritunl_organization( + api_token, api_secret, base_url, organization_id, validate_certs=True +): + response = _delete_pritunl_organization( + api_token=api_token, + api_secret=api_secret, + base_url=base_url, + organization_id=organization_id, + validate_certs=True, + ) + + if response.getcode() != 200: + raise PritunlException( + "Could not remove organization %s from Pritunl" % (organization_id) + ) + + return json.loads(response.read()) + + def delete_pritunl_user( api_token, api_secret, base_url, organization_id, user_id, validate_certs=True ): diff --git a/plugins/modules/net_tools/pritunl/__init__.py b/plugins/modules/net_tools/pritunl/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/modules/net_tools/pritunl/pritunl_org.py b/plugins/modules/net_tools/pritunl/pritunl_org.py new file mode 100644 index 0000000000..7fa7cbc124 --- /dev/null +++ 
b/plugins/modules/net_tools/pritunl/pritunl_org.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: pritunl_org +author: Florian Dambrine (@Lowess) +version_added: 2.5.0 +short_description: Manages Pritunl Organizations using the Pritunl API +description: + - A module to manage Pritunl organizations using the Pritunl API. +extends_documentation_fragment: + - community.general.pritunl +options: + name: + type: str + required: true + aliases: + - org + description: + - The name of the organization to manage in Pritunl. + + force: + type: bool + default: false + description: + - If I(force) is C(true) and I(state) is C(absent), the module + will delete the organization, no matter if it contains users + or not. By default I(force) is C(false), which will cause the + module to fail the deletion of the organization when it contains + users. + + state: + type: str + default: 'present' + choices: + - present + - absent + description: + - If C(present), the module adds organization I(name) to + Pritunl. If C(absent), attempt to delete the organization + from Pritunl (please read about I(force) usage). +""" + +EXAMPLES = """ +- name: Ensure the organization named MyOrg exists + community.general.pritunl_org: + state: present + name: MyOrg + +- name: Ensure the organization named MyOrg does not exist + community.general.pritunl_org: + state: absent + name: MyOrg +""" + +RETURN = """ +response: + description: JSON representation of a Pritunl Organization. 
+ returned: success + type: dict + sample: + { + "auth_api": False, + "name": "Foo", + "auth_token": None, + "user_count": 0, + "auth_secret": None, + "id": "csftwlu6uhralzi2dpmhekz3", + } +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + delete_pritunl_organization, + post_pritunl_organization, + list_pritunl_organizations, + get_pritunl_settings, + pritunl_argument_spec, +) + + +def add_pritunl_organization(module): + result = {} + + org_name = module.params.get("name") + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + {"filters": {"name": org_name}}, + ) + ) + + # If the organization already exists + if len(org_obj_list) > 0: + result["changed"] = False + result["response"] = org_obj_list[0] + else: + # Otherwise create it + response = post_pritunl_organization( + **dict_merge( + get_pritunl_settings(module), + {"organization_name": org_name}, + ) + ) + result["changed"] = True + result["response"] = response + + module.exit_json(**result) + + +def remove_pritunl_organization(module): + result = {} + + org_name = module.params.get("name") + force = module.params.get("force") + + org_obj_list = [] + + org_obj_list = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + { + "filters": {"name": org_name}, + }, + ) + ) + + # No organization found + if len(org_obj_list) == 0: + result["changed"] = False + result["response"] = {} + + else: + # Otherwise attempt to delete it + org = org_obj_list[0] + + # Only accept deletion under specific conditions + if force or org["user_count"] == 0: + response = delete_pritunl_organization( + **dict_merge( + get_pritunl_settings(module), + {"organization_id": org["id"]}, + ) + ) + result["changed"] = True + 
result["response"] = response + else: + module.fail_json( + msg=( + "Can not remove organization '%s' with %d attached users. " + "Either set 'force' option to true or remove active users " + "from the organization" + ) + % (org_name, org["user_count"]) + ) + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + name=dict(required=True, type="str", aliases=["org"]), + force=dict(required=False, type="bool", default=False), + state=dict( + required=False, choices=["present", "absent"], default="present" + ), + ) + ), + + module = AnsibleModule(argument_spec=argument_spec) + + state = module.params.get("state") + + try: + if state == "present": + add_pritunl_organization(module) + elif state == "absent": + remove_pritunl_organization(module) + except PritunlException as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/net_tools/pritunl/pritunl_org_info.py b/plugins/modules/net_tools/pritunl/pritunl_org_info.py new file mode 100644 index 0000000000..e0c573fb19 --- /dev/null +++ b/plugins/modules/net_tools/pritunl/pritunl_org_info.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: pritunl_org_info +author: Florian Dambrine (@Lowess) +version_added: 2.5.0 +short_description: List Pritunl Organizations using the Pritunl API +description: + - A module to list Pritunl organizations using the Pritunl API. +extends_documentation_fragment: + - community.general.pritunl +options: + organization: + type: str + required: false + aliases: + - org + default: null + description: + - Name of the Pritunl organization to search for. 
+ If none provided, the module will return all Pritunl + organizations. +""" + +EXAMPLES = """ +- name: List all existing Pritunl organizations + community.general.pritunl_org_info: + +- name: Search for an organization named MyOrg + community.general.pritunl_user_info: + organization: MyOrg +""" + +RETURN = """ +organizations: + description: List of Pritunl organizations. + returned: success + type: list + elements: dict + sample: + [ + { + "auth_api": False, + "name": "FooOrg", + "auth_token": None, + "user_count": 0, + "auth_secret": None, + "id": "csftwlu6uhralzi2dpmhekz3", + }, + { + "auth_api": False, + "name": "MyOrg", + "auth_token": None, + "user_count": 3, + "auth_secret": None, + "id": "58070daee63f3b2e6e472c36", + }, + { + "auth_api": False, + "name": "BarOrg", + "auth_token": None, + "user_count": 0, + "auth_secret": None, + "id": "v1sncsxxybnsylc8gpqg85pg", + } + ] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( + PritunlException, + get_pritunl_settings, + list_pritunl_organizations, + pritunl_argument_spec, +) + + +def get_pritunl_organizations(module): + org_name = module.params.get("organization") + + organizations = [] + + organizations = list_pritunl_organizations( + **dict_merge( + get_pritunl_settings(module), + {"filters": {"name": org_name} if org_name else None}, + ) + ) + + if org_name and len(organizations) == 0: + # When an org_name is provided but no organization match return an error + module.fail_json(msg="Organization '%s' does not exist" % org_name) + + result = {} + result["changed"] = False + result["organizations"] = organizations + + module.exit_json(**result) + + +def main(): + argument_spec = pritunl_argument_spec() + + argument_spec.update( + dict( + organization=dict(required=False, type="str", 
default=None, aliases=["org"]) + ) + ), + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + get_pritunl_organizations(module) + except PritunlException as e: + module.fail_json(msg=to_native(e)) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/pritunl_org.py b/plugins/modules/pritunl_org.py new file mode 120000 index 0000000000..3e45ac224d --- /dev/null +++ b/plugins/modules/pritunl_org.py @@ -0,0 +1 @@ +./net_tools/pritunl/pritunl_org.py \ No newline at end of file diff --git a/plugins/modules/pritunl_org_info.py b/plugins/modules/pritunl_org_info.py new file mode 120000 index 0000000000..45ca579db2 --- /dev/null +++ b/plugins/modules/pritunl_org_info.py @@ -0,0 +1 @@ +./net_tools/pritunl/pritunl_org_info.py \ No newline at end of file diff --git a/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py b/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py index 1d78a6b555..4039f7c57a 100644 --- a/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py +++ b/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py @@ -9,7 +9,9 @@ import json import pytest from ansible.module_utils.common.dict_transformations import dict_merge from ansible.module_utils.six import iteritems -from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl import api +from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl import ( + api, +) from mock import MagicMock __metaclass__ = type @@ -17,6 +19,237 @@ __metaclass__ = type # Pritunl Mocks +PRITUNL_ORGS = [ + { + "auth_api": False, + "name": "Foo", + "auth_token": None, + "user_count": 0, + "auth_secret": None, + "id": "csftwlu6uhralzi2dpmhekz3", + }, + { + "auth_api": False, + "name": "GumGum", + "auth_token": None, + "user_count": 3, + "auth_secret": None, + "id": "58070daee63f3b2e6e472c36", + }, + { + "auth_api": False, + "name": "Bar", + "auth_token": None, + "user_count": 0, + "auth_secret": 
None, + "id": "v1sncsxxybnsylc8gpqg85pg", + }, +] + +NEW_PRITUNL_ORG = { + "auth_api": False, + "name": "NewOrg", + "auth_token": None, + "user_count": 0, + "auth_secret": None, + "id": "604a140ae63f3b36bc34c7bd", +} + +PRITUNL_USERS = [ + { + "auth_type": "google", + "dns_servers": None, + "pin": True, + "dns_suffix": None, + "servers": [ + { + "status": False, + "platform": None, + "server_id": "580711322bb66c1d59b9568f", + "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27", + "virt_address": "192.168.101.27", + "name": "vpn-A", + "real_address": None, + "connected_since": None, + "id": "580711322bb66c1d59b9568f", + "device_name": None, + }, + { + "status": False, + "platform": None, + "server_id": "5dad2cc6e63f3b3f4a6dfea5", + "virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37", + "virt_address": "192.168.201.37", + "name": "vpn-B", + "real_address": None, + "connected_since": None, + "id": "5dad2cc6e63f3b3f4a6dfea5", + "device_name": None, + }, + ], + "disabled": False, + "network_links": [], + "port_forwarding": [], + "id": "58070dafe63f3b2e6e472c3b", + "organization_name": "GumGum", + "type": "server", + "email": "bot@company.com", + "status": True, + "dns_mapping": None, + "otp_secret": "123456789ABCDEFG", + "client_to_client": False, + "sso": "google", + "bypass_secondary": False, + "groups": ["admin", "multiregion"], + "audit": False, + "name": "bot", + "gravatar": True, + "otp_auth": True, + "organization": "58070daee63f3b2e6e472c36", + }, + { + "auth_type": "google", + "dns_servers": None, + "pin": True, + "dns_suffix": None, + "servers": [ + { + "status": False, + "platform": None, + "server_id": "580711322bb66c1d59b9568f", + "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27", + "virt_address": "192.168.101.27", + "name": "vpn-A", + "real_address": None, + "connected_since": None, + "id": "580711322bb66c1d59b9568f", + "device_name": None, + }, + { + "status": False, + "platform": None, + "server_id": "5dad2cc6e63f3b3f4a6dfea5", + 
"virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37", + "virt_address": "192.168.201.37", + "name": "vpn-B", + "real_address": None, + "connected_since": None, + "id": "5dad2cc6e63f3b3f4a6dfea5", + "device_name": None, + }, + ], + "disabled": False, + "network_links": [], + "port_forwarding": [], + "id": "58070dafe63f3b2e6e472c3b", + "organization_name": "GumGum", + "type": "client", + "email": "florian@company.com", + "status": True, + "dns_mapping": None, + "otp_secret": "123456789ABCDEFG", + "client_to_client": False, + "sso": "google", + "bypass_secondary": False, + "groups": ["web", "database"], + "audit": False, + "name": "florian", + "gravatar": True, + "otp_auth": True, + "organization": "58070daee63f3b2e6e472c36", + }, + { + "auth_type": "google", + "dns_servers": None, + "pin": True, + "dns_suffix": None, + "servers": [ + { + "status": False, + "platform": None, + "server_id": "580711322bb66c1d59b9568f", + "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27", + "virt_address": "192.168.101.27", + "name": "vpn-A", + "real_address": None, + "connected_since": None, + "id": "580711322bb66c1d59b9568f", + "device_name": None, + }, + { + "status": False, + "platform": None, + "server_id": "5dad2cc6e63f3b3f4a6dfea5", + "virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37", + "virt_address": "192.168.201.37", + "name": "vpn-B", + "real_address": None, + "connected_since": None, + "id": "5dad2cc6e63f3b3f4a6dfea5", + "device_name": None, + }, + ], + "disabled": False, + "network_links": [], + "port_forwarding": [], + "id": "58070dafe63f3b2e6e472c3b", + "organization_name": "GumGum", + "type": "server", + "email": "ops@company.com", + "status": True, + "dns_mapping": None, + "otp_secret": "123456789ABCDEFG", + "client_to_client": False, + "sso": "google", + "bypass_secondary": False, + "groups": ["web", "database"], + "audit": False, + "name": "ops", + "gravatar": True, + "otp_auth": True, + "organization": "58070daee63f3b2e6e472c36", + }, +] + 
+NEW_PRITUNL_USER = { + "auth_type": "local", + "disabled": False, + "dns_servers": None, + "otp_secret": "6M4UWP2BCJBSYZAT", + "name": "alice", + "pin": False, + "dns_suffix": None, + "client_to_client": False, + "email": "alice@company.com", + "organization_name": "GumGum", + "bypass_secondary": False, + "groups": ["a", "b"], + "organization": "58070daee63f3b2e6e472c36", + "port_forwarding": [], + "type": "client", + "id": "590add71e63f3b72d8bb951a", +} + +NEW_PRITUNL_USER_UPDATED = dict_merge( + NEW_PRITUNL_USER, + { + "disabled": True, + "name": "bob", + "email": "bob@company.com", + "groups": ["c", "d"], + }, +) + + +class PritunlEmptyOrganizationMock(MagicMock): + """Pritunl API Mock for organization GET API calls.""" + + def getcode(self): + return 200 + + def read(self): + return json.dumps([]) + class PritunlListOrganizationMock(MagicMock): """Pritunl API Mock for organization GET API calls.""" @@ -25,34 +258,7 @@ class PritunlListOrganizationMock(MagicMock): return 200 def read(self): - return json.dumps( - [ - { - "auth_api": False, - "name": "Foo", - "auth_token": None, - "user_count": 0, - "auth_secret": None, - "id": "csftwlu6uhralzi2dpmhekz3", - }, - { - "auth_api": False, - "name": "GumGum", - "auth_token": None, - "user_count": 3, - "auth_secret": None, - "id": "58070daee63f3b2e6e472c36", - }, - { - "auth_api": False, - "name": "Bar", - "auth_token": None, - "user_count": 0, - "auth_secret": None, - "id": "v1sncsxxybnsylc8gpqg85pg", - }, - ] - ) + return json.dumps(PRITUNL_ORGS) class PritunlListUserMock(MagicMock): @@ -62,163 +268,7 @@ class PritunlListUserMock(MagicMock): return 200 def read(self): - return json.dumps( - [ - { - "auth_type": "google", - "dns_servers": None, - "pin": True, - "dns_suffix": None, - "servers": [ - { - "status": False, - "platform": None, - "server_id": "580711322bb66c1d59b9568f", - "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27", - "virt_address": "192.168.101.27", - "name": "vpn-A", - "real_address": None, 
- "connected_since": None, - "id": "580711322bb66c1d59b9568f", - "device_name": None, - }, - { - "status": False, - "platform": None, - "server_id": "5dad2cc6e63f3b3f4a6dfea5", - "virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37", - "virt_address": "192.168.201.37", - "name": "vpn-B", - "real_address": None, - "connected_since": None, - "id": "5dad2cc6e63f3b3f4a6dfea5", - "device_name": None, - }, - ], - "disabled": False, - "network_links": [], - "port_forwarding": [], - "id": "58070dafe63f3b2e6e472c3b", - "organization_name": "GumGum", - "type": "server", - "email": "bot@company.com", - "status": True, - "dns_mapping": None, - "otp_secret": "123456789ABCDEFG", - "client_to_client": False, - "sso": "google", - "bypass_secondary": False, - "groups": ["admin", "multiregion"], - "audit": False, - "name": "bot", - "gravatar": True, - "otp_auth": True, - "organization": "58070daee63f3b2e6e472c36", - }, - { - "auth_type": "google", - "dns_servers": None, - "pin": True, - "dns_suffix": None, - "servers": [ - { - "status": False, - "platform": None, - "server_id": "580711322bb66c1d59b9568f", - "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27", - "virt_address": "192.168.101.27", - "name": "vpn-A", - "real_address": None, - "connected_since": None, - "id": "580711322bb66c1d59b9568f", - "device_name": None, - }, - { - "status": False, - "platform": None, - "server_id": "5dad2cc6e63f3b3f4a6dfea5", - "virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37", - "virt_address": "192.168.201.37", - "name": "vpn-B", - "real_address": None, - "connected_since": None, - "id": "5dad2cc6e63f3b3f4a6dfea5", - "device_name": None, - }, - ], - "disabled": False, - "network_links": [], - "port_forwarding": [], - "id": "58070dafe63f3b2e6e472c3b", - "organization_name": "GumGum", - "type": "client", - "email": "florian@company.com", - "status": True, - "dns_mapping": None, - "otp_secret": "123456789ABCDEFG", - "client_to_client": False, - "sso": "google", - "bypass_secondary": 
False, - "groups": ["web", "database"], - "audit": False, - "name": "florian", - "gravatar": True, - "otp_auth": True, - "organization": "58070daee63f3b2e6e472c36", - }, - { - "auth_type": "google", - "dns_servers": None, - "pin": True, - "dns_suffix": None, - "servers": [ - { - "status": False, - "platform": None, - "server_id": "580711322bb66c1d59b9568f", - "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27", - "virt_address": "192.168.101.27", - "name": "vpn-A", - "real_address": None, - "connected_since": None, - "id": "580711322bb66c1d59b9568f", - "device_name": None, - }, - { - "status": False, - "platform": None, - "server_id": "5dad2cc6e63f3b3f4a6dfea5", - "virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37", - "virt_address": "192.168.201.37", - "name": "vpn-B", - "real_address": None, - "connected_since": None, - "id": "5dad2cc6e63f3b3f4a6dfea5", - "device_name": None, - }, - ], - "disabled": False, - "network_links": [], - "port_forwarding": [], - "id": "58070dafe63f3b2e6e472c3b", - "organization_name": "GumGum", - "type": "server", - "email": "ops@company.com", - "status": True, - "dns_mapping": None, - "otp_secret": "123456789ABCDEFG", - "client_to_client": False, - "sso": "google", - "bypass_secondary": False, - "groups": ["web", "database"], - "audit": False, - "name": "ops", - "gravatar": True, - "otp_auth": True, - "organization": "58070daee63f3b2e6e472c36", - }, - ] - ) + return json.dumps(PRITUNL_USERS) class PritunlErrorMock(MagicMock): @@ -231,6 +281,22 @@ class PritunlErrorMock(MagicMock): return "{}" +class PritunlPostOrganizationMock(MagicMock): + def getcode(self): + return 200 + + def read(self): + return json.dumps(NEW_PRITUNL_ORG) + + +class PritunlListOrganizationAfterPostMock(MagicMock): + def getcode(self): + return 200 + + def read(self): + return json.dumps(PRITUNL_ORGS + [NEW_PRITUNL_ORG]) + + class PritunlPostUserMock(MagicMock): """Pritunl API Mock for POST API calls.""" @@ -238,28 +304,7 @@ class 
PritunlPostUserMock(MagicMock): return 200 def read(self): - return json.dumps( - [ - { - "auth_type": "local", - "disabled": False, - "dns_servers": None, - "otp_secret": "6M4UWP2BCJBSYZAT", - "name": "alice", - "pin": False, - "dns_suffix": None, - "client_to_client": False, - "email": "alice@company.com", - "organization_name": "GumGum", - "bypass_secondary": False, - "groups": ["a", "b"], - "organization": "58070daee63f3b2e6e472c36", - "port_forwarding": [], - "type": "client", - "id": "590add71e63f3b72d8bb951a", - } - ] - ) + return json.dumps([NEW_PRITUNL_USER]) class PritunlPutUserMock(MagicMock): @@ -269,26 +314,17 @@ class PritunlPutUserMock(MagicMock): return 200 def read(self): - return json.dumps( - { - "auth_type": "local", - "disabled": True, - "dns_servers": None, - "otp_secret": "WEJANJYMF3Q2QSLG", - "name": "bob", - "pin": False, - "dns_suffix": False, - "client_to_client": False, - "email": "bob@company.com", - "organization_name": "GumGum", - "bypass_secondary": False, - "groups": ["c", "d"], - "organization": "58070daee63f3b2e6e472c36", - "port_forwarding": [], - "type": "client", - "id": "590add71e63f3b72d8bb951a", - } - ) + return json.dumps(NEW_PRITUNL_USER_UPDATED) + + +class PritunlDeleteOrganizationMock(MagicMock): + """Pritunl API Mock for DELETE API calls.""" + + def getcode(self): + return 200 + + def read(self): + return "{}" class PritunlDeleteUserMock(MagicMock): @@ -321,14 +357,21 @@ def pritunl_settings(): } +@pytest.fixture +def pritunl_organization_data(): + return { + "name": NEW_PRITUNL_ORG["name"], + } + + @pytest.fixture def pritunl_user_data(): return { - "name": "alice", - "email": "alice@company.com", - "groups": ["a", "b"], - "disabled": False, - "type": "client", + "name": NEW_PRITUNL_USER["name"], + "email": NEW_PRITUNL_USER["email"], + "groups": NEW_PRITUNL_USER["groups"], + "disabled": NEW_PRITUNL_USER["disabled"], + "type": NEW_PRITUNL_USER["type"], } @@ -347,6 +390,11 @@ def get_pritunl_error_mock(): return 
PritunlErrorMock() +@pytest.fixture +def post_pritunl_organization_mock(): + return PritunlPostOrganizationMock() + + @pytest.fixture def post_pritunl_user_mock(): return PritunlPostUserMock() @@ -357,6 +405,11 @@ def put_pritunl_user_mock(): return PritunlPutUserMock() +@pytest.fixture +def delete_pritunl_organization_mock(): + return PritunlDeleteOrganizationMock() + + @pytest.fixture def delete_pritunl_user_mock(): return PritunlDeleteUserMock() @@ -460,6 +513,25 @@ class TestPritunlApi: assert user["name"] == user_expected # Test for POST operation on Pritunl API + def test_add_pritunl_organization( + self, + pritunl_settings, + pritunl_organization_data, + post_pritunl_organization_mock, + ): + api._post_pritunl_organization = post_pritunl_organization_mock() + + create_response = api.post_pritunl_organization( + **dict_merge( + pritunl_settings, + {"organization_name": pritunl_organization_data["name"]}, + ) + ) + + # Ensure provided settings match with the ones returned by Pritunl + for k, v in iteritems(pritunl_organization_data): + assert create_response[k] == v + @pytest.mark.parametrize("org_id", [("58070daee63f3b2e6e472c36")]) def test_add_and_update_pritunl_user( self, @@ -513,6 +585,24 @@ class TestPritunlApi: assert update_response[k] == create_response[k] # Test for DELETE operation on Pritunl API + + @pytest.mark.parametrize("org_id", [("58070daee63f3b2e6e472c36")]) + def test_delete_pritunl_organization( + self, pritunl_settings, org_id, delete_pritunl_organization_mock + ): + api._delete_pritunl_organization = delete_pritunl_organization_mock() + + response = api.delete_pritunl_organization( + **dict_merge( + pritunl_settings, + { + "organization_id": org_id, + }, + ) + ) + + assert response == {} + @pytest.mark.parametrize( "org_id,user_id", [("58070daee63f3b2e6e472c36", "590add71e63f3b72d8bb951a")] ) diff --git a/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org.py b/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org.py 
new file mode 100644 index 0000000000..39071974c8 --- /dev/null +++ b/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org.py @@ -0,0 +1,204 @@ +# -*- coding: utf-8 -*- +# (c) 2021 Florian Dambrine +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +import sys + +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible.module_utils.six import iteritems +from ansible_collections.community.general.plugins.modules.net_tools.pritunl import ( + pritunl_org, +) +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api import ( + PritunlDeleteOrganizationMock, + PritunlListOrganizationMock, + PritunlListOrganizationAfterPostMock, + PritunlPostOrganizationMock, +) +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) + +__metaclass__ = type + + +class TestPritunlOrg(ModuleTestCase): + def setUp(self): + super(TestPritunlOrg, self).setUp() + self.module = pritunl_org + + # Add backward compatibility + if sys.version_info < (3, 2): + self.assertRegex = self.assertRegexpMatches + + def tearDown(self): + super(TestPritunlOrg, self).tearDown() + + def patch_add_pritunl_organization(self, **kwds): + return patch( + "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._post_pritunl_organization", + autospec=True, + **kwds + ) + + def patch_delete_pritunl_organization(self, **kwds): + return patch( + "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._delete_pritunl_organization", + autospec=True, + **kwds + ) + + def patch_get_pritunl_organizations(self, **kwds): + return patch( + 
"ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations", + autospec=True, + **kwds + ) + + def test_without_parameters(self): + """Test without parameters""" + set_module_args({}) + with self.assertRaises(AnsibleFailJson): + self.module.main() + + def test_present(self): + """Test Pritunl organization creation.""" + org_params = {"name": "NewOrg"} + set_module_args( + dict_merge( + { + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + }, + org_params, + ) + ) + # Test creation + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as mock_get: + with self.patch_add_pritunl_organization( + side_effect=PritunlPostOrganizationMock + ) as mock_add: + with self.assertRaises(AnsibleExitJson) as create_result: + self.module.main() + + create_exc = create_result.exception.args[0] + + self.assertTrue(create_exc["changed"]) + self.assertEqual(create_exc["response"]["name"], org_params["name"]) + self.assertEqual(create_exc["response"]["user_count"], 0) + + # Test module idempotency + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationAfterPostMock + ) as mock_get: + with self.patch_add_pritunl_organization( + side_effect=PritunlPostOrganizationMock + ) as mock_add: + with self.assertRaises(AnsibleExitJson) as idempotent_result: + self.module.main() + + idempotent_exc = idempotent_result.exception.args[0] + + # Ensure both calls resulted in the same returned value + # except for changed which sould be false the second time + for k, v in iteritems(idempotent_exc): + if k == "changed": + self.assertFalse(idempotent_exc[k]) + else: + self.assertEqual(create_exc[k], idempotent_exc[k]) + + def test_absent(self): + """Test organization removal from Pritunl.""" + org_params = {"name": "NewOrg"} + set_module_args( + dict_merge( + { + "state": "absent", + "pritunl_api_token": "token", + 
"pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + }, + org_params, + ) + ) + # Test deletion + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationAfterPostMock + ) as mock_get: + with self.patch_delete_pritunl_organization( + side_effect=PritunlDeleteOrganizationMock + ) as mock_delete: + with self.assertRaises(AnsibleExitJson) as delete_result: + self.module.main() + + delete_exc = delete_result.exception.args[0] + + self.assertTrue(delete_exc["changed"]) + self.assertEqual(delete_exc["response"], {}) + + # Test module idempotency + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as mock_get: + with self.patch_delete_pritunl_organization( + side_effect=PritunlDeleteOrganizationMock + ) as mock_add: + with self.assertRaises(AnsibleExitJson) as idempotent_result: + self.module.main() + + idempotent_exc = idempotent_result.exception.args[0] + + # Ensure both calls resulted in the same returned value + # except for changed which sould be false the second time + self.assertFalse(idempotent_exc["changed"]) + self.assertEqual(idempotent_exc["response"], delete_exc["response"]) + + def test_absent_with_existing_users(self): + """Test organization removal with attached users should fail except if force is true.""" + module_args = { + "state": "absent", + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + "name": "GumGum", + } + set_module_args(module_args) + + # Test deletion + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as mock_get: + with self.patch_delete_pritunl_organization( + side_effect=PritunlDeleteOrganizationMock + ) as mock_delete: + with self.assertRaises(AnsibleFailJson) as failure_result: + self.module.main() + + failure_exc = failure_result.exception.args[0] + + self.assertRegex(failure_exc["msg"], "Can not remove organization") + + # Switch force=True 
which should run successfully + set_module_args(dict_merge(module_args, {"force": True})) + + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as mock_get: + with self.patch_delete_pritunl_organization( + side_effect=PritunlDeleteOrganizationMock + ) as mock_delete: + with self.assertRaises(AnsibleExitJson) as delete_result: + self.module.main() + + delete_exc = delete_result.exception.args[0] + + self.assertTrue(delete_exc["changed"]) diff --git a/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org_info.py b/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org_info.py new file mode 100644 index 0000000000..54922f4b75 --- /dev/null +++ b/tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org_info.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Florian Dambrine +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +import sys + +from ansible_collections.community.general.plugins.modules.net_tools.pritunl import ( + pritunl_org_info, +) +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api import ( + PritunlListOrganizationMock, + PritunlEmptyOrganizationMock, +) +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) + +__metaclass__ = type + + +class TestPritunlOrgInfo(ModuleTestCase): + def setUp(self): + super(TestPritunlOrgInfo, self).setUp() + self.module = pritunl_org_info + + # Add backward compatibility + if sys.version_info < (3, 2): + self.assertRegex = self.assertRegexpMatches + + def tearDown(self): + super(TestPritunlOrgInfo, self).tearDown() + + def patch_get_pritunl_organizations(self, **kwds): + return patch( + 
"ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations", + autospec=True, + **kwds + ) + + def test_without_parameters(self): + """Test without parameters""" + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as org_mock: + set_module_args({}) + with self.assertRaises(AnsibleFailJson): + self.module.main() + + self.assertEqual(org_mock.call_count, 0) + + def test_list_empty_organizations(self): + """Listing all organizations even when no org exists should be valid.""" + with self.patch_get_pritunl_organizations( + side_effect=PritunlEmptyOrganizationMock + ) as org_mock: + with self.assertRaises(AnsibleExitJson) as result: + set_module_args( + { + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + } + ) + self.module.main() + + self.assertEqual(org_mock.call_count, 1) + + exc = result.exception.args[0] + self.assertEqual(len(exc["organizations"]), 0) + + def test_list_specific_organization(self): + """Listing a specific organization should be valid.""" + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as org_mock: + with self.assertRaises(AnsibleExitJson) as result: + set_module_args( + { + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + "org": "GumGum", + } + ) + self.module.main() + + self.assertEqual(org_mock.call_count, 1) + + exc = result.exception.args[0] + self.assertEqual(len(exc["organizations"]), 1) + + def test_list_unknown_organization(self): + """Listing an unknown organization should result in a failure.""" + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as org_mock: + with self.assertRaises(AnsibleFailJson) as result: + set_module_args( + { + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", 
+ "org": "Unknown", + } + ) + self.module.main() + + self.assertEqual(org_mock.call_count, 1) + + exc = result.exception.args[0] + self.assertRegex(exc["msg"], "does not exist") + + def test_list_all_organizations(self): + """Listing all organizations should be valid.""" + with self.patch_get_pritunl_organizations( + side_effect=PritunlListOrganizationMock + ) as org_mock: + with self.assertRaises(AnsibleExitJson) as result: + set_module_args( + { + "pritunl_api_token": "token", + "pritunl_api_secret": "secret", + "pritunl_url": "https://pritunl.domain.com", + } + ) + self.module.main() + + self.assertEqual(org_mock.call_count, 1) + + exc = result.exception.args[0] + self.assertEqual(len(exc["organizations"]), 3) From 8eb2331aeae1457c497cde3cba630d5f31b5aa95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=81lvaro=20Torres=20Cogollo?= Date: Fri, 9 Apr 2021 06:00:21 +0200 Subject: [PATCH 0173/3093] Bugfix: PyGithub does not support explicit port in base_url (#2204) * Bugfix: PyGithub does not support explicit port in base_url * Fix unit tests * Fix unit tests * Added changelog * Update changelogs/fragments/2204-github_repo-fix-baseurl_port.yml Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- .../2204-github_repo-fix-baseurl_port.yml | 2 ++ .../source_control/github/github_repo.py | 4 +-- .../source_control/github/test_github_repo.py | 34 +++++++++---------- 3 files changed, 21 insertions(+), 19 deletions(-) create mode 100644 changelogs/fragments/2204-github_repo-fix-baseurl_port.yml diff --git a/changelogs/fragments/2204-github_repo-fix-baseurl_port.yml b/changelogs/fragments/2204-github_repo-fix-baseurl_port.yml new file mode 100644 index 0000000000..0df3bd8ece --- /dev/null +++ b/changelogs/fragments/2204-github_repo-fix-baseurl_port.yml @@ -0,0 +1,2 @@ +bugfixes: + - github_repo - PyGithub bug does not allow explicit port in ``base_url``. 
Specifying port is not required (https://github.com/PyGithub/PyGithub/issues/1913). diff --git a/plugins/modules/source_control/github/github_repo.py b/plugins/modules/source_control/github/github_repo.py index 41f57469e4..587111fe5a 100644 --- a/plugins/modules/source_control/github/github_repo.py +++ b/plugins/modules/source_control/github/github_repo.py @@ -121,9 +121,9 @@ except Exception: def authenticate(username=None, password=None, access_token=None): if access_token: - return Github(base_url="https://api.github.com:443", login_or_token=access_token) + return Github(base_url="https://api.github.com", login_or_token=access_token) else: - return Github(base_url="https://api.github.com:443", login_or_token=username, password=password) + return Github(base_url="https://api.github.com", login_or_token=username, password=password) def create_repo(gh, name, organization=None, private=False, description='', check_mode=False): diff --git a/tests/unit/plugins/modules/source_control/github/test_github_repo.py b/tests/unit/plugins/modules/source_control/github/test_github_repo.py index 8d41c986b4..56ec9b7ec7 100644 --- a/tests/unit/plugins/modules/source_control/github/test_github_repo.py +++ b/tests/unit/plugins/modules/source_control/github/test_github_repo.py @@ -17,42 +17,42 @@ def debug_mock(url, request): print(request.original.__dict__) -@urlmatch(netloc=r'api\.github\.com:443$', path=r'/orgs/.*', method="get") +@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/orgs/.*', method="get") def get_orgs_mock(url, request): - match = re.search(r"api\.github\.com:443/orgs/(?P[^/]+)", request.url) + match = re.search(r"api\.github\.com(:[0-9]+)?/orgs/(?P[^/]+)", request.url) org = match.group("org") # https://docs.github.com/en/rest/reference/orgs#get-an-organization headers = {'content-type': 'application/json'} content = { "login": org, - "url": "https://api.github.com:443/orgs/{0}".format(org) + "url": "https://api.github.com/orgs/{0}".format(org) } content = 
json.dumps(content).encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com:443$', path=r'/user', method="get") +@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/user', method="get") def get_user_mock(url, request): # https://docs.github.com/en/rest/reference/users#get-the-authenticated-user headers = {'content-type': 'application/json'} content = { "login": "octocat", - "url": "https://api.github.com:443/users/octocat" + "url": "https://api.github.com/users/octocat" } content = json.dumps(content).encode("utf-8") return response(200, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com:443$', path=r'/repos/.*/.*', method="get") +@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="get") def get_repo_notfound_mock(url, request): return response(404, "{\"message\": \"Not Found\"}", "", "Not Found", 5, request) -@urlmatch(netloc=r'api\.github\.com:443$', path=r'/repos/.*/.*', method="get") +@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="get") def get_repo_mock(url, request): match = re.search( - r"api\.github\.com:443/repos/(?P[^/]+)/(?P[^/]+)", request.url) + r"api\.github\.com(:[0-9]+)?/repos/(?P[^/]+)/(?P[^/]+)", request.url) org = match.group("org") repo = match.group("repo") @@ -61,7 +61,7 @@ def get_repo_mock(url, request): content = { "name": repo, "full_name": "{0}/{1}".format(org, repo), - "url": "https://api.github.com:443/repos/{0}/{1}".format(org, repo), + "url": "https://api.github.com/repos/{0}/{1}".format(org, repo), "private": False, "description": "This your first repo!", "default_branch": "master", @@ -71,10 +71,10 @@ def get_repo_mock(url, request): return response(200, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com:443$', path=r'/orgs/.*/repos', method="post") +@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/orgs/.*/repos', method="post") def create_new_org_repo_mock(url, 
request): match = re.search( - r"api\.github\.com:443/orgs/(?P[^/]+)/repos", request.url) + r"api\.github\.com(:[0-9]+)?/orgs/(?P[^/]+)/repos", request.url) org = match.group("org") repo = json.loads(request.body) @@ -90,7 +90,7 @@ def create_new_org_repo_mock(url, request): return response(201, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com:443$', path=r'/user/repos', method="post") +@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/user/repos', method="post") def create_new_user_repo_mock(url, request): repo = json.loads(request.body) @@ -106,10 +106,10 @@ def create_new_user_repo_mock(url, request): return response(201, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com:443$', path=r'/repos/.*/.*', method="patch") +@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="patch") def patch_repo_mock(url, request): match = re.search( - r"api\.github\.com:443/repos/(?P[^/]+)/(?P[^/]+)", request.url) + r"api\.github\.com(:[0-9]+)?/repos/(?P[^/]+)/(?P[^/]+)", request.url) org = match.group("org") repo = match.group("repo") @@ -119,7 +119,7 @@ def patch_repo_mock(url, request): content = { "name": repo, "full_name": "{0}/{1}".format(org, repo), - "url": "https://api.github.com:443/repos/{0}/{1}".format(org, repo), + "url": "https://api.github.com/repos/{0}/{1}".format(org, repo), "private": body['private'], "description": body['description'], "default_branch": "master", @@ -129,13 +129,13 @@ def patch_repo_mock(url, request): return response(200, content, headers, None, 5, request) -@urlmatch(netloc=r'api\.github\.com:443$', path=r'/repos/.*/.*', method="delete") +@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="delete") def delete_repo_mock(url, request): # https://docs.github.com/en/rest/reference/repos#delete-a-repository return response(204, None, None, None, 5, request) -@urlmatch(netloc=r'api\.github\.com:443$', path=r'/repos/.*/.*', method="delete") 
+@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="delete") def delete_repo_notfound_mock(url, request): # https://docs.github.com/en/rest/reference/repos#delete-a-repository return response(404, "{\"message\": \"Not Found\"}", "", "Not Found", 5, request) From 5502e4ec17ab752ba97768e41ee7ed45bba9bc0c Mon Sep 17 00:00:00 2001 From: justchris1 <30219018+justchris1@users.noreply.github.com> Date: Sat, 10 Apr 2021 23:57:36 -0400 Subject: [PATCH 0174/3093] Added fields to the ipa_config module (#2116) * Added fields to the ipa_config module: ipadefaultprimarygroup, ipagroupsearchfields, ipahomesrootdir, ipamaxusernamelength, ipapwdexpadvnotify, ipasearchrecordslimit, ipasearchtimelimit, ipauserauthtype, ipausersearchfields * Fixed typos in documentation spec * Updated a field that was missing the version_added decoration * Add changelog fragment * Update plugins/modules/identity/ipa/ipa_config.py Cleanup example to be consistent with others. Co-authored-by: Felix Fontein * Cleanup example to be consistent with others. 
* Fixed changelog fragment * Updated punctuation in examples * Switched some elements to use int instead of str, and fixed duplicated example * Change type of field for ipauserauthtype to list of str, add support for ipaconfigstring and ipakrbauthzdata * Update fragment to represent adding support for ipaconfigstring and ipakrbauthzdata * Update changelogs/fragments/2116-add-fields-to-ipa-config-module.yml Co-authored-by: Felix Fontein * Update plugins/modules/identity/ipa/ipa_config.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/ipa/ipa_config.py Co-authored-by: Felix Fontein * Address review comments by making inputs into group search and user search fields a list of strings, even though IPA does not treat it as a multiselect field * Update plugins/modules/identity/ipa/ipa_config.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/ipa/ipa_config.py Co-authored-by: Felix Fontein Co-authored-by: Chris Costa Co-authored-by: Felix Fontein --- .../2116-add-fields-to-ipa-config-module.yml | 2 + plugins/modules/identity/ipa/ipa_config.py | 208 +++++++++++++++++- 2 files changed, 207 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2116-add-fields-to-ipa-config-module.yml diff --git a/changelogs/fragments/2116-add-fields-to-ipa-config-module.yml b/changelogs/fragments/2116-add-fields-to-ipa-config-module.yml new file mode 100644 index 0000000000..d1e1dc3180 --- /dev/null +++ b/changelogs/fragments/2116-add-fields-to-ipa-config-module.yml @@ -0,0 +1,2 @@ +minor_changes: + - ipa_config - add new options ``ipaconfigstring``, ``ipadefaultprimarygroup``, ``ipagroupsearchfields``, ``ipahomesrootdir``, ``ipabrkauthzdata``, ``ipamaxusernamelength``, ``ipapwdexpadvnotify``, ``ipasearchrecordslimit``, ``ipasearchtimelimit``, ``ipauserauthtype``, and ``ipausersearchfields`` (https://github.com/ansible-collections/community.general/pull/2116). 
diff --git a/plugins/modules/identity/ipa/ipa_config.py b/plugins/modules/identity/ipa/ipa_config.py index 756b6cf9d5..49d46fb5b2 100644 --- a/plugins/modules/identity/ipa/ipa_config.py +++ b/plugins/modules/identity/ipa/ipa_config.py @@ -14,6 +14,13 @@ short_description: Manage Global FreeIPA Configuration Settings description: - Modify global configuration settings of a FreeIPA Server. options: + ipaconfigstring: + description: Extra hashes to generate in password plug-in. + aliases: ["configstring"] + type: list + elements: str + choices: ["AllowNThash", "KDC:Disable Last Success", "KDC:Disable Lockout", "KDC:Disable Default Preauth for SPNs"] + version_added: '2.5.0' ipadefaultloginshell: description: Default shell for new users. aliases: ["loginshell"] @@ -22,25 +29,158 @@ options: description: Default e-mail domain for new users. aliases: ["emaildomain"] type: str + ipadefaultprimarygroup: + description: Default group for new users. + aliases: ["primarygroup"] + type: str + version_added: '2.5.0' + ipagroupsearchfields: + description: A list of fields to search in when searching for groups. + aliases: ["groupsearchfields"] + type: list + elements: str + version_added: '2.5.0' + ipahomesrootdir: + description: Default location of home directories. + aliases: ["homesrootdir"] + type: str + version_added: '2.5.0' + ipakrbauthzdata: + description: Default types of PAC supported for services. + aliases: ["krbauthzdata"] + type: list + elements: str + choices: ["MS-PAC", "PAD", "nfs:NONE"] + version_added: '2.5.0' + ipamaxusernamelength: + description: Maximum length of usernames. + aliases: ["maxusernamelength"] + type: int + version_added: '2.5.0' + ipapwdexpadvnotify: + description: Notice of impending password expiration, in days. + aliases: ["pwdexpadvnotify"] + type: int + version_added: '2.5.0' + ipasearchrecordslimit: + description: Maximum number of records to search (-1 or 0 is unlimited). 
+ aliases: ["searchrecordslimit"] + type: int + version_added: '2.5.0' + ipasearchtimelimit: + description: Maximum amount of time (seconds) for a search (-1 or 0 is unlimited). + aliases: ["searchtimelimit"] + type: int + version_added: '2.5.0' + ipauserauthtype: + description: The authentication type to use by default. + aliases: ["userauthtype"] + choices: ["password", "radius", "otp", "pkinit", "hardened", "disabled"] + type: list + elements: str + version_added: '2.5.0' + ipausersearchfields: + description: A list of fields to search in when searching for users. + aliases: ["usersearchfields"] + type: list + elements: str + version_added: '2.5.0' extends_documentation_fragment: - community.general.ipa.documentation ''' EXAMPLES = r''' -- name: Ensure the default login shell is bash. +- name: Ensure password plugin features DC:Disable Last Success and KDC:Disable Lockout are enabled + community.general.ipa_config: + ipaconfigstring: ["KDC:Disable Last Success", "KDC:Disable Lockout"] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default login shell is bash community.general.ipa_config: ipadefaultloginshell: /bin/bash ipa_host: localhost ipa_user: admin ipa_pass: supersecret -- name: Ensure the default e-mail domain is ansible.com. 
+- name: Ensure the default e-mail domain is ansible.com community.general.ipa_config: ipadefaultemaildomain: ansible.com ipa_host: localhost ipa_user: admin ipa_pass: supersecret + +- name: Ensure the default primary group is set to ipausers + community.general.ipa_config: + ipadefaultprimarygroup: ipausers + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the group search fields are set to 'cn,description' + community.general.ipa_config: + ipagroupsearchfields: ['cn', 'description'] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the home directory location is set to /home + community.general.ipa_config: + ipahomesrootdir: /home + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default types of PAC supported for services is set to MS-PAC and PAD + community.general.ipa_config: + ipakrbauthzdata: ["MS-PAC", "PAD"] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the maximum user name length is set to 32 + community.general.ipa_config: + ipamaxusernamelength: 32 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the password expiration notice is set to 4 days + community.general.ipa_config: + ipapwdexpadvnotify: 4 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the search record limit is set to 100 + community.general.ipa_config: + ipasearchrecordslimit: 100 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the search time limit is set to 2 seconds + community.general.ipa_config: + ipasearchtimelimit: 2 + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the default user auth type is password + community.general.ipa_config: + ipauserauthtype: ['password'] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the user search fields is set to 'uid,givenname,sn,ou,title' + 
community.general.ipa_config: + ipausersearchfields: ['uid', 'givenname', 'sn', 'ou', 'title'] + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret ''' RETURN = r''' @@ -68,12 +208,40 @@ class ConfigIPAClient(IPAClient): return self._post_json(method='config_mod', name=name, item=item) -def get_config_dict(ipadefaultloginshell=None, ipadefaultemaildomain=None): +def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, + ipadefaultemaildomain=None, ipadefaultprimarygroup=None, + ipagroupsearchfields=None, ipahomesrootdir=None, + ipakrbauthzdata=None, ipamaxusernamelength=None, + ipapwdexpadvnotify=None, ipasearchrecordslimit=None, + ipasearchtimelimit=None, ipauserauthtype=None, + ipausersearchfields=None): config = {} + if ipaconfigstring is not None: + config['ipaconfigstring'] = ipaconfigstring if ipadefaultloginshell is not None: config['ipadefaultloginshell'] = ipadefaultloginshell if ipadefaultemaildomain is not None: config['ipadefaultemaildomain'] = ipadefaultemaildomain + if ipadefaultprimarygroup is not None: + config['ipadefaultprimarygroup'] = ipadefaultprimarygroup + if ipagroupsearchfields is not None: + config['ipagroupsearchfields'] = ','.join(ipagroupsearchfields) + if ipahomesrootdir is not None: + config['ipahomesrootdir'] = ipahomesrootdir + if ipakrbauthzdata is not None: + config['ipakrbauthzdata'] = ipakrbauthzdata + if ipamaxusernamelength is not None: + config['ipamaxusernamelength'] = str(ipamaxusernamelength) + if ipapwdexpadvnotify is not None: + config['ipapwdexpadvnotify'] = str(ipapwdexpadvnotify) + if ipasearchrecordslimit is not None: + config['ipasearchrecordslimit'] = str(ipasearchrecordslimit) + if ipasearchtimelimit is not None: + config['ipasearchtimelimit'] = str(ipasearchtimelimit) + if ipauserauthtype is not None: + config['ipauserauthtype'] = ipauserauthtype + if ipausersearchfields is not None: + config['ipausersearchfields'] = ','.join(ipausersearchfields) return config @@ -84,8 +252,19 @@ def 
get_config_diff(client, ipa_config, module_config): def ensure(module, client): module_config = get_config_dict( + ipaconfigstring=module.params.get('ipaconfigstring'), ipadefaultloginshell=module.params.get('ipadefaultloginshell'), ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'), + ipadefaultprimarygroup=module.params.get('ipadefaultprimarygroup'), + ipagroupsearchfields=module.params.get('ipagroupsearchfields'), + ipahomesrootdir=module.params.get('ipahomesrootdir'), + ipakrbauthzdata=module.params.get('ipakrbauthzdata'), + ipamaxusernamelength=module.params.get('ipamaxusernamelength'), + ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'), + ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'), + ipasearchtimelimit=module.params.get('ipasearchtimelimit'), + ipauserauthtype=module.params.get('ipauserauthtype'), + ipausersearchfields=module.params.get('ipausersearchfields'), ) ipa_config = client.config_show() diff = get_config_diff(client, ipa_config, module_config) @@ -106,8 +285,31 @@ def ensure(module, client): def main(): argument_spec = ipa_argument_spec() argument_spec.update( + ipaconfigstring=dict(type='list', elements='str', + choices=['AllowNThash', + 'KDC:Disable Last Success', + 'KDC:Disable Lockout', + 'KDC:Disable Default Preauth for SPNs'], + aliases=['configstring']), ipadefaultloginshell=dict(type='str', aliases=['loginshell']), ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']), + ipadefaultprimarygroup=dict(type='str', aliases=['primarygroup']), + ipagroupsearchfields=dict(type='list', elements='str', + aliases=['groupsearchfields']), + ipahomesrootdir=dict(type='str', aliases=['homesrootdir']), + ipakrbauthzdata=dict(type='list', elements='str', + choices=['MS-PAC', 'PAD', 'nfs:NONE'], + aliases=['krbauthzdata']), + ipamaxusernamelength=dict(type='int', aliases=['maxusernamelength']), + ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']), + ipasearchrecordslimit=dict(type='int', 
aliases=['searchrecordslimit']), + ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']), + ipauserauthtype=dict(type='list', elements='str', + aliases=['userauthtype'], + choices=["password", "radius", "otp", "pkinit", + "hardened", "disabled"]), + ipausersearchfields=dict(type='list', elements='str', + aliases=['usersearchfields']), ) module = AnsibleModule( From fa13826273eb38e8ed330f6b417744162364f8b7 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Sun, 11 Apr 2021 08:28:48 +0430 Subject: [PATCH 0175/3093] npm: Add no_bin_links option (#2146) * Add no-bin-links option to npm * Add changelog * Fix changelog format * Add integration test * Change node package from thelounge to ncp --- .../2146-npm-add_no_bin_links_option.yaml | 3 + plugins/modules/packaging/language/npm.py | 13 +++- .../targets/npm/tasks/no_bin_links.yml | 64 +++++++++++++++++++ tests/integration/targets/npm/tasks/run.yml | 1 + 4 files changed, 80 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2146-npm-add_no_bin_links_option.yaml create mode 100644 tests/integration/targets/npm/tasks/no_bin_links.yml diff --git a/changelogs/fragments/2146-npm-add_no_bin_links_option.yaml b/changelogs/fragments/2146-npm-add_no_bin_links_option.yaml new file mode 100644 index 0000000000..651af80186 --- /dev/null +++ b/changelogs/fragments/2146-npm-add_no_bin_links_option.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - npm - add ``no_bin_links`` option (https://github.com/ansible-collections/community.general/issues/2128). diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/packaging/language/npm.py index 8cde63f48c..62121297d7 100644 --- a/plugins/modules/packaging/language/npm.py +++ b/plugins/modules/packaging/language/npm.py @@ -82,6 +82,12 @@ options: type: bool default: no version_added: 2.0.0 + no_bin_links: + description: + - Use the C(--no-bin-links) flag when installing. 
+ type: bool + default: no + version_added: 2.5.0 requirements: - npm installed in bin path (recommended /usr/local/bin) ''' @@ -151,6 +157,7 @@ class Npm(object): self.unsafe_perm = kwargs['unsafe_perm'] self.state = kwargs['state'] self.no_optional = kwargs['no_optional'] + self.no_bin_links = kwargs['no_bin_links'] if kwargs['executable']: self.executable = kwargs['executable'].split(' ') @@ -181,6 +188,8 @@ class Npm(object): cmd.append(self.registry) if self.no_optional: cmd.append('--no-optional') + if self.no_bin_links: + cmd.append('--no-bin-links') # If path is specified, cd into that path and run the command. cwd = None @@ -259,6 +268,7 @@ def main(): unsafe_perm=dict(default=False, type='bool'), ci=dict(default=False, type='bool'), no_optional=dict(default=False, type='bool'), + no_bin_links=dict(default=False, type='bool'), ) arg_spec['global'] = dict(default=False, type='bool') module = AnsibleModule( @@ -278,6 +288,7 @@ def main(): unsafe_perm = module.params['unsafe_perm'] ci = module.params['ci'] no_optional = module.params['no_optional'] + no_bin_links = module.params['no_bin_links'] if not path and not glbl: module.fail_json(msg='path must be specified when not using global') @@ -286,7 +297,7 @@ def main(): npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, executable=executable, registry=registry, ignore_scripts=ignore_scripts, - unsafe_perm=unsafe_perm, state=state, no_optional=no_optional) + unsafe_perm=unsafe_perm, state=state, no_optional=no_optional, no_bin_links=no_bin_links) changed = False if ci: diff --git a/tests/integration/targets/npm/tasks/no_bin_links.yml b/tests/integration/targets/npm/tasks/no_bin_links.yml new file mode 100644 index 0000000000..fdbc88c4eb --- /dev/null +++ b/tests/integration/targets/npm/tasks/no_bin_links.yml @@ -0,0 +1,64 @@ +--- +- name: 'Remove any node modules' + file: + path: '{{ remote_dir }}/node_modules' + state: absent + +- vars: + # sample: 
node-v8.2.0-linux-x64.tar.xz + node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin' + package: 'ncp' + block: + - shell: npm --version + environment: + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + register: npm_version + + - debug: + var: npm_version.stdout + + - name: 'Install simple package with no_bin_links disabled' + npm: + path: '{{ remote_dir }}' + executable: '{{ node_path }}/npm' + state: present + name: '{{ package }}' + no_bin_links: false + environment: + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + register: npm_install_no_bin_links_disabled + + - name: 'Make sure .bin folder has been created' + stat: + path: "{{ remote_dir }}/node_modules/.bin" + register: npm_dotbin_folder_disabled + + - name: 'Remove any node modules' + file: + path: '{{ remote_dir }}/node_modules' + state: absent + + - name: 'Install simple package with no_bin_links enabled' + npm: + path: '{{ remote_dir }}' + executable: '{{ node_path }}/npm' + state: present + name: '{{ package }}' + no_bin_links: true + environment: + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + register: npm_install_no_bin_links_enabled + + - name: 'Make sure .bin folder has not been created' + stat: + path: "{{ remote_dir }}/node_modules/.bin" + register: npm_dotbin_folder_enabled + + - assert: + that: + - npm_install_no_bin_links_disabled is success + - npm_install_no_bin_links_disabled is changed + - npm_install_no_bin_links_enabled is success + - npm_install_no_bin_links_enabled is changed + - npm_dotbin_folder_disabled.stat.exists + - not npm_dotbin_folder_enabled.stat.exists diff --git a/tests/integration/targets/npm/tasks/run.yml b/tests/integration/targets/npm/tasks/run.yml index 53b374fa63..c82e7e4e37 100644 --- a/tests/integration/targets/npm/tasks/run.yml +++ b/tests/integration/targets/npm/tasks/run.yml @@ -1,2 +1,3 @@ - include_tasks: setup.yml - include_tasks: test.yml +- include_tasks: no_bin_links.yml From 31645ded1171b68a4af1dbc8ec93ad9f49de823b Mon Sep 17 00:00:00 2001 From: 
justchris1 <30219018+justchris1@users.noreply.github.com> Date: Sun, 11 Apr 2021 09:25:03 -0400 Subject: [PATCH 0176/3093] Added modules ipa_otpconfig and ipa_otptoken (#2122) * Added module for ipa_otpconfig * Make no_log=False explicit. * Updated inputs to be int type instead of strings to align to expected inputs. Updated output message * Add changelog fragment * Remove changelog fragment as this is a new module * Update plugins/modules/identity/ipa/ipa_otpconfig.py Add version_added field to module description. Co-authored-by: Felix Fontein * Updated punctuation in examples * Add unit test for ipa_otpconfig * Add ipa_otptoken module with unit test * Updated documentation in unit test * Update plugins/modules/identity/ipa/ipa_otpconfig.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/ipa/ipa_otpconfig.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/ipa/ipa_otptoken.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/ipa/ipa_otptoken.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/ipa/ipa_otptoken.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/ipa/ipa_otptoken.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/ipa/ipa_otptoken.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/ipa/ipa_otptoken.py Co-authored-by: Felix Fontein * Added some documentation updates to make it conform to ansible standards * Update plugins/modules/identity/ipa/ipa_otptoken.py Co-authored-by: Felix Fontein * Address review comments Co-authored-by: Chris Costa Co-authored-by: Felix Fontein --- plugins/module_utils/ipa.py | 4 +- plugins/modules/identity/ipa/ipa_otpconfig.py | 172 ++++++ plugins/modules/identity/ipa/ipa_otptoken.py | 527 ++++++++++++++++++ plugins/modules/ipa_otpconfig.py | 1 + plugins/modules/ipa_otptoken.py | 1 + .../identity/ipa/test_ipa_otpconfig.py | 406 ++++++++++++++ .../modules/identity/ipa/test_ipa_otptoken.py | 495 ++++++++++++++++ 7 files 
changed, 1604 insertions(+), 2 deletions(-) create mode 100644 plugins/modules/identity/ipa/ipa_otpconfig.py create mode 100644 plugins/modules/identity/ipa/ipa_otptoken.py create mode 120000 plugins/modules/ipa_otpconfig.py create mode 120000 plugins/modules/ipa_otptoken.py create mode 100644 tests/unit/plugins/modules/identity/ipa/test_ipa_otpconfig.py create mode 100644 tests/unit/plugins/modules/identity/ipa/test_ipa_otptoken.py diff --git a/plugins/module_utils/ipa.py b/plugins/module_utils/ipa.py index 9eb9f406f6..b2b1a892cd 100644 --- a/plugins/module_utils/ipa.py +++ b/plugins/module_utils/ipa.py @@ -119,9 +119,9 @@ class IPAClient(object): data = dict(method=method) # TODO: We should probably handle this a little better. - if method in ('ping', 'config_show'): + if method in ('ping', 'config_show', 'otpconfig_show'): data['params'] = [[], {}] - elif method == 'config_mod': + elif method in ('config_mod', 'otpconfig_mod'): data['params'] = [[], item] else: data['params'] = [[name], item] diff --git a/plugins/modules/identity/ipa/ipa_otpconfig.py b/plugins/modules/identity/ipa/ipa_otpconfig.py new file mode 100644 index 0000000000..84a9e969cb --- /dev/null +++ b/plugins/modules/identity/ipa/ipa_otpconfig.py @@ -0,0 +1,172 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Ansible Project +# Heavily influenced from Fran Fitzpatrick ipa_config module +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_otpconfig +author: justchris1 (@justchris1) +short_description: Manage FreeIPA OTP Configuration Settings +version_added: 2.5.0 +description: +- Modify global configuration settings of a FreeIPA Server with respect to OTP (One Time Passwords). +options: + ipatokentotpauthwindow: + description: TOTP authentication window in seconds. 
+ aliases: ["totpauthwindow"] + type: int + ipatokentotpsyncwindow: + description: TOTP synchronization window in seconds. + aliases: ["totpsyncwindow"] + type: int + ipatokenhotpauthwindow: + description: HOTP authentication window in number of hops. + aliases: ["hotpauthwindow"] + type: int + ipatokenhotpsyncwindow: + description: HOTP synchronization window in hops. + aliases: ["hotpsyncwindow"] + type: int +extends_documentation_fragment: +- community.general.ipa.documentation + +''' + +EXAMPLES = r''' +- name: Ensure the TOTP authentication window is set to 300 seconds + community.general.ipa_otpconfig: + ipatokentotpauthwindow: '300' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the TOTP syncronization window is set to 86400 seconds + community.general.ipa_otpconfig: + ipatokentotpsyncwindow: '86400' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the HOTP authentication window is set to 10 hops + community.general.ipa_otpconfig: + ipatokenhotpauthwindow: '10' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret + +- name: Ensure the HOTP syncronization window is set to 100 hops + community.general.ipa_otpconfig: + ipatokenhotpsyncwindow: '100' + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret +''' + +RETURN = r''' +otpconfig: + description: OTP configuration as returned by IPA API. 
+ returned: always + type: dict +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class OTPConfigIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(OTPConfigIPAClient, self).__init__(module, host, port, protocol) + + def otpconfig_show(self): + return self._post_json(method='otpconfig_show', name=None) + + def otpconfig_mod(self, name, item): + return self._post_json(method='otpconfig_mod', name=name, item=item) + + +def get_otpconfig_dict(ipatokentotpauthwindow=None, ipatokentotpsyncwindow=None, + ipatokenhotpauthwindow=None, ipatokenhotpsyncwindow=None): + + config = {} + if ipatokentotpauthwindow is not None: + config['ipatokentotpauthwindow'] = str(ipatokentotpauthwindow) + if ipatokentotpsyncwindow is not None: + config['ipatokentotpsyncwindow'] = str(ipatokentotpsyncwindow) + if ipatokenhotpauthwindow is not None: + config['ipatokenhotpauthwindow'] = str(ipatokenhotpauthwindow) + if ipatokenhotpsyncwindow is not None: + config['ipatokenhotpsyncwindow'] = str(ipatokenhotpsyncwindow) + + return config + + +def get_otpconfig_diff(client, ipa_config, module_config): + return client.get_diff(ipa_data=ipa_config, module_data=module_config) + + +def ensure(module, client): + module_otpconfig = get_otpconfig_dict( + ipatokentotpauthwindow=module.params.get('ipatokentotpauthwindow'), + ipatokentotpsyncwindow=module.params.get('ipatokentotpsyncwindow'), + ipatokenhotpauthwindow=module.params.get('ipatokenhotpauthwindow'), + ipatokenhotpsyncwindow=module.params.get('ipatokenhotpsyncwindow'), + ) + ipa_otpconfig = client.otpconfig_show() + diff = get_otpconfig_diff(client, ipa_otpconfig, module_otpconfig) + + changed = False + new_otpconfig = {} + for module_key in diff: + if module_otpconfig.get(module_key) != ipa_otpconfig.get(module_key, None): + 
changed = True + new_otpconfig.update({module_key: module_otpconfig.get(module_key)}) + + if changed and not module.check_mode: + client.otpconfig_mod(name=None, item=new_otpconfig) + + return changed, client.otpconfig_show() + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update( + ipatokentotpauthwindow=dict(type='int', aliases=['totpauthwindow'], no_log=False), + ipatokentotpsyncwindow=dict(type='int', aliases=['totpsyncwindow'], no_log=False), + ipatokenhotpauthwindow=dict(type='int', aliases=['hotpauthwindow'], no_log=False), + ipatokenhotpsyncwindow=dict(type='int', aliases=['hotpsyncwindow'], no_log=False), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + client = OTPConfigIPAClient( + module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot'] + ) + + try: + client.login( + username=module.params['ipa_user'], + password=module.params['ipa_pass'] + ) + changed, otpconfig = ensure(module, client) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, otpconfig=otpconfig) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/identity/ipa/ipa_otptoken.py b/plugins/modules/identity/ipa/ipa_otptoken.py new file mode 100644 index 0000000000..f8f48d68a6 --- /dev/null +++ b/plugins/modules/identity/ipa/ipa_otptoken.py @@ -0,0 +1,527 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: ipa_otptoken +author: justchris1 (@justchris1) +short_description: Manage FreeIPA OTPs +version_added: 2.5.0 +description: +- Add, modify, and delete One Time Passwords in IPA. 
+options: + uniqueid: + description: Unique ID of the token in IPA. + required: true + aliases: ["name"] + type: str + newuniqueid: + description: If specified, the unique id specified will be changed to this. + type: str + otptype: + description: + - Type of OTP. + - "B(Note:) Cannot be modified after OTP is created." + type: str + choices: [ totp, hotp ] + secretkey: + description: + - Token secret (Base64). + - If OTP is created and this is not specified, a random secret will be generated by IPA. + - "B(Note:) Cannot be modified after OTP is created." + type: str + description: + description: Description of the token (informational only). + type: str + owner: + description: Assigned user of the token. + type: str + enabled: + description: Mark the token as enabled (default C(true)). + default: true + type: bool + notbefore: + description: + - First date/time the token can be used. + - In the format C(YYYYMMddHHmmss). + - For example, C(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22. + type: str + notafter: + description: + - Last date/time the token can be used. + - In the format C(YYYYMMddHHmmss). + - For example, C(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22. + type: str + vendor: + description: Token vendor name (informational only). + type: str + model: + description: Token model (informational only). + type: str + serial: + description: Token serial (informational only). + type: str + state: + description: State to ensure. + choices: ['present', 'absent'] + default: 'present' + type: str + algorithm: + description: + - Token hash algorithm. + - "B(Note:) Cannot be modified after OTP is created." + choices: ['sha1', 'sha256', 'sha384', 'sha512'] + type: str + digits: + description: + - Number of digits each token code will have. + - "B(Note:) Cannot be modified after OTP is created." 
+ choices: [ 6, 8 ] + type: int + offset: + description: + - TOTP token / IPA server time difference. + - "B(Note:) Cannot be modified after OTP is created." + type: int + interval: + description: + - Length of TOTP token code validity in seconds. + - "B(Note:) Cannot be modified after OTP is created." + type: int + counter: + description: + - Initial counter for the HOTP token. + - "B(Note:) Cannot be modified after OTP is created." + type: int +extends_documentation_fragment: +- community.general.ipa.documentation +''' + +EXAMPLES = r''' +- name: Create a totp for pinky, allowing the IPA server to generate using defaults + community.general.ipa_otptoken: + uniqueid: Token123 + otptype: totp + owner: pinky + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Create a 8 digit hotp for pinky with sha256 with specified validity times + community.general.ipa_otptoken: + uniqueid: Token123 + enabled: true + otptype: hotp + digits: 8 + secretkey: UMKSIER00zT2T2tWMUlTRmNlekRCbFQvWFBVZUh2dElHWGR6T3VUR3IzK2xjaFk9 + algorithm: sha256 + notbefore: 20180121182123 + notafter: 20220121182123 + owner: pinky + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Update Token123 to indicate a vendor, model, serial number (info only), and description + community.general.ipa_otptoken: + uniqueid: Token123 + vendor: Acme + model: acme101 + serial: SerialNumber1 + description: Acme OTP device + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Disable Token123 + community.general.ipa_otptoken: + uniqueid: Token123 + enabled: false + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Rename Token123 to TokenABC and enable it + community.general.ipa_otptoken: + uniqueid: Token123 + newuniqueid: TokenABC + enabled: true + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = r''' +otptoken: + description: OTP Token as returned by IPA API + returned: always + 
type: dict +''' + +import base64 +import traceback + +from ansible.module_utils.basic import AnsibleModule, sanitize_keys +from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec +from ansible.module_utils._text import to_native + + +class OTPTokenIPAClient(IPAClient): + def __init__(self, module, host, port, protocol): + super(OTPTokenIPAClient, self).__init__(module, host, port, protocol) + + def otptoken_find(self, name): + return self._post_json(method='otptoken_find', name=None, item={'all': True, + 'ipatokenuniqueid': name, + 'timelimit': '0', + 'sizelimit': '0'}) + + def otptoken_add(self, name, item): + return self._post_json(method='otptoken_add', name=name, item=item) + + def otptoken_mod(self, name, item): + return self._post_json(method='otptoken_mod', name=name, item=item) + + def otptoken_del(self, name): + return self._post_json(method='otptoken_del', name=name) + + +def base64_to_base32(base64_string): + """Converts base64 string to base32 string""" + b32_string = base64.b32encode(base64.b64decode(base64_string)).decode('ascii') + return b32_string + + +def base32_to_base64(base32_string): + """Converts base32 string to base64 string""" + b64_string = base64.b64encode(base64.b32decode(base32_string)).decode('ascii') + return b64_string + + +def get_otptoken_dict(ansible_to_ipa, uniqueid=None, newuniqueid=None, otptype=None, secretkey=None, description=None, owner=None, + enabled=None, notbefore=None, notafter=None, vendor=None, + model=None, serial=None, algorithm=None, digits=None, offset=None, + interval=None, counter=None): + """Create the dictionary of settings passed in""" + + otptoken = {} + if uniqueid is not None: + otptoken[ansible_to_ipa['uniqueid']] = uniqueid + if newuniqueid is not None: + otptoken[ansible_to_ipa['newuniqueid']] = newuniqueid + if otptype is not None: + otptoken[ansible_to_ipa['otptype']] = otptype.upper() + if secretkey is not None: + # For some unknown reason, while IPA 
returns the secret in base64, + # it wants the secret passed in as base32. This makes it more difficult + # for comparison (does 'current' equal to 'new'). Moreover, this may + # cause some subtle issue in a playbook as the output is encoded + # in a different way than if it was passed in as a parameter. For + # these reasons, have the module standardize on base64 input (as parameter) + # and output (from IPA). + otptoken[ansible_to_ipa['secretkey']] = base64_to_base32(secretkey) + if description is not None: + otptoken[ansible_to_ipa['description']] = description + if owner is not None: + otptoken[ansible_to_ipa['owner']] = owner + if enabled is not None: + otptoken[ansible_to_ipa['enabled']] = 'FALSE' if enabled else 'TRUE' + if notbefore is not None: + otptoken[ansible_to_ipa['notbefore']] = notbefore + 'Z' + if notafter is not None: + otptoken[ansible_to_ipa['notafter']] = notafter + 'Z' + if vendor is not None: + otptoken[ansible_to_ipa['vendor']] = vendor + if model is not None: + otptoken[ansible_to_ipa['model']] = model + if serial is not None: + otptoken[ansible_to_ipa['serial']] = serial + if algorithm is not None: + otptoken[ansible_to_ipa['algorithm']] = algorithm + if digits is not None: + otptoken[ansible_to_ipa['digits']] = str(digits) + if offset is not None: + otptoken[ansible_to_ipa['offset']] = str(offset) + if interval is not None: + otptoken[ansible_to_ipa['interval']] = str(interval) + if counter is not None: + otptoken[ansible_to_ipa['counter']] = str(counter) + + return otptoken + + +def transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible): + """Transform the output received by IPA to a format more friendly + before it is returned to the user. IPA returns even simple + strings as a list of strings. It also returns bools and + int as string. This function cleans that up before return. + """ + updated_otptoken = ipa_otptoken + + # Used to hold values that will be sanitized from output as no_log. 
+ # For the case where secretkey is not specified at the module, but + # is passed back from IPA. + sanitize_strings = set() + + # Rename the IPA parameters to the more friendly ansible module names for them + for ipa_parameter in ipa_to_ansible: + if ipa_parameter in ipa_otptoken: + updated_otptoken[ipa_to_ansible[ipa_parameter]] = ipa_otptoken[ipa_parameter] + updated_otptoken.pop(ipa_parameter) + + # Change the type from IPA's list of string to the appropriate return value type + # based on field. By default, assume they should be strings. + for ansible_parameter in ansible_to_ipa: + if ansible_parameter in updated_otptoken: + if isinstance(updated_otptoken[ansible_parameter], list) and len(updated_otptoken[ansible_parameter]) == 1: + if ansible_parameter in ['digits', 'offset', 'interval', 'counter']: + updated_otptoken[ansible_parameter] = int(updated_otptoken[ansible_parameter][0]) + elif ansible_parameter == 'enabled': + updated_otptoken[ansible_parameter] = bool(updated_otptoken[ansible_parameter][0]) + else: + updated_otptoken[ansible_parameter] = updated_otptoken[ansible_parameter][0] + + if 'secretkey' in updated_otptoken: + if isinstance(updated_otptoken['secretkey'], dict): + if '__base64__' in updated_otptoken['secretkey']: + sanitize_strings.add(updated_otptoken['secretkey']['__base64__']) + b64key = updated_otptoken['secretkey']['__base64__'] + updated_otptoken.pop('secretkey') + updated_otptoken['secretkey'] = b64key + sanitize_strings.add(b64key) + elif '__base32__' in updated_otptoken['secretkey']: + sanitize_strings.add(updated_otptoken['secretkey']['__base32__']) + b32key = updated_otptoken['secretkey']['__base32__'] + b64key = base32_to_base64(b32key) + updated_otptoken.pop('secretkey') + updated_otptoken['secretkey'] = b64key + sanitize_strings.add(b32key) + sanitize_strings.add(b64key) + + return updated_otptoken, sanitize_strings + + +def validate_modifications(ansible_to_ipa, module, ipa_otptoken, + module_otptoken, 
unmodifiable_after_creation): + """Checks to see if the requested modifications are valid. Some elements + cannot be modified after initial creation. However, we still want to + validate arguments that are specified, but are not different than what + is currently set on the server. + """ + + modifications_valid = True + + for parameter in unmodifiable_after_creation: + if ansible_to_ipa[parameter] in module_otptoken and ansible_to_ipa[parameter] in ipa_otptoken: + mod_value = module_otptoken[ansible_to_ipa[parameter]] + + # For someone unknown reason, the returns from IPA put almost all + # values in a list, even though passing them in a list (even of + # length 1) will be rejected. The module values for all elements + # other than type (totp or hotp) have this happen. + if parameter == 'otptype': + ipa_value = ipa_otptoken[ansible_to_ipa[parameter]] + else: + if len(ipa_otptoken[ansible_to_ipa[parameter]]) != 1: + module.fail_json(msg=("Invariant fail: Return value from IPA is not a list " + + "of length 1. Please open a bug report for the module.")) + if parameter == 'secretkey': + # We stored the secret key in base32 since we had assumed that would need to + # be the format if we were contacting IPA to create it. However, we are + # now comparing it against what is already set in the IPA server, so convert + # back to base64 for comparison. + mod_value = base32_to_base64(mod_value) + + # For the secret key, it is even more specific in that the key is returned + # in a dict, in the list, as the __base64__ entry for the IPA response. 
+ ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base64__'] + if '__base64__' in ipa_otptoken[ansible_to_ipa[parameter]][0]: + ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base64__'] + elif '__base32__' in ipa_otptoken[ansible_to_ipa[parameter]][0]: + b32key = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base32__'] + b64key = base32_to_base64(b32key) + ipa_value = b64key + else: + ipa_value = None + else: + ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0] + + if mod_value != ipa_value: + modifications_valid = False + fail_message = ("Parameter '" + parameter + "' cannot be changed once " + + "the OTP is created and the requested value specified here (" + + str(mod_value) + + ") differs from what is set in the IPA server (" + + str(ipa_value) + ")") + module.fail_json(msg=fail_message) + + return modifications_valid + + +def ensure(module, client): + # dict to map from ansible parameter names to attribute names + # used by IPA (which are not so friendly). + ansible_to_ipa = {'uniqueid': 'ipatokenuniqueid', + 'newuniqueid': 'rename', + 'otptype': 'type', + 'secretkey': 'ipatokenotpkey', + 'description': 'description', + 'owner': 'ipatokenowner', + 'enabled': 'ipatokendisabled', + 'notbefore': 'ipatokennotbefore', + 'notafter': 'ipatokennotafter', + 'vendor': 'ipatokenvendor', + 'model': 'ipatokenmodel', + 'serial': 'ipatokenserial', + 'algorithm': 'ipatokenotpalgorithm', + 'digits': 'ipatokenotpdigits', + 'offset': 'ipatokentotpclockoffset', + 'interval': 'ipatokentotptimestep', + 'counter': 'ipatokenhotpcounter'} + + # Create inverse dictionary for mapping return values + ipa_to_ansible = {} + for (k, v) in ansible_to_ipa.items(): + ipa_to_ansible[v] = k + + unmodifiable_after_creation = ['otptype', 'secretkey', 'algorithm', + 'digits', 'offset', 'interval', 'counter'] + state = module.params['state'] + uniqueid = module.params['uniqueid'] + + module_otptoken = get_otptoken_dict(ansible_to_ipa=ansible_to_ipa, + 
uniqueid=module.params.get('uniqueid'), + newuniqueid=module.params.get('newuniqueid'), + otptype=module.params.get('otptype'), + secretkey=module.params.get('secretkey'), + description=module.params.get('description'), + owner=module.params.get('owner'), + enabled=module.params.get('enabled'), + notbefore=module.params.get('notbefore'), + notafter=module.params.get('notafter'), + vendor=module.params.get('vendor'), + model=module.params.get('model'), + serial=module.params.get('serial'), + algorithm=module.params.get('algorithm'), + digits=module.params.get('digits'), + offset=module.params.get('offset'), + interval=module.params.get('interval'), + counter=module.params.get('counter')) + + ipa_otptoken = client.otptoken_find(name=uniqueid) + + if ansible_to_ipa['newuniqueid'] in module_otptoken: + # Check to see if the new unique id is already taken in use + ipa_otptoken_new = client.otptoken_find(name=module_otptoken[ansible_to_ipa['newuniqueid']]) + if ipa_otptoken_new: + module.fail_json(msg=("Requested rename through newuniqueid to " + + module_otptoken[ansible_to_ipa['newuniqueid']] + + " failed because the new unique id is already in use")) + + changed = False + if state == 'present': + if not ipa_otptoken: + changed = True + if not module.check_mode: + # It would not make sense to have a rename after creation, so if the user + # specified a newuniqueid, just replace the uniqueid with the updated one + # before creation + if ansible_to_ipa['newuniqueid'] in module_otptoken: + module_otptoken[ansible_to_ipa['uniqueid']] = module_otptoken[ansible_to_ipa['newuniqueid']] + uniqueid = module_otptoken[ansible_to_ipa['newuniqueid']] + module_otptoken.pop(ansible_to_ipa['newuniqueid']) + + # IPA wants the unique id in the first position and not as a key/value pair. + # Get rid of it from the otptoken dict and just specify it in the name field + # for otptoken_add. 
+ if ansible_to_ipa['uniqueid'] in module_otptoken: + module_otptoken.pop(ansible_to_ipa['uniqueid']) + + module_otptoken['all'] = True + ipa_otptoken = client.otptoken_add(name=uniqueid, item=module_otptoken) + else: + if not(validate_modifications(ansible_to_ipa, module, ipa_otptoken, + module_otptoken, unmodifiable_after_creation)): + module.fail_json(msg="Modifications requested in module are not valid") + + # IPA will reject 'modifications' that do not actually modify anything + # if any of the unmodifiable elements are specified. Explicitly + # get rid of them here. They were not different or else the + # we would have failed out in validate_modifications. + for x in unmodifiable_after_creation: + if ansible_to_ipa[x] in module_otptoken: + module_otptoken.pop(ansible_to_ipa[x]) + + diff = client.get_diff(ipa_data=ipa_otptoken, module_data=module_otptoken) + if len(diff) > 0: + changed = True + if not module.check_mode: + + # IPA wants the unique id in the first position and not as a key/value pair. + # Get rid of it from the otptoken dict and just specify it in the name field + # for otptoken_mod. + if ansible_to_ipa['uniqueid'] in module_otptoken: + module_otptoken.pop(ansible_to_ipa['uniqueid']) + + module_otptoken['all'] = True + ipa_otptoken = client.otptoken_mod(name=uniqueid, item=module_otptoken) + else: + if ipa_otptoken: + changed = True + if not module.check_mode: + client.otptoken_del(name=uniqueid) + + # Transform the output to use ansible keywords (not the IPA keywords) and + # sanitize any key values in the output. 
+ ipa_otptoken, sanitize_strings = transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible) + module.no_log_values = module.no_log_values.union(sanitize_strings) + sanitized_otptoken = sanitize_keys(obj=ipa_otptoken, no_log_strings=module.no_log_values) + return changed, sanitized_otptoken + + +def main(): + argument_spec = ipa_argument_spec() + argument_spec.update(uniqueid=dict(type='str', aliases=['name'], required=True), + newuniqueid=dict(type='str'), + otptype=dict(type='str', choices=['totp', 'hotp']), + secretkey=dict(type='str', no_log=True), + description=dict(type='str'), + owner=dict(type='str'), + enabled=dict(type='bool', default=True), + notbefore=dict(type='str'), + notafter=dict(type='str'), + vendor=dict(type='str'), + model=dict(type='str'), + serial=dict(type='str'), + state=dict(type='str', choices=['present', 'absent'], default='present'), + algorithm=dict(type='str', choices=['sha1', 'sha256', 'sha384', 'sha512']), + digits=dict(type='int', choices=[6, 8]), + offset=dict(type='int'), + interval=dict(type='int'), + counter=dict(type='int')) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + client = OTPTokenIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, otptoken = ensure(module, client) + except Exception as e: + module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + + module.exit_json(changed=changed, otptoken=otptoken) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ipa_otpconfig.py b/plugins/modules/ipa_otpconfig.py new file mode 120000 index 0000000000..a29ef04412 --- /dev/null +++ b/plugins/modules/ipa_otpconfig.py @@ -0,0 +1 @@ +./identity/ipa/ipa_otpconfig.py \ No newline at end of file diff --git a/plugins/modules/ipa_otptoken.py b/plugins/modules/ipa_otptoken.py new 
file mode 120000 index 0000000000..cb0dbaf569 --- /dev/null +++ b/plugins/modules/ipa_otptoken.py @@ -0,0 +1 @@ +./identity/ipa/ipa_otptoken.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/identity/ipa/test_ipa_otpconfig.py b/tests/unit/plugins/modules/identity/ipa/test_ipa_otpconfig.py new file mode 100644 index 0000000000..cae905942a --- /dev/null +++ b/tests/unit/plugins/modules/identity/ipa/test_ipa_otpconfig.py @@ -0,0 +1,406 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.ipa import ipa_otpconfig + + +@contextmanager +def patch_ipa(**kwargs): + """Mock context manager for patching the methods in OTPConfigIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... 
+ """ + obj = ipa_otpconfig.OTPConfigIPAClient + with patch.object(obj, 'login') as mock_login: + with patch.object(obj, '_post_json', **kwargs) as mock_post: + yield mock_login, mock_post + + +class TestIPAOTPConfig(ModuleTestCase): + def setUp(self): + super(TestIPAOTPConfig, self).setUp() + self.module = ipa_otpconfig + + def _test_base(self, module_args, return_value, mock_calls, changed): + """Base function that's called by all the other test functions + + module_args (dict): + Arguments passed to the module + + return_value (dict): + Mocked return value of OTPConfigIPAClient.otpconfig_show, as returned by the IPA API. + This should be set to the current state. It will be changed to the desired state using the above arguments. + (Technically, this is the return value of _post_json, but it's only checked by otpconfig_show). + + mock_calls (list/tuple of dicts): + List of calls made to OTPConfigIPAClient._post_json, in order. + _post_json is called by all of the otpconfig_* methods of the class. + Pass an empty list if no calls are expected. 
+ + changed (bool): + Whether or not the module is supposed to be marked as changed + """ + set_module_args(module_args) + + # Run the module + with patch_ipa(return_value=return_value) as (mock_login, mock_post): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify that the calls to _post_json match what is expected + expected_call_count = len(mock_calls) + if expected_call_count > 1: + # Convert the call dicts to unittest.mock.call instances because `assert_has_calls` only accepts them + converted_calls = [] + for call_dict in mock_calls: + converted_calls.append(call(**call_dict)) + + mock_post.assert_has_calls(converted_calls) + self.assertEqual(len(mock_post.mock_calls), expected_call_count) + elif expected_call_count == 1: + mock_post.assert_called_once_with(**mock_calls[0]) + else: # expected_call_count is 0 + mock_post.assert_not_called() + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_set_all_no_adjustment(self): + """Set values requiring no adjustment""" + module_args = { + 'ipatokentotpauthwindow': 11, + 'ipatokentotpsyncwindow': 12, + 'ipatokenhotpauthwindow': 13, + 'ipatokenhotpsyncwindow': 14 + } + return_value = { + 'ipatokentotpauthwindow': ['11'], + 'ipatokentotpsyncwindow': ['12'], + 'ipatokenhotpauthwindow': ['13'], + 'ipatokenhotpsyncwindow': ['14']} + mock_calls = ( + { + 'method': 'otpconfig_show', + 'name': None + }, + { + 'method': 'otpconfig_show', + 'name': None + } + ) + changed = False + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_set_all_aliases_no_adjustment(self): + """Set values requiring no adjustment on all using aliases values""" + module_args = { + 'totpauthwindow': 11, + 'totpsyncwindow': 12, + 'hotpauthwindow': 13, + 'hotpsyncwindow': 14 + } + return_value = { + 'ipatokentotpauthwindow': ['11'], + 'ipatokentotpsyncwindow': ['12'], + 'ipatokenhotpauthwindow': 
['13'], + 'ipatokenhotpsyncwindow': ['14']} + mock_calls = ( + { + 'method': 'otpconfig_show', + 'name': None + }, + { + 'method': 'otpconfig_show', + 'name': None + } + ) + changed = False + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_set_totp_auth_window_no_adjustment(self): + """Set values requiring no adjustment on totpauthwindow""" + module_args = { + 'totpauthwindow': 11 + } + return_value = { + 'ipatokentotpauthwindow': ['11'], + 'ipatokentotpsyncwindow': ['12'], + 'ipatokenhotpauthwindow': ['13'], + 'ipatokenhotpsyncwindow': ['14']} + mock_calls = ( + { + 'method': 'otpconfig_show', + 'name': None + }, + { + 'method': 'otpconfig_show', + 'name': None + } + ) + changed = False + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_set_totp_sync_window_no_adjustment(self): + """Set values requiring no adjustment on totpsyncwindow""" + module_args = { + 'totpsyncwindow': 12 + } + return_value = { + 'ipatokentotpauthwindow': ['11'], + 'ipatokentotpsyncwindow': ['12'], + 'ipatokenhotpauthwindow': ['13'], + 'ipatokenhotpsyncwindow': ['14']} + mock_calls = ( + { + 'method': 'otpconfig_show', + 'name': None + }, + { + 'method': 'otpconfig_show', + 'name': None + } + ) + changed = False + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_set_hotp_auth_window_no_adjustment(self): + """Set values requiring no adjustment on hotpauthwindow""" + module_args = { + 'hotpauthwindow': 13 + } + return_value = { + 'ipatokentotpauthwindow': ['11'], + 'ipatokentotpsyncwindow': ['12'], + 'ipatokenhotpauthwindow': ['13'], + 'ipatokenhotpsyncwindow': ['14']} + mock_calls = ( + { + 'method': 'otpconfig_show', + 'name': None + }, + { + 'method': 'otpconfig_show', + 'name': None + } + ) + changed = False + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_set_hotp_sync_window_no_adjustment(self): + """Set values requiring no adjustment on hotpsyncwindow""" + 
module_args = { + 'hotpsyncwindow': 14 + } + return_value = { + 'ipatokentotpauthwindow': ['11'], + 'ipatokentotpsyncwindow': ['12'], + 'ipatokenhotpauthwindow': ['13'], + 'ipatokenhotpsyncwindow': ['14']} + mock_calls = ( + { + 'method': 'otpconfig_show', + 'name': None + }, + { + 'method': 'otpconfig_show', + 'name': None + } + ) + changed = False + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_set_totp_auth_window(self): + """Set values requiring adjustment on totpauthwindow""" + module_args = { + 'totpauthwindow': 10 + } + return_value = { + 'ipatokentotpauthwindow': ['11'], + 'ipatokentotpsyncwindow': ['12'], + 'ipatokenhotpauthwindow': ['13'], + 'ipatokenhotpsyncwindow': ['14']} + mock_calls = ( + { + 'method': 'otpconfig_show', + 'name': None + }, + { + 'method': 'otpconfig_mod', + 'name': None, + 'item': {'ipatokentotpauthwindow': '10'} + }, + { + 'method': 'otpconfig_show', + 'name': None + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_set_totp_sync_window(self): + """Set values requiring adjustment on totpsyncwindow""" + module_args = { + 'totpsyncwindow': 10 + } + return_value = { + 'ipatokentotpauthwindow': ['11'], + 'ipatokentotpsyncwindow': ['12'], + 'ipatokenhotpauthwindow': ['13'], + 'ipatokenhotpsyncwindow': ['14']} + mock_calls = ( + { + 'method': 'otpconfig_show', + 'name': None + }, + { + 'method': 'otpconfig_mod', + 'name': None, + 'item': {'ipatokentotpsyncwindow': '10'} + }, + { + 'method': 'otpconfig_show', + 'name': None + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_set_hotp_auth_window(self): + """Set values requiring adjustment on hotpauthwindow""" + module_args = { + 'hotpauthwindow': 10 + } + return_value = { + 'ipatokentotpauthwindow': ['11'], + 'ipatokentotpsyncwindow': ['12'], + 'ipatokenhotpauthwindow': ['13'], + 'ipatokenhotpsyncwindow': ['14']} + mock_calls = ( + { + 'method': 
'otpconfig_show', + 'name': None + }, + { + 'method': 'otpconfig_mod', + 'name': None, + 'item': {'ipatokenhotpauthwindow': '10'} + }, + { + 'method': 'otpconfig_show', + 'name': None + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_set_hotp_sync_window(self): + """Set values requiring adjustment on hotpsyncwindow""" + module_args = { + 'hotpsyncwindow': 10 + } + return_value = { + 'ipatokentotpauthwindow': ['11'], + 'ipatokentotpsyncwindow': ['12'], + 'ipatokenhotpauthwindow': ['13'], + 'ipatokenhotpsyncwindow': ['14']} + mock_calls = ( + { + 'method': 'otpconfig_show', + 'name': None + }, + { + 'method': 'otpconfig_mod', + 'name': None, + 'item': {'ipatokenhotpsyncwindow': '10'} + }, + { + 'method': 'otpconfig_show', + 'name': None + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_set_all(self): + """Set values requiring adjustment on all""" + module_args = { + 'ipatokentotpauthwindow': 11, + 'ipatokentotpsyncwindow': 12, + 'ipatokenhotpauthwindow': 13, + 'ipatokenhotpsyncwindow': 14 + } + return_value = { + 'ipatokentotpauthwindow': ['1'], + 'ipatokentotpsyncwindow': ['2'], + 'ipatokenhotpauthwindow': ['3'], + 'ipatokenhotpsyncwindow': ['4']} + mock_calls = ( + { + 'method': 'otpconfig_show', + 'name': None + }, + { + 'method': 'otpconfig_mod', + 'name': None, + 'item': {'ipatokentotpauthwindow': '11', + 'ipatokentotpsyncwindow': '12', + 'ipatokenhotpauthwindow': '13', + 'ipatokenhotpsyncwindow': '14'} + }, + { + 'method': 'otpconfig_show', + 'name': None + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_fail_post(self): + """Fail due to an exception raised from _post_json""" + set_module_args({ + 'ipatokentotpauthwindow': 11, + 'ipatokentotpsyncwindow': 12, + 'ipatokenhotpauthwindow': 13, + 'ipatokenhotpsyncwindow': 14 + }) + + with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as 
(mock_login, mock_post): + with self.assertRaises(AnsibleFailJson) as exec_info: + self.module.main() + + self.assertEqual(exec_info.exception.args[0]['msg'], 'ERROR MESSAGE') + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/unit/plugins/modules/identity/ipa/test_ipa_otptoken.py b/tests/unit/plugins/modules/identity/ipa/test_ipa_otptoken.py new file mode 100644 index 0000000000..ecea5920a0 --- /dev/null +++ b/tests/unit/plugins/modules/identity/ipa/test_ipa_otptoken.py @@ -0,0 +1,495 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.ipa import ipa_otptoken + + +@contextmanager +def patch_ipa(**kwargs): + """Mock context manager for patching the methods in OTPTokenIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... 
+ """ + obj = ipa_otptoken.OTPTokenIPAClient + with patch.object(obj, 'login') as mock_login: + with patch.object(obj, '_post_json', **kwargs) as mock_post: + yield mock_login, mock_post + + +class TestIPAOTPToken(ModuleTestCase): + def setUp(self): + super(TestIPAOTPToken, self).setUp() + self.module = ipa_otptoken + + def _test_base(self, module_args, return_value, mock_calls, changed): + """Base function that's called by all the other test functions + + module_args (dict): + Arguments passed to the module + + return_value (dict): + Mocked return value of OTPTokenIPAClient.otptoken_show, as returned by the IPA API. + This should be set to the current state. It will be changed to the desired state using the above arguments. + (Technically, this is the return value of _post_json, but it's only checked by otptoken_show). + + mock_calls (list/tuple of dicts): + List of calls made to OTPTokenIPAClient._post_json, in order. + _post_json is called by all of the otptoken_* methods of the class. + Pass an empty list if no calls are expected. 
+ + changed (bool): + Whether or not the module is supposed to be marked as changed + """ + set_module_args(module_args) + + # Run the module + with patch_ipa(return_value=return_value) as (mock_login, mock_post): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify that the calls to _post_json match what is expected + expected_call_count = len(mock_calls) + if expected_call_count > 1: + # Convert the call dicts to unittest.mock.call instances because `assert_has_calls` only accepts them + converted_calls = [] + for call_dict in mock_calls: + converted_calls.append(call(**call_dict)) + + mock_post.assert_has_calls(converted_calls) + self.assertEqual(len(mock_post.mock_calls), expected_call_count) + elif expected_call_count == 1: + mock_post.assert_called_once_with(**mock_calls[0]) + else: # expected_call_count is 0 + mock_post.assert_not_called() + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_add_new_all_default(self): + """Add a new OTP with all default values""" + module_args = { + 'uniqueid': 'NewToken1' + } + return_value = {} + mock_calls = ( + { + 'method': 'otptoken_find', + 'name': None, + 'item': {'all': True, + 'ipatokenuniqueid': 'NewToken1', + 'timelimit': '0', + 'sizelimit': '0'} + }, + { + 'method': 'otptoken_add', + 'name': 'NewToken1', + 'item': {'ipatokendisabled': 'FALSE', + 'all': True} + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_add_new_all_default_with_aliases(self): + """Add a new OTP with all default values using alias values""" + module_args = { + 'name': 'NewToken1' + } + return_value = {} + mock_calls = ( + { + 'method': 'otptoken_find', + 'name': None, + 'item': {'all': True, + 'ipatokenuniqueid': 'NewToken1', + 'timelimit': '0', + 'sizelimit': '0'} + }, + { + 'method': 'otptoken_add', + 'name': 'NewToken1', + 'item': {'ipatokendisabled': 
'FALSE', + 'all': True} + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_add_new_all_specified(self): + """Add a new OTP with all default values""" + module_args = { + 'uniqueid': 'NewToken1', + 'otptype': 'hotp', + 'secretkey': 'VGVzdFNlY3JldDE=', + 'description': 'Test description', + 'owner': 'pinky', + 'enabled': True, + 'notbefore': '20200101010101', + 'notafter': '20900101010101', + 'vendor': 'Acme', + 'model': 'ModelT', + 'serial': 'Number1', + 'state': 'present', + 'algorithm': 'sha256', + 'digits': 6, + 'offset': 10, + 'interval': 30, + 'counter': 30, + } + return_value = {} + mock_calls = ( + { + 'method': 'otptoken_find', + 'name': None, + 'item': {'all': True, + 'ipatokenuniqueid': 'NewToken1', + 'timelimit': '0', + 'sizelimit': '0'} + }, + { + 'method': 'otptoken_add', + 'name': 'NewToken1', + 'item': {'type': 'HOTP', + 'ipatokenotpkey': 'KRSXG5CTMVRXEZLUGE======', + 'description': 'Test description', + 'ipatokenowner': 'pinky', + 'ipatokendisabled': 'FALSE', + 'ipatokennotbefore': '20200101010101Z', + 'ipatokennotafter': '20900101010101Z', + 'ipatokenvendor': 'Acme', + 'ipatokenmodel': 'ModelT', + 'ipatokenserial': 'Number1', + 'ipatokenotpalgorithm': 'sha256', + 'ipatokenotpdigits': '6', + 'ipatokentotpclockoffset': '10', + 'ipatokentotptimestep': '30', + 'ipatokenhotpcounter': '30', + 'all': True} + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_already_existing_no_change_all_specified(self): + """Add a new OTP with all values specified but needing no change""" + module_args = { + 'uniqueid': 'NewToken1', + 'otptype': 'hotp', + 'secretkey': 'VGVzdFNlY3JldDE=', + 'description': 'Test description', + 'owner': 'pinky', + 'enabled': True, + 'notbefore': '20200101010101', + 'notafter': '20900101010101', + 'vendor': 'Acme', + 'model': 'ModelT', + 'serial': 'Number1', + 'state': 'present', + 'algorithm': 'sha256', + 'digits': 6, + 'offset': 10, 
+ 'interval': 30, + 'counter': 30, + } + return_value = {'ipatokenuniqueid': 'NewToken1', + 'type': 'HOTP', + 'ipatokenotpkey': [{'__base64__': 'VGVzdFNlY3JldDE='}], + 'description': ['Test description'], + 'ipatokenowner': ['pinky'], + 'ipatokendisabled': ['FALSE'], + 'ipatokennotbefore': ['20200101010101Z'], + 'ipatokennotafter': ['20900101010101Z'], + 'ipatokenvendor': ['Acme'], + 'ipatokenmodel': ['ModelT'], + 'ipatokenserial': ['Number1'], + 'ipatokenotpalgorithm': ['sha256'], + 'ipatokenotpdigits': ['6'], + 'ipatokentotpclockoffset': ['10'], + 'ipatokentotptimestep': ['30'], + 'ipatokenhotpcounter': ['30']} + mock_calls = [ + { + 'method': 'otptoken_find', + 'name': None, + 'item': {'all': True, + 'ipatokenuniqueid': 'NewToken1', + 'timelimit': '0', + 'sizelimit': '0'} + } + ] + changed = False + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_already_existing_one_change_all_specified(self): + """Modify an existing OTP with one value specified needing change""" + module_args = { + 'uniqueid': 'NewToken1', + 'otptype': 'hotp', + 'secretkey': 'VGVzdFNlY3JldDE=', + 'description': 'Test description', + 'owner': 'brain', + 'enabled': True, + 'notbefore': '20200101010101', + 'notafter': '20900101010101', + 'vendor': 'Acme', + 'model': 'ModelT', + 'serial': 'Number1', + 'state': 'present', + 'algorithm': 'sha256', + 'digits': 6, + 'offset': 10, + 'interval': 30, + 'counter': 30, + } + return_value = {'ipatokenuniqueid': 'NewToken1', + 'type': 'HOTP', + 'ipatokenotpkey': [{'__base64__': 'VGVzdFNlY3JldDE='}], + 'description': ['Test description'], + 'ipatokenowner': ['pinky'], + 'ipatokendisabled': ['FALSE'], + 'ipatokennotbefore': ['20200101010101Z'], + 'ipatokennotafter': ['20900101010101Z'], + 'ipatokenvendor': ['Acme'], + 'ipatokenmodel': ['ModelT'], + 'ipatokenserial': ['Number1'], + 'ipatokenotpalgorithm': ['sha256'], + 'ipatokenotpdigits': ['6'], + 'ipatokentotpclockoffset': ['10'], + 'ipatokentotptimestep': ['30'], + 
'ipatokenhotpcounter': ['30']} + mock_calls = ( + { + 'method': 'otptoken_find', + 'name': None, + 'item': {'all': True, + 'ipatokenuniqueid': 'NewToken1', + 'timelimit': '0', + 'sizelimit': '0'} + }, + { + 'method': 'otptoken_mod', + 'name': 'NewToken1', + 'item': {'description': 'Test description', + 'ipatokenowner': 'brain', + 'ipatokendisabled': 'FALSE', + 'ipatokennotbefore': '20200101010101Z', + 'ipatokennotafter': '20900101010101Z', + 'ipatokenvendor': 'Acme', + 'ipatokenmodel': 'ModelT', + 'ipatokenserial': 'Number1', + 'all': True} + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_already_existing_all_valid_change_all_specified(self): + """Modify an existing OTP with all valid values specified needing change""" + module_args = { + 'uniqueid': 'NewToken1', + 'otptype': 'hotp', + 'secretkey': 'VGVzdFNlY3JldDE=', + 'description': 'New Test description', + 'owner': 'pinky', + 'enabled': False, + 'notbefore': '20200101010102', + 'notafter': '20900101010102', + 'vendor': 'NewAcme', + 'model': 'NewModelT', + 'serial': 'Number2', + 'state': 'present', + 'algorithm': 'sha256', + 'digits': 6, + 'offset': 10, + 'interval': 30, + 'counter': 30, + } + return_value = {'ipatokenuniqueid': 'NewToken1', + 'type': 'HOTP', + 'ipatokenotpkey': [{'__base64__': 'VGVzdFNlY3JldDE='}], + 'description': ['Test description'], + 'ipatokenowner': ['pinky'], + 'ipatokendisabled': ['FALSE'], + 'ipatokennotbefore': ['20200101010101Z'], + 'ipatokennotafter': ['20900101010101Z'], + 'ipatokenvendor': ['Acme'], + 'ipatokenmodel': ['ModelT'], + 'ipatokenserial': ['Number1'], + 'ipatokenotpalgorithm': ['sha256'], + 'ipatokenotpdigits': ['6'], + 'ipatokentotpclockoffset': ['10'], + 'ipatokentotptimestep': ['30'], + 'ipatokenhotpcounter': ['30']} + mock_calls = ( + { + 'method': 'otptoken_find', + 'name': None, + 'item': {'all': True, + 'ipatokenuniqueid': 'NewToken1', + 'timelimit': '0', + 'sizelimit': '0'} + }, + { + 'method': 
'otptoken_mod', + 'name': 'NewToken1', + 'item': {'description': 'New Test description', + 'ipatokenowner': 'pinky', + 'ipatokendisabled': 'TRUE', + 'ipatokennotbefore': '20200101010102Z', + 'ipatokennotafter': '20900101010102Z', + 'ipatokenvendor': 'NewAcme', + 'ipatokenmodel': 'NewModelT', + 'ipatokenserial': 'Number2', + 'all': True} + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_delete_existing_token(self): + """Delete an existing OTP""" + module_args = { + 'uniqueid': 'NewToken1', + 'state': 'absent' + } + return_value = {'ipatokenuniqueid': 'NewToken1', + 'type': 'HOTP', + 'ipatokenotpkey': [{'__base64__': 'KRSXG5CTMVRXEZLUGE======'}], + 'description': ['Test description'], + 'ipatokenowner': ['pinky'], + 'ipatokendisabled': ['FALSE'], + 'ipatokennotbefore': ['20200101010101Z'], + 'ipatokennotafter': ['20900101010101Z'], + 'ipatokenvendor': ['Acme'], + 'ipatokenmodel': ['ModelT'], + 'ipatokenserial': ['Number1'], + 'ipatokenotpalgorithm': ['sha256'], + 'ipatokenotpdigits': ['6'], + 'ipatokentotpclockoffset': ['10'], + 'ipatokentotptimestep': ['30'], + 'ipatokenhotpcounter': ['30']} + mock_calls = ( + { + 'method': 'otptoken_find', + 'name': None, + 'item': {'all': True, + 'ipatokenuniqueid': 'NewToken1', + 'timelimit': '0', + 'sizelimit': '0'} + }, + { + 'method': 'otptoken_del', + 'name': 'NewToken1' + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_disable_existing_token(self): + """Disable an existing OTP""" + module_args = { + 'uniqueid': 'NewToken1', + 'otptype': 'hotp', + 'enabled': False + } + return_value = {'ipatokenuniqueid': 'NewToken1', + 'type': 'HOTP', + 'ipatokenotpkey': [{'__base64__': 'KRSXG5CTMVRXEZLUGE======'}], + 'description': ['Test description'], + 'ipatokenowner': ['pinky'], + 'ipatokendisabled': ['FALSE'], + 'ipatokennotbefore': ['20200101010101Z'], + 'ipatokennotafter': ['20900101010101Z'], + 'ipatokenvendor': ['Acme'], 
+ 'ipatokenmodel': ['ModelT'], + 'ipatokenserial': ['Number1'], + 'ipatokenotpalgorithm': ['sha256'], + 'ipatokenotpdigits': ['6'], + 'ipatokentotpclockoffset': ['10'], + 'ipatokentotptimestep': ['30'], + 'ipatokenhotpcounter': ['30']} + mock_calls = ( + { + 'method': 'otptoken_find', + 'name': None, + 'item': {'all': True, + 'ipatokenuniqueid': 'NewToken1', + 'timelimit': '0', + 'sizelimit': '0'} + }, + { + 'method': 'otptoken_mod', + 'name': 'NewToken1', + 'item': {'ipatokendisabled': 'TRUE', + 'all': True} + } + ) + changed = True + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_delete_not_existing_token(self): + """Delete a OTP that does not exist""" + module_args = { + 'uniqueid': 'NewToken1', + 'state': 'absent' + } + return_value = {} + + mock_calls = [ + { + 'method': 'otptoken_find', + 'name': None, + 'item': {'all': True, + 'ipatokenuniqueid': 'NewToken1', + 'timelimit': '0', + 'sizelimit': '0'} + } + ] + + changed = False + + self._test_base(module_args, return_value, mock_calls, changed) + + def test_fail_post(self): + """Fail due to an exception raised from _post_json""" + set_module_args({ + 'uniqueid': 'NewToken1' + }) + + with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post): + with self.assertRaises(AnsibleFailJson) as exec_info: + self.module.main() + + self.assertEqual(exec_info.exception.args[0]['msg'], 'ERROR MESSAGE') + + +if __name__ == '__main__': + unittest.main() From 7356451aa199b0f3114aed6b57c0a974c121d702 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Mon, 12 Apr 2021 10:26:00 +0200 Subject: [PATCH 0177/3093] Grant supershipit to new maintainers (#2214) * Grant supershipit to a new maintainer * Add maintainer --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index fbe43bbd3e..f609289580 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,5 +1,7 @@ automerge: true files: + plugins/: + 
supershipit: aminvakil russoz changelogs/fragments/: support: community $actions: From 89b7e7191fd6d91dd220c782a8675c3fead61e74 Mon Sep 17 00:00:00 2001 From: quidame Date: Mon, 12 Apr 2021 21:23:43 +0200 Subject: [PATCH 0178/3093] java_keystore: improve error handling and returned results (#2183) * java_keystore - improve error handling and returned results * set check_rc=False to return results as documented when module fails * set LANG, LC_ALL and LC_MESSAGES to C to rely keytool output parsing * fix pylint's `no-else-return` and `unused-variable` hints * update related unit tests accordingly * add a changelog fragment update unit test (remove stdout_lines from returned dict) fix unit test: failure is now expected when alias does not exist * Update changelogs/fragments/2183-java_keystore_improve_error_handling.yml Co-authored-by: Felix Fontein * fix integration test: overwrite keystore at the same location Co-authored-by: Felix Fontein --- ...3-java_keystore_improve_error_handling.yml | 6 + plugins/modules/system/java_keystore.py | 149 +++++++++--------- .../targets/java_keystore/tasks/main.yml | 6 +- .../modules/system/test_java_keystore.py | 16 +- 4 files changed, 96 insertions(+), 81 deletions(-) create mode 100644 changelogs/fragments/2183-java_keystore_improve_error_handling.yml diff --git a/changelogs/fragments/2183-java_keystore_improve_error_handling.yml b/changelogs/fragments/2183-java_keystore_improve_error_handling.yml new file mode 100644 index 0000000000..5d6ceef511 --- /dev/null +++ b/changelogs/fragments/2183-java_keystore_improve_error_handling.yml @@ -0,0 +1,6 @@ +--- +bugfixes: + - "java_keystore - improve error handling and return ``cmd`` as documented. + Force ``LANG``, ``LC_ALL`` and ``LC_MESSAGES`` environment variables to ``C`` to rely + on ``keytool`` output parsing. Fix pylint's ``unused-variable`` and ``no-else-return`` + hints (https://github.com/ansible-collections/community.general/pull/2183)." 
diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py index feab757f58..8143d1d4ef 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/system/java_keystore.py @@ -131,16 +131,14 @@ def read_certificate_fingerprint(module, openssl_bin, certificate_path): if rc != 0: return module.fail_json(msg=current_certificate_fingerprint_out, err=current_certificate_fingerprint_err, - rc=rc, - cmd=current_certificate_fingerprint_cmd) + cmd=current_certificate_fingerprint_cmd, + rc=rc) current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out) if not current_certificate_match: - return module.fail_json( - msg="Unable to find the current certificate fingerprint in %s" % current_certificate_fingerprint_out, - rc=rc, - cmd=current_certificate_fingerprint_err - ) + return module.fail_json(msg="Unable to find the current certificate fingerprint in %s" % current_certificate_fingerprint_out, + cmd=current_certificate_fingerprint_cmd, + rc=rc) return current_certificate_match.group(1) @@ -150,31 +148,36 @@ def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_pat (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = run_commands( module, stored_certificate_fingerprint_cmd, environ_update=dict(STOREPASS=keystore_password)) if rc != 0: - if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias not in stored_certificate_fingerprint_out: - return module.fail_json(msg=stored_certificate_fingerprint_out, - err=stored_certificate_fingerprint_err, - rc=rc, - cmd=stored_certificate_fingerprint_cmd) - else: - return None - else: - stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out) - if not stored_certificate_match: - return module.fail_json( - msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out, - rc=rc, - cmd=stored_certificate_fingerprint_cmd - ) + # 
First intention was to not fail, and overwrite the keystore instead, + # in case of alias mismatch; but an issue in error handling caused the + # module to fail anyway. + # See: https://github.com/ansible-collections/community.general/issues/1671 + # And: https://github.com/ansible-collections/community.general/pull/2183 + # if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias in stored_certificate_fingerprint_out: + # return "alias mismatch" + # if re.match(r'keytool error: java\.io\.IOException: [Kk]eystore( was tampered with, or)? password was incorrect', + # stored_certificate_fingerprint_out): + # return "password mismatch" + return module.fail_json(msg=stored_certificate_fingerprint_out, + err=stored_certificate_fingerprint_err, + cmd=stored_certificate_fingerprint_cmd, + rc=rc) - return stored_certificate_match.group(1) + stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out) + if not stored_certificate_match: + return module.fail_json(msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out, + cmd=stored_certificate_fingerprint_cmd, + rc=rc) + + return stored_certificate_match.group(1) -def run_commands(module, cmd, data=None, environ_update=None, check_rc=True): +def run_commands(module, cmd, data=None, environ_update=None, check_rc=False): return module.run_command(cmd, check_rc=check_rc, data=data, environ_update=environ_update) def create_path(): - tmpfd, tmpfile = tempfile.mkstemp() + dummy, tmpfile = tempfile.mkstemp() os.remove(tmpfile) return tmpfile @@ -206,58 +209,57 @@ def cert_changed(module, openssl_bin, keytool_bin, keystore_path, keystore_pass, def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass): if module.check_mode: - module.exit_json(changed=True) - else: - certificate_path = create_tmp_certificate(module) - private_key_path = create_tmp_private_key(module) - keystore_p12_path = create_path() - try: - 
if os.path.exists(keystore_path): - os.remove(keystore_path) + return module.exit_json(changed=True) - export_p12_cmd = [openssl_bin, "pkcs12", "-export", "-name", name, "-in", certificate_path, - "-inkey", private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] + certificate_path = create_tmp_certificate(module) + private_key_path = create_tmp_private_key(module) + keystore_p12_path = create_path() + try: + if os.path.exists(keystore_path): + os.remove(keystore_path) - # when keypass is provided, add -passin - cmd_stdin = "" - if keypass: - export_p12_cmd.append("-passin") - export_p12_cmd.append("stdin") - cmd_stdin = "%s\n" % keypass + export_p12_cmd = [openssl_bin, "pkcs12", "-export", "-name", name, "-in", certificate_path, + "-inkey", private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] - cmd_stdin += "%s\n%s" % (password, password) - (rc, export_p12_out, export_p12_err) = run_commands(module, export_p12_cmd, data=cmd_stdin) - if rc != 0: - return module.fail_json(msg=export_p12_out, - rc=rc, - cmd=export_p12_cmd) + # when keypass is provided, add -passin + cmd_stdin = "" + if keypass: + export_p12_cmd.append("-passin") + export_p12_cmd.append("stdin") + cmd_stdin = "%s\n" % keypass + cmd_stdin += "%s\n%s" % (password, password) - import_keystore_cmd = [keytool_bin, "-importkeystore", - "-destkeystore", keystore_path, - "-srckeystore", keystore_p12_path, - "-srcstoretype", "pkcs12", - "-alias", name, - "-deststorepass:env", "STOREPASS", - "-srcstorepass:env", "STOREPASS", - "-noprompt"] + (rc, export_p12_out, dummy) = run_commands(module, export_p12_cmd, data=cmd_stdin) + if rc != 0: + return module.fail_json(msg=export_p12_out, + cmd=export_p12_cmd, + rc=rc) - (rc, import_keystore_out, import_keystore_err) = run_commands(module, import_keystore_cmd, data=None, - environ_update=dict(STOREPASS=password)) - if rc == 0: - update_jks_perm(module, keystore_path) - return module.exit_json(changed=True, - msg=import_keystore_out, - rc=rc, - 
cmd=import_keystore_cmd, - stdout_lines=import_keystore_out) - else: - return module.fail_json(msg=import_keystore_out, - rc=rc, - cmd=import_keystore_cmd) - finally: - os.remove(certificate_path) - os.remove(private_key_path) - os.remove(keystore_p12_path) + import_keystore_cmd = [keytool_bin, "-importkeystore", + "-destkeystore", keystore_path, + "-srckeystore", keystore_p12_path, + "-srcstoretype", "pkcs12", + "-alias", name, + "-deststorepass:env", "STOREPASS", + "-srcstorepass:env", "STOREPASS", + "-noprompt"] + + (rc, import_keystore_out, dummy) = run_commands(module, import_keystore_cmd, data=None, + environ_update=dict(STOREPASS=password)) + if rc != 0: + return module.fail_json(msg=import_keystore_out, + cmd=import_keystore_cmd, + rc=rc) + + update_jks_perm(module, keystore_path) + return module.exit_json(changed=True, + msg=import_keystore_out, + cmd=import_keystore_cmd, + rc=rc) + finally: + os.remove(certificate_path) + os.remove(private_key_path) + os.remove(keystore_p12_path) def update_jks_perm(module, keystore_path): @@ -289,7 +291,7 @@ def process_jks(module): else: if not module.check_mode: update_jks_perm(module, keystore_path) - return module.exit_json(changed=False) + module.exit_json(changed=False) else: create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass) @@ -317,6 +319,7 @@ def main(): add_file_common_args=spec.add_file_common_args, supports_check_mode=spec.supports_check_mode ) + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') process_jks(module) diff --git a/tests/integration/targets/java_keystore/tasks/main.yml b/tests/integration/targets/java_keystore/tasks/main.yml index 2a8ad86e27..bba7a4facd 100644 --- a/tests/integration/targets/java_keystore/tasks/main.yml +++ b/tests/integration/targets/java_keystore/tasks/main.yml @@ -63,11 +63,11 @@ - name: Create a Java key store for the given certificates (check mode) community.general.java_keystore: &create_key_store_data name: 
example - certificate: "{{lookup('file', output_dir ~ '/' ~ item.name ~ '.pem') }}" - private_key: "{{lookup('file', output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key') }}" + certificate: "{{ lookup('file', output_dir ~ '/' ~ item.name ~ '.pem') }}" + private_key: "{{ lookup('file', output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key') }}" private_key_passphrase: "{{ item.passphrase | default(omit) }}" password: changeit - dest: "{{ output_dir ~ '/' ~ item.name ~ '.jks' }}" + dest: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.jks' }}" loop: &create_key_store_loop - name: cert - name: cert-pw diff --git a/tests/unit/plugins/modules/system/test_java_keystore.py b/tests/unit/plugins/modules/system/test_java_keystore.py index 409e956799..94332d6192 100644 --- a/tests/unit/plugins/modules/system/test_java_keystore.py +++ b/tests/unit/plugins/modules/system/test_java_keystore.py @@ -80,8 +80,7 @@ class TestCreateJavaKeystore(ModuleTestCase): "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", "-deststorepass:env", "STOREPASS", "-srcstorepass:env", "STOREPASS", "-noprompt"], msg='', - rc=0, - stdout_lines='' + rc=0 ) def test_create_jks_keypass_fail_export_pkcs12(self): @@ -237,7 +236,7 @@ class TestCertChanged(ModuleTestCase): result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') self.assertTrue(result, 'Fingerprint mismatch') - def test_cert_changed_alias_does_not_exist(self): + def test_cert_changed_fail_alias_does_not_exist(self): set_module_args(dict( certificate='cert-foo', private_key='private-foo', @@ -251,12 +250,19 @@ class TestCertChanged(ModuleTestCase): supports_check_mode=self.spec.supports_check_mode ) + module.fail_json = Mock() + with patch('os.remove', return_value=True): self.create_file.side_effect = ['/tmp/placeholder'] self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (1, 'keytool error: java.lang.Exception: Alias does not 
exist', '')] - result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') - self.assertTrue(result, 'Certificate does not exist') + cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + module.fail_json.assert_called_once_with( + cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-storepass:env", "STOREPASS", "-v"], + msg='keytool error: java.lang.Exception: Alias does not exist', + err='', + rc=1 + ) def test_cert_changed_fail_read_cert(self): set_module_args(dict( From 8ab356520d5340fd1b87a0dc7becb33309ea10a0 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Mon, 12 Apr 2021 16:24:33 -0400 Subject: [PATCH 0179/3093] Proxmox_Inv: Adding agent network interaces fact (#2148) * Added agent network interaces fact * Adding changelog fragment * More concise looping over interfaces * Adding unit test case for agent interfaces * Correcting whitespace issue * Commented new dummy json returns with corresponding method --- ...148-proxmox-inventory-agent-interfaces.yml | 3 + plugins/inventory/proxmox.py | 29 ++++ tests/unit/plugins/inventory/test_proxmox.py | 132 +++++++++++++++++- 3 files changed, 160 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml diff --git a/changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml b/changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml new file mode 100644 index 0000000000..0ef97f20ed --- /dev/null +++ b/changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- proxmox inventory plugin - added ``proxmox_agent_interfaces`` fact describing network interfaces returned from a QEMU guest agent (https://github.com/ansible-collections/community.general/pull/2148). 
diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 3e44dd1ddd..036c3dc7bf 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -224,6 +224,29 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): except Exception: return None + def _get_agent_network_interfaces(self, node, vmid, vmtype): + result = [] + + try: + ifaces = self._get_json( + "%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % ( + self.proxmox_url, node, vmtype, vmid + ) + )['result'] + + for iface in ifaces: + result.append({ + 'name': iface['name'], + 'mac-address': iface['hardware-address'], + 'ip-addresses': [ + "%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses'] + ] + }) + except requests.HTTPError: + pass + + return result + def _get_vm_config(self, node, vmid, vmtype, name): ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid)) @@ -258,6 +281,12 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): parsed_value = [tag.strip() for tag in value.split(",")] self.inventory.set_variable(name, parsed_key, parsed_value) + if config == 'agent' and int(value): + agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces")) + agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype) + if agent_iface_value: + self.inventory.set_variable(name, agent_iface_key, agent_iface_value) + if not (isinstance(value, int) or ',' not in value): # split off strings with commas to a dict # skip over any keys that cannot be processed diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index 036c8e5938..ee6c0e2963 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ b/tests/unit/plugins/inventory/test_proxmox.py @@ -71,8 +71,7 @@ def get_json(url): "status": "running", "vmid": "100", "disk": "1000", - "uptime": 1000, - "tags": "test, tags, here"}] + "uptime": 1000}] elif url == 
"https://localhost:8006/api2/json/nodes/testnode/qemu": # _get_qemu_per_node return [{"name": "test-qemu", @@ -106,8 +105,7 @@ def get_json(url): "vmid": "9001", "uptime": 0, "disk": 0, - "status": "stopped", - "tags": "test, tags, here"}] + "status": "stopped"}] elif url == "https://localhost:8006/api2/json/pools/test": # _get_members_per_pool return {"members": [{"uptime": 1000, @@ -164,6 +162,125 @@ def get_json(url): "method6": "manual", "autostart": 1, "active": 1}] + elif url == "https://localhost:8006/api2/json/nodes/testnode/lxc/100/config": + # _get_vm_config (lxc) + return { + "console": 1, + "rootfs": "local-lvm:vm-100-disk-0,size=4G", + "cmode": "tty", + "description": "A testnode", + "cores": 1, + "hostname": "test-lxc", + "arch": "amd64", + "tty": 2, + "swap": 0, + "cpulimit": "0", + "net0": "name=eth0,bridge=vmbr0,gw=10.1.1.1,hwaddr=FF:FF:FF:FF:FF:FF,ip=10.1.1.3/24,type=veth", + "ostype": "ubuntu", + "digest": "123456789abcdef0123456789abcdef01234567890", + "protection": 0, + "memory": 1000, + "onboot": 0, + "cpuunits": 1024, + "tags": "one, two, three", + } + elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/101/config": + # _get_vm_config (qemu) + return { + "tags": "one, two, three", + "cores": 1, + "ide2": "none,media=cdrom", + "memory": 1000, + "kvm": 1, + "digest": "0123456789abcdef0123456789abcdef0123456789", + "description": "A test qemu", + "sockets": 1, + "onboot": 1, + "vmgenid": "ffffffff-ffff-ffff-ffff-ffffffffffff", + "numa": 0, + "bootdisk": "scsi0", + "cpu": "host", + "name": "test-qemu", + "ostype": "l26", + "hotplug": "network,disk,usb", + "scsi0": "local-lvm:vm-101-disk-0,size=8G", + "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0,firewall=1", + "agent": "1", + "bios": "seabios", + "ide0": "local-lvm:vm-101-cloudinit,media=cdrom,size=4M", + "boot": "cdn", + "scsihw": "virtio-scsi-pci", + "smbios1": "uuid=ffffffff-ffff-ffff-ffff-ffffffffffff" + } + elif url == 
"https://localhost:8006/api2/json/nodes/testnode/qemu/101/agent/network-get-interfaces": + # _get_agent_network_interfaces + return {"result": [ + { + "hardware-address": "00:00:00:00:00:00", + "ip-addresses": [ + { + "prefix": 8, + "ip-address-type": "ipv4", + "ip-address": "127.0.0.1" + }, + { + "ip-address-type": "ipv6", + "ip-address": "::1", + "prefix": 128 + }], + "statistics": { + "rx-errs": 0, + "rx-bytes": 163244, + "rx-packets": 1623, + "rx-dropped": 0, + "tx-dropped": 0, + "tx-packets": 1623, + "tx-bytes": 163244, + "tx-errs": 0}, + "name": "lo"}, + { + "statistics": { + "rx-packets": 4025, + "rx-dropped": 12, + "rx-bytes": 324105, + "rx-errs": 0, + "tx-errs": 0, + "tx-bytes": 368860, + "tx-packets": 3479, + "tx-dropped": 0}, + "name": "eth0", + "ip-addresses": [ + { + "prefix": 24, + "ip-address-type": "ipv4", + "ip-address": "10.1.2.3" + }, + { + "prefix": 64, + "ip-address": "fd8c:4687:e88d:1be3:5b70:7b88:c79c:293", + "ip-address-type": "ipv6" + }], + "hardware-address": "ff:ff:ff:ff:ff:ff" + }, + { + "hardware-address": "ff:ff:ff:ff:ff:ff", + "ip-addresses": [ + { + "prefix": 16, + "ip-address": "10.10.2.3", + "ip-address-type": "ipv4" + }], + "name": "docker0", + "statistics": { + "rx-bytes": 0, + "rx-errs": 0, + "rx-dropped": 0, + "rx-packets": 0, + "tx-packets": 0, + "tx-dropped": 0, + "tx-errs": 0, + "tx-bytes": 0 + }}]} def get_vm_status(node, vmtype, vmid, name): @@ -173,6 +290,10 @@ def get_vm_status(node, vmtype, vmid, name): def get_option(option): if option == 'group_prefix': return 'proxmox_' + if option == 'facts_prefix': + return 'proxmox_' + elif option == 'want_facts': + return True else: return False @@ -201,6 +322,9 @@ def test_populate(inventory, mocker): group_qemu = inventory.inventory.groups['proxmox_pool_test'] assert group_qemu.hosts == [host_qemu] + # check if qemu-test has eth0 interface in agent_interfaces fact + assert 'eth0' in [d['name'] for d in host_qemu.get_vars()['proxmox_agent_interfaces']] + # check if lxc-test has 
been discovered correctly group_lxc = inventory.inventory.groups['proxmox_all_lxc'] assert group_lxc.hosts == [host_lxc] From 1f001cafd9bd7bb15388648b3077090c39ca8fd0 Mon Sep 17 00:00:00 2001 From: tgates81 <31669870+tgates81@users.noreply.github.com> Date: Mon, 12 Apr 2021 16:26:43 -0400 Subject: [PATCH 0180/3093] spectrum_model_attrs: Initial commit (#1802) * spectrum_model_attrs: Initial commit * spectrum_model_attrs: sanity check fixes (1) * Apply suggestions from code review Co-authored-by: Felix Fontein * Apply suggestions from code review: * Removed ANSIBLE_METADATA. * List all currently supported names in DOCUMENTATION block. * Don't escape declarations that are long enough to fit on one line. * Apply suggestions from code review: * YAML bools in DOCUMENTATION block. * Various DOCUMENTATION block aesthetics. * RETURN block proper format. * 'yes' -> True declaration in argument spec. * import urlencode from python 2 and 3 changed to six.moves.urllib.quote. * spectrum_model_attrs: integration test added. * Update plugins/modules/monitoring/spectrum_model_attrs.py Co-authored-by: Amin Vakil * Update plugins/modules/monitoring/spectrum_model_attrs.py Co-authored-by: Amin Vakil * spectrum_model_attrs: lint error fixes. 
Co-authored-by: Tyler Gates Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- .../monitoring/spectrum_model_attrs.py | 528 ++++++++++++++++++ plugins/modules/spectrum_model_attrs.py | 1 + .../targets/spectrum_model_attrs/aliases | 1 + .../spectrum_model_attrs/tasks/main.yml | 73 +++ 4 files changed, 603 insertions(+) create mode 100644 plugins/modules/monitoring/spectrum_model_attrs.py create mode 120000 plugins/modules/spectrum_model_attrs.py create mode 100644 tests/integration/targets/spectrum_model_attrs/aliases create mode 100644 tests/integration/targets/spectrum_model_attrs/tasks/main.yml diff --git a/plugins/modules/monitoring/spectrum_model_attrs.py b/plugins/modules/monitoring/spectrum_model_attrs.py new file mode 100644 index 0000000000..d6f3948254 --- /dev/null +++ b/plugins/modules/monitoring/spectrum_model_attrs.py @@ -0,0 +1,528 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2021, Tyler Gates +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: spectrum_model_attrs +short_description: Enforce a model's attributes in CA Spectrum. +description: + - This module can be used to enforce a model's attributes in CA Spectrum. +version_added: 2.5.0 +author: + - Tyler Gates (@tgates81) +notes: + - Tested on CA Spectrum version 10.4.2.0.189. + - Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead. +requirements: + - 'python >= 2.7' +options: + url: + description: + - URL of OneClick server. + type: str + required: true + url_username: + description: + - OneClick username. + type: str + required: true + aliases: [username] + url_password: + description: + - OneClick password. 
+ type: str + required: true + aliases: [password] + use_proxy: + description: + - if C(no), it will not use a proxy, even if one is defined in + an environment variable on the target hosts. + default: yes + required: false + type: bool + name: + description: + - Model name. + type: str + required: true + type: + description: + - Model type. + type: str + required: true + validate_certs: + description: + - Validate SSL certificates. Only change this to C(false) if you can guarantee that you are talking to the correct endpoint and there is no + man-in-the-middle attack happening. + type: bool + default: yes + required: false + attributes: + description: + - A list of attribute names and values to enforce. + - All values and parameters are case sensitive and must be provided as strings only. + required: true + type: list + elements: dict + suboptions: + name: + description: + - Attribute name OR hex ID. + - 'Currently defined names are:' + - ' C(App_Manufacturer) (C(0x230683))' + - ' C(CollectionsModelNameString) (C(0x12adb))' + - ' C(Condition) (C(0x1000a))' + - ' C(Criticality) (C(0x1290c))' + - ' C(DeviceType) (C(0x23000e))' + - ' C(isManaged) (C(0x1295d))' + - ' C(Model_Class) (C(0x11ee8))' + - ' C(Model_Handle) (C(0x129fa))' + - ' C(Model_Name) (C(0x1006e))' + - ' C(Modeltype_Handle) (C(0x10001))' + - ' C(Modeltype_Name) (C(0x10000))' + - ' C(Network_Address) (C(0x12d7f))' + - ' C(Notes) (C(0x11564))' + - ' C(ServiceDesk_Asset_ID) (C(0x12db9))' + - ' C(TopologyModelNameString) (C(0x129e7))' + - ' C(sysDescr) (C(0x10052))' + - ' C(sysName) (C(0x10b5b))' + - ' C(Vendor_Name) (C(0x11570))' + - ' C(Description) (C(0x230017))' + - Hex IDs are the direct identifiers in Spectrum and will always work. + - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> -> Attributes tab.' + type: str + required: true + value: + description: + - Attribute value. Empty strings should be C("") or C(null). 
+ type: str + required: true +''' + +EXAMPLES = r''' +- name: Enforce maintenance mode for modelxyz01 with a note about why + community.general.spectrum_model_attrs: + url: "http://oneclick.url.com" + username: "{{ oneclick_username }}" + password: "{{ oneclick_password }}" + name: "modelxyz01" + type: "Host_Device" + validate_certs: true + attributes: + - name: "isManaged" + value: "false" + - name: "Notes" + value: "MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} by {{ tower_user_name | default(ansible_user_id) }}" + delegate_to: localhost + register: spectrum_model_attrs_status +''' + +RETURN = r''' +msg: + description: Informational message on the job result. + type: str + returned: always + sample: 'Success' +changed_attrs: + description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values. + type: dict + returned: always + sample: { + "Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates", + "isManaged": "true" + } +''' + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.six.moves.urllib.parse import quote +import json +import re +import xml.etree.ElementTree as ET + + +class spectrum_model_attrs: + def __init__(self, module): + self.module = module + self.url = module.params['url'] + # If the user did not define a full path to the restul space in url: + # params, add what we believe it to be. + if not re.search('\\/.+', self.url.split('://')[1]): + self.url = "%s/spectrum/restful" % self.url.rstrip('/') + # Align these with what is defined in OneClick's UI under: + # Locator -> Devices -> By Model Name -> -> + # Attributes tab. 
+ self.attr_map = dict(App_Manufacturer=hex(0x230683), + CollectionsModelNameString=hex(0x12adb), + Condition=hex(0x1000a), + Criticality=hex(0x1290c), + DeviceType=hex(0x23000e), + isManaged=hex(0x1295d), + Model_Class=hex(0x11ee8), + Model_Handle=hex(0x129fa), + Model_Name=hex(0x1006e), + Modeltype_Handle=hex(0x10001), + Modeltype_Name=hex(0x10000), + Network_Address=hex(0x12d7f), + Notes=hex(0x11564), + ServiceDesk_Asset_ID=hex(0x12db9), + TopologyModelNameString=hex(0x129e7), + sysDescr=hex(0x10052), + sysName=hex(0x10b5b), + Vendor_Name=hex(0x11570), + Description=hex(0x230017)) + self.search_qualifiers = [ + "and", "or", "not", "greater-than", "greater-than-or-equals", + "less-than", "less-than-or-equals", "equals", "equals-ignore-case", + "does-not-equal", "does-not-equal-ignore-case", "has-prefix", + "does-not-have-prefix", "has-prefix-ignore-case", + "does-not-have-prefix-ignore-case", "has-substring", + "does-not-have-substring", "has-substring-ignore-case", + "does-not-have-substring-ignore-case", "has-suffix", + "does-not-have-suffix", "has-suffix-ignore-case", + "does-not-have-suffix-ignore-case", "has-pcre", + "has-pcre-ignore-case", "has-wildcard", "has-wildcard-ignore-case", + "is-derived-from", "not-is-derived-from"] + + self.resp_namespace = dict(ca="http://www.ca.com/spectrum/restful/schema/response") + + self.result = dict(msg="", changed_attrs=dict()) + self.success_msg = "Success" + + def build_url(self, path): + """ + Build a sane Spectrum restful API URL + :param path: The path to append to the restful base + :type path: str + :returns: Complete restful API URL + :rtype: str + """ + + return "%s/%s" % (self.url.rstrip('/'), path.lstrip('/')) + + def attr_id(self, name): + """ + Get attribute hex ID + :param name: The name of the attribute to retrieve the hex ID for + :type name: str + :returns: Translated hex ID of name, or None if no translation found + :rtype: str or None + """ + + try: + return self.attr_map[name] + except KeyError: + 
return None + + def attr_name(self, _id): + """ + Get attribute name from hex ID + :param _id: The hex ID to lookup a name for + :type _id: str + :returns: Translated name of hex ID, or None if no translation found + :rtype: str or None + """ + + for name, m_id in list(self.attr_map.items()): + if _id == m_id: + return name + return None + + def urlencode(self, string): + """ + URL Encode a string + :param: string: The string to URL encode + :type string: str + :returns: URL encode version of supplied string + :rtype: str + """ + + return quote(string, "<>%-_.!*'():?#/@&+,;=") + + def update_model(self, model_handle, attrs): + """ + Update a model's attributes + :param model_handle: The model's handle ID + :type model_handle: str + :param attrs: Model's attributes to update. {'': ''} + :type attrs: dict + :returns: Nothing; exits on error or updates self.results + :rtype: None + """ + + # Build the update URL + update_url = self.build_url("/model/%s?" % model_handle) + for name, val in list(attrs.items()): + if val is None: + # None values should be converted to empty strings + val = "" + val = self.urlencode(str(val)) + if not update_url.endswith('?'): + update_url += "&" + + update_url += "attr=%s&val=%s" % (self.attr_id(name) or name, val) + + # POST to /model to update the attributes, or fail. + resp, info = fetch_url(self.module, update_url, method="PUT", + headers={"Content-Type": "application/json", + "Accept": "application/json"}, + use_proxy=self.module.params['use_proxy']) + status_code = info["status"] + if status_code >= 400: + body = info['body'] + else: + body = "" if resp is None else resp.read() + if status_code != 200: + self.result['msg'] = "HTTP PUT error %s: %s: %s" % (status_code, update_url, body) + self.module.fail_json(**self.result) + + # Load and parse the JSON response and either fail or set results. 
+ json_resp = json.loads(body) + """ + Example success response: + {'model-update-response-list':{'model-responses':{'model':{'@error':'Success','@mh':'0x1010e76','attribute':{'@error':'Success','@id':'0x1295d'}}}}}" + Example failure response: + {'model-update-response-list': {'model-responses': {'model': {'@error': 'PartialFailure', '@mh': '0x1010e76', 'attribute': {'@error-message': 'brn0vlappua001: You do not have permission to set attribute Network_Address for this model.', '@error': 'Error', '@id': '0x12d7f'}}}}} + """ # noqa + model_resp = json_resp['model-update-response-list']['model-responses']['model'] + if model_resp['@error'] != "Success": + # I'm not 100% confident on the expected failure structure so just + # dump all of ['attribute']. + self.result['msg'] = str(model_resp['attribute']) + self.module.fail_json(**self.result) + + # Should be OK if we get to here, set results. + self.result['msg'] = self.success_msg + self.result['changed_attrs'].update(attrs) + self.result['changed'] = True + + def find_model(self, search_criteria, ret_attrs=None): + """ + Search for a model in /models + :param search_criteria: The XML + :type search_criteria: str + :param ret_attrs: List of attributes by name or ID to return back + (default is Model_Handle) + :type ret_attrs: list + returns: Dictionary mapping of ret_attrs to values: {ret_attr: ret_val} + rtype: dict + """ + + # If no return attributes were asked for, return Model_Handle. + if ret_attrs is None: + ret_attrs = ['Model_Handle'] + + # Set the XML > tags. If no hex ID + # is found for the name, assume it is already in hex. {name: hex ID} + rqstd_attrs = "" + for ra in ret_attrs: + _id = self.attr_id(ra) or ra + rqstd_attrs += '' % (self.attr_id(ra) or ra) + + # Build the complete XML search query for HTTP POST. + xml = """ + + + + + {0} + + + + {1} + +""".format(search_criteria, rqstd_attrs) + + # POST to /models and fail on errors. 
+ url = self.build_url("/models") + resp, info = fetch_url(self.module, url, data=xml, method="POST", + use_proxy=self.module.params['use_proxy'], + headers={"Content-Type": "application/xml", + "Accept": "application/xml"}) + status_code = info["status"] + if status_code >= 400: + body = info['body'] + else: + body = "" if resp is None else resp.read() + if status_code != 200: + self.result['msg'] = "HTTP POST error %s: %s: %s" % (status_code, url, body) + self.module.fail_json(**self.result) + + # Parse through the XML response and fail on any detected errors. + root = ET.fromstring(body) + total_models = int(root.attrib['total-models']) + error = root.attrib['error'] + model_responses = root.find('ca:model-responses', self.resp_namespace) + if total_models < 1: + self.result['msg'] = "No models found matching search criteria `%s'" % search_criteria + self.module.fail_json(**self.result) + elif total_models > 1: + self.result['msg'] = "More than one model found (%s): `%s'" % (total_models, ET.tostring(model_responses, + encoding='unicode')) + self.module.fail_json(**self.result) + if error != "EndOfResults": + self.result['msg'] = "Unexpected search response `%s': %s" % (error, ET.tostring(model_responses, + encoding='unicode')) + self.module.fail_json(**self.result) + model = model_responses.find('ca:model', self.resp_namespace) + attrs = model.findall('ca:attribute', self.resp_namespace) + if not attrs: + self.result['msg'] = "No attributes returned." + self.module.fail_json(**self.result) + + # XML response should be successful. Iterate and set each returned + # attribute ID/name and value for return. + ret = dict() + for attr in attrs: + attr_id = attr.get('id') + attr_name = self.attr_name(attr_id) + # Note: all values except empty strings (None) are strings only! 
+ attr_val = attr.text + key = attr_name if attr_name in ret_attrs else attr_id + ret[key] = attr_val + ret_attrs.remove(key) + return ret + + def find_model_by_name_type(self, mname, mtype, ret_attrs=None): + """ + Find a model by name and type + :param mname: Model name + :type mname: str + :param mtype: Model type + :type mtype: str + :param ret_attrs: List of attributes by name or ID to return back + (default is Model_Handle) + :type ret_attrs: list + returns: find_model(): Dictionary mapping of ret_attrs to values: + {ret_attr: ret_val} + rtype: dict + """ + + # If no return attributes were asked for, return Model_Handle. + if ret_attrs is None: + ret_attrs = ['Model_Handle'] + + """This is basically as follows: + + + + + ... + + + + + + + + """ + + # Parent filter tag + filtered_models = ET.Element('filtered-models') + # Logically and + _and = ET.SubElement(filtered_models, 'and') + + # Model Name + MN_equals = ET.SubElement(_and, 'equals') + Model_Name = ET.SubElement(MN_equals, 'attribute', + {'id': self.attr_map['Model_Name']}) + MN_value = ET.SubElement(Model_Name, 'value') + MN_value.text = mname + + # Model Type Name + MTN_equals = ET.SubElement(_and, 'equals') + Modeltype_Name = ET.SubElement(MTN_equals, 'attribute', + {'id': self.attr_map['Modeltype_Name']}) + MTN_value = ET.SubElement(Modeltype_Name, 'value') + MTN_value.text = mtype + + return self.find_model(ET.tostring(filtered_models, + encoding='unicode'), + ret_attrs) + + def ensure_model_attrs(self): + + # Get a list of all requested attribute names/IDs plus Model_Handle and + # use them to query the values currently set. Store finding in a + # dictionary. + req_attrs = [] + for attr in self.module.params['attributes']: + req_attrs.append(attr['name']) + if 'Model_Handle' not in req_attrs: + req_attrs.append('Model_Handle') + + # Survey attributes currently set and store in a dict. 
+ cur_attrs = self.find_model_by_name_type(self.module.params['name'], + self.module.params['type'], + req_attrs) + + # Iterate through the requested attributes names/IDs values pair and + # compare with those currently set. If different, attempt to change. + Model_Handle = cur_attrs.pop("Model_Handle") + for attr in self.module.params['attributes']: + req_name = attr['name'] + req_val = attr['value'] + if req_val == "": + # The API will return None on empty string + req_val = None + if cur_attrs[req_name] != req_val: + if self.module.check_mode: + self.result['changed_attrs'][req_name] = req_val + self.result['msg'] = self.success_msg + self.result['changed'] = True + continue + resp = self.update_model(Model_Handle, {req_name: req_val}) + + self.module.exit_json(**self.result) + + +def run_module(): + argument_spec = dict( + url=dict(type='str', required=True), + url_username=dict(type='str', required=True, aliases=['username']), + url_password=dict(type='str', required=True, aliases=['password'], + no_log=True), + validate_certs=dict(type='bool', default=True), + use_proxy=dict(type='bool', default=True), + name=dict(type='str', required=True), + type=dict(type='str', required=True), + attributes=dict(type='list', + required=True, + elements='dict', + options=dict( + name=dict(type='str', required=True), + value=dict(type='str', required=True) + )), + ) + module = AnsibleModule( + supports_check_mode=True, + argument_spec=argument_spec, + ) + + try: + sm = spectrum_model_attrs(module) + sm.ensure_model_attrs() + except Exception as e: + module.fail_json(msg="Failed to ensure attribute(s) on `%s' with " + "exception: %s" % (module.params['name'], + to_native(e))) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/spectrum_model_attrs.py b/plugins/modules/spectrum_model_attrs.py new file mode 120000 index 0000000000..31d8c33060 --- /dev/null +++ b/plugins/modules/spectrum_model_attrs.py @@ -0,0 +1 @@ 
+./monitoring/spectrum_model_attrs.py \ No newline at end of file diff --git a/tests/integration/targets/spectrum_model_attrs/aliases b/tests/integration/targets/spectrum_model_attrs/aliases new file mode 100644 index 0000000000..ad7ccf7ada --- /dev/null +++ b/tests/integration/targets/spectrum_model_attrs/aliases @@ -0,0 +1 @@ +unsupported diff --git a/tests/integration/targets/spectrum_model_attrs/tasks/main.yml b/tests/integration/targets/spectrum_model_attrs/tasks/main.yml new file mode 100644 index 0000000000..c39d5c3ba2 --- /dev/null +++ b/tests/integration/targets/spectrum_model_attrs/tasks/main.yml @@ -0,0 +1,73 @@ +- name: "Verify required variables: model_name, model_type, oneclick_username, oneclick_password, oneclick_url" + fail: + msg: "One or more of the following variables are not set: model_name, model_type, oneclick_username, oneclick_password, oneclick_url" + when: > + model_name is not defined + or model_type is not defined + or oneclick_username is not defined + or oneclick_password is not defined + or oneclick_url is not defined + +- block: + - name: "001: Enforce maintenance mode for {{ model_name }} with a note about why [check_mode test]" + spectrum_model_attrs: &mm_enabled_args + url: "{{ oneclick_url }}" + username: "{{ oneclick_username }}" + password: "{{ oneclick_password }}" + name: "{{ model_name }}" + type: "{{ model_type }}" + validate_certs: false + attributes: + - name: "isManaged" + value: "false" + - name: "Notes" + value: "{{ note_mm_enabled }}" + check_mode: true + register: mm_enabled_check_mode + + - name: "001: assert that changes were made" + assert: + that: + - mm_enabled_check_mode is changed + + - name: "001: assert that changed_attrs is properly set" + assert: + that: + - mm_enabled_check_mode.changed_attrs.Notes == note_mm_enabled + - mm_enabled_check_mode.changed_attrs.isManaged == "false" + + - name: "002: Enforce maintenance mode for {{ model_name }} with a note about why" + spectrum_model_attrs: + <<: 
*mm_enabled_args + register: mm_enabled + check_mode: false + + - name: "002: assert that changes were made" + assert: + that: + - mm_enabled is changed + + - name: "002: assert that changed_attrs is properly set" + assert: + that: + - mm_enabled.changed_attrs.Notes == note_mm_enabled + - mm_enabled.changed_attrs.isManaged == "false" + + - name: "003: Enforce maintenance mode for {{ model_name }} with a note about why [idempontence test]" + spectrum_model_attrs: + <<: *mm_enabled_args + register: mm_enabled_idp + check_mode: false + + - name: "003: assert that changes were not made" + assert: + that: + - mm_enabled_idp is not changed + + - name: "003: assert that changed_attrs is not set" + assert: + that: + - mm_enabled_idp.changed_attrs == {} + + vars: + note_mm_enabled: "MM set via CO #1234 by OJ Simpson" From 98af8161b20212ba789389ea2ccc273bafe2b41d Mon Sep 17 00:00:00 2001 From: Brandon McNama Date: Tue, 13 Apr 2021 01:41:31 -0400 Subject: [PATCH 0181/3093] Add Jira attach operation (#2192) * Add Jira attach operation Adds the `attach` operation to the `web_infrastructure.jira` module, which allows a user to attach a file to an issue. The user can supply either the path to a file, which will be read from storage, or a file name and content (as bytes). 
* Apply suggestions from code review * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- changelogs/fragments/2192-add-jira-attach.yml | 2 + plugins/modules/web_infrastructure/jira.py | 154 +++++++++++++++++- 2 files changed, 147 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/2192-add-jira-attach.yml diff --git a/changelogs/fragments/2192-add-jira-attach.yml b/changelogs/fragments/2192-add-jira-attach.yml new file mode 100644 index 0000000000..5877250541 --- /dev/null +++ b/changelogs/fragments/2192-add-jira-attach.yml @@ -0,0 +1,2 @@ +minor_changes: + - jira - added ``attach`` operation, which allows a user to attach a file to an issue (https://github.com/ansible-collections/community.general/pull/2192). diff --git a/plugins/modules/web_infrastructure/jira.py b/plugins/modules/web_infrastructure/jira.py index 51810f6b97..d4ddf53015 100644 --- a/plugins/modules/web_infrastructure/jira.py +++ b/plugins/modules/web_infrastructure/jira.py @@ -5,6 +5,7 @@ # Atlassian open-source approval reference OSR-76. # # (c) 2020, Per Abildgaard Toft Search and update function +# (c) 2021, Brandon McNama Issue attachment functionality # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -29,7 +30,7 @@ options: type: str required: true aliases: [ command ] - choices: [ comment, create, edit, fetch, link, search, transition, update ] + choices: [ attach, comment, create, edit, fetch, link, search, transition, update ] description: - The operation to perform. @@ -162,6 +163,29 @@ options: default: true type: bool + attachment: + type: dict + version_added: 2.5.0 + description: + - Information about the attachment being uploaded. 
+ suboptions: + filename: + required: true + type: path + description: + - The path to the file to upload (from the remote node) or, if I(content) is specified, + the filename to use for the attachment. + content: + type: str + description: + - The Base64 encoded contents of the file to attach. If not specified, the contents of I(filename) will be + used instead. + mimetype: + type: str + description: + - The MIME type to supply for the upload. If not specified, best-effort detection will be + done. + notes: - "Currently this only works with basic-auth." - "To use with JIRA Cloud, pass the login e-mail as the I(username) and the API token as I(password)." @@ -169,6 +193,7 @@ notes: author: - "Steve Smith (@tarka)" - "Per Abildgaard Toft (@pertoft)" +- "Brandon McNama (@DWSR)" """ EXAMPLES = r""" @@ -310,10 +335,26 @@ EXAMPLES = r""" resolution: name: Done description: I am done! This is the last description I will ever give you. + +# Attach a file to an issue +- name: Attach a file + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: HSP-1 + operation: attach + attachment: + filename: topsecretreport.xlsx """ import base64 +import binascii import json +import mimetypes +import os +import random +import string import sys import traceback @@ -325,8 +366,17 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url -def request(url, user, passwd, timeout, data=None, method=None): - if data: +def request( + url, + user, + passwd, + timeout, + data=None, + method=None, + content_type='application/json', + additional_headers=None +): + if data and content_type == 'application/json': data = json.dumps(data) # NOTE: fetch_url uses a password manager, which follows the @@ -337,9 +387,18 @@ def request(url, user, passwd, timeout, data=None, method=None): # inject the basic-auth header up-front to ensure that JIRA treats # the requests as authorized for this user. 
auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(user, passwd), errors='surrogate_or_strict')))
-    response, info = fetch_url(module, url, data=data, method=method, timeout=timeout,
-                               headers={'Content-Type': 'application/json',
-                                        'Authorization': "Basic %s" % auth})
+
+    headers = {}
+    if isinstance(additional_headers, dict):
+        headers = additional_headers.copy()
+    headers.update({
+        "Content-Type": content_type,
+        "Authorization": "Basic %s" % auth,
+    })
+
+    response, info = fetch_url(
+        module, url, data=data, method=method, timeout=timeout, headers=headers
+    )
 
     if info['status'] not in (200, 201, 204):
         error = None
@@ -365,8 +424,8 @@ def request(url, user, passwd, timeout, data=None, method=None):
     return {}
 
 
-def post(url, user, passwd, timeout, data):
-    return request(url, user, passwd, timeout, data=data, method='POST')
+def post(url, user, passwd, timeout, data, content_type='application/json', additional_headers=None):
+    return request(url, user, passwd, timeout, data=data, method='POST', content_type=content_type, additional_headers=additional_headers)
 
 
 def put(url, user, passwd, timeout, data):
@@ -486,13 +545,89 @@ def link(restbase, user, passwd, params):
     return True, post(url, user, passwd, params['timeout'], data)
 
 
+def attach(restbase, user, passwd, params):
+    filename = params['attachment'].get('filename')
+    content = params['attachment'].get('content')
+
+    if not any((filename, content)):
+        raise ValueError('at least one of filename or content must be provided')
+    mime = params['attachment'].get('mimetype')
+
+    if not os.path.isfile(filename):
+        raise ValueError('The provided filename does not exist: %s' % filename)
+
+    content_type, data = _prepare_attachment(filename, content, mime)
+
+    url = restbase + '/issue/' + params['issue'] + '/attachments'
+    return True, post(
+        url, user, passwd, params['timeout'], data, content_type=content_type,
+        additional_headers={"X-Atlassian-Token": "no-check"}
+    )
+
+
+# Ideally we'd just use
prepare_multipart from ansible.module_utils.urls, but
+# unfortunately it does not support specifying the encoding and also defaults to
+# base64. Jira doesn't support base64 encoded attachments (and is therefore not
+# spec compliant. Go figure). I originally wrote this function as an almost
+# exact copypasta of prepare_multipart, but ran into some encoding issues when
+# using the noop encoder. Hand rolling the entire message body seemed to work
+# out much better.
+#
+# https://community.atlassian.com/t5/Jira-questions/Jira-dosen-t-decode-base64-attachment-request-REST-API/qaq-p/916427
+#
+# content is expected to be a base64 encoded string since Ansible doesn't
+# support passing raw bytes objects.
+def _prepare_attachment(filename, content=None, mime_type=None):
+    def escape_quotes(s):
+        return s.replace('"', '\\"')
+
+    boundary = "".join(random.choice(string.digits + string.ascii_letters) for i in range(30))
+    name = to_native(os.path.basename(filename))
+
+    if not mime_type:
+        try:
+            mime_type = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream'
+        except Exception:
+            mime_type = 'application/octet-stream'
+    main_type, sep, sub_type = mime_type.partition('/')
+
+    if not content and filename:
+        with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f:
+            content = f.read()
+    else:
+        try:
+            content = base64.b64decode(content)
+        except binascii.Error as e:
+            raise Exception("Unable to base64 decode file content: %s" % e)
+
+    lines = [
+        "--{0}".format(boundary),
+        'Content-Disposition: form-data; name="file"; filename={0}'.format(escape_quotes(name)),
+        "Content-Type: {0}".format("{0}/{1}".format(main_type, sub_type)),
+        '',
+        to_text(content),
+        "--{0}--".format(boundary),
+        ""
+    ]
+
+    return (
+        "multipart/form-data; boundary={0}".format(boundary),
+        "\r\n".join(lines)
+    )
+
+
 def main():
     global module
     module = AnsibleModule(
         argument_spec=dict(
+            attachment=dict(type='dict', options=dict(
+                content=dict(type='str'),
+ filename=dict(type='path', required=True), + mimetype=dict(type='str') + )), uri=dict(type='str', required=True), - operation=dict(type='str', choices=['create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search'], + operation=dict(type='str', choices=['attach', 'create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search'], aliases=['command'], required=True), username=dict(type='str', required=True), password=dict(type='str', required=True, no_log=True), @@ -515,6 +650,7 @@ def main(): account_id=dict(type='str'), ), required_if=( + ('operation', 'attach', ['issue', 'attachment']), ('operation', 'create', ['project', 'issuetype', 'summary']), ('operation', 'comment', ['issue', 'comment']), ('operation', 'fetch', ['issue']), From 081c534d40a8b466a7956dc1949fe97bc4324bec Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 13 Apr 2021 13:19:25 +0200 Subject: [PATCH 0182/3093] Remove deprecated modules scheduled for removal in 3.0.0 (#1924) * Remove deprecated modules scheduled for removal in 3.0.0. * Update BOTMETA. * Update ignore-2.12.txt. * Next release will be 3.0.0. 
--- .github/BOTMETA.yml | 21 - .../fragments/remove-deprecated-modules.yml | 66 ++ galaxy.yml | 2 +- meta/runtime.yml | 156 ++-- plugins/doc_fragments/ovirt_facts.py | 59 -- plugins/module_utils/_ovirt.py | 871 ------------------ plugins/modules/ali_instance_facts.py | 1 - .../cloud/alicloud/ali_instance_facts.py | 1 - .../cloud/alicloud/ali_instance_info.py | 3 - .../cloud/memset/memset_memstore_facts.py | 1 - .../cloud/memset/memset_memstore_info.py | 3 - .../cloud/memset/memset_server_facts.py | 1 - .../cloud/memset/memset_server_info.py | 3 - plugins/modules/cloud/misc/helm.py | 216 ----- plugins/modules/cloud/misc/ovirt.py | 503 ---------- .../cloud/online/online_server_facts.py | 175 ---- .../modules/cloud/online/online_user_facts.py | 76 -- .../cloud/opennebula/one_image_facts.py | 1 - .../cloud/opennebula/one_image_info.py | 3 - .../cloud/ovirt/ovirt_affinity_label_facts.py | 196 ---- .../modules/cloud/ovirt/ovirt_api_facts.py | 98 -- .../cloud/ovirt/ovirt_cluster_facts.py | 125 --- .../cloud/ovirt/ovirt_datacenter_facts.py | 108 --- .../modules/cloud/ovirt/ovirt_disk_facts.py | 125 --- .../modules/cloud/ovirt/ovirt_event_facts.py | 170 ---- .../ovirt/ovirt_external_provider_facts.py | 165 ---- .../modules/cloud/ovirt/ovirt_group_facts.py | 123 --- .../modules/cloud/ovirt/ovirt_host_facts.py | 149 --- .../cloud/ovirt/ovirt_host_storage_facts.py | 187 ---- .../cloud/ovirt/ovirt_network_facts.py | 125 --- .../modules/cloud/ovirt/ovirt_nic_facts.py | 143 --- .../cloud/ovirt/ovirt_permission_facts.py | 166 ---- .../modules/cloud/ovirt/ovirt_quota_facts.py | 143 --- .../ovirt/ovirt_scheduling_policy_facts.py | 140 --- .../cloud/ovirt/ovirt_snapshot_facts.py | 137 --- .../cloud/ovirt/ovirt_storage_domain_facts.py | 126 --- .../ovirt/ovirt_storage_template_facts.py | 142 --- .../cloud/ovirt/ovirt_storage_vm_facts.py | 142 --- .../modules/cloud/ovirt/ovirt_tag_facts.py | 172 ---- .../cloud/ovirt/ovirt_template_facts.py | 124 --- 
.../modules/cloud/ovirt/ovirt_user_facts.py | 123 --- plugins/modules/cloud/ovirt/ovirt_vm_facts.py | 166 ---- .../modules/cloud/ovirt/ovirt_vmpool_facts.py | 123 --- .../cloud/scaleway/scaleway_image_facts.py | 125 --- .../cloud/scaleway/scaleway_ip_facts.py | 108 --- .../scaleway/scaleway_organization_facts.py | 104 --- .../scaleway/scaleway_security_group_facts.py | 112 --- .../cloud/scaleway/scaleway_server_facts.py | 195 ---- .../cloud/scaleway/scaleway_snapshot_facts.py | 113 --- .../cloud/scaleway/scaleway_volume_facts.py | 108 --- .../cloud/smartos/smartos_image_facts.py | 1 - .../cloud/smartos/smartos_image_info.py | 13 +- .../cloud/xenserver/xenserver_guest_facts.py | 1 - .../cloud/xenserver/xenserver_guest_info.py | 4 - .../modules/database/vertica/vertica_facts.py | 1 - .../modules/database/vertica/vertica_info.py | 25 +- plugins/modules/gluster_heal_info.py | 1 - plugins/modules/gluster_peer.py | 1 - plugins/modules/gluster_volume.py | 1 - plugins/modules/helm.py | 1 - plugins/modules/hpilo_facts.py | 1 - plugins/modules/identity/onepassword_facts.py | 1 - plugins/modules/identity/onepassword_info.py | 11 +- plugins/modules/idrac_redfish_facts.py | 1 - plugins/modules/jenkins_job_facts.py | 1 - plugins/modules/ldap_attr.py | 1 - plugins/modules/memset_memstore_facts.py | 1 - plugins/modules/memset_server_facts.py | 1 - plugins/modules/na_ontap_gather_facts.py | 1 - plugins/modules/net_tools/ldap/ldap_attr.py | 284 ------ plugins/modules/net_tools/ldap/ldap_entry.py | 6 +- plugins/modules/nginx_status_facts.py | 1 - plugins/modules/one_image_facts.py | 1 - plugins/modules/onepassword_facts.py | 1 - plugins/modules/oneview_datacenter_facts.py | 1 - plugins/modules/oneview_enclosure_facts.py | 1 - .../modules/oneview_ethernet_network_facts.py | 1 - plugins/modules/oneview_fc_network_facts.py | 1 - plugins/modules/oneview_fcoe_network_facts.py | 1 - ...neview_logical_interconnect_group_facts.py | 1 - plugins/modules/oneview_network_set_facts.py | 1 - 
plugins/modules/oneview_san_manager_facts.py | 1 - plugins/modules/online_server_facts.py | 1 - plugins/modules/online_user_facts.py | 1 - plugins/modules/ovirt.py | 1 - plugins/modules/ovirt_affinity_label_facts.py | 1 - plugins/modules/ovirt_api_facts.py | 1 - plugins/modules/ovirt_cluster_facts.py | 1 - plugins/modules/ovirt_datacenter_facts.py | 1 - plugins/modules/ovirt_disk_facts.py | 1 - plugins/modules/ovirt_event_facts.py | 1 - .../modules/ovirt_external_provider_facts.py | 1 - plugins/modules/ovirt_group_facts.py | 1 - plugins/modules/ovirt_host_facts.py | 1 - plugins/modules/ovirt_host_storage_facts.py | 1 - plugins/modules/ovirt_network_facts.py | 1 - plugins/modules/ovirt_nic_facts.py | 1 - plugins/modules/ovirt_permission_facts.py | 1 - plugins/modules/ovirt_quota_facts.py | 1 - .../modules/ovirt_scheduling_policy_facts.py | 1 - plugins/modules/ovirt_snapshot_facts.py | 1 - plugins/modules/ovirt_storage_domain_facts.py | 1 - .../modules/ovirt_storage_template_facts.py | 1 - plugins/modules/ovirt_storage_vm_facts.py | 1 - plugins/modules/ovirt_tag_facts.py | 1 - plugins/modules/ovirt_template_facts.py | 1 - plugins/modules/ovirt_user_facts.py | 1 - plugins/modules/ovirt_vm_facts.py | 1 - plugins/modules/ovirt_vmpool_facts.py | 1 - plugins/modules/purefa_facts.py | 1 - plugins/modules/purefb_facts.py | 1 - plugins/modules/python_requirements_facts.py | 1 - plugins/modules/redfish_facts.py | 1 - .../remote_management/hpilo/hpilo_facts.py | 1 - .../remote_management/hpilo/hpilo_info.py | 10 +- .../oneview/oneview_datacenter_facts.py | 1 - .../oneview/oneview_datacenter_info.py | 11 +- .../oneview/oneview_enclosure_facts.py | 1 - .../oneview/oneview_enclosure_info.py | 11 +- .../oneview/oneview_ethernet_network_facts.py | 1 - .../oneview/oneview_ethernet_network_info.py | 10 +- .../oneview/oneview_fc_network_facts.py | 1 - .../oneview/oneview_fc_network_info.py | 10 +- .../oneview/oneview_fcoe_network_facts.py | 1 - .../oneview/oneview_fcoe_network_info.py 
| 11 +- ...neview_logical_interconnect_group_facts.py | 1 - ...oneview_logical_interconnect_group_info.py | 10 +- .../oneview/oneview_network_set_facts.py | 1 - .../oneview/oneview_network_set_info.py | 11 +- .../oneview/oneview_san_manager_facts.py | 1 - .../oneview/oneview_san_manager_info.py | 10 +- .../redfish/idrac_redfish_facts.py | 1 - .../redfish/idrac_redfish_info.py | 10 +- .../redfish/redfish_facts.py | 1 - .../remote_management/redfish/redfish_info.py | 10 +- plugins/modules/scaleway_image_facts.py | 1 - plugins/modules/scaleway_ip_facts.py | 1 - .../modules/scaleway_organization_facts.py | 1 - .../modules/scaleway_security_group_facts.py | 1 - plugins/modules/scaleway_server_facts.py | 1 - plugins/modules/scaleway_snapshot_facts.py | 1 - plugins/modules/scaleway_volume_facts.py | 1 - plugins/modules/smartos_image_facts.py | 1 - .../github/github_webhook_info.py | 3 - .../storage/glusterfs/gluster_heal_info.py | 203 ---- .../modules/storage/glusterfs/gluster_peer.py | 176 ---- .../storage/glusterfs/gluster_volume.py | 608 ------------ .../storage/netapp/na_ontap_gather_facts.py | 613 ------------ .../storage/purestorage/purefa_facts.py | 858 ----------------- .../storage/purestorage/purefb_facts.py | 652 ------------- .../system/python_requirements_facts.py | 1 - .../system/python_requirements_info.py | 3 - plugins/modules/vertica_facts.py | 1 - .../web_infrastructure/jenkins_job_facts.py | 1 - .../web_infrastructure/jenkins_job_info.py | 3 - .../web_infrastructure/nginx_status_facts.py | 160 ---- plugins/modules/xenserver_guest_facts.py | 1 - tests/sanity/ignore-2.10.txt | 71 -- tests/sanity/ignore-2.11.txt | 71 -- tests/sanity/ignore-2.12.txt | 71 -- tests/sanity/ignore-2.9.txt | 127 --- 161 files changed, 167 insertions(+), 10434 deletions(-) create mode 100644 changelogs/fragments/remove-deprecated-modules.yml delete mode 100644 plugins/doc_fragments/ovirt_facts.py delete mode 100644 plugins/module_utils/_ovirt.py delete mode 120000 
plugins/modules/ali_instance_facts.py delete mode 120000 plugins/modules/cloud/alicloud/ali_instance_facts.py delete mode 120000 plugins/modules/cloud/memset/memset_memstore_facts.py delete mode 120000 plugins/modules/cloud/memset/memset_server_facts.py delete mode 100644 plugins/modules/cloud/misc/helm.py delete mode 100644 plugins/modules/cloud/misc/ovirt.py delete mode 100644 plugins/modules/cloud/online/online_server_facts.py delete mode 100644 plugins/modules/cloud/online/online_user_facts.py delete mode 120000 plugins/modules/cloud/opennebula/one_image_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_api_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_cluster_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_disk_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_event_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_group_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_host_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_network_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_nic_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_permission_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_quota_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py delete mode 100644 
plugins/modules/cloud/ovirt/ovirt_tag_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_template_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_user_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_vm_facts.py delete mode 100644 plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py delete mode 100644 plugins/modules/cloud/scaleway/scaleway_image_facts.py delete mode 100644 plugins/modules/cloud/scaleway/scaleway_ip_facts.py delete mode 100644 plugins/modules/cloud/scaleway/scaleway_organization_facts.py delete mode 100644 plugins/modules/cloud/scaleway/scaleway_security_group_facts.py delete mode 100644 plugins/modules/cloud/scaleway/scaleway_server_facts.py delete mode 100644 plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py delete mode 100644 plugins/modules/cloud/scaleway/scaleway_volume_facts.py delete mode 120000 plugins/modules/cloud/smartos/smartos_image_facts.py delete mode 120000 plugins/modules/cloud/xenserver/xenserver_guest_facts.py delete mode 120000 plugins/modules/database/vertica/vertica_facts.py delete mode 120000 plugins/modules/gluster_heal_info.py delete mode 120000 plugins/modules/gluster_peer.py delete mode 120000 plugins/modules/gluster_volume.py delete mode 120000 plugins/modules/helm.py delete mode 120000 plugins/modules/hpilo_facts.py delete mode 120000 plugins/modules/identity/onepassword_facts.py delete mode 120000 plugins/modules/idrac_redfish_facts.py delete mode 120000 plugins/modules/jenkins_job_facts.py delete mode 120000 plugins/modules/ldap_attr.py delete mode 120000 plugins/modules/memset_memstore_facts.py delete mode 120000 plugins/modules/memset_server_facts.py delete mode 120000 plugins/modules/na_ontap_gather_facts.py delete mode 100644 plugins/modules/net_tools/ldap/ldap_attr.py delete mode 120000 plugins/modules/nginx_status_facts.py delete mode 120000 plugins/modules/one_image_facts.py delete mode 120000 plugins/modules/onepassword_facts.py delete mode 120000 
plugins/modules/oneview_datacenter_facts.py delete mode 120000 plugins/modules/oneview_enclosure_facts.py delete mode 120000 plugins/modules/oneview_ethernet_network_facts.py delete mode 120000 plugins/modules/oneview_fc_network_facts.py delete mode 120000 plugins/modules/oneview_fcoe_network_facts.py delete mode 120000 plugins/modules/oneview_logical_interconnect_group_facts.py delete mode 120000 plugins/modules/oneview_network_set_facts.py delete mode 120000 plugins/modules/oneview_san_manager_facts.py delete mode 120000 plugins/modules/online_server_facts.py delete mode 120000 plugins/modules/online_user_facts.py delete mode 120000 plugins/modules/ovirt.py delete mode 120000 plugins/modules/ovirt_affinity_label_facts.py delete mode 120000 plugins/modules/ovirt_api_facts.py delete mode 120000 plugins/modules/ovirt_cluster_facts.py delete mode 120000 plugins/modules/ovirt_datacenter_facts.py delete mode 120000 plugins/modules/ovirt_disk_facts.py delete mode 120000 plugins/modules/ovirt_event_facts.py delete mode 120000 plugins/modules/ovirt_external_provider_facts.py delete mode 120000 plugins/modules/ovirt_group_facts.py delete mode 120000 plugins/modules/ovirt_host_facts.py delete mode 120000 plugins/modules/ovirt_host_storage_facts.py delete mode 120000 plugins/modules/ovirt_network_facts.py delete mode 120000 plugins/modules/ovirt_nic_facts.py delete mode 120000 plugins/modules/ovirt_permission_facts.py delete mode 120000 plugins/modules/ovirt_quota_facts.py delete mode 120000 plugins/modules/ovirt_scheduling_policy_facts.py delete mode 120000 plugins/modules/ovirt_snapshot_facts.py delete mode 120000 plugins/modules/ovirt_storage_domain_facts.py delete mode 120000 plugins/modules/ovirt_storage_template_facts.py delete mode 120000 plugins/modules/ovirt_storage_vm_facts.py delete mode 120000 plugins/modules/ovirt_tag_facts.py delete mode 120000 plugins/modules/ovirt_template_facts.py delete mode 120000 plugins/modules/ovirt_user_facts.py delete mode 120000 
plugins/modules/ovirt_vm_facts.py delete mode 120000 plugins/modules/ovirt_vmpool_facts.py delete mode 120000 plugins/modules/purefa_facts.py delete mode 120000 plugins/modules/purefb_facts.py delete mode 120000 plugins/modules/python_requirements_facts.py delete mode 120000 plugins/modules/redfish_facts.py delete mode 120000 plugins/modules/remote_management/hpilo/hpilo_facts.py delete mode 120000 plugins/modules/remote_management/oneview/oneview_datacenter_facts.py delete mode 120000 plugins/modules/remote_management/oneview/oneview_enclosure_facts.py delete mode 120000 plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py delete mode 120000 plugins/modules/remote_management/oneview/oneview_fc_network_facts.py delete mode 120000 plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py delete mode 120000 plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py delete mode 120000 plugins/modules/remote_management/oneview/oneview_network_set_facts.py delete mode 120000 plugins/modules/remote_management/oneview/oneview_san_manager_facts.py delete mode 120000 plugins/modules/remote_management/redfish/idrac_redfish_facts.py delete mode 120000 plugins/modules/remote_management/redfish/redfish_facts.py delete mode 120000 plugins/modules/scaleway_image_facts.py delete mode 120000 plugins/modules/scaleway_ip_facts.py delete mode 120000 plugins/modules/scaleway_organization_facts.py delete mode 120000 plugins/modules/scaleway_security_group_facts.py delete mode 120000 plugins/modules/scaleway_server_facts.py delete mode 120000 plugins/modules/scaleway_snapshot_facts.py delete mode 120000 plugins/modules/scaleway_volume_facts.py delete mode 120000 plugins/modules/smartos_image_facts.py delete mode 100644 plugins/modules/storage/glusterfs/gluster_heal_info.py delete mode 100644 plugins/modules/storage/glusterfs/gluster_peer.py delete mode 100644 plugins/modules/storage/glusterfs/gluster_volume.py delete mode 
100644 plugins/modules/storage/netapp/na_ontap_gather_facts.py delete mode 100644 plugins/modules/storage/purestorage/purefa_facts.py delete mode 100644 plugins/modules/storage/purestorage/purefb_facts.py delete mode 120000 plugins/modules/system/python_requirements_facts.py delete mode 120000 plugins/modules/vertica_facts.py delete mode 120000 plugins/modules/web_infrastructure/jenkins_job_facts.py delete mode 100644 plugins/modules/web_infrastructure/nginx_status_facts.py delete mode 120000 plugins/modules/xenserver_guest_facts.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index f609289580..144eca81a7 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -186,8 +186,6 @@ files: maintainers: glitchcrab $modules/cloud/misc/cloud_init_data_facts.py: maintainers: resmo - $modules/cloud/misc/helm.py: - maintainers: flaper87 $modules/cloud/misc/proxmox.py: maintainers: $team_virt UnderGreen labels: proxmox virt @@ -348,8 +346,6 @@ files: maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0 labels: m:xml xml ignore: magnus919 - $modules/identity/onepassword_facts.py: - maintainers: Rylon $modules/identity/ipa/: maintainers: $team_ipa $modules/identity/ipa/ipa_pwpolicy.py: @@ -452,8 +448,6 @@ files: maintainers: akostyuk $modules/net_tools/ipwcli_dns.py: maintainers: cwollinger - $modules/net_tools/ldap/ldap_attr.py: - maintainers: jtyr $modules/net_tools/ldap/ldap_attrs.py: maintainers: drybjed jtyr noles $modules/net_tools/ldap/ldap_entry.py: @@ -718,8 +712,6 @@ files: maintainers: evertmulder $modules/remote_management/manageiq/manageiq_tenant.py: maintainers: evertmulder - $modules/remote_management/oneview/oneview_datacenter_facts.py: - maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr $modules/remote_management/oneview/: maintainers: adriane-cardozo fgbulsoni tmiotto $modules/remote_management/oneview/oneview_datacenter_info.py: @@ -766,12 +758,6 @@ files: maintainers: yeukhon $modules/storage/emc/emc_vnx_sg_member.py: 
maintainers: remixtj - $modules/storage/glusterfs/: - maintainers: devyanikota - $modules/storage/glusterfs/gluster_peer.py: - maintainers: sac - $modules/storage/glusterfs/gluster_volume.py: - maintainers: rosmo $modules/storage/hpe3par/ss_3par_cpg.py: maintainers: farhan7500 gautamphegde $modules/storage/ibm/: @@ -793,9 +779,6 @@ files: maintainers: johanwiren $modules/storage/zfs/zfs_delegate_admin.py: maintainers: natefoo - $modules/system/python_requirements_facts.py: - maintainers: willthames - ignore: ryansb $modules/system/aix: maintainers: $team_aix labels: aix @@ -926,10 +909,6 @@ files: labels: xfconf $modules/system/xfs_quota.py: maintainers: bushvin - $modules/web_infrastructure/jenkins_job_facts.py: - maintainers: stpierre - $modules/web_infrastructure/nginx_status_facts.py: - maintainers: resmo $modules/web_infrastructure/apache2_mod_proxy.py: maintainers: oboukili $modules/web_infrastructure/apache2_module.py: diff --git a/changelogs/fragments/remove-deprecated-modules.yml b/changelogs/fragments/remove-deprecated-modules.yml new file mode 100644 index 0000000000..fa9d9c9eb7 --- /dev/null +++ b/changelogs/fragments/remove-deprecated-modules.yml @@ -0,0 +1,66 @@ +removed_features: +- "The deprecated ali_instance_facts module has been removed. Use ali_instance_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated hpilo_facts module has been removed. Use hpilo_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated idrac_redfish_facts module has been removed. Use idrac_redfish_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated jenkins_job_facts module has been removed. Use jenkins_job_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated memset_memstore_facts module has been removed. 
Use memset_memstore_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated memset_server_facts module has been removed. Use memset_server_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated na_ontap_gather_facts module has been removed. Use netapp.ontap.na_ontap_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated nginx_status_facts module has been removed. Use nginx_status_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated one_image_facts module has been removed. Use one_image_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated onepassword_facts module has been removed. Use onepassword_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated oneview_datacenter_facts module has been removed. Use oneview_datacenter_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated oneview_enclosure_facts module has been removed. Use oneview_enclosure_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated oneview_ethernet_network_facts module has been removed. Use oneview_ethernet_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated oneview_fc_network_facts module has been removed. Use oneview_fc_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated oneview_fcoe_network_facts module has been removed. Use oneview_fcoe_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated oneview_logical_interconnect_group_facts module has been removed. 
Use oneview_logical_interconnect_group_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated oneview_network_set_facts module has been removed. Use oneview_network_set_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated oneview_san_manager_facts module has been removed. Use oneview_san_manager_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated online_server_facts module has been removed. Use online_server_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated online_user_facts module has been removed. Use online_user_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated purefa_facts module has been removed. Use purestorage.flasharray.purefa_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated purefb_facts module has been removed. Use purestorage.flasharray.purefb_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated python_requirements_facts module has been removed. Use python_requirements_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated redfish_facts module has been removed. Use redfish_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated scaleway_image_facts module has been removed. Use scaleway_image_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated scaleway_ip_facts module has been removed. Use scaleway_ip_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated scaleway_organization_facts module has been removed. 
Use scaleway_organization_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated scaleway_security_group_facts module has been removed. Use scaleway_security_group_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated scaleway_server_facts module has been removed. Use scaleway_server_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated scaleway_snapshot_facts module has been removed. Use scaleway_snapshot_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated scaleway_volume_facts module has been removed. Use scaleway_volume_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated smartos_image_facts module has been removed. Use smartos_image_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated vertica_facts module has been removed. Use vertica_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated xenserver_guest_facts module has been removed. Use xenserver_guest_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt module has been removed. Use ovirt.ovirt.ovirt_vm instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_affinity_label_facts module has been removed. Use ovirt.ovirt.ovirt_affinity_label_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_api_facts module has been removed. Use ovirt.ovirt.ovirt_api_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_cluster_facts module has been removed. Use ovirt.ovirt.ovirt_cluster_info instead (https://github.com/ansible-collections/community.general/pull/1924)." 
+- "The deprecated ovirt_datacenter_facts module has been removed. Use ovirt.ovirt.ovirt_datacenter_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_disk_facts module has been removed. Use ovirt.ovirt.ovirt_disk_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_event_facts module has been removed. Use ovirt.ovirt.ovirt_event_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_external_provider_facts module has been removed. Use ovirt.ovirt.ovirt_external_provider_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_group_facts module has been removed. Use ovirt.ovirt.ovirt_group_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_host_facts module has been removed. Use ovirt.ovirt.ovirt_host_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_host_storage_facts module has been removed. Use ovirt.ovirt.ovirt_host_storage_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_network_facts module has been removed. Use ovirt.ovirt.ovirt_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_nic_facts module has been removed. Use ovirt.ovirt.ovirt_nic_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_permission_facts module has been removed. Use ovirt.ovirt.ovirt_permission_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_quota_facts module has been removed. Use ovirt.ovirt.ovirt_quota_info instead (https://github.com/ansible-collections/community.general/pull/1924)." 
+- "The deprecated ovirt_scheduling_policy_facts module has been removed. Use ovirt.ovirt.ovirt_scheduling_policy_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_snapshot_facts module has been removed. Use ovirt.ovirt.ovirt_snapshot_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_storage_domain_facts module has been removed. Use ovirt.ovirt.ovirt_storage_domain_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_storage_template_facts module has been removed. Use ovirt.ovirt.ovirt_storage_template_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_storage_vm_facts module has been removed. Use ovirt.ovirt.ovirt_storage_vm_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_tag_facts module has been removed. Use ovirt.ovirt.ovirt_tag_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_template_facts module has been removed. Use ovirt.ovirt.ovirt_template_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_user_facts module has been removed. Use ovirt.ovirt.ovirt_user_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_vm_facts module has been removed. Use ovirt.ovirt.ovirt_vm_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ovirt_vmpool_facts module has been removed. Use ovirt.ovirt.ovirt_vmpool_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The ovirt_facts docs fragment has been removed (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated gluster_heal_info module has been removed. 
Use gluster.gluster.gluster_heal_info instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated gluster_peer module has been removed. Use gluster.gluster.gluster_peer instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated gluster_volume module has been removed. Use gluster.gluster.gluster_volume instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated helm module has been removed. Use community.kubernetes.helm instead (https://github.com/ansible-collections/community.general/pull/1924)." +- "The deprecated ldap_attr module has been removed. Use ldap_attrs instead (https://github.com/ansible-collections/community.general/pull/1924)." diff --git a/galaxy.yml b/galaxy.yml index bb1eb75153..3676516625 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 2.5.0 +version: 3.0.0 readme: README.md authors: - Ansible (https://github.com/ansible) diff --git a/meta/runtime.yml b/meta/runtime.yml index 00eed0fa84..c116029974 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -39,7 +39,7 @@ plugin_routing: redirect: community.hashi_vault.hashi_vault modules: ali_instance_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.ali_instance_info instead. docker_compose: @@ -159,8 +159,7 @@ plugin_routing: gcpubsub_info: redirect: community.google.gcpubsub_info gcpubsub_facts: - redirect: community.google.gcpubsub_info - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.google.gcpubsub_info instead. gcspanner: @@ -171,22 +170,23 @@ plugin_routing: tombstone: removal_version: 2.0.0 warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead. - gluster_heal_info: - deprecation: - removal_version: 3.0.0 - warning_text: The gluster modules have migrated to the gluster.gluster collection. 
Use gluster.gluster.gluster_heal_info instead. - gluster_peer: - deprecation: - removal_version: 3.0.0 - warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_peer instead. - gluster_volume: - deprecation: - removal_version: 3.0.0 - warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_volume instead. - helm: - deprecation: - removal_version: 3.0.0 - warning_text: The helm module in community.general has been deprecated. Use community.kubernetes.helm instead. + # Adding tombstones burns the old name, so we simply remove the entries: + # gluster_heal_info: + # tombstone: + # removal_version: 3.0.0 + # warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_heal_info instead. + # gluster_peer: + # tombstone: + # removal_version: 3.0.0 + # warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_peer instead. + # gluster_volume: + # tombstone: + # removal_version: 3.0.0 + # warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_volume instead. + # helm: + # tombstone: + # removal_version: 3.0.0 + # warning_text: Use community.kubernetes.helm instead. hetzner_failover_ip: redirect: community.hrobot.failover_ip hetzner_failover_ip_info: @@ -196,19 +196,19 @@ plugin_routing: hetzner_firewall_info: redirect: community.hrobot.firewall_info hpilo_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.hpilo_info instead. idrac_firmware: redirect: dellemc.openmanage.idrac_firmware idrac_redfish_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.idrac_redfish_info instead. 
idrac_server_config_profile: redirect: dellemc.openmanage.idrac_server_config_profile jenkins_job_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.jenkins_job_info instead. katello: @@ -228,7 +228,7 @@ plugin_routing: kubevirt_vm: redirect: community.kubevirt.kubevirt_vm ldap_attr: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.ldap_attrs instead. logicmonitor: @@ -240,11 +240,11 @@ plugin_routing: removal_version: 1.0.0 warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017. memset_memstore_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.memset_memstore_info instead. memset_server_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.memset_server_info instead. na_cdot_aggregate: @@ -280,161 +280,161 @@ plugin_routing: removal_version: 2.0.0 warning_text: Use netapp.ontap.na_ontap_volume instead. na_ontap_gather_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use netapp.ontap.na_ontap_info instead. nginx_status_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.nginx_status_info instead. ome_device_info: redirect: dellemc.openmanage.ome_device_info one_image_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.one_image_info instead. onepassword_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.onepassword_info instead. oneview_datacenter_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_datacenter_info instead. oneview_enclosure_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_enclosure_info instead. 
oneview_ethernet_network_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_ethernet_network_info instead. oneview_fc_network_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_fc_network_info instead. oneview_fcoe_network_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_fcoe_network_info instead. oneview_logical_interconnect_group_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_logical_interconnect_group_info instead. oneview_network_set_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_network_set_info instead. oneview_san_manager_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_san_manager_info instead. online_server_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.online_server_info instead. online_user_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.online_user_info instead. ovirt: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_vm instead. ovirt_affinity_label_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_affinity_label_info instead. ovirt_api_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_api_info instead. ovirt_cluster_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_cluster_info instead. ovirt_datacenter_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_datacenter_info instead. ovirt_disk_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_disk_info instead. 
ovirt_event_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_event_info instead. ovirt_external_provider_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_external_provider_info instead. ovirt_group_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_group_info instead. ovirt_host_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_host_info instead. ovirt_host_storage_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_host_storage_info instead. ovirt_network_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_network_info instead. ovirt_nic_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_nic_info instead. ovirt_permission_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_permission_info instead. ovirt_quota_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_quota_info instead. ovirt_scheduling_policy_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_scheduling_policy_info instead. ovirt_snapshot_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_snapshot_info instead. ovirt_storage_domain_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_storage_domain_info instead. ovirt_storage_template_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_storage_template_info instead. ovirt_storage_vm_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_storage_vm_info instead. 
ovirt_tag_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_tag_info instead. ovirt_template_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_template_info instead. ovirt_user_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_user_info instead. ovirt_vm_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_vm_info instead. ovirt_vmpool_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead. postgresql_copy: @@ -482,47 +482,47 @@ plugin_routing: postgresql_user: redirect: community.postgresql.postgresql_user purefa_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use purestorage.flasharray.purefa_info instead. purefb_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use purestorage.flashblade.purefb_info instead. python_requirements_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.python_requirements_info instead. redfish_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.redfish_info instead. scaleway_image_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_image_info instead. scaleway_ip_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_ip_info instead. scaleway_organization_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_organization_info instead. scaleway_security_group_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_security_group_info instead. scaleway_server_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_server_info instead. 
scaleway_snapshot_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_snapshot_info instead. scaleway_volume_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_volume_info instead. sf_account_manager: @@ -546,15 +546,15 @@ plugin_routing: removal_version: 2.0.0 warning_text: Use netapp.elementsw.na_elementsw_volume instead. smartos_image_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.smartos_image_info instead. vertica_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.vertica_info instead. xenserver_guest_facts: - deprecation: + tombstone: removal_version: 3.0.0 warning_text: Use community.general.xenserver_guest_info instead. doc_fragments: diff --git a/plugins/doc_fragments/ovirt_facts.py b/plugins/doc_fragments/ovirt_facts.py deleted file mode 100644 index 43b9b37b0c..0000000000 --- a/plugins/doc_fragments/ovirt_facts.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Red Hat, Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # info standard oVirt documentation fragment - DOCUMENTATION = r''' -options: - fetch_nested: - description: - - If I(yes) the module will fetch additional data from the API. - - It will fetch only IDs of nested entity. It doesn't fetch multiple levels of nested attributes. - Only the attributes of the current entity. User can configure to fetch other - attributes of the nested entities by specifying C(nested_attributes). - type: bool - default: false - nested_attributes: - description: - - Specifies list of the attributes which should be fetched from the API. - - This parameter apply only when C(fetch_nested) is I(true). 
- type: list - auth: - description: - - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:" - - C(username)[I(required)] - The name of the user, something like I(admin@internal). - Default value is set by I(OVIRT_USERNAME) environment variable. - - "C(password)[I(required)] - The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable." - - "C(url)- A string containing the API URL of the server, usually - something like `I(https://server.example.com/ovirt-engine/api)`. Default value is set by I(OVIRT_URL) environment variable. - Either C(url) or C(hostname) is required." - - "C(hostname) - A string containing the hostname of the server, usually - something like `I(server.example.com)`. Default value is set by I(OVIRT_HOSTNAME) environment variable. - Either C(url) or C(hostname) is required." - - "C(token) - Token to be used instead of login with username/password. Default value is set by I(OVIRT_TOKEN) environment variable." - - "C(insecure) - A boolean flag that indicates if the server TLS - certificate and host name should be checked." - - "C(ca_file) - A PEM file containing the trusted CA certificates. The - certificate presented by the server will be verified using these CA - certificates. If `C(ca_file)` parameter is not set, system wide - CA certificate store is used. Default value is set by I(OVIRT_CAFILE) environment variable." - - "C(kerberos) - A boolean flag indicating if Kerberos authentication - should be used instead of the default basic authentication." - - "C(headers) - Dictionary of HTTP headers to be added to each API call." - type: dict - required: true -requirements: - - python >= 2.7 - - ovirt-engine-sdk-python >= 4.3.0 -notes: - - "In order to use this module you have to install oVirt Python SDK. 
- To ensure it's installed with correct version you can create the following task: - ansible.builtin.pip: name=ovirt-engine-sdk-python version=4.3.0" -''' diff --git a/plugins/module_utils/_ovirt.py b/plugins/module_utils/_ovirt.py deleted file mode 100644 index 5ccd1482a0..0000000000 --- a/plugins/module_utils/_ovirt.py +++ /dev/null @@ -1,871 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import inspect -import os -import time - -from abc import ABCMeta, abstractmethod -from datetime import datetime -from distutils.version import LooseVersion - -from ansible_collections.community.general.plugins.module_utils.cloud import CloudRetry -from ansible.module_utils.common._collections_compat import Mapping - -try: - from enum import Enum # enum is a ovirtsdk4 requirement - import ovirtsdk4 as sdk - import ovirtsdk4.version as sdk_version - import ovirtsdk4.types as otypes - HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.3.0') -except ImportError: - HAS_SDK = False - - -BYTES_MAP = { - 'kib': 2**10, - 'mib': 2**20, - 'gib': 2**30, - 'tib': 2**40, - 'pib': 2**50, -} - - -def check_sdk(module): - if not HAS_SDK: - module.fail_json( - msg='ovirtsdk4 version 4.3.0 or higher is required for this module' - ) - - -def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None): - """ - Convert SDK Struct type into dictionary. 
- """ - res = {} - - def resolve_href(value): - # Fetch nested values of struct: - try: - value = connection.follow_link(value) - except sdk.Error: - value = None - nested_obj = dict( - (attr, convert_value(getattr(value, attr))) - for attr in attributes if getattr(value, attr, None) is not None - ) - nested_obj['id'] = getattr(value, 'id', None) - nested_obj['href'] = getattr(value, 'href', None) - return nested_obj - - def remove_underscore(val): - if val.startswith('_'): - val = val[1:] - remove_underscore(val) - return val - - def convert_value(value): - nested = False - - if isinstance(value, sdk.Struct): - if not fetch_nested or not value.href: - return get_dict_of_struct(value) - return resolve_href(value) - - elif isinstance(value, Enum) or isinstance(value, datetime): - return str(value) - elif isinstance(value, list) or isinstance(value, sdk.List): - if isinstance(value, sdk.List) and fetch_nested and value.href: - try: - value = connection.follow_link(value) - nested = True - except sdk.Error: - value = [] - - ret = [] - for i in value: - if isinstance(i, sdk.Struct): - if not nested and fetch_nested and i.href: - ret.append(resolve_href(i)) - elif not nested: - ret.append(get_dict_of_struct(i)) - else: - nested_obj = dict( - (attr, convert_value(getattr(i, attr))) - for attr in attributes if getattr(i, attr, None) - ) - nested_obj['id'] = getattr(i, 'id', None) - ret.append(nested_obj) - elif isinstance(i, Enum): - ret.append(str(i)) - else: - ret.append(i) - return ret - else: - return value - - if struct is not None: - for key, value in struct.__dict__.items(): - if value is None: - continue - - key = remove_underscore(key) - res[key] = convert_value(value) - - return res - - -def engine_version(connection): - """ - Return string representation of oVirt engine version. 
- """ - engine_api = connection.system_service().get() - engine_version = engine_api.product_info.version - return '%s.%s' % (engine_version.major, engine_version.minor) - - -def create_connection(auth): - """ - Create a connection to Python SDK, from task `auth` parameter. - If user doesnt't have SSO token the `auth` dictionary has following parameters mandatory: - url, username, password - - If user has SSO token the `auth` dictionary has following parameters mandatory: - url, token - - The `ca_file` parameter is mandatory in case user want to use secure connection, - in case user want to use insecure connection, it's mandatory to send insecure=True. - - :param auth: dictionary which contains needed values for connection creation - :return: Python SDK connection - """ - - url = auth.get('url') - if url is None and auth.get('hostname') is not None: - url = 'https://{0}/ovirt-engine/api'.format(auth.get('hostname')) - - return sdk.Connection( - url=url, - username=auth.get('username'), - password=auth.get('password'), - ca_file=auth.get('ca_file', None), - insecure=auth.get('insecure', False), - token=auth.get('token', None), - kerberos=auth.get('kerberos', None), - headers=auth.get('headers', None), - ) - - -def convert_to_bytes(param): - """ - This method convert units to bytes, which follow IEC standard. - - :param param: value to be converted - """ - if param is None: - return None - - # Get rid of whitespaces: - param = ''.join(param.split()) - - # Convert to bytes: - if len(param) > 3 and param[-3].lower() in ['k', 'm', 'g', 't', 'p']: - return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1) - elif param.isdigit(): - return int(param) * 2**10 - else: - raise ValueError( - "Unsupported value(IEC supported): '{value}'".format(value=param) - ) - - -def follow_link(connection, link): - """ - This method returns the entity of the element which link points to. 
- - :param connection: connection to the Python SDK - :param link: link of the entity - :return: entity which link points to - """ - - if link: - return connection.follow_link(link) - else: - return None - - -def get_link_name(connection, link): - """ - This method returns the name of the element which link points to. - - :param connection: connection to the Python SDK - :param link: link of the entity - :return: name of the entity, which link points to - """ - - if link: - return connection.follow_link(link).name - else: - return None - - -def equal(param1, param2, ignore_case=False): - """ - Compare two parameters and return if they are equal. - This parameter doesn't run equal operation if first parameter is None. - With this approach we don't run equal operation in case user don't - specify parameter in their task. - - :param param1: user inputted parameter - :param param2: value of entity parameter - :return: True if parameters are equal or first parameter is None, otherwise False - """ - if param1 is not None: - if ignore_case: - return param1.lower() == param2.lower() - return param1 == param2 - return True - - -def search_by_attributes(service, list_params=None, **kwargs): - """ - Search for the entity by attributes. Nested entities don't support search - via REST, so in case using search for nested entity we return all entities - and filter them by specified attributes. - """ - list_params = list_params or {} - # Check if 'list' method support search(look for search parameter): - if 'search' in inspect.getargspec(service.list)[0]: - res = service.list( - # There must be double quotes around name, because some oVirt resources it's possible to create then with space in name. 
- search=' and '.join('{0}="{1}"'.format(k, v) for k, v in kwargs.items()), - **list_params - ) - else: - res = [ - e for e in service.list(**list_params) if len([ - k for k, v in kwargs.items() if getattr(e, k, None) == v - ]) == len(kwargs) - ] - - res = res or [None] - return res[0] - - -def search_by_name(service, name, **kwargs): - """ - Search for the entity by its name. Nested entities don't support search - via REST, so in case using search for nested entity we return all entities - and filter them by name. - - :param service: service of the entity - :param name: name of the entity - :return: Entity object returned by Python SDK - """ - # Check if 'list' method support search(look for search parameter): - if 'search' in inspect.getargspec(service.list)[0]: - res = service.list( - # There must be double quotes around name, because some oVirt resources it's possible to create then with space in name. - search='name="{name}"'.format(name=name) - ) - else: - res = [e for e in service.list() if e.name == name] - - if kwargs: - res = [ - e for e in service.list() if len([ - k for k, v in kwargs.items() if getattr(e, k, None) == v - ]) == len(kwargs) - ] - - res = res or [None] - return res[0] - - -def get_entity(service, get_params=None): - """ - Ignore SDK Error in case of getting an entity from service. - """ - entity = None - try: - if get_params is not None: - entity = service.get(**get_params) - else: - entity = service.get() - except sdk.Error: - # We can get here 404, we should ignore it, in case - # of removing entity for example. - pass - return entity - - -def get_id_by_name(service, name, raise_error=True, ignore_case=False): - """ - Search an entity ID by it's name. - """ - entity = search_by_name(service, name) - - if entity is not None: - return entity.id - - if raise_error: - raise Exception("Entity '%s' was not found." 
% name) - - -def wait( - service, - condition, - fail_condition=lambda e: False, - timeout=180, - wait=True, - poll_interval=3, -): - """ - Wait until entity fulfill expected condition. - - :param service: service of the entity - :param condition: condition to be fulfilled - :param fail_condition: if this condition is true, raise Exception - :param timeout: max time to wait in seconds - :param wait: if True wait for condition, if False don't wait - :param poll_interval: Number of seconds we should wait until next condition check - """ - # Wait until the desired state of the entity: - if wait: - start = time.time() - while time.time() < start + timeout: - # Exit if the condition of entity is valid: - entity = get_entity(service) - if condition(entity): - return - elif fail_condition(entity): - raise Exception("Error while waiting on result state of the entity.") - - # Sleep for `poll_interval` seconds if none of the conditions apply: - time.sleep(float(poll_interval)) - - raise Exception("Timeout exceed while waiting on result state of the entity.") - - -def __get_auth_dict(): - OVIRT_URL = os.environ.get('OVIRT_URL') - OVIRT_HOSTNAME = os.environ.get('OVIRT_HOSTNAME') - OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME') - OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD') - OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN') - OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE') - OVIRT_INSECURE = OVIRT_CAFILE is None - - env_vars = None - if OVIRT_URL is None and OVIRT_HOSTNAME is not None: - OVIRT_URL = 'https://{0}/ovirt-engine/api'.format(OVIRT_HOSTNAME) - if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN): - env_vars = { - 'url': OVIRT_URL, - 'username': OVIRT_USERNAME, - 'password': OVIRT_PASSWORD, - 'insecure': OVIRT_INSECURE, - 'token': OVIRT_TOKEN, - 'ca_file': OVIRT_CAFILE, - } - if env_vars is not None: - auth = dict(default=env_vars, type='dict') - else: - auth = dict(required=True, type='dict') - - return auth - - -def 
ovirt_info_full_argument_spec(**kwargs): - """ - Extend parameters of info module with parameters which are common to all - oVirt info modules. - - :param kwargs: kwargs to be extended - :return: extended dictionary with common parameters - """ - spec = dict( - auth=__get_auth_dict(), - fetch_nested=dict(default=False, type='bool'), - nested_attributes=dict(type='list', default=list()), - ) - spec.update(kwargs) - return spec - - -# Left for third-party module compatibility -def ovirt_facts_full_argument_spec(**kwargs): - """ - This is deprecated. Please use ovirt_info_full_argument_spec instead! - - :param kwargs: kwargs to be extended - :return: extended dictionary with common parameters - """ - return ovirt_info_full_argument_spec(**kwargs) - - -def ovirt_full_argument_spec(**kwargs): - """ - Extend parameters of module with parameters which are common to all oVirt modules. - - :param kwargs: kwargs to be extended - :return: extended dictionary with common parameters - """ - spec = dict( - auth=__get_auth_dict(), - timeout=dict(default=180, type='int'), - wait=dict(default=True, type='bool'), - poll_interval=dict(default=3, type='int'), - fetch_nested=dict(default=False, type='bool'), - nested_attributes=dict(type='list', default=list()), - ) - spec.update(kwargs) - return spec - - -def check_params(module): - """ - Most modules must have either `name` or `id` specified. - """ - if module.params.get('name') is None and module.params.get('id') is None: - module.fail_json(msg='"name" or "id" is required') - - -def engine_supported(connection, version): - return LooseVersion(engine_version(connection)) >= LooseVersion(version) - - -def check_support(version, connection, module, params): - """ - Check if parameters used by user are supported by oVirt Python SDK - and oVirt engine. 
- """ - api_version = LooseVersion(engine_version(connection)) - version = LooseVersion(version) - for param in params: - if module.params.get(param) is not None: - return LooseVersion(sdk_version.VERSION) >= version and api_version >= version - - return True - - -class BaseModule(object): - """ - This is base class for oVirt modules. oVirt modules should inherit this - class and override method to customize specific needs of the module. - The only abstract method of this class is `build_entity`, which must - to be implemented in child class. - """ - __metaclass__ = ABCMeta - - def __init__(self, connection, module, service, changed=False): - self._connection = connection - self._module = module - self._service = service - self._changed = changed - self._diff = {'after': dict(), 'before': dict()} - - @property - def changed(self): - return self._changed - - @changed.setter - def changed(self, changed): - if not self._changed: - self._changed = changed - - @abstractmethod - def build_entity(self): - """ - This method should return oVirt Python SDK type, which we want to - create or update, initialized by values passed by Ansible module. - - For example if we want to create VM, we will return following: - types.Vm(name=self._module.params['vm_name']) - - :return: Specific instance of sdk.Struct. - """ - pass - - def param(self, name, default=None): - """ - Return a module parameter specified by it's name. - """ - return self._module.params.get(name, default) - - def update_check(self, entity): - """ - This method handle checks whether the entity values are same as values - passed to ansible module. By default we don't compare any values. - - :param entity: Entity we want to compare with Ansible module values. - :return: True if values are same, so we don't need to update the entity. - """ - return True - - def pre_create(self, entity): - """ - This method is called right before entity is created. - - :param entity: Entity to be created or updated. 
- """ - pass - - def post_create(self, entity): - """ - This method is called right after entity is created. - - :param entity: Entity which was created. - """ - pass - - def post_update(self, entity): - """ - This method is called right after entity is updated. - - :param entity: Entity which was updated. - """ - pass - - def diff_update(self, after, update): - for k, v in update.items(): - if isinstance(v, Mapping): - after[k] = self.diff_update(after.get(k, dict()), v) - else: - after[k] = update[k] - return after - - def create( - self, - entity=None, - result_state=None, - fail_condition=lambda e: False, - search_params=None, - update_params=None, - _wait=None, - force_create=False, - **kwargs - ): - """ - Method which is called when state of the entity is 'present'. If user - don't provide `entity` parameter the entity is searched using - `search_params` parameter. If entity is found it's updated, whether - the entity should be updated is checked by `update_check` method. - The corresponding updated entity is build by `build_entity` method. - - Function executed after entity is created can optionally be specified - in `post_create` parameter. Function executed after entity is updated - can optionally be specified in `post_update` parameter. - - :param entity: Entity we want to update, if exists. - :param result_state: State which should entity has in order to finish task. - :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised. - :param search_params: Dictionary of parameters to be used for search. - :param update_params: The params which should be passed to update method. - :param kwargs: Additional parameters passed when creating entity. - :return: Dictionary with values returned by Ansible module. 
- """ - if entity is None and not force_create: - entity = self.search_entity(search_params) - - self.pre_create(entity) - - if entity: - # Entity exists, so update it: - entity_service = self._service.service(entity.id) - if not self.update_check(entity): - new_entity = self.build_entity() - if not self._module.check_mode: - update_params = update_params or {} - updated_entity = entity_service.update( - new_entity, - **update_params - ) - self.post_update(entity) - - # Update diffs only if user specified --diff parameter, - # so we don't useless overload API: - if self._module._diff: - before = get_dict_of_struct( - entity, - self._connection, - fetch_nested=True, - attributes=['name'], - ) - after = before.copy() - self.diff_update(after, get_dict_of_struct(new_entity)) - self._diff['before'] = before - self._diff['after'] = after - - self.changed = True - else: - # Entity don't exists, so create it: - if not self._module.check_mode: - entity = self._service.add( - self.build_entity(), - **kwargs - ) - self.post_create(entity) - self.changed = True - - if not self._module.check_mode: - # Wait for the entity to be created and to be in the defined state: - entity_service = self._service.service(entity.id) - - def state_condition(entity): - return entity - - if result_state: - - def state_condition(entity): - return entity and entity.status == result_state - - wait( - service=entity_service, - condition=state_condition, - fail_condition=fail_condition, - wait=_wait if _wait is not None else self._module.params['wait'], - timeout=self._module.params['timeout'], - poll_interval=self._module.params['poll_interval'], - ) - - return { - 'changed': self.changed, - 'id': getattr(entity, 'id', None), - type(entity).__name__.lower(): get_dict_of_struct( - struct=entity, - connection=self._connection, - fetch_nested=self._module.params.get('fetch_nested'), - attributes=self._module.params.get('nested_attributes'), - ), - 'diff': self._diff, - } - - def pre_remove(self, 
entity): - """ - This method is called right before entity is removed. - - :param entity: Entity which we want to remove. - """ - pass - - def entity_name(self, entity): - return "{e_type} '{e_name}'".format( - e_type=type(entity).__name__.lower(), - e_name=getattr(entity, 'name', None), - ) - - def remove(self, entity=None, search_params=None, **kwargs): - """ - Method which is called when state of the entity is 'absent'. If user - don't provide `entity` parameter the entity is searched using - `search_params` parameter. If entity is found it's removed. - - Function executed before remove is executed can optionally be specified - in `pre_remove` parameter. - - :param entity: Entity we want to remove. - :param search_params: Dictionary of parameters to be used for search. - :param kwargs: Additional parameters passed when removing entity. - :return: Dictionary with values returned by Ansible module. - """ - if entity is None: - entity = self.search_entity(search_params) - - if entity is None: - return { - 'changed': self.changed, - 'msg': "Entity wasn't found." 
- } - - self.pre_remove(entity) - - entity_service = self._service.service(entity.id) - if not self._module.check_mode: - entity_service.remove(**kwargs) - wait( - service=entity_service, - condition=lambda entity: not entity, - wait=self._module.params['wait'], - timeout=self._module.params['timeout'], - poll_interval=self._module.params['poll_interval'], - ) - self.changed = True - - return { - 'changed': self.changed, - 'id': entity.id, - type(entity).__name__.lower(): get_dict_of_struct( - struct=entity, - connection=self._connection, - fetch_nested=self._module.params.get('fetch_nested'), - attributes=self._module.params.get('nested_attributes'), - ), - } - - def action( - self, - action, - entity=None, - action_condition=lambda e: e, - wait_condition=lambda e: e, - fail_condition=lambda e: False, - pre_action=lambda e: e, - post_action=lambda e: None, - search_params=None, - **kwargs - ): - """ - This method is executed when we want to change the state of some oVirt - entity. The action to be executed on oVirt service is specified by - `action` parameter. Whether the action should be executed can be - specified by passing `action_condition` parameter. State which the - entity should be in after execution of the action can be specified - by `wait_condition` parameter. - - Function executed before an action on entity can optionally be specified - in `pre_action` parameter. Function executed after an action on entity can - optionally be specified in `post_action` parameter. - - :param action: Action which should be executed by service on entity. - :param entity: Entity we want to run action on. - :param action_condition: Function which is executed when checking if action should be executed. - :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised. - :param wait_condition: Function which is executed when waiting on result state. - :param pre_action: Function which is executed before running the action. 
- :param post_action: Function which is executed after running the action. - :param search_params: Dictionary of parameters to be used for search. - :param kwargs: Additional parameters passed to action. - :return: Dictionary with values returned by Ansible module. - """ - if entity is None: - entity = self.search_entity(search_params) - - entity = pre_action(entity) - - if entity is None: - self._module.fail_json( - msg="Entity not found, can't run action '{0}'.".format( - action - ) - ) - - entity_service = self._service.service(entity.id) - entity = entity_service.get() - if action_condition(entity): - if not self._module.check_mode: - getattr(entity_service, action)(**kwargs) - self.changed = True - - post_action(entity) - - wait( - service=self._service.service(entity.id), - condition=wait_condition, - fail_condition=fail_condition, - wait=self._module.params['wait'], - timeout=self._module.params['timeout'], - poll_interval=self._module.params['poll_interval'], - ) - return { - 'changed': self.changed, - 'id': entity.id, - type(entity).__name__.lower(): get_dict_of_struct( - struct=entity, - connection=self._connection, - fetch_nested=self._module.params.get('fetch_nested'), - attributes=self._module.params.get('nested_attributes'), - ), - 'diff': self._diff, - } - - def wait_for_import(self, condition=lambda e: True): - if self._module.params['wait']: - start = time.time() - timeout = self._module.params['timeout'] - poll_interval = self._module.params['poll_interval'] - while time.time() < start + timeout: - entity = self.search_entity() - if entity and condition(entity): - return entity - time.sleep(poll_interval) - - def search_entity(self, search_params=None, list_params=None): - """ - Always first try to search by `ID`, if ID isn't specified, - check if user constructed special search in `search_params`, - if not search by `name`. 
- """ - entity = None - - if 'id' in self._module.params and self._module.params['id'] is not None: - entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params) - elif search_params is not None: - entity = search_by_attributes(self._service, list_params=list_params, **search_params) - elif self._module.params.get('name') is not None: - entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name']) - - return entity - - def _get_major(self, full_version): - if full_version is None or full_version == "": - return None - if isinstance(full_version, otypes.Version): - return int(full_version.major) - return int(full_version.split('.')[0]) - - def _get_minor(self, full_version): - if full_version is None or full_version == "": - return None - if isinstance(full_version, otypes.Version): - return int(full_version.minor) - return int(full_version.split('.')[1]) - - -def _sdk4_error_maybe(): - """ - Allow for ovirtsdk4 not being installed. - """ - if HAS_SDK: - return sdk.Error - return type(None) - - -class OvirtRetry(CloudRetry): - base_class = _sdk4_error_maybe() - - @staticmethod - def status_code_from_exception(error): - return error.code - - @staticmethod - def found(response_code, catch_extra_error_codes=None): - # This is a list of error codes to retry. 
- retry_on = [ - # HTTP status: Conflict - 409, - ] - if catch_extra_error_codes: - retry_on.extend(catch_extra_error_codes) - - return response_code in retry_on diff --git a/plugins/modules/ali_instance_facts.py b/plugins/modules/ali_instance_facts.py deleted file mode 120000 index 7c9f5ebe6e..0000000000 --- a/plugins/modules/ali_instance_facts.py +++ /dev/null @@ -1 +0,0 @@ -cloud/alicloud/ali_instance_facts.py \ No newline at end of file diff --git a/plugins/modules/cloud/alicloud/ali_instance_facts.py b/plugins/modules/cloud/alicloud/ali_instance_facts.py deleted file mode 120000 index 5202c55448..0000000000 --- a/plugins/modules/cloud/alicloud/ali_instance_facts.py +++ /dev/null @@ -1 +0,0 @@ -ali_instance_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/alicloud/ali_instance_info.py b/plugins/modules/cloud/alicloud/ali_instance_info.py index 33b3f8a6ad..8a3b8aeed0 100644 --- a/plugins/modules/cloud/alicloud/ali_instance_info.py +++ b/plugins/modules/cloud/alicloud/ali_instance_info.py @@ -383,9 +383,6 @@ def main(): ) ) module = AnsibleModule(argument_spec=argument_spec) - if module._name in ('ali_instance_facts', 'community.general.ali_instance_facts'): - module.deprecate("The 'ali_instance_facts' module has been renamed to 'ali_instance_info'", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 if HAS_FOOTMARK is False: module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) diff --git a/plugins/modules/cloud/memset/memset_memstore_facts.py b/plugins/modules/cloud/memset/memset_memstore_facts.py deleted file mode 120000 index aa78805717..0000000000 --- a/plugins/modules/cloud/memset/memset_memstore_facts.py +++ /dev/null @@ -1 +0,0 @@ -memset_memstore_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/memset/memset_memstore_info.py b/plugins/modules/cloud/memset/memset_memstore_info.py index 5eea6ab191..df5ede1a62 100644 --- 
a/plugins/modules/cloud/memset/memset_memstore_info.py +++ b/plugins/modules/cloud/memset/memset_memstore_info.py @@ -151,9 +151,6 @@ def main(): ), supports_check_mode=False ) - if module._name in ('memset_memstore_facts', 'community.general.memset_memstore_facts'): - module.deprecate("The 'memset_memstore_facts' module has been renamed to 'memset_memstore_info'", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 # populate the dict with the user-provided vars. args = dict() diff --git a/plugins/modules/cloud/memset/memset_server_facts.py b/plugins/modules/cloud/memset/memset_server_facts.py deleted file mode 120000 index 0a5766aee4..0000000000 --- a/plugins/modules/cloud/memset/memset_server_facts.py +++ /dev/null @@ -1 +0,0 @@ -memset_server_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/memset/memset_server_info.py b/plugins/modules/cloud/memset/memset_server_info.py index d8943c1454..50fe39fd99 100644 --- a/plugins/modules/cloud/memset/memset_server_info.py +++ b/plugins/modules/cloud/memset/memset_server_info.py @@ -276,9 +276,6 @@ def main(): ), supports_check_mode=False ) - if module._name in ('memset_server_facts', 'community.general.memset_server_facts'): - module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 # populate the dict with the user-provided vars. 
args = dict() diff --git a/plugins/modules/cloud/misc/helm.py b/plugins/modules/cloud/misc/helm.py deleted file mode 100644 index dd592d6e92..0000000000 --- a/plugins/modules/cloud/misc/helm.py +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/python -# (c) 2016, Flavio Percoco -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -deprecated: - removed_in: 3.0.0 # was Ansible 2.14 - why: For more details https://github.com/ansible/ansible/issues/61546. - alternative: Use M(community.kubernetes.helm) instead. -module: helm -short_description: Manages Kubernetes packages with the Helm package manager -author: "Flavio Percoco (@flaper87)" -description: - - Install, upgrade, delete and list packages with the Helm package manager. -requirements: - - "pyhelm" - - "grpcio" -options: - host: - description: - - Tiller's server host. - type: str - default: "localhost" - port: - description: - - Tiller's server port. - type: int - default: 44134 - namespace: - description: - - Kubernetes namespace where the chart should be installed. - type: str - default: "default" - name: - description: - - Release name to manage. - type: str - state: - description: - - Whether to install C(present), remove C(absent), or purge C(purged) a package. - choices: ['absent', 'purged', 'present'] - type: str - default: "present" - chart: - description: - - A map describing the chart to install. See examples for available options. - type: dict - default: {} - values: - description: - - A map of value options for the chart. - type: dict - default: {} - disable_hooks: - description: - - Whether to disable hooks during the uninstall process. 
- type: bool - default: 'no' -''' - -RETURN = ''' # ''' - -EXAMPLES = ''' -- name: Install helm chart - community.general.helm: - host: localhost - chart: - name: memcached - version: 0.4.0 - source: - type: repo - location: https://kubernetes-charts.storage.googleapis.com - state: present - name: my-memcached - namespace: default - -- name: Uninstall helm chart - community.general.helm: - host: localhost - state: absent - name: my-memcached - -- name: Install helm chart from a git repo - community.general.helm: - host: localhost - chart: - source: - type: git - location: https://github.com/user/helm-chart.git - state: present - name: my-example - namespace: default - values: - foo: "bar" - -- name: Install helm chart from a git repo specifying path - community.general.helm: - host: localhost - chart: - source: - type: git - location: https://github.com/helm/charts.git - path: stable/memcached - state: present - name: my-memcached - namespace: default - values: "{{ lookup('file', '/path/to/file/values.yaml') | from_yaml }}" -''' - -import traceback -HELM_IMPORT_ERR = None -try: - import grpc - from pyhelm import tiller - from pyhelm import chartbuilder -except ImportError: - HELM_IMPORT_ERR = traceback.format_exc() - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -def install(module, tserver): - changed = False - params = module.params - name = params['name'] - values = params['values'] - chart = module.params['chart'] - namespace = module.params['namespace'] - - chartb = chartbuilder.ChartBuilder(chart) - r_matches = (x for x in tserver.list_releases() - if x.name == name and x.namespace == namespace) - installed_release = next(r_matches, None) - if installed_release: - if installed_release.chart.metadata.version != chart['version']: - tserver.update_release(chartb.get_helm_chart(), False, - namespace, name=name, values=values) - changed = True - else: - tserver.install_release(chartb.get_helm_chart(), namespace, - dry_run=False, 
name=name, - values=values) - changed = True - - return dict(changed=changed) - - -def delete(module, tserver, purge=False): - changed = False - params = module.params - - if not module.params['name']: - module.fail_json(msg='Missing required field name') - - name = module.params['name'] - disable_hooks = params['disable_hooks'] - - try: - tserver.uninstall_release(name, disable_hooks, purge) - changed = True - except grpc._channel._Rendezvous as exc: - if 'not found' not in str(exc): - raise exc - - return dict(changed=changed) - - -def main(): - """The main function.""" - module = AnsibleModule( - argument_spec=dict( - host=dict(type='str', default='localhost'), - port=dict(type='int', default=44134), - name=dict(type='str', default=''), - chart=dict(type='dict'), - state=dict( - choices=['absent', 'purged', 'present'], - default='present' - ), - # Install options - values=dict(type='dict'), - namespace=dict(type='str', default='default'), - - # Uninstall options - disable_hooks=dict(type='bool', default=False), - ), - supports_check_mode=True) - - if HELM_IMPORT_ERR: - module.fail_json(msg=missing_required_lib('pyhelm'), exception=HELM_IMPORT_ERR) - - host = module.params['host'] - port = module.params['port'] - state = module.params['state'] - tserver = tiller.Tiller(host, port) - - if state == 'present': - rst = install(module, tserver) - - if state in 'absent': - rst = delete(module, tserver) - - if state in 'purged': - rst = delete(module, tserver, True) - - module.exit_json(**rst) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/misc/ovirt.py b/plugins/modules/cloud/misc/ovirt.py deleted file mode 100644 index 25e3081c8f..0000000000 --- a/plugins/modules/cloud/misc/ovirt.py +++ /dev/null @@ -1,503 +0,0 @@ -#!/usr/bin/python - -# Copyright: (c) 2013, Vincent Van der Kussen -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function 
-__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt -author: -- Vincent Van der Kussen (@vincentvdk) -short_description: oVirt/RHEV platform management -deprecated: - removed_in: 3.0.0 # was Ansible 2.14 - why: This module is for deprecated version of ovirt. - alternative: Use C(ovirt_vm) from the C(ovirt.ovirt) collection instead -description: - - This module only supports oVirt/RHEV version 3. A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4. - - Allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform. -options: - user: - description: - - The user to authenticate with. - type: str - required: true - url: - description: - - The url of the oVirt instance. - type: str - required: true - instance_name: - description: - - The name of the instance to use. - type: str - required: true - aliases: [ vmname ] - password: - description: - - Password of the user to authenticate with. - type: str - required: true - image: - description: - - The template to use for the instance. - type: str - resource_type: - description: - - Whether you want to deploy an image or create an instance from scratch. - type: str - choices: [ new, template ] - zone: - description: - - Deploy the image to this oVirt cluster. - type: str - instance_disksize: - description: - - Size of the instance's disk in GB. - type: str - aliases: [ vm_disksize] - instance_cpus: - description: - - The instance's number of CPUs. - type: str - default: 1 - aliases: [ vmcpus ] - instance_nic: - description: - - The name of the network interface in oVirt/RHEV. - type: str - aliases: [ vmnic ] - instance_network: - description: - - The logical network the machine should belong to. - type: str - default: rhevm - aliases: [ vmnetwork ] - instance_mem: - description: - - The instance's amount of memory in MB. 
- type: str - aliases: [ vmmem ] - instance_type: - description: - - Define whether the instance is a server, desktop or high_performance. - - I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2. - type: str - choices: [ desktop, server, high_performance ] - default: server - aliases: [ vmtype ] - disk_alloc: - description: - - Define whether disk is thin or preallocated. - type: str - choices: [ preallocated, thin ] - default: thin - disk_int: - description: - - Interface type of the disk. - type: str - choices: [ ide, virtio ] - default: virtio - instance_os: - description: - - Type of Operating System. - type: str - aliases: [ vmos ] - instance_cores: - description: - - Define the instance's number of cores. - type: str - default: 1 - aliases: [ vmcores ] - sdomain: - description: - - The Storage Domain where you want to create the instance's disk on. - type: str - region: - description: - - The oVirt/RHEV datacenter where you want to deploy to. - type: str - instance_dns: - description: - - Define the instance's Primary DNS server. - type: str - aliases: [ dns ] - instance_domain: - description: - - Define the instance's Domain. - type: str - aliases: [ domain ] - instance_hostname: - description: - - Define the instance's Hostname. - type: str - aliases: [ hostname ] - instance_ip: - description: - - Define the instance's IP. - type: str - aliases: [ ip ] - instance_netmask: - description: - - Define the instance's Netmask. - type: str - aliases: [ netmask ] - instance_gateway: - description: - - Define the instance's Gateway. - type: str - aliases: [ gateway ] - instance_rootpw: - description: - - Define the instance's Root password. - type: str - aliases: [ rootpw ] - instance_key: - description: - - Define the instance's Authorized key. - type: str - aliases: [ key ] - state: - description: - - Create, terminate or remove instances. 
- type: str - choices: [ absent, present, restart, shutdown, started ] - default: present -requirements: - - ovirt-engine-sdk-python -''' - -EXAMPLES = ''' -- name: Basic example to provision from image - community.general.ovirt: - user: admin@internal - url: https://ovirt.example.com - instance_name: ansiblevm04 - password: secret - image: centos_64 - zone: cluster01 - resource_type: template - -- name: Full example to create new instance from scratch - community.general.ovirt: - instance_name: testansible - resource_type: new - instance_type: server - user: admin@internal - password: secret - url: https://ovirt.example.com - instance_disksize: 10 - zone: cluster01 - region: datacenter1 - instance_cpus: 1 - instance_nic: nic1 - instance_network: rhevm - instance_mem: 1000 - disk_alloc: thin - sdomain: FIBER01 - instance_cores: 1 - instance_os: rhel_6x64 - disk_int: virtio - -- name: Stopping an existing instance - community.general.ovirt: - instance_name: testansible - state: stopped - user: admin@internal - password: secret - url: https://ovirt.example.com - -- name: Start an existing instance - community.general.ovirt: - instance_name: testansible - state: started - user: admin@internal - password: secret - url: https://ovirt.example.com - -- name: Start an instance with cloud init information - community.general.ovirt: - instance_name: testansible - state: started - user: admin@internal - password: secret - url: https://ovirt.example.com - hostname: testansible - domain: ansible.local - ip: 192.0.2.100 - netmask: 255.255.255.0 - gateway: 192.0.2.1 - rootpw: bigsecret -''' - -import time - -try: - from ovirtsdk.api import API - from ovirtsdk.xml import params - HAS_OVIRTSDK = True -except ImportError: - HAS_OVIRTSDK = False - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.removed import removed_module - - -# ------------------------------------------------------------------- # -# create connection with API -# -def 
conn(url, user, password): - api = API(url=url, username=user, password=password, insecure=True) - try: - value = api.test() - except Exception: - raise Exception("error connecting to the oVirt API") - return api - - -# ------------------------------------------------------------------- # -# Create VM from scratch -def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int): - if vmdisk_alloc == 'thin': - # define VM params - vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos), - template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem), - cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype) - # define disk params - vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", - format='cow', - storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) - # define network parameters - network_net = params.Network(name=vmnetwork) - nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio') - elif vmdisk_alloc == 'preallocated': - # define VM params - vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos), - template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem), - cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype) - # define disk params - vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", - format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) - # define network parameters - network_net = params.Network(name=vmnetwork) - nic_net1 = params.NIC(name=vmnic, network=network_net, 
interface='virtio') - - try: - conn.vms.add(vmparams) - except Exception: - raise Exception("Error creating VM with specified parameters") - vm = conn.vms.get(name=vmname) - try: - vm.disks.add(vmdisk) - except Exception: - raise Exception("Error attaching disk") - try: - vm.nics.add(nic_net1) - except Exception: - raise Exception("Error adding nic") - - -# create an instance from a template -def create_vm_template(conn, vmname, image, zone): - vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image), disks=params.Disks(clone=True)) - try: - conn.vms.add(vmparams) - except Exception: - raise Exception('error adding template %s' % image) - - -# start instance -def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None, - domain=None, dns=None, rootpw=None, key=None): - vm = conn.vms.get(name=vmname) - use_cloud_init = False - nics = None - nic = None - if hostname or ip or netmask or gateway or domain or dns or rootpw or key: - use_cloud_init = True - if ip and netmask and gateway: - ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway) - nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True) - nics = params.Nics() - nics = params.GuestNicsConfiguration(nic_configuration=[nic]) - initialization = params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root', - root_password=rootpw, nic_configurations=nics, dns_servers=dns, - authorized_ssh_keys=key) - action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization)) - vm.start(action=action) - - -# Stop instance -def vm_stop(conn, vmname): - vm = conn.vms.get(name=vmname) - vm.stop() - - -# restart instance -def vm_restart(conn, vmname): - state = vm_status(conn, vmname) - vm = conn.vms.get(name=vmname) - vm.stop() - while conn.vms.get(vmname).get_status().get_state() != 'down': - time.sleep(5) - vm.start() - - -# remove 
an instance -def vm_remove(conn, vmname): - vm = conn.vms.get(name=vmname) - vm.delete() - - -# ------------------------------------------------------------------- # -# VM statuses -# -# Get the VMs status -def vm_status(conn, vmname): - status = conn.vms.get(name=vmname).status.state - return status - - -# Get VM object and return it's name if object exists -def get_vm(conn, vmname): - vm = conn.vms.get(name=vmname) - if vm is None: - name = "empty" - else: - name = vm.get_name() - return name - -# ------------------------------------------------------------------- # -# Hypervisor operations -# -# not available yet -# ------------------------------------------------------------------- # -# Main - - -def main(): - module = AnsibleModule( - argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'present', 'restart', 'shutdown', 'started']), - user=dict(type='str', required=True), - url=dict(type='str', required=True), - instance_name=dict(type='str', required=True, aliases=['vmname']), - password=dict(type='str', required=True, no_log=True), - image=dict(type='str'), - resource_type=dict(type='str', choices=['new', 'template']), - zone=dict(type='str'), - instance_disksize=dict(type='str', aliases=['vm_disksize']), - instance_cpus=dict(type='str', default=1, aliases=['vmcpus']), - instance_nic=dict(type='str', aliases=['vmnic']), - instance_network=dict(type='str', default='rhevm', aliases=['vmnetwork']), - instance_mem=dict(type='str', aliases=['vmmem']), - instance_type=dict(type='str', default='server', aliases=['vmtype'], choices=['desktop', 'server', 'high_performance']), - disk_alloc=dict(type='str', default='thin', choices=['preallocated', 'thin']), - disk_int=dict(type='str', default='virtio', choices=['ide', 'virtio']), - instance_os=dict(type='str', aliases=['vmos']), - instance_cores=dict(type='str', default=1, aliases=['vmcores']), - instance_hostname=dict(type='str', aliases=['hostname']), - instance_ip=dict(type='str', 
aliases=['ip']), - instance_netmask=dict(type='str', aliases=['netmask']), - instance_gateway=dict(type='str', aliases=['gateway']), - instance_domain=dict(type='str', aliases=['domain']), - instance_dns=dict(type='str', aliases=['dns']), - instance_rootpw=dict(type='str', aliases=['rootpw'], no_log=True), - instance_key=dict(type='str', aliases=['key'], no_log=True), - sdomain=dict(type='str'), - region=dict(type='str'), - ), - ) - - if not HAS_OVIRTSDK: - module.fail_json(msg='ovirtsdk required for this module') - - state = module.params['state'] - user = module.params['user'] - url = module.params['url'] - vmname = module.params['instance_name'] - password = module.params['password'] - image = module.params['image'] # name of the image to deploy - resource_type = module.params['resource_type'] # template or from scratch - zone = module.params['zone'] # oVirt cluster - vmdisk_size = module.params['instance_disksize'] # disksize - vmcpus = module.params['instance_cpus'] # number of cpu - vmnic = module.params['instance_nic'] # network interface - vmnetwork = module.params['instance_network'] # logical network - vmmem = module.params['instance_mem'] # mem size - vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated - vmdisk_int = module.params['disk_int'] # disk interface virtio or ide - vmos = module.params['instance_os'] # Operating System - vmtype = module.params['instance_type'] # server, desktop or high_performance - vmcores = module.params['instance_cores'] # number of cores - sdomain = module.params['sdomain'] # storage domain to store disk on - region = module.params['region'] # oVirt Datacenter - hostname = module.params['instance_hostname'] - ip = module.params['instance_ip'] - netmask = module.params['instance_netmask'] - gateway = module.params['instance_gateway'] - domain = module.params['instance_domain'] - dns = module.params['instance_dns'] - rootpw = module.params['instance_rootpw'] - key = module.params['instance_key'] - # initialize 
connection - try: - c = conn(url + "/api", user, password) - except Exception as e: - module.fail_json(msg='%s' % e) - - if state == 'present': - if get_vm(c, vmname) == "empty": - if resource_type == 'template': - try: - create_vm_template(c, vmname, image, zone) - except Exception as e: - module.fail_json(msg='%s' % e) - module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname, image)) - elif resource_type == 'new': - # FIXME: refactor, use keyword args. - try: - create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int) - except Exception as e: - module.fail_json(msg='%s' % e) - module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname) - else: - module.exit_json(changed=False, msg="You did not specify a resource type") - else: - module.exit_json(changed=False, msg="VM %s already exists" % vmname) - - if state == 'started': - if vm_status(c, vmname) == 'up': - module.exit_json(changed=False, msg="VM %s is already running" % vmname) - else: - # vm_start(c, vmname) - vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key) - module.exit_json(changed=True, msg="VM %s started" % vmname) - - if state == 'shutdown': - if vm_status(c, vmname) == 'down': - module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname) - else: - vm_stop(c, vmname) - module.exit_json(changed=True, msg="VM %s is shutting down" % vmname) - - if state == 'restart': - if vm_status(c, vmname) == 'up': - vm_restart(c, vmname) - module.exit_json(changed=True, msg="VM %s is restarted" % vmname) - else: - module.exit_json(changed=False, msg="VM %s is not running" % vmname) - - if state == 'absent': - if get_vm(c, vmname) == "empty": - module.exit_json(changed=False, msg="VM %s does not exist" % vmname) - else: - vm_remove(c, vmname) - module.exit_json(changed=True, msg="VM %s removed" % vmname) - - -if __name__ == '__main__': - main() diff --git 
a/plugins/modules/cloud/online/online_server_facts.py b/plugins/modules/cloud/online/online_server_facts.py deleted file mode 100644 index f1e74aa6f0..0000000000 --- a/plugins/modules/cloud/online/online_server_facts.py +++ /dev/null @@ -1,175 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: online_server_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favour of C(_info) module. - alternative: Use M(community.general.online_server_info) instead. -short_description: Gather facts about Online servers. -description: - - Gather facts about the servers. - - U(https://www.online.net/en/dedicated-server) -author: - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.online - -''' - -EXAMPLES = r''' -- name: Gather Online server facts - community.general.online_server_facts: - api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f' -''' - -RETURN = r''' ---- -online_server_facts: - description: Response from Online API - returned: success - type: complex - sample: - "online_server_facts": [ - { - "abuse": "abuse@example.com", - "anti_ddos": false, - "bmc": { - "session_key": null - }, - "boot_mode": "normal", - "contacts": { - "owner": "foobar", - "tech": "foobar" - }, - "disks": [ - { - "$ref": "/api/v1/server/hardware/disk/68452" - }, - { - "$ref": "/api/v1/server/hardware/disk/68453" - } - ], - "drive_arrays": [ - { - "disks": [ - { - "$ref": "/api/v1/server/hardware/disk/68452" - }, - { - "$ref": "/api/v1/server/hardware/disk/68453" - } - ], - "raid_controller": { - "$ref": "/api/v1/server/hardware/raidController/9910" - }, - "raid_level": "RAID1" - } - ], - "hardware_watch": true, - "hostname": "sd-42", - "id": 42, - "ip": [ - { - "address": "195.154.172.149", - "mac": "28:92:4a:33:5e:c6", 
- "reverse": "195-154-172-149.rev.poneytelecom.eu.", - "switch_port_state": "up", - "type": "public" - }, - { - "address": "10.90.53.212", - "mac": "28:92:4a:33:5e:c7", - "reverse": null, - "switch_port_state": "up", - "type": "private" - } - ], - "last_reboot": "2018-08-23T08:32:03.000Z", - "location": { - "block": "A", - "datacenter": "DC3", - "position": 19, - "rack": "A23", - "room": "4 4-4" - }, - "network": { - "ip": [ - "195.154.172.149" - ], - "ipfo": [], - "private": [ - "10.90.53.212" - ] - }, - "offer": "Pro-1-S-SATA", - "os": { - "name": "FreeBSD", - "version": "11.1-RELEASE" - }, - "power": "ON", - "proactive_monitoring": false, - "raid_controllers": [ - { - "$ref": "/api/v1/server/hardware/raidController/9910" - } - ], - "support": "Basic service level" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.online import ( - Online, OnlineException, online_argument_spec -) - - -class OnlineServerFacts(Online): - - def __init__(self, module): - super(OnlineServerFacts, self).__init__(module) - self.name = 'api/v1/server' - - def _get_server_detail(self, server_path): - try: - return self.get(path=server_path).json - except OnlineException as exc: - self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc)) - - def all_detailed_servers(self): - servers_api_path = self.get_resources() - - server_data = ( - self._get_server_detail(server_api_path) - for server_api_path in servers_api_path - ) - - return [s for s in server_data if s is not None] - - -def main(): - module = AnsibleModule( - argument_spec=online_argument_spec(), - supports_check_mode=True, - ) - - try: - servers_facts = OnlineServerFacts(module).all_detailed_servers() - module.exit_json( - ansible_facts={'online_server_facts': servers_facts} - ) - except OnlineException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git 
a/plugins/modules/cloud/online/online_user_facts.py b/plugins/modules/cloud/online/online_user_facts.py deleted file mode 100644 index 7b78924e92..0000000000 --- a/plugins/modules/cloud/online/online_user_facts.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: online_user_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favour of C(_info) module. - alternative: Use M(community.general.online_user_info) instead. -short_description: Gather facts about Online user. -description: - - Gather facts about the user. -author: - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.online - -''' - -EXAMPLES = r''' -- name: Gather Online user facts - community.general.online_user_facts: -''' - -RETURN = r''' ---- -online_user_facts: - description: Response from Online API - returned: success - type: complex - sample: - "online_user_facts": { - "company": "foobar LLC", - "email": "foobar@example.com", - "first_name": "foo", - "id": 42, - "last_name": "bar", - "login": "foobar" - } -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.online import ( - Online, OnlineException, online_argument_spec -) - - -class OnlineUserFacts(Online): - - def __init__(self, module): - super(OnlineUserFacts, self).__init__(module) - self.name = 'api/v1/user' - - -def main(): - module = AnsibleModule( - argument_spec=online_argument_spec(), - supports_check_mode=True, - ) - - try: - module.exit_json( - ansible_facts={'online_user_facts': OnlineUserFacts(module).get_resources()} - ) - except OnlineException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git 
a/plugins/modules/cloud/opennebula/one_image_facts.py b/plugins/modules/cloud/opennebula/one_image_facts.py deleted file mode 120000 index 96b8357316..0000000000 --- a/plugins/modules/cloud/opennebula/one_image_facts.py +++ /dev/null @@ -1 +0,0 @@ -one_image_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/opennebula/one_image_info.py b/plugins/modules/cloud/opennebula/one_image_info.py index 77c280d07b..e03b8ad724 100644 --- a/plugins/modules/cloud/opennebula/one_image_info.py +++ b/plugins/modules/cloud/opennebula/one_image_info.py @@ -261,9 +261,6 @@ def main(): module = AnsibleModule(argument_spec=fields, mutually_exclusive=[['ids', 'name']], supports_check_mode=True) - if module._name in ('one_image_facts', 'community.general.one_image_facts'): - module.deprecate("The 'one_image_facts' module has been renamed to 'one_image_info'", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 if not HAS_PYONE: module.fail_json(msg='This module requires pyone to work!') diff --git a/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py b/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py deleted file mode 100644 index e560e13e30..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_affinity_label_facts -short_description: Retrieve information about one or more oVirt/RHV affinity labels -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_affinity_label_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV affinity labels." -notes: - - "This module returns a variable C(ovirt_affinity_labels), which - contains a list of affinity labels. You need to register the result with - the I(register) keyword to use it." -options: - name: - description: - - "Name of the affinity labels which should be listed." - vm: - description: - - "Name of the VM, which affinity labels should be listed." - host: - description: - - "Name of the host, which affinity labels should be listed." 
-extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all affinity labels, which names start with label - ovirt_affinity_label_info: - name: label* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_affinity_labels }}" - -- name: > - Gather information about all affinity labels, which are assigned to VMs - which names start with postgres - ovirt_affinity_label_info: - vm: postgres* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_affinity_labels }}" - -- name: > - Gather information about all affinity labels, which are assigned to hosts - which names start with west - ovirt_affinity_label_info: - host: west* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_affinity_labels }}" - -- name: > - Gather information about all affinity labels, which are assigned to hosts - which names start with west or VMs which names start with postgres - ovirt_affinity_label_info: - host: west* - vm: postgres* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_affinity_labels }}" -''' - -RETURN = ''' -ovirt_affinity_labels: - description: "List of dictionaries describing the affinity labels. Affinity labels attributes are mapped to dictionary keys, - all affinity labels attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label." - returned: On success. 
- type: list -''' - -import fnmatch -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, - search_by_name, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - name=dict(default=None), - host=dict(default=None), - vm=dict(default=None), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_affinity_label_facts', 'community.general.ovirt_affinity_label_facts') - if is_old_facts: - module.deprecate("The 'ovirt_affinity_label_facts' module has been renamed to 'ovirt_affinity_label_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - affinity_labels_service = connection.system_service().affinity_labels_service() - labels = [] - all_labels = affinity_labels_service.list() - if module.params['name']: - labels.extend([ - l for l in all_labels - if fnmatch.fnmatch(l.name, module.params['name']) - ]) - if module.params['host']: - hosts_service = connection.system_service().hosts_service() - if search_by_name(hosts_service, module.params['host']) is None: - raise Exception("Host '%s' was not found." % module.params['host']) - labels.extend([ - label - for label in all_labels - for host in connection.follow_link(label.hosts) - if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host']) - ]) - if module.params['vm']: - vms_service = connection.system_service().vms_service() - if search_by_name(vms_service, module.params['vm']) is None: - raise Exception("Vm '%s' was not found." 
% module.params['vm']) - labels.extend([ - label - for label in all_labels - for vm in connection.follow_link(label.vms) - if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm']) - ]) - - if not (module.params['vm'] or module.params['host'] or module.params['name']): - labels = all_labels - - result = dict( - ovirt_affinity_labels=[ - get_dict_of_struct( - struct=l, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for l in labels - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_api_facts.py b/plugins/modules/cloud/ovirt/ovirt_api_facts.py deleted file mode 100644 index 4085a7022b..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_api_facts.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: ovirt_api_facts -short_description: Retrieve information about the oVirt/RHV API -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_api_info) instead. -description: - - "Retrieve information about the oVirt/RHV API." -notes: - - "This module returns a variable C(ovirt_api), - which contains a information about oVirt/RHV API. You need to register the result with - the I(register) keyword to use it." 
-extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information oVirt API - ovirt_api_info: - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_api }}" -''' - -RETURN = ''' -ovirt_api: - description: "Dictionary describing the oVirt API information. - Api attributes are mapped to dictionary keys, - all API attributes can be found at following - url: https://ovirt.example.com/ovirt-engine/api/model#types/api." - returned: On success. - type: dict -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec() - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_api_facts', 'community.general.ovirt_api_facts') - if is_old_facts: - module.deprecate("The 'ovirt_api_facts' module has been renamed to 'ovirt_api_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - api = connection.system_service().get() - result = dict( - ovirt_api=get_dict_of_struct( - struct=api, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) 
- finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py b/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py deleted file mode 100644 index e4916a2684..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_cluster_facts -short_description: Retrieve information about one or more oVirt/RHV clusters -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_cluster_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV clusters." -notes: - - "This module returns a variable C(ovirt_clusters), which - contains a list of clusters. You need to register the result with - the I(register) keyword to use it." -options: - pattern: - description: - - "Search term which is accepted by oVirt/RHV search backend." 
- - "For example to search cluster X from datacenter Y use following pattern: - name=X and datacenter=Y" -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all clusters which names start with production - ovirt_cluster_info: - pattern: - name: 'production*' - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_clusters }}" -''' - -RETURN = ''' -ovirt_clusters: - description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys, - all clusters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster." - returned: On success. - type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - pattern=dict(default='', required=False), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_cluster_facts', 'community.general.ovirt_cluster_facts') - if is_old_facts: - module.deprecate("The 'ovirt_cluster_facts' module has been renamed to 'ovirt_cluster_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - clusters_service = connection.system_service().clusters_service() - clusters = clusters_service.list(search=module.params['pattern']) - result = dict( - ovirt_clusters=[ - 
get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in clusters - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py b/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py deleted file mode 100644 index 0de7272939..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_datacenter_facts -short_description: Retrieve information about one or more oVirt/RHV datacenters -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_datacenter_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV datacenters." -notes: - - "This module returns a variable C(ovirt_datacenters), which - contains a list of datacenters. You need to register the result with - the I(register) keyword to use it." -options: - pattern: - description: - - "Search term which is accepted by oVirt/RHV search backend." 
- - "For example to search datacenter I(X) use following pattern: I(name=X)" -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all data centers which names start with production - ovirt_datacenter_info: - pattern: name=production* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_datacenters }}" -''' - -RETURN = ''' -ovirt_datacenters: - description: "List of dictionaries describing the datacenters. Datacenter attributes are mapped to dictionary keys, - all datacenters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/data_center." - returned: On success. - type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - pattern=dict(default='', required=False), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_datacenter_facts', 'community.general.ovirt_datacenter_facts') - if is_old_facts: - module.deprecate("The 'ovirt_datacenter_facts' module has been renamed to 'ovirt_datacenter_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - datacenters_service = connection.system_service().data_centers_service() - datacenters = datacenters_service.list(search=module.params['pattern']) - result = dict( - 
ovirt_datacenters=[ - get_dict_of_struct( - struct=d, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for d in datacenters - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_disk_facts.py b/plugins/modules/cloud/ovirt/ovirt_disk_facts.py deleted file mode 100644 index 6e0c9f699d..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_disk_facts.py +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2017 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_disk_facts -short_description: Retrieve information about one or more oVirt/RHV disks -author: "Katerina Koukiou (@KKoukiou)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. 
- alternative: Use M(ovirt.ovirt.ovirt_disk_info) instead -description: - - "Retrieve information about one or more oVirt/RHV disks." -notes: - - "This module returns a variable C(ovirt_disks), which - contains a list of disks. You need to register the result with - the I(register) keyword to use it." -options: - pattern: - description: - - "Search term which is accepted by oVirt/RHV search backend." - - "For example to search Disk X from storage Y use following pattern: - name=X and storage.name=Y" -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all Disks which names start with centos - ovirt_disk_info: - pattern: name=centos* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_disks }}" -''' - -RETURN = ''' -ovirt_disks: - description: "List of dictionaries describing the Disks. Disk attributes are mapped to dictionary keys, - all Disks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk." - returned: On success. 
- type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - pattern=dict(default='', required=False), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_disk_facts', 'community.general.ovirt_disk_facts') - if is_old_facts: - module.deprecate("The 'ovirt_disk_facts' module has been renamed to 'ovirt_disk_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - disks_service = connection.system_service().disks_service() - disks = disks_service.list( - search=module.params['pattern'], - ) - result = dict( - ovirt_disks=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in disks - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_event_facts.py b/plugins/modules/cloud/ovirt/ovirt_event_facts.py deleted file mode 100644 index 50a2065498..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_event_facts.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2019, Ansible Project -# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_event_facts -short_description: This module can be used to retrieve information about one or more oVirt/RHV events -author: "Chris Keller (@nasx)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_event_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV events." -options: - case_sensitive: - description: - - "Indicates if the search performed using the search parameter should be performed taking case - into account. The default value is true, which means that case is taken into account. If you - want to search ignoring case set it to false." - required: false - default: true - type: bool - - from_: - description: - - "Indicates the event index after which events should be returned. The indexes of events are - strictly increasing, so when this parameter is used only the events with greater indexes - will be returned." - required: false - type: int - - max: - description: - - "Sets the maximum number of events to return. If not specified all the events are returned." - required: false - type: int - - search: - description: - - "Search term which is accepted by the oVirt/RHV API." - - "For example to search for events of severity alert use the following pattern: severity=alert" - required: false - type: str - - headers: - description: - - "Additional HTTP headers." - required: false - type: str - - query: - description: - - "Additional URL query parameters." - required: false - type: str - - wait: - description: - - "If True wait for the response." 
- required: false - default: true - type: bool -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain the auth parameter for simplicity, -# look at the ovirt_auth module to see how to reuse authentication. - -- name: Return all events - ovirt_event_info: - register: result - -- name: Return the last 10 events - ovirt_event_info: - max: 10 - register: result - -- name: Return all events of type alert - ovirt_event_info: - search: "severity=alert" - register: result -- ansible.builtin.debug: - msg: "{{ result.ovirt_events }}" -''' - -RETURN = ''' -ovirt_events: - description: "List of dictionaries describing the events. Event attributes are mapped to dictionary keys. - All event attributes can be found at the following url: - http://ovirt.github.io/ovirt-engine-api-model/master/#types/event" - returned: On success." - type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - case_sensitive=dict(default=True, type='bool', required=False), - from_=dict(default=None, type='int', required=False), - max=dict(default=None, type='int', required=False), - search=dict(default='', required=False), - headers=dict(default='', required=False), - query=dict(default='', required=False), - wait=dict(default=True, type='bool', required=False) - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_event_facts', 'community.general.ovirt_event_facts') - if is_old_facts: - module.deprecate("The 'ovirt_event_facts' module has been renamed to 'ovirt_event_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', 
collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - events_service = connection.system_service().events_service() - events = events_service.list( - case_sensitive=module.params['case_sensitive'], - from_=module.params['from_'], - max=module.params['max'], - search=module.params['search'], - headers=module.params['headers'], - query=module.params['query'], - wait=module.params['wait'] - ) - - result = dict( - ovirt_events=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in events - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py b/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py deleted file mode 100644 index f9ac8b9777..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_external_provider_facts -short_description: Retrieve information about one or more oVirt/RHV external providers -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_external_provider_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV external providers." -notes: - - "This module returns a variable C(ovirt_external_providers), which - contains a list of external_providers. You need to register the result with - the I(register) keyword to use it." -options: - type: - description: - - "Type of the external provider." - choices: ['os_image', 'os_network', 'os_volume', 'foreman'] - required: true - type: str - name: - description: - - "Name of the external provider, can be used as glob expression." - type: str -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all image external providers named glance - ovirt_external_provider_info: - type: os_image - name: glance - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_external_providers }}" -''' - -RETURN = ''' -ovirt_external_providers: - description: - - "List of dictionaries. Content depends on I(type)." - - "For type C(foreman), attributes appearing in the dictionary can be found on your oVirt/RHV instance - at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider." 
- - "For type C(os_image), attributes appearing in the dictionary can be found on your oVirt/RHV instance - at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider." - - "For type C(os_volume), attributes appearing in the dictionary can be found on your oVirt/RHV instance - at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider." - - "For type C(os_network), attributes appearing in the dictionary can be found on your oVirt/RHV instance - at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider." - returned: On success - type: list -''' - -import fnmatch -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def _external_provider_service(provider_type, system_service): - if provider_type == 'os_image': - return system_service.openstack_image_providers_service() - elif provider_type == 'os_network': - return system_service.openstack_network_providers_service() - elif provider_type == 'os_volume': - return system_service.openstack_volume_providers_service() - elif provider_type == 'foreman': - return system_service.external_host_providers_service() - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - name=dict(default=None, required=False), - type=dict( - required=True, - choices=['os_image', 'os_network', 'os_volume', 'foreman'], - aliases=['provider'], - ), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_external_provider_facts', 'community.general.ovirt_external_provider_facts') - if is_old_facts: - module.deprecate("The 'ovirt_external_provider_facts' module has been renamed to 
'ovirt_external_provider_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - external_providers_service = _external_provider_service( - provider_type=module.params.pop('type'), - system_service=connection.system_service(), - ) - if module.params['name']: - external_providers = [ - e for e in external_providers_service.list() - if fnmatch.fnmatch(e.name, module.params['name']) - ] - else: - external_providers = external_providers_service.list() - - result = dict( - ovirt_external_providers=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in external_providers - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_group_facts.py b/plugins/modules/cloud/ovirt/ovirt_group_facts.py deleted file mode 100644 index 40b037f4ee..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_group_facts.py +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_group_facts -short_description: Retrieve information about one or more oVirt/RHV groups -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_group_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV groups." -notes: - - "This module returns a variable C(ovirt_groups), which - contains a list of groups. You need to register the result with - the I(register) keyword to use it." -options: - pattern: - description: - - "Search term which is accepted by oVirt/RHV search backend." - - "For example to search group X use following pattern: name=X" -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all groups which names start with admin - ovirt_group_info: - pattern: name=admin* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_groups }}" -''' - -RETURN = ''' -ovirt_groups: - description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys, - all groups attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group." - returned: On success. 
- type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - pattern=dict(default='', required=False), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_group_facts', 'community.general.ovirt_group_facts') - if is_old_facts: - module.deprecate("The 'ovirt_group_facts' module has been renamed to 'ovirt_group_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - groups_service = connection.system_service().groups_service() - groups = groups_service.list(search=module.params['pattern']) - result = dict( - ovirt_groups=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in groups - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_host_facts.py b/plugins/modules/cloud/ovirt/ovirt_host_facts.py deleted file mode 100644 index ea585e9051..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_host_facts.py +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. 
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_host_facts -short_description: Retrieve information about one or more oVirt/RHV hosts -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_host_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV hosts." -notes: - - "This module returns a variable C(ovirt_hosts), which - contains a list of hosts. You need to register the result with - the I(register) keyword to use it." -options: - pattern: - description: - - "Search term which is accepted by oVirt/RHV search backend." - - "For example to search host X from datacenter Y use following pattern: - name=X and datacenter=Y" - all_content: - description: - - "If I(true) all the attributes of the hosts should be - included in the response." - default: False - type: bool - cluster_version: - description: - - "Filter the hosts based on the cluster version." 
- type: str - -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all hosts which names start with host and belong to data center west - ovirt_host_info: - pattern: name=host* and datacenter=west - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_hosts }}" - -- name: Gather information about all hosts with cluster version 4.2 - ovirt_host_info: - pattern: name=host* - cluster_version: "4.2" - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_hosts }}" -''' - -RETURN = ''' -ovirt_hosts: - description: "List of dictionaries describing the hosts. Host attributes are mapped to dictionary keys, - all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host." - returned: On success. - type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def get_filtered_hosts(cluster_version, hosts, connection): - # Filtering by cluster version returns only those which have same cluster version as input - filtered_hosts = [] - for host in hosts: - cluster = connection.follow_link(host.cluster) - cluster_version_host = str(cluster.version.major) + '.' 
+ str(cluster.version.minor) - if cluster_version_host == cluster_version: - filtered_hosts.append(host) - return filtered_hosts - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - pattern=dict(default='', required=False), - all_content=dict(default=False, type='bool'), - cluster_version=dict(default=None, type='str'), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_host_facts', 'community.general.ovirt_host_facts') - if is_old_facts: - module.deprecate("The 'ovirt_host_facts' module has been renamed to 'ovirt_host_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - hosts_service = connection.system_service().hosts_service() - hosts = hosts_service.list( - search=module.params['pattern'], - all_content=module.params['all_content'] - ) - cluster_version = module.params.get('cluster_version') - if cluster_version is not None: - hosts = get_filtered_hosts(cluster_version, hosts, connection) - result = dict( - ovirt_hosts=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in hosts - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py b/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py deleted file mode 100644 index 62af3e4ba1..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/python -# -*- 
coding: utf-8 -*- -# -# Copyright (c) 2017 Red Hat, Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_host_storage_facts -short_description: Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage) -author: "Daniel Erez (@derez)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_host_storage_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)." -options: - host: - description: - - "Host to get device list from." - required: true - iscsi: - description: - - "Dictionary with values for iSCSI storage type:" - suboptions: - address: - description: - - "Address of the iSCSI storage server." - target: - description: - - "The target IQN for the storage device." - username: - description: - - "A CHAP user name for logging into a target." - password: - description: - - "A CHAP password for logging into a target." - portal: - description: - - "The portal being used to connect with iscsi." - fcp: - description: - - "Dictionary with values for fibre channel storage type:" - suboptions: - address: - description: - - "Address of the fibre channel storage server." - port: - description: - - "Port of the fibre channel storage server." - lun_id: - description: - - "LUN id." 
-extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about HostStorages with specified target and address - ovirt_host_storage_info: - host: myhost - iscsi: - target: iqn.2016-08-09.domain-01:nickname - address: 10.34.63.204 - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_host_storages }}" -''' - -RETURN = ''' -ovirt_host_storages: - description: "List of dictionaries describing the HostStorage. HostStorage attributes are mapped to dictionary keys, - all HostStorage attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_storage." - returned: On success. - type: list -''' - -import traceback - -try: - import ovirtsdk4.types as otypes -except ImportError: - pass - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, - get_id_by_name, -) - - -def _login(host_service, iscsi): - host_service.iscsi_login( - iscsi=otypes.IscsiDetails( - username=iscsi.get('username'), - password=iscsi.get('password'), - address=iscsi.get('address'), - target=iscsi.get('target'), - portal=iscsi.get('portal') - ), - ) - - -def _get_storage_type(params): - for sd_type in ['iscsi', 'fcp']: - if params.get(sd_type) is not None: - return sd_type - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - host=dict(required=True), - iscsi=dict(default=None, type='dict'), - fcp=dict(default=None, type='dict'), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_host_storage_facts', 'community.general.ovirt_host_storage_facts') - if 
is_old_facts: - module.deprecate("The 'ovirt_host_storage_facts' module has been renamed to 'ovirt_host_storage_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - - # Get Host - hosts_service = connection.system_service().hosts_service() - host_id = get_id_by_name(hosts_service, module.params['host']) - storage_type = _get_storage_type(module.params) - host_service = hosts_service.host_service(host_id) - - if storage_type == 'iscsi': - # Login - iscsi = module.params.get('iscsi') - _login(host_service, iscsi) - - # Get LUNs exposed from the specified target - host_storages = host_service.storage_service().list() - - if storage_type == 'iscsi': - filterred_host_storages = [host_storage for host_storage in host_storages - if host_storage.type == otypes.StorageType.ISCSI] - if 'target' in iscsi: - filterred_host_storages = [host_storage for host_storage in filterred_host_storages - if iscsi.get('target') == host_storage.logical_units[0].target] - elif storage_type == 'fcp': - filterred_host_storages = [host_storage for host_storage in host_storages - if host_storage.type == otypes.StorageType.FCP] - - result = dict( - ovirt_host_storages=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in filterred_host_storages - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_network_facts.py b/plugins/modules/cloud/ovirt/ovirt_network_facts.py 
deleted file mode 100644 index 781dd85805..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_network_facts.py +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_network_facts -short_description: Retrieve information about one or more oVirt/RHV networks -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_network_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV networks." -notes: - - "This module returns a variable C(ovirt_networks), which - contains a list of networks. You need to register the result with - the I(register) keyword to use it." -options: - pattern: - description: - - "Search term which is accepted by oVirt/RHV search backend." 
- - "For example to search network starting with string vlan1 use: name=vlan1*" -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all networks which names start with vlan1 - ovirt_network_info: - pattern: name=vlan1* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_networks }}" -''' - - -RETURN = ''' -ovirt_networks: - description: "List of dictionaries describing the networks. Network attributes are mapped to dictionary keys, - all networks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network." - returned: On success. - type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - pattern=dict(default='', required=False), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_network_facts', 'community.general.ovirt_network_facts') - if is_old_facts: - module.deprecate("The 'ovirt_network_facts' module has been renamed to 'ovirt_network_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - networks_service = connection.system_service().networks_service() - networks = networks_service.list(search=module.params['pattern']) - result = dict( - ovirt_networks=[ - get_dict_of_struct( - struct=c, - 
connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in networks - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_nic_facts.py b/plugins/modules/cloud/ovirt/ovirt_nic_facts.py deleted file mode 100644 index 2cc1194fbc..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_nic_facts.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_nic_facts -short_description: Retrieve information about one or more oVirt/RHV virtual machine network interfaces -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_nic_info) instead. 
-description: - - "Retrieve information about one or more oVirt/RHV virtual machine network interfaces." -notes: - - "This module returns a variable C(ovirt_nics), which - contains a list of NICs. You need to register the result with - the I(register) keyword to use it." -options: - vm: - description: - - "Name of the VM where NIC is attached." - required: true - name: - description: - - "Name of the NIC, can be used as glob expression." -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all NICs which names start with eth for VM named centos7 - ovirt_nic_info: - vm: centos7 - name: eth* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_nics }}" -''' - -RETURN = ''' -ovirt_nics: - description: "List of dictionaries describing the network interfaces. NIC attributes are mapped to dictionary keys, - all NICs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic." - returned: On success. 
- type: list -''' - -import fnmatch -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, - search_by_name, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - vm=dict(required=True), - name=dict(default=None), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_nic_facts', 'community.general.ovirt_nic_facts') - if is_old_facts: - module.deprecate("The 'ovirt_nic_facts' module has been renamed to 'ovirt_nic_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - vms_service = connection.system_service().vms_service() - vm_name = module.params['vm'] - vm = search_by_name(vms_service, vm_name) - if vm is None: - raise Exception("VM '%s' was not found." 
% vm_name) - - nics_service = vms_service.service(vm.id).nics_service() - if module.params['name']: - nics = [ - e for e in nics_service.list() - if fnmatch.fnmatch(e.name, module.params['name']) - ] - else: - nics = nics_service.list() - - result = dict( - ovirt_nics=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in nics - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_permission_facts.py b/plugins/modules/cloud/ovirt/ovirt_permission_facts.py deleted file mode 100644 index 52ba3624f1..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_permission_facts.py +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_permission_facts -short_description: Retrieve information about one or more oVirt/RHV permissions -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_permission_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV permissions." -notes: - - "This module returns a variable C(ovirt_permissions), which - contains a list of permissions. You need to register the result with - the I(register) keyword to use it." -options: - user_name: - description: - - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user." - group_name: - description: - - "Name of the group to manage." - authz_name: - description: - - "Authorization provider of the user/group. In previous versions of oVirt/RHV known as domain." - required: true - aliases: ['domain'] - namespace: - description: - - "Namespace of the authorization provider, where user/group resides." - required: false -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all permissions of user with username john - ovirt_permission_info: - user_name: john - authz_name: example.com-authz - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_permissions }}" -''' - -RETURN = ''' -ovirt_permissions: - description: "List of dictionaries describing the permissions. 
Permission attributes are mapped to dictionary keys, - all permissions attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission." - returned: On success. - type: list -''' - -import traceback - -try: - import ovirtsdk4 as sdk -except ImportError: - pass - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_link_name, - ovirt_info_full_argument_spec, - search_by_name, -) - - -def _permissions_service(connection, module): - if module.params['user_name']: - service = connection.system_service().users_service() - entity = next( - iter( - service.list( - search='usrname={0}'.format( - '{0}@{1}'.format(module.params['user_name'], module.params['authz_name']) - ) - ) - ), - None - ) - else: - service = connection.system_service().groups_service() - entity = search_by_name(service, module.params['group_name']) - - if entity is None: - raise Exception("User/Group wasn't found.") - - return service.service(entity.id).permissions_service() - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - authz_name=dict(required=True, aliases=['domain']), - user_name=dict(default=None), - group_name=dict(default=None), - namespace=dict(default=None), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_permission_facts', 'community.general.ovirt_permission_facts') - if is_old_facts: - module.deprecate("The 'ovirt_permission_facts' module has been renamed to 'ovirt_permission_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - permissions_service = _permissions_service(connection, module) - permissions = [] - for 
p in permissions_service.list(): - newperm = dict() - for key, value in p.__dict__.items(): - if value and isinstance(value, sdk.Struct): - newperm[key[1:]] = get_link_name(connection, value) - newperm['%s_id' % key[1:]] = value.id - permissions.append(newperm) - - result = dict(ovirt_permissions=permissions) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_quota_facts.py b/plugins/modules/cloud/ovirt/ovirt_quota_facts.py deleted file mode 100644 index b2424305ae..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_quota_facts.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_quota_facts -short_description: Retrieve information about one or more oVirt/RHV quotas -author: "Maor Lipchuk (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. 
- alternative: Use M(ovirt.ovirt.ovirt_quota_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV quotas." -notes: - - "This module returns a variable C(ovirt_quotas), which - contains a list of quotas. You need to register the result with - the I(register) keyword to use it." -options: - data_center: - description: - - "Name of the datacenter where quota resides." - required: true - name: - description: - - "Name of the quota, can be used as glob expression." -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about quota named C in Default datacenter - ovirt_quota_info: - data_center: Default - name: myquota - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_quotas }}" -''' - -RETURN = ''' -ovirt_quotas: - description: "List of dictionaries describing the quotas. Quota attributes are mapped to dictionary keys, - all quotas attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/quota." - returned: On success. 
- type: list -''' - -import fnmatch -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, - search_by_name, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - data_center=dict(required=True), - name=dict(default=None), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_quota_facts', 'community.general.ovirt_quota_facts') - if is_old_facts: - module.deprecate("The 'ovirt_quota_facts' module has been renamed to 'ovirt_quota_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - datacenters_service = connection.system_service().data_centers_service() - dc_name = module.params['data_center'] - dc = search_by_name(datacenters_service, dc_name) - if dc is None: - raise Exception("Datacenter '%s' was not found." 
% dc_name) - - quotas_service = datacenters_service.service(dc.id).quotas_service() - if module.params['name']: - quotas = [ - e for e in quotas_service.list() - if fnmatch.fnmatch(e.name, module.params['name']) - ] - else: - quotas = quotas_service.list() - - result = dict( - ovirt_quotas=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in quotas - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py b/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py deleted file mode 100644 index eeaeb61051..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2017 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_scheduling_policy_facts -short_description: Retrieve information about one or more oVirt scheduling policies -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_scheduling_policy_info) instead. -description: - - "Retrieve information about one or more oVirt scheduling policies." -notes: - - "This module returns a variable C(ovirt_scheduling_policies), - which contains a list of scheduling policies. You need to register the result with - the I(register) keyword to use it." -options: - id: - description: - - "ID of the scheduling policy." - name: - description: - - "Name of the scheduling policy, can be used as glob expression." -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all scheduling policies with name InClusterUpgrade - ovirt_scheduling_policy_info: - name: InClusterUpgrade - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_scheduling_policies }}" -''' - -RETURN = ''' -ovirt_scheduling_policies: - description: "List of dictionaries describing the scheduling policies. - Scheduling policies attributes are mapped to dictionary keys, - all scheduling policies attributes can be found at following - url: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy." - returned: On success. 
- type: list -''' - -import fnmatch -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - id=dict(default=None), - name=dict(default=None), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_scheduling_policy_facts', 'community.general.ovirt_scheduling_policy_facts') - if is_old_facts: - module.deprecate("The 'ovirt_scheduling_policy_facts' module has been renamed to 'ovirt_scheduling_policy_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - system_service = connection.system_service() - sched_policies_service = system_service.scheduling_policies_service() - if module.params['name']: - sched_policies = [ - e for e in sched_policies_service.list() - if fnmatch.fnmatch(e.name, module.params['name']) - ] - elif module.params['id']: - sched_policies = [ - sched_policies_service.service(module.params['id']).get() - ] - else: - sched_policies = sched_policies_service.list() - - result = dict( - ovirt_scheduling_policies=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in sched_policies - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git 
a/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py b/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py deleted file mode 100644 index 737468835e..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_snapshot_facts -short_description: Retrieve information about one or more oVirt/RHV virtual machine snapshots -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_snapshot_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV virtual machine snapshots." -notes: - - "This module returns a variable C(ovirt_snapshots), which - contains a list of snapshots. You need to register the result with - the I(register) keyword to use it." -options: - vm: - description: - - "Name of the VM with snapshot." - required: true - description: - description: - - "Description of the snapshot, can be used as glob expression." - snapshot_id: - description: - - "Id of the snapshot we want to retrieve information about." 
-extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all snapshots which description start with update for VM named centos7 - ovirt_snapshot_info: - vm: centos7 - description: update* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_snapshots }}" -''' - -RETURN = ''' -ovirt_snapshots: - description: "List of dictionaries describing the snapshot. Snapshot attributes are mapped to dictionary keys, - all snapshot attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot." - returned: On success. - type: list -''' - - -import fnmatch -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, - search_by_name, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - vm=dict(required=True), - description=dict(default=None), - snapshot_id=dict(default=None), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_snapshot_facts', 'community.general.ovirt_snapshot_facts') - if is_old_facts: - module.deprecate("The 'ovirt_snapshot_facts' module has been renamed to 'ovirt_snapshot_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - vms_service = connection.system_service().vms_service() - vm_name = module.params['vm'] - vm = search_by_name(vms_service, vm_name) - if vm is None: - raise 
Exception("VM '%s' was not found." % vm_name) - - snapshots_service = vms_service.service(vm.id).snapshots_service() - if module.params['description']: - snapshots = [ - e for e in snapshots_service.list() - if fnmatch.fnmatch(e.description, module.params['description']) - ] - elif module.params['snapshot_id']: - snapshots = [ - snapshots_service.snapshot_service(module.params['snapshot_id']).get() - ] - else: - snapshots = snapshots_service.list() - - result = dict( - ovirt_snapshots=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in snapshots - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py b/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py deleted file mode 100644 index b9d814c121..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. 
If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_storage_domain_facts -short_description: Retrieve information about one or more oVirt/RHV storage domains -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_storage_domain_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV storage domains." -notes: - - "This module returns a variable C(ovirt_storage_domains), which - contains a list of storage domains. You need to register the result with - the I(register) keyword to use it." -options: - pattern: - description: - - "Search term which is accepted by oVirt/RHV search backend." - - "For example to search storage domain X from datacenter Y use following pattern: - name=X and datacenter=Y" -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: > - Gather information about all storage domains which names - start with data and belong to data center west - ovirt_storage_domain_info: - pattern: name=data* and datacenter=west - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_storage_domains }}" -''' - -RETURN = ''' -ovirt_storage_domains: - description: "List of dictionaries describing the storage domains. Storage_domain attributes are mapped to dictionary keys, - all storage domains attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain." - returned: On success. 
- type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - pattern=dict(default='', required=False), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_storage_domain_facts', 'community.general.ovirt_storage_domain_facts') - if is_old_facts: - module.deprecate("The 'ovirt_storage_domain_facts' module has been renamed to 'ovirt_storage_domain_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - storage_domains_service = connection.system_service().storage_domains_service() - storage_domains = storage_domains_service.list(search=module.params['pattern']) - result = dict( - ovirt_storage_domains=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in storage_domains - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py b/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py deleted file mode 100644 index 1c58327801..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/python 
-# -*- coding: utf-8 -*- -# -# Copyright (c) 2017 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_storage_template_facts -short_description: Retrieve information about one or more oVirt/RHV templates relate to a storage domain. -author: "Maor Lipchuk (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_storage_template_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV templates relate to a storage domain." -notes: - - "This module returns a variable C(ovirt_storage_templates), which - contains a list of templates. You need to register the result with - the I(register) keyword to use it." -options: - unregistered: - description: - - "Flag which indicates whether to get unregistered templates which contain one or more - disks which reside on a storage domain or diskless templates." - type: bool - default: false - max: - description: - - "Sets the maximum number of templates to return. If not specified all the templates are returned." - storage_domain: - description: - - "The storage domain name where the templates should be listed." 
-extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all templates which relate to a storage domain and are unregistered - ovirt_storage_template_info: - unregistered: yes - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_storage_templates }}" -''' - -RETURN = ''' -ovirt_storage_templates: - description: "List of dictionaries describing the Templates. Template attributes are mapped to dictionary keys, - all Templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template." - returned: On success. - type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, - get_id_by_name -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - storage_domain=dict(default=None), - max=dict(default=None, type='int'), - unregistered=dict(default=False, type='bool'), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_storage_template_facts', 'community.general.ovirt_storage_template_facts') - if is_old_facts: - module.deprecate("The 'ovirt_storage_template_facts' module has been renamed to 'ovirt_storage_template_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - storage_domains_service = connection.system_service().storage_domains_service() - sd_id = 
get_id_by_name(storage_domains_service, module.params['storage_domain']) - storage_domain_service = storage_domains_service.storage_domain_service(sd_id) - templates_service = storage_domain_service.templates_service() - - # Find the unregistered Template we want to register: - if module.params.get('unregistered'): - templates = templates_service.list(unregistered=True) - else: - templates = templates_service.list(max=module.params['max']) - result = dict( - ovirt_storage_templates=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in templates - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py b/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py deleted file mode 100644 index d024794849..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2017 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_storage_vm_facts -short_description: Retrieve information about one or more oVirt/RHV virtual machines relate to a storage domain. -author: "Maor Lipchuk (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_storage_vm_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV virtual machines relate to a storage domain." -notes: - - "This module returns a variable C(ovirt_storage_vms), which - contains a list of virtual machines. You need to register the result with - the I(register) keyword to use it." -options: - unregistered: - description: - - "Flag which indicates whether to get unregistered virtual machines which contain one or more - disks which reside on a storage domain or diskless virtual machines." - type: bool - default: false - max: - description: - - "Sets the maximum number of virtual machines to return. If not specified all the virtual machines are returned." - storage_domain: - description: - - "The storage domain name where the virtual machines should be listed." -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all VMs which relate to a storage domain and are unregistered - ovirt_vms_info: - unregistered: yes - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_storage_vms }}" -''' - -RETURN = ''' -ovirt_storage_vms: - description: "List of dictionaries describing the VMs. 
VM attributes are mapped to dictionary keys, - all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm." - returned: On success. - type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, - get_id_by_name -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - storage_domain=dict(default=None), - max=dict(default=None, type='int'), - unregistered=dict(default=False, type='bool'), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_storage_vm_facts', 'community.general.ovirt_storage_vm_facts') - if is_old_facts: - module.deprecate("The 'ovirt_storage_vm_facts' module has been renamed to 'ovirt_storage_vm_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - storage_domains_service = connection.system_service().storage_domains_service() - sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain']) - storage_domain_service = storage_domains_service.storage_domain_service(sd_id) - vms_service = storage_domain_service.vms_service() - - # Find the unregistered VM we want to register: - if module.params.get('unregistered'): - vms = vms_service.list(unregistered=True) - else: - vms = vms_service.list() - result = dict( - ovirt_storage_vms=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in vms - ], - ) - if is_old_facts: - module.exit_json(changed=False, 
ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_tag_facts.py b/plugins/modules/cloud/ovirt/ovirt_tag_facts.py deleted file mode 100644 index a6ce97dd42..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_tag_facts.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_tag_facts -short_description: Retrieve information about one or more oVirt/RHV tags -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_tag_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV tags." -notes: - - "This module returns a variable C(ovirt_tags), which - contains a list of tags. You need to register the result with - the I(register) keyword to use it." 
-options: - name: - description: - - "Name of the tag which should be listed." - vm: - description: - - "Name of the VM, which tags should be listed." - host: - description: - - "Name of the host, which tags should be listed." -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all tags, which names start with tag - ovirt_tag_info: - name: tag* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_tags }}" - -- name: Gather information about all tags, which are assigned to VM postgres - ovirt_tag_info: - vm: postgres - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_tags }}" - -- name: Gather information about all tags, which are assigned to host west - ovirt_tag_info: - host: west - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_tags }}" -''' - -RETURN = ''' -ovirt_tags: - description: "List of dictionaries describing the tags. Tags attributes are mapped to dictionary keys, - all tags attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag." - returned: On success. 
- type: list -''' - -import fnmatch -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, - search_by_name, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - name=dict(default=None), - host=dict(default=None), - vm=dict(default=None), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_tag_facts', 'community.general.ovirt_tag_facts') - if is_old_facts: - module.deprecate("The 'ovirt_tag_facts' module has been renamed to 'ovirt_tag_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - tags_service = connection.system_service().tags_service() - tags = [] - all_tags = tags_service.list() - if module.params['name']: - tags.extend([ - t for t in all_tags - if fnmatch.fnmatch(t.name, module.params['name']) - ]) - if module.params['host']: - hosts_service = connection.system_service().hosts_service() - host = search_by_name(hosts_service, module.params['host']) - if host is None: - raise Exception("Host '%s' was not found." % module.params['host']) - tags.extend(hosts_service.host_service(host.id).tags_service().list()) - if module.params['vm']: - vms_service = connection.system_service().vms_service() - vm = search_by_name(vms_service, module.params['vm']) - if vm is None: - raise Exception("Vm '%s' was not found." 
% module.params['vm']) - tags.extend(vms_service.vm_service(vm.id).tags_service().list()) - - if not (module.params['vm'] or module.params['host'] or module.params['name']): - tags = all_tags - - result = dict( - ovirt_tags=[ - get_dict_of_struct( - struct=t, - connection=connection, - fetch_nested=module.params['fetch_nested'], - attributes=module.params['nested_attributes'], - ) for t in tags - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_template_facts.py b/plugins/modules/cloud/ovirt/ovirt_template_facts.py deleted file mode 100644 index 7595c64afa..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_template_facts.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_template_facts -short_description: Retrieve information about one or more oVirt/RHV templates -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_template_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV templates." -notes: - - "This module returns a variable C(ovirt_templates), which - contains a list of templates. You need to register the result with - the I(register) keyword to use it." -options: - pattern: - description: - - "Search term which is accepted by oVirt/RHV search backend." - - "For example to search template X from datacenter Y use following pattern: - name=X and datacenter=Y" -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all templates which names start with centos and belongs to data center west - ovirt_template_info: - pattern: name=centos* and datacenter=west - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_templates }}" -''' - -RETURN = ''' -ovirt_templates: - description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys, - all templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template." - returned: On success. 
- type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - pattern=dict(default='', required=False), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_template_facts', 'community.general.ovirt_template_facts') - if is_old_facts: - module.deprecate("The 'ovirt_template_facts' module has been renamed to 'ovirt_template_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - templates_service = connection.system_service().templates_service() - templates = templates_service.list(search=module.params['pattern']) - result = dict( - ovirt_templates=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in templates - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_user_facts.py b/plugins/modules/cloud/ovirt/ovirt_user_facts.py deleted file mode 100644 index ce7ab8d661..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_user_facts.py +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. 
-# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_user_facts -short_description: Retrieve information about one or more oVirt/RHV users -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_user_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV users." -notes: - - "This module returns a variable C(ovirt_users), which - contains a list of users. You need to register the result with - the I(register) keyword to use it." -options: - pattern: - description: - - "Search term which is accepted by oVirt/RHV search backend." 
- - "For example to search user X use following pattern: name=X" -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all users which first names start with john - ovirt_user_info: - pattern: name=john* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_users }}" -''' - -RETURN = ''' -ovirt_users: - description: "List of dictionaries describing the users. User attributes are mapped to dictionary keys, - all users attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user." - returned: On success. - type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - pattern=dict(default='', required=False), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_user_facts', 'community.general.ovirt_user_facts') - if is_old_facts: - module.deprecate("The 'ovirt_user_facts' module has been renamed to 'ovirt_user_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - users_service = connection.system_service().users_service() - users = users_service.list(search=module.params['pattern']) - result = dict( - ovirt_users=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - 
attributes=module.params.get('nested_attributes'), - ) for c in users - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_vm_facts.py b/plugins/modules/cloud/ovirt/ovirt_vm_facts.py deleted file mode 100644 index a5182755e0..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_vm_facts.py +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_vm_facts -short_description: Retrieve information about one or more oVirt/RHV virtual machines -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_vm_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV virtual machines." 
-notes: - - "This module returns a variable C(ovirt_vms), which - contains a list of virtual machines. You need to register the result with - the I(register) keyword to use it." -options: - pattern: - description: - - "Search term which is accepted by oVirt/RHV search backend." - - "For example to search VM X from cluster Y use following pattern: - name=X and cluster=Y" - all_content: - description: - - "If I(true) all the attributes of the virtual machines should be - included in the response." - type: bool - default: false - case_sensitive: - description: - - "If I(true) performed search will take case into account." - type: bool - default: true - max: - description: - - "The maximum number of results to return." - next_run: - description: - - "Indicates if the returned result describes the virtual machine as it is currently running or if describes - the virtual machine with the modifications that have already been performed but that will only come into - effect when the virtual machine is restarted. By default the value is set by engine." - type: bool -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all VMs which names start with centos and belong to cluster west - ovirt_vm_info: - pattern: name=centos* and cluster=west - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_vms }}" - -- name: Gather info about next run configuration of virtual machine named myvm - ovirt_vm_info: - pattern: name=myvm - next_run: true - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_vms[0] }}" -''' - -RETURN = ''' -ovirt_vms: - description: "List of dictionaries describing the VMs. 
VM attributes are mapped to dictionary keys, - all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm." - returned: On success. - type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - pattern=dict(default='', required=False), - all_content=dict(default=False, type='bool'), - next_run=dict(default=None, type='bool'), - case_sensitive=dict(default=True, type='bool'), - max=dict(default=None, type='int'), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_vm_facts', 'community.general.ovirt_vm_facts') - if is_old_facts: - module.deprecate("The 'ovirt_vm_facts' module has been renamed to 'ovirt_vm_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - vms_service = connection.system_service().vms_service() - vms = vms_service.list( - search=module.params['pattern'], - all_content=module.params['all_content'], - case_sensitive=module.params['case_sensitive'], - max=module.params['max'], - ) - if module.params['next_run']: - vms = [vms_service.vm_service(vm.id).get(next_run=True) for vm in vms] - - result = dict( - ovirt_vms=[ - get_dict_of_struct( - struct=c, - connection=connection, - fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in vms - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception 
as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py b/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py deleted file mode 100644 index 24842be56c..0000000000 --- a/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: ovirt_vmpool_facts -short_description: Retrieve information about one or more oVirt/RHV vmpools -author: "Ondra Machacek (@machacekondra)" -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: When migrating to collection we decided to use only _info modules. - alternative: Use M(ovirt.ovirt.ovirt_vmpool_info) instead. -description: - - "Retrieve information about one or more oVirt/RHV vmpools." -notes: - - "This module returns a variable C(ovirt_vmpools), which - contains a list of vmpools. You need to register the result with - the I(register) keyword to use it." -options: - pattern: - description: - - "Search term which is accepted by oVirt/RHV search backend." 
- - "For example to search vmpool X: name=X" -extends_documentation_fragment: -- community.general.ovirt_facts - -''' - -EXAMPLES = ''' -# Examples don't contain auth parameter for simplicity, -# look at ovirt_auth module to see how to reuse authentication: - -- name: Gather information about all vm pools which names start with centos - ovirt_vmpool_info: - pattern: name=centos* - register: result - -- name: Print gathered information - ansible.builtin.debug: - msg: "{{ result.ovirt_vm_pools }}" -''' - -RETURN = ''' -ovirt_vm_pools: - description: "List of dictionaries describing the vmpools. Vm pool attributes are mapped to dictionary keys, - all vmpools attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool." - returned: On success. - type: list -''' - -import traceback - -from ansible.module_utils.common.removed import removed_module -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils._ovirt import ( - check_sdk, - create_connection, - get_dict_of_struct, - ovirt_info_full_argument_spec, -) - - -def main(): - argument_spec = ovirt_info_full_argument_spec( - pattern=dict(default='', required=False), - ) - module = AnsibleModule(argument_spec) - is_old_facts = module._name in ('ovirt_vmpool_facts', 'community.general.ovirt_vmpool_facts') - if is_old_facts: - module.deprecate("The 'ovirt_vmpool_facts' module has been renamed to 'ovirt_vmpool_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - check_sdk(module) - - try: - auth = module.params.pop('auth') - connection = create_connection(auth) - vmpools_service = connection.system_service().vm_pools_service() - vmpools = vmpools_service.list(search=module.params['pattern']) - result = dict( - ovirt_vm_pools=[ - get_dict_of_struct( - struct=c, - connection=connection, - 
fetch_nested=module.params.get('fetch_nested'), - attributes=module.params.get('nested_attributes'), - ) for c in vmpools - ], - ) - if is_old_facts: - module.exit_json(changed=False, ansible_facts=result) - else: - module.exit_json(changed=False, **result) - except Exception as e: - module.fail_json(msg=str(e), exception=traceback.format_exc()) - finally: - connection.close(logout=auth.get('token') is None) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_image_facts.py b/plugins/modules/cloud/scaleway/scaleway_image_facts.py deleted file mode 100644 index 31bbfa76ad..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_image_facts.py +++ /dev/null @@ -1,125 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_image_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favour of C(_info) module. - alternative: Use M(community.general.scaleway_image_info) instead. -short_description: Gather facts about the Scaleway images available. -description: - - Gather facts about the Scaleway images available. 
-author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.scaleway - - -options: - region: - type: str - description: - - Scaleway compute zone - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway images facts - community.general.scaleway_image_facts: - region: par1 -''' - -RETURN = r''' ---- -scaleway_image_facts: - description: Response from Scaleway API - returned: success - type: complex - sample: - "scaleway_image_facts": [ - { - "arch": "x86_64", - "creation_date": "2018-07-17T16:18:49.276456+00:00", - "default_bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": false, - "dtb": "", - "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.9.93 rev1" - }, - "extra_volumes": [], - "from_server": null, - "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0", - "modification_date": "2018-07-17T16:42:06.319315+00:00", - "name": "Debian Stretch", - "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", - "public": true, - "root_volume": { - "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd", - "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18", - "size": 25000000000, - "volume_type": "l_ssd" - }, - "state": "available" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION) - - -class ScalewayImageFacts(Scaleway): - - def __init__(self, module): - super(ScalewayImageFacts, 
self).__init__(module) - self.name = 'images' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - ansible_facts={'scaleway_image_facts': ScalewayImageFacts(module).get_resources()} - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_ip_facts.py b/plugins/modules/cloud/scaleway/scaleway_ip_facts.py deleted file mode 100644 index 4227f36067..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_ip_facts.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_ip_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favour of C(_info) module. - alternative: Use M(community.general.scaleway_ip_info) instead. -short_description: Gather facts about the Scaleway ips available. -description: - - Gather facts about the Scaleway ips available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example par1). 
- required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway ips facts - community.general.scaleway_ip_facts: - region: par1 -''' - -RETURN = r''' ---- -scaleway_ip_facts: - description: Response from Scaleway API - returned: success - type: complex - sample: - "scaleway_ip_facts": [ - { - "address": "163.172.170.243", - "id": "ea081794-a581-8899-8451-386ddaf0a451", - "organization": "3f709602-5e6c-4619-b80c-e324324324af", - "reverse": null, - "server": { - "id": "12f19bc7-109c-4517-954c-e6b3d0311363", - "name": "scw-e0d158" - } - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION, -) - - -class ScalewayIpFacts(Scaleway): - - def __init__(self, module): - super(ScalewayIpFacts, self).__init__(module) - self.name = 'ips' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - ansible_facts={'scaleway_ip_facts': ScalewayIpFacts(module).get_resources()} - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_organization_facts.py b/plugins/modules/cloud/scaleway/scaleway_organization_facts.py deleted file mode 100644 index ee571cdc1f..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_organization_facts.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General 
Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_organization_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favour of C(_info) module. - alternative: Use M(community.general.scaleway_organization_info) instead. -short_description: Gather facts about the Scaleway organizations available. -description: - - Gather facts about the Scaleway organizations available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -options: - api_url: - description: - - Scaleway API URL - default: 'https://account.scaleway.com' - aliases: ['base_url'] -extends_documentation_fragment: -- community.general.scaleway - -''' - -EXAMPLES = r''' -- name: Gather Scaleway organizations facts - community.general.scaleway_organization_facts: -''' - -RETURN = r''' ---- -scaleway_organization_facts: - description: Response from Scaleway API - returned: success - type: complex - sample: - "scaleway_organization_facts": [ - { - "address_city_name": "Paris", - "address_country_code": "FR", - "address_line1": "42 Rue de l'univers", - "address_line2": null, - "address_postal_code": "75042", - "address_subdivision_code": "FR-75", - "creation_date": "2018-08-06T13:43:28.508575+00:00", - "currency": "EUR", - "customer_class": "individual", - "id": "3f709602-5e6c-4619-b80c-e8432ferewtr", - "locale": "fr_FR", - "modification_date": "2018-08-06T14:56:41.401685+00:00", - "name": "James Bond", - "support_id": "694324", - "support_level": "basic", - "support_pin": "9324", - "users": [], - "vat_number": null, - "warnings": [] - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec -) - - -class ScalewayOrganizationFacts(Scaleway): - - def 
__init__(self, module): - super(ScalewayOrganizationFacts, self).__init__(module) - self.name = 'organizations' - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - ansible_facts={'scaleway_organization_facts': ScalewayOrganizationFacts(module).get_resources()} - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py b/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py deleted file mode 100644 index a43bfedb94..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py +++ /dev/null @@ -1,112 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_security_group_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favour of C(_info) module. - alternative: Use M(community.general.scaleway_security_group_info) instead. -short_description: Gather facts about the Scaleway security groups available. -description: - - Gather facts about the Scaleway security groups available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -options: - region: - type: str - description: - - Scaleway region to use (for example par1). 
- required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -extends_documentation_fragment: -- community.general.scaleway - -''' - -EXAMPLES = r''' -- name: Gather Scaleway security groups facts - community.general.scaleway_security_group_facts: - region: par1 -''' - -RETURN = r''' ---- -scaleway_security_group_facts: - description: Response from Scaleway API - returned: success - type: complex - sample: - "scaleway_security_group_facts": [ - { - "description": "test-ams", - "enable_default_security": true, - "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51", - "name": "test-ams", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "organization_default": false, - "servers": [ - { - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "name": "scw-e0d158" - } - ] - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION, -) - - -class ScalewaySecurityGroupFacts(Scaleway): - - def __init__(self, module): - super(ScalewaySecurityGroupFacts, self).__init__(module) - self.name = 'security_groups' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - ansible_facts={'scaleway_security_group_facts': ScalewaySecurityGroupFacts(module).get_resources()} - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_server_facts.py b/plugins/modules/cloud/scaleway/scaleway_server_facts.py deleted 
file mode 100644 index d3e7366934..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_server_facts.py +++ /dev/null @@ -1,195 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_server_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favour of C(_info) module. - alternative: Use M(community.general.scaleway_server_info) instead. -short_description: Gather facts about the Scaleway servers available. -description: - - Gather facts about the Scaleway servers available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example par1). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway servers facts - community.general.scaleway_server_facts: - region: par1 -''' - -RETURN = r''' ---- -scaleway_server_facts: - description: Response from Scaleway API - returned: success - type: complex - sample: - "scaleway_server_facts": [ - { - "arch": "x86_64", - "boot_type": "local", - "bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": true, - "dtb": "", - "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.4.127 rev1" - }, - "commercial_type": "START1-XS", - 
"creation_date": "2018-08-14T21:36:56.271545+00:00", - "dynamic_ip_required": false, - "enable_ipv6": false, - "extra_networks": [], - "hostname": "scw-e0d256", - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "image": { - "arch": "x86_64", - "creation_date": "2018-04-26T12:42:21.619844+00:00", - "default_bootscript": { - "architecture": "x86_64", - "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", - "default": true, - "dtb": "", - "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9", - "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz", - "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127", - "organization": "11111111-1111-4111-8111-111111111111", - "public": true, - "title": "x86_64 mainline 4.4.127 rev1" - }, - "extra_volumes": [], - "from_server": null, - "id": "67375eb1-f14d-4f02-bb42-6119cecbde51", - "modification_date": "2018-04-26T12:49:07.573004+00:00", - "name": "Ubuntu Xenial", - "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", - "public": true, - "root_volume": { - "id": "020b8d61-3867-4a0e-84a4-445c5393e05d", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", - "size": 25000000000, - "volume_type": "l_ssd" - }, - "state": "available" - }, - "ipv6": null, - "location": { - "cluster_id": "5", - "hypervisor_id": "412", - "node_id": "2", - "platform_id": "13", - "zone_id": "par1" - }, - "maintenances": [], - "modification_date": "2018-08-14T21:37:28.630882+00:00", - "name": "scw-e0d256", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "private_ip": "10.14.222.131", - "protected": false, - "public_ip": { - "address": "163.172.170.197", - "dynamic": false, - "id": "ea081794-a581-4495-8451-386ddaf0a451" - }, - "security_group": { - "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e", - "name": "Default security group" - }, - "state": "running", - "state_detail": "booted", - "tags": [], - "volumes": { - "0": { - "creation_date": 
"2018-08-14T21:36:56.271545+00:00", - "export_uri": "device://dev/vda", - "id": "68386fae-4f55-4fbf-aabb-953036a85872", - "modification_date": "2018-08-14T21:36:56.271545+00:00", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "server": { - "id": "12f19bc7-108c-4517-954c-e6b3d0311363", - "name": "scw-e0d256" - }, - "size": 25000000000, - "state": "available", - "volume_type": "l_ssd" - } - } - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION, -) - - -class ScalewayServerFacts(Scaleway): - - def __init__(self, module): - super(ScalewayServerFacts, self).__init__(module) - self.name = 'servers' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - ansible_facts={'scaleway_server_facts': ScalewayServerFacts(module).get_resources()} - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py b/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py deleted file mode 100644 index 25f99e729b..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) 
-__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_snapshot_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favour of C(_info) module. - alternative: Use M(community.general.scaleway_snapshot_info) instead. -short_description: Gather facts about the Scaleway snapshots available. -description: - - Gather facts about the Scaleway snapshot available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example par1). - required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway snapshots facts - community.general.scaleway_snapshot_facts: - region: par1 -''' - -RETURN = r''' ---- -scaleway_snapshot_facts: - description: Response from Scaleway API - returned: success - type: complex - sample: - "scaleway_snapshot_facts": [ - { - "base_volume": { - "id": "68386fae-4f55-4fbf-aabb-953036a85872", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42" - }, - "creation_date": "2018-08-14T22:34:35.299461+00:00", - "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2", - "modification_date": "2018-08-14T22:34:54.520560+00:00", - "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "size": 25000000000, - "state": "available", - "volume_type": "l_ssd" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, - ScalewayException, - scaleway_argument_spec, - SCALEWAY_LOCATION -) - - -class ScalewaySnapshotFacts(Scaleway): - - def __init__(self, module): - super(ScalewaySnapshotFacts, self).__init__(module) - self.name = 'snapshots' - - region = 
module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - ansible_facts={'scaleway_snapshot_facts': ScalewaySnapshotFacts(module).get_resources()} - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/scaleway/scaleway_volume_facts.py b/plugins/modules/cloud/scaleway/scaleway_volume_facts.py deleted file mode 100644 index e894f96548..0000000000 --- a/plugins/modules/cloud/scaleway/scaleway_volume_facts.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2018, Yanis Guenane -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: scaleway_volume_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favour of C(_info) module. - alternative: Use M(community.general.scaleway_volume_info) instead. -short_description: Gather facts about the Scaleway volumes available. -description: - - Gather facts about the Scaleway volumes available. -author: - - "Yanis Guenane (@Spredzy)" - - "Remy Leone (@sieben)" -extends_documentation_fragment: -- community.general.scaleway - -options: - region: - type: str - description: - - Scaleway region to use (for example par1). 
- required: true - choices: - - ams1 - - EMEA-NL-EVS - - par1 - - EMEA-FR-PAR1 - - par2 - - EMEA-FR-PAR2 - - waw1 - - EMEA-PL-WAW1 -''' - -EXAMPLES = r''' -- name: Gather Scaleway volumes facts - community.general.scaleway_volume_facts: - region: par1 -''' - -RETURN = r''' ---- -scaleway_volume_facts: - description: Response from Scaleway API - returned: success - type: complex - sample: - "scaleway_volume_facts": [ - { - "creation_date": "2018-08-14T20:56:24.949660+00:00", - "export_uri": null, - "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba", - "modification_date": "2018-08-14T20:56:24.949660+00:00", - "name": "test-volume", - "organization": "3f709602-5e6c-4619-b80c-e841c89734af", - "server": null, - "size": 50000000000, - "state": "available", - "volume_type": "l_ssd" - } - ] -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.scaleway import ( - Scaleway, ScalewayException, scaleway_argument_spec, - SCALEWAY_LOCATION) - - -class ScalewayVolumeFacts(Scaleway): - - def __init__(self, module): - super(ScalewayVolumeFacts, self).__init__(module) - self.name = 'volumes' - - region = module.params["region"] - self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"] - - -def main(): - argument_spec = scaleway_argument_spec() - argument_spec.update(dict( - region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, - ) - - try: - module.exit_json( - ansible_facts={'scaleway_volume_facts': ScalewayVolumeFacts(module).get_resources()} - ) - except ScalewayException as exc: - module.fail_json(msg=exc.message) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud/smartos/smartos_image_facts.py b/plugins/modules/cloud/smartos/smartos_image_facts.py deleted file mode 120000 index d7206ed938..0000000000 --- a/plugins/modules/cloud/smartos/smartos_image_facts.py +++ 
/dev/null @@ -1 +0,0 @@ -smartos_image_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/smartos/smartos_image_info.py b/plugins/modules/cloud/smartos/smartos_image_info.py index 45d8e34085..f1c75bc26c 100644 --- a/plugins/modules/cloud/smartos/smartos_image_info.py +++ b/plugins/modules/cloud/smartos/smartos_image_info.py @@ -47,9 +47,6 @@ EXAMPLES = ''' has {{ result.smartos_images[item]['clones'] }} VM(s)" with_items: "{{ result.smartos_images.keys() | list }}" -# When the module is called as smartos_image_facts, return values are published -# in ansible_facts['smartos_images'] and can be used as follows. -# Note that this is deprecated and will stop working in community.general 3.0.0. - name: Print information ansible.builtin.debug: msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} @@ -102,20 +99,12 @@ def main(): ), supports_check_mode=False, ) - is_old_facts = module._name in ('smartos_image_facts', 'community.general.smartos_image_facts') - if is_old_facts: - module.deprecate("The 'smartos_image_facts' module has been renamed to 'smartos_image_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 image_facts = ImageFacts(module) data = dict(smartos_images=image_facts.return_all_installed_images()) - if is_old_facts: - module.exit_json(ansible_facts=data) - else: - module.exit_json(**data) + module.exit_json(**data) if __name__ == '__main__': diff --git a/plugins/modules/cloud/xenserver/xenserver_guest_facts.py b/plugins/modules/cloud/xenserver/xenserver_guest_facts.py deleted file mode 120000 index e4f2c814ea..0000000000 --- a/plugins/modules/cloud/xenserver/xenserver_guest_facts.py +++ /dev/null @@ -1 +0,0 @@ -xenserver_guest_info.py \ No newline at end of file diff --git a/plugins/modules/cloud/xenserver/xenserver_guest_info.py b/plugins/modules/cloud/xenserver/xenserver_guest_info.py index d3260b6e73..a2e777253e 100644 
--- a/plugins/modules/cloud/xenserver/xenserver_guest_info.py +++ b/plugins/modules/cloud/xenserver/xenserver_guest_info.py @@ -204,10 +204,6 @@ def main(): ], ) - if module._name in ('xenserver_guest_facts', 'community.general.xenserver_guest_facts'): - module.deprecate("The 'xenserver_guest_facts' module has been renamed to 'xenserver_guest_info'", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - result = {'failed': False, 'changed': False} # Module will exit with an error message if no VM is found. diff --git a/plugins/modules/database/vertica/vertica_facts.py b/plugins/modules/database/vertica/vertica_facts.py deleted file mode 120000 index bf964af0ae..0000000000 --- a/plugins/modules/database/vertica/vertica_facts.py +++ /dev/null @@ -1 +0,0 @@ -vertica_info.py \ No newline at end of file diff --git a/plugins/modules/database/vertica/vertica_info.py b/plugins/modules/database/vertica/vertica_info.py index ace130b89d..c0aa94be1e 100644 --- a/plugins/modules/database/vertica/vertica_info.py +++ b/plugins/modules/database/vertica/vertica_info.py @@ -233,11 +233,6 @@ def main(): login_user=dict(default='dbadmin'), login_password=dict(default=None, no_log=True), ), supports_check_mode=True) - is_old_facts = module._name in ('vertica_facts', 'community.general.vertica_facts') - if is_old_facts: - module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 if not pyodbc_found: module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR) @@ -269,20 +264,12 @@ def main(): configuration_facts = get_configuration_facts(cursor) node_facts = get_node_facts(cursor) - if is_old_facts: - module.exit_json(changed=False, - ansible_facts={'vertica_schemas': schema_facts, - 'vertica_users': user_facts, - 'vertica_roles': role_facts, - 'vertica_configuration': configuration_facts, - 
'vertica_nodes': node_facts}) - else: - module.exit_json(changed=False, - vertica_schemas=schema_facts, - vertica_users=user_facts, - vertica_roles=role_facts, - vertica_configuration=configuration_facts, - vertica_nodes=node_facts) + module.exit_json(changed=False, + vertica_schemas=schema_facts, + vertica_users=user_facts, + vertica_roles=role_facts, + vertica_configuration=configuration_facts, + vertica_nodes=node_facts) except NotSupportedError as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) except SystemExit: diff --git a/plugins/modules/gluster_heal_info.py b/plugins/modules/gluster_heal_info.py deleted file mode 120000 index 7db0a47d9c..0000000000 --- a/plugins/modules/gluster_heal_info.py +++ /dev/null @@ -1 +0,0 @@ -./storage/glusterfs/gluster_heal_info.py \ No newline at end of file diff --git a/plugins/modules/gluster_peer.py b/plugins/modules/gluster_peer.py deleted file mode 120000 index a54980c8b8..0000000000 --- a/plugins/modules/gluster_peer.py +++ /dev/null @@ -1 +0,0 @@ -./storage/glusterfs/gluster_peer.py \ No newline at end of file diff --git a/plugins/modules/gluster_volume.py b/plugins/modules/gluster_volume.py deleted file mode 120000 index 3e9f5c1b6c..0000000000 --- a/plugins/modules/gluster_volume.py +++ /dev/null @@ -1 +0,0 @@ -./storage/glusterfs/gluster_volume.py \ No newline at end of file diff --git a/plugins/modules/helm.py b/plugins/modules/helm.py deleted file mode 120000 index b0554c73cf..0000000000 --- a/plugins/modules/helm.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/misc/helm.py \ No newline at end of file diff --git a/plugins/modules/hpilo_facts.py b/plugins/modules/hpilo_facts.py deleted file mode 120000 index 966a95c5e5..0000000000 --- a/plugins/modules/hpilo_facts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/hpilo/hpilo_facts.py \ No newline at end of file diff --git a/plugins/modules/identity/onepassword_facts.py b/plugins/modules/identity/onepassword_facts.py deleted file mode 120000 index 
4e4c2b117c..0000000000 --- a/plugins/modules/identity/onepassword_facts.py +++ /dev/null @@ -1 +0,0 @@ -onepassword_info.py \ No newline at end of file diff --git a/plugins/modules/identity/onepassword_info.py b/plugins/modules/identity/onepassword_info.py index a085331e7d..42a6311c0d 100644 --- a/plugins/modules/identity/onepassword_info.py +++ b/plugins/modules/identity/onepassword_info.py @@ -20,9 +20,6 @@ requirements: notes: - Tested with C(op) version 0.5.5 - "Based on the C(onepassword) lookup plugin by Scott Buchanan ." - - When this module is called with the deprecated C(onepassword_facts) name, potentially sensitive data - from 1Password is returned as Ansible facts. Facts are subject to caching if enabled, which means this - data could be stored in clear text on disk or in a database. short_description: Gather items from 1Password description: - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items. @@ -380,13 +377,7 @@ def main(): results = {'onepassword': OnePasswordInfo().run()} - if module._name in ('onepassword_facts', 'community.general.onepassword_facts'): - module.deprecate("The 'onepassword_facts' module has been renamed to 'onepassword_info'. 
" - "When called with the new name it no longer returns 'ansible_facts'", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - module.exit_json(changed=False, ansible_facts=results) - else: - module.exit_json(changed=False, **results) + module.exit_json(changed=False, **results) if __name__ == '__main__': diff --git a/plugins/modules/idrac_redfish_facts.py b/plugins/modules/idrac_redfish_facts.py deleted file mode 120000 index 90b0eeef45..0000000000 --- a/plugins/modules/idrac_redfish_facts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/redfish/idrac_redfish_facts.py \ No newline at end of file diff --git a/plugins/modules/jenkins_job_facts.py b/plugins/modules/jenkins_job_facts.py deleted file mode 120000 index 3a73187dd7..0000000000 --- a/plugins/modules/jenkins_job_facts.py +++ /dev/null @@ -1 +0,0 @@ -web_infrastructure/jenkins_job_facts.py \ No newline at end of file diff --git a/plugins/modules/ldap_attr.py b/plugins/modules/ldap_attr.py deleted file mode 120000 index eebbcfd69e..0000000000 --- a/plugins/modules/ldap_attr.py +++ /dev/null @@ -1 +0,0 @@ -./net_tools/ldap/ldap_attr.py \ No newline at end of file diff --git a/plugins/modules/memset_memstore_facts.py b/plugins/modules/memset_memstore_facts.py deleted file mode 120000 index 60ab47ac21..0000000000 --- a/plugins/modules/memset_memstore_facts.py +++ /dev/null @@ -1 +0,0 @@ -cloud/memset/memset_memstore_facts.py \ No newline at end of file diff --git a/plugins/modules/memset_server_facts.py b/plugins/modules/memset_server_facts.py deleted file mode 120000 index 74e604166c..0000000000 --- a/plugins/modules/memset_server_facts.py +++ /dev/null @@ -1 +0,0 @@ -cloud/memset/memset_server_facts.py \ No newline at end of file diff --git a/plugins/modules/na_ontap_gather_facts.py b/plugins/modules/na_ontap_gather_facts.py deleted file mode 120000 index 71f4d1b45f..0000000000 --- a/plugins/modules/na_ontap_gather_facts.py +++ /dev/null @@ -1 +0,0 @@ 
-./storage/netapp/na_ontap_gather_facts.py \ No newline at end of file diff --git a/plugins/modules/net_tools/ldap/ldap_attr.py b/plugins/modules/net_tools/ldap/ldap_attr.py deleted file mode 100644 index f983b85745..0000000000 --- a/plugins/modules/net_tools/ldap/ldap_attr.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2016, Peter Sagerson -# Copyright: (c) 2016, Jiri Tyr -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: ldap_attr -short_description: Add or remove LDAP attribute values -description: - - Add or remove LDAP attribute values. -notes: - - This only deals with attributes on existing entries. To add or remove - whole entries, see M(community.general.ldap_entry). - - The default authentication settings will attempt to use a SASL EXTERNAL - bind over a UNIX domain socket. This works well with the default Ubuntu - install for example, which includes a cn=peercred,cn=external,cn=auth ACL - rule allowing root to modify the server configuration. If you need to use - a simple bind to access your server, pass the credentials in I(bind_dn) - and I(bind_pw). - - For I(state=present) and I(state=absent), all value comparisons are - performed on the server for maximum accuracy. For I(state=exact), values - have to be compared in Python, which obviously ignores LDAP matching - rules. This should work out in most cases, but it is theoretically - possible to see spurious changes when target and actual values are - semantically identical but lexically distinct. - - "The I(params) parameter was removed due to circumventing Ansible's parameter - handling. The I(params) parameter started disallowing setting the I(bind_pw) parameter in - Ansible-2.7 as it was insecure to set the parameter that way." 
-deprecated: - removed_in: 3.0.0 # was Ansible 2.14 - why: 'The current "ldap_attr" module does not support LDAP attribute insertions or deletions with objectClass dependencies.' - alternative: 'Use M(community.general.ldap_attrs) instead. Deprecated in community.general 0.2.0.' -author: - - Jiri Tyr (@jtyr) -requirements: - - python-ldap -options: - name: - description: - - The name of the attribute to modify. - type: str - required: true - state: - description: - - The state of the attribute values. - - If C(present), all given values will be added if they're missing. - - If C(absent), all given values will be removed if present. - - If C(exact), the set of values will be forced to exactly those provided and no others. - - If I(state=exact) and I(value) is an empty list, all values for this attribute will be removed. - type: str - choices: [ absent, exact, present ] - default: present - values: - description: - - The value(s) to add or remove. This can be a string or a list of - strings. The complex argument format is required in order to pass - a list of strings (see examples). - type: raw - required: true -extends_documentation_fragment: -- community.general.ldap.documentation - -''' - -EXAMPLES = r''' -- name: Configure directory number 1 for example.com - community.general.ldap_attr: - dn: olcDatabase={1}hdb,cn=config - name: olcSuffix - values: dc=example,dc=com - state: exact - -# The complex argument format is required here to pass a list of ACL strings. 
-- name: Set up the ACL - community.general.ldap_attr: - dn: olcDatabase={1}hdb,cn=config - name: olcAccess - values: - - >- - {0}to attrs=userPassword,shadowLastChange - by self write - by anonymous auth - by dn="cn=admin,dc=example,dc=com" write - by * none' - - >- - {1}to dn.base="dc=example,dc=com" - by dn="cn=admin,dc=example,dc=com" write - by * read - state: exact - -- name: Declare some indexes - community.general.ldap_attr: - dn: olcDatabase={1}hdb,cn=config - name: olcDbIndex - values: "{{ item }}" - with_items: - - objectClass eq - - uid eq - -- name: Set up a root user, which we can use later to bootstrap the directory - community.general.ldap_attr: - dn: olcDatabase={1}hdb,cn=config - name: "{{ item.key }}" - values: "{{ item.value }}" - state: exact - with_dict: - olcRootDN: cn=root,dc=example,dc=com - olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND" - -- name: Get rid of an unneeded attribute - community.general.ldap_attr: - dn: uid=jdoe,ou=people,dc=example,dc=com - name: shadowExpire - values: [] - state: exact - server_uri: ldap://localhost/ - bind_dn: cn=admin,dc=example,dc=com - bind_pw: password - -# -# The same as in the previous example but with the authentication details -# stored in the ldap_auth variable: -# -# ldap_auth: -# server_uri: ldap://localhost/ -# bind_dn: cn=admin,dc=example,dc=com -# bind_pw: password -# -# In the example below, 'args' is a task keyword, passed at the same level as the module -- name: Get rid of an unneeded attribute - community.general.ldap_attr: - dn: uid=jdoe,ou=people,dc=example,dc=com - name: shadowExpire - values: [] - state: exact - args: "{{ ldap_auth }}" -''' - -RETURN = r''' -modlist: - description: list of modified parameters - returned: success - type: list - sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]' -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native, to_bytes -from 
ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs - -LDAP_IMP_ERR = None -try: - import ldap - - HAS_LDAP = True -except ImportError: - LDAP_IMP_ERR = traceback.format_exc() - HAS_LDAP = False - - -class LdapAttr(LdapGeneric): - def __init__(self, module): - LdapGeneric.__init__(self, module) - - # Shortcuts - self.name = self.module.params['name'] - self.state = self.module.params['state'] - - # Normalize values - if isinstance(self.module.params['values'], list): - self.values = list(map(to_bytes, self.module.params['values'])) - else: - self.values = [to_bytes(self.module.params['values'])] - - def add(self): - values_to_add = list(filter(self._is_value_absent, self.values)) - - if len(values_to_add) > 0: - modlist = [(ldap.MOD_ADD, self.name, values_to_add)] - else: - modlist = [] - - return modlist - - def delete(self): - values_to_delete = list(filter(self._is_value_present, self.values)) - - if len(values_to_delete) > 0: - modlist = [(ldap.MOD_DELETE, self.name, values_to_delete)] - else: - modlist = [] - - return modlist - - def exact(self): - try: - results = self.connection.search_s( - self.dn, ldap.SCOPE_BASE, attrlist=[self.name]) - except ldap.LDAPError as e: - self.fail("Cannot search for attribute %s" % self.name, e) - - current = results[0][1].get(self.name, []) - modlist = [] - - if frozenset(self.values) != frozenset(current): - if len(current) == 0: - modlist = [(ldap.MOD_ADD, self.name, self.values)] - elif len(self.values) == 0: - modlist = [(ldap.MOD_DELETE, self.name, None)] - else: - modlist = [(ldap.MOD_REPLACE, self.name, self.values)] - - return modlist - - def _is_value_present(self, value): - """ True if the target attribute has the given value. 
""" - try: - is_present = bool( - self.connection.compare_s(self.dn, self.name, value)) - except ldap.NO_SUCH_ATTRIBUTE: - is_present = False - - return is_present - - def _is_value_absent(self, value): - """ True if the target attribute doesn't have the given value. """ - return not self._is_value_present(value) - - -def main(): - module = AnsibleModule( - argument_spec=gen_specs( - name=dict(type='str', required=True), - params=dict(type='dict'), - state=dict(type='str', default='present', choices=['absent', 'exact', 'present']), - values=dict(type='raw', required=True), - ), - supports_check_mode=True, - ) - - if not HAS_LDAP: - module.fail_json(msg=missing_required_lib('python-ldap'), - exception=LDAP_IMP_ERR) - - if module.params['params']: - module.fail_json(msg="The `params` option to ldap_attr was removed in since it circumvents Ansible's option handling") - - # Instantiate the LdapAttr object - ldap = LdapAttr(module) - - state = module.params['state'] - - # Perform action - if state == 'present': - modlist = ldap.add() - elif state == 'absent': - modlist = ldap.delete() - elif state == 'exact': - modlist = ldap.exact() - - changed = False - - if len(modlist) > 0: - changed = True - - if not module.check_mode: - try: - ldap.connection.modify_s(ldap.dn, modlist) - except Exception as e: - module.fail_json(msg="Attribute action failed.", details=to_native(e)) - - module.exit_json(changed=changed, modlist=modlist) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/net_tools/ldap/ldap_entry.py b/plugins/modules/net_tools/ldap/ldap_entry.py index 7ee0c3ddec..093c49ad50 100644 --- a/plugins/modules/net_tools/ldap/ldap_entry.py +++ b/plugins/modules/net_tools/ldap/ldap_entry.py @@ -17,7 +17,7 @@ short_description: Add or remove LDAP entries. description: - Add or remove LDAP entries. This module only asserts the existence or non-existence of an LDAP entry, not its attributes. 
To assert the - attribute values of an entry, see M(community.general.ldap_attr). + attribute values of an entry, see M(community.general.ldap_attrs). notes: - The default authentication settings will attempt to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with the default Ubuntu @@ -37,7 +37,7 @@ options: description: - If I(state=present), attributes necessary to create an entry. Existing entries are never modified. To assert specific attribute values on an - existing entry, use M(community.general.ldap_attr) module instead. + existing entry, use M(community.general.ldap_attrs) module instead. type: dict objectClass: description: @@ -199,7 +199,7 @@ def main(): exception=LDAP_IMP_ERR) if module.params['params']: - module.fail_json(msg="The `params` option to ldap_attr was removed since it circumvents Ansible's option handling") + module.fail_json(msg="The `params` option to ldap_entry was removed since it circumvents Ansible's option handling") state = module.params['state'] diff --git a/plugins/modules/nginx_status_facts.py b/plugins/modules/nginx_status_facts.py deleted file mode 120000 index 40b80e4ccb..0000000000 --- a/plugins/modules/nginx_status_facts.py +++ /dev/null @@ -1 +0,0 @@ -./web_infrastructure/nginx_status_facts.py \ No newline at end of file diff --git a/plugins/modules/one_image_facts.py b/plugins/modules/one_image_facts.py deleted file mode 120000 index 8466150797..0000000000 --- a/plugins/modules/one_image_facts.py +++ /dev/null @@ -1 +0,0 @@ -cloud/opennebula/one_image_facts.py \ No newline at end of file diff --git a/plugins/modules/onepassword_facts.py b/plugins/modules/onepassword_facts.py deleted file mode 120000 index fb2d91a6b0..0000000000 --- a/plugins/modules/onepassword_facts.py +++ /dev/null @@ -1 +0,0 @@ -identity/onepassword_facts.py \ No newline at end of file diff --git a/plugins/modules/oneview_datacenter_facts.py b/plugins/modules/oneview_datacenter_facts.py deleted file mode 120000 index 
9339da90e3..0000000000 --- a/plugins/modules/oneview_datacenter_facts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_datacenter_facts.py \ No newline at end of file diff --git a/plugins/modules/oneview_enclosure_facts.py b/plugins/modules/oneview_enclosure_facts.py deleted file mode 120000 index 6ecbdf647b..0000000000 --- a/plugins/modules/oneview_enclosure_facts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_enclosure_facts.py \ No newline at end of file diff --git a/plugins/modules/oneview_ethernet_network_facts.py b/plugins/modules/oneview_ethernet_network_facts.py deleted file mode 120000 index cd2fbecc10..0000000000 --- a/plugins/modules/oneview_ethernet_network_facts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_ethernet_network_facts.py \ No newline at end of file diff --git a/plugins/modules/oneview_fc_network_facts.py b/plugins/modules/oneview_fc_network_facts.py deleted file mode 120000 index 29a5b85424..0000000000 --- a/plugins/modules/oneview_fc_network_facts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_fc_network_facts.py \ No newline at end of file diff --git a/plugins/modules/oneview_fcoe_network_facts.py b/plugins/modules/oneview_fcoe_network_facts.py deleted file mode 120000 index 82e76c0eac..0000000000 --- a/plugins/modules/oneview_fcoe_network_facts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_fcoe_network_facts.py \ No newline at end of file diff --git a/plugins/modules/oneview_logical_interconnect_group_facts.py b/plugins/modules/oneview_logical_interconnect_group_facts.py deleted file mode 120000 index 0bd235c44e..0000000000 --- a/plugins/modules/oneview_logical_interconnect_group_facts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_logical_interconnect_group_facts.py \ No newline at end of file diff --git a/plugins/modules/oneview_network_set_facts.py b/plugins/modules/oneview_network_set_facts.py deleted file mode 120000 
index c3908dc8c5..0000000000 --- a/plugins/modules/oneview_network_set_facts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_network_set_facts.py \ No newline at end of file diff --git a/plugins/modules/oneview_san_manager_facts.py b/plugins/modules/oneview_san_manager_facts.py deleted file mode 120000 index 5dbf968ec2..0000000000 --- a/plugins/modules/oneview_san_manager_facts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/oneview/oneview_san_manager_facts.py \ No newline at end of file diff --git a/plugins/modules/online_server_facts.py b/plugins/modules/online_server_facts.py deleted file mode 120000 index 14f853f0df..0000000000 --- a/plugins/modules/online_server_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/online/online_server_facts.py \ No newline at end of file diff --git a/plugins/modules/online_user_facts.py b/plugins/modules/online_user_facts.py deleted file mode 120000 index 9a2c3d4554..0000000000 --- a/plugins/modules/online_user_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/online/online_user_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt.py b/plugins/modules/ovirt.py deleted file mode 120000 index f216c1e261..0000000000 --- a/plugins/modules/ovirt.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/misc/ovirt.py \ No newline at end of file diff --git a/plugins/modules/ovirt_affinity_label_facts.py b/plugins/modules/ovirt_affinity_label_facts.py deleted file mode 120000 index 475d598263..0000000000 --- a/plugins/modules/ovirt_affinity_label_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_affinity_label_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_api_facts.py b/plugins/modules/ovirt_api_facts.py deleted file mode 120000 index 13e7347b13..0000000000 --- a/plugins/modules/ovirt_api_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_api_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_cluster_facts.py b/plugins/modules/ovirt_cluster_facts.py deleted file 
mode 120000 index 435e641fc5..0000000000 --- a/plugins/modules/ovirt_cluster_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_cluster_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_datacenter_facts.py b/plugins/modules/ovirt_datacenter_facts.py deleted file mode 120000 index 9dabaa5584..0000000000 --- a/plugins/modules/ovirt_datacenter_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_datacenter_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_disk_facts.py b/plugins/modules/ovirt_disk_facts.py deleted file mode 120000 index 25c83690ab..0000000000 --- a/plugins/modules/ovirt_disk_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_disk_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_event_facts.py b/plugins/modules/ovirt_event_facts.py deleted file mode 120000 index ef780af7b5..0000000000 --- a/plugins/modules/ovirt_event_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_event_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_external_provider_facts.py b/plugins/modules/ovirt_external_provider_facts.py deleted file mode 120000 index 1032fdf813..0000000000 --- a/plugins/modules/ovirt_external_provider_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_external_provider_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_group_facts.py b/plugins/modules/ovirt_group_facts.py deleted file mode 120000 index 8c18fb2093..0000000000 --- a/plugins/modules/ovirt_group_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_group_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_host_facts.py b/plugins/modules/ovirt_host_facts.py deleted file mode 120000 index 7f5e9fd2fb..0000000000 --- a/plugins/modules/ovirt_host_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_host_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_host_storage_facts.py 
b/plugins/modules/ovirt_host_storage_facts.py deleted file mode 120000 index 76c4c589d1..0000000000 --- a/plugins/modules/ovirt_host_storage_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_host_storage_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_network_facts.py b/plugins/modules/ovirt_network_facts.py deleted file mode 120000 index a1ee16d2fb..0000000000 --- a/plugins/modules/ovirt_network_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_network_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_nic_facts.py b/plugins/modules/ovirt_nic_facts.py deleted file mode 120000 index f1d06f7145..0000000000 --- a/plugins/modules/ovirt_nic_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_nic_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_permission_facts.py b/plugins/modules/ovirt_permission_facts.py deleted file mode 120000 index dead73d39d..0000000000 --- a/plugins/modules/ovirt_permission_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_permission_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_quota_facts.py b/plugins/modules/ovirt_quota_facts.py deleted file mode 120000 index b2ce94e972..0000000000 --- a/plugins/modules/ovirt_quota_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_quota_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_scheduling_policy_facts.py b/plugins/modules/ovirt_scheduling_policy_facts.py deleted file mode 120000 index 33d453afe6..0000000000 --- a/plugins/modules/ovirt_scheduling_policy_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_scheduling_policy_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_snapshot_facts.py b/plugins/modules/ovirt_snapshot_facts.py deleted file mode 120000 index d0121c4bd1..0000000000 --- a/plugins/modules/ovirt_snapshot_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_snapshot_facts.py \ No newline at end of file 
diff --git a/plugins/modules/ovirt_storage_domain_facts.py b/plugins/modules/ovirt_storage_domain_facts.py deleted file mode 120000 index 1a077fb571..0000000000 --- a/plugins/modules/ovirt_storage_domain_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_storage_domain_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_storage_template_facts.py b/plugins/modules/ovirt_storage_template_facts.py deleted file mode 120000 index 968a987ad2..0000000000 --- a/plugins/modules/ovirt_storage_template_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_storage_template_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_storage_vm_facts.py b/plugins/modules/ovirt_storage_vm_facts.py deleted file mode 120000 index 481e49c860..0000000000 --- a/plugins/modules/ovirt_storage_vm_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_storage_vm_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_tag_facts.py b/plugins/modules/ovirt_tag_facts.py deleted file mode 120000 index b4e6ccb8ec..0000000000 --- a/plugins/modules/ovirt_tag_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_tag_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_template_facts.py b/plugins/modules/ovirt_template_facts.py deleted file mode 120000 index c823c91888..0000000000 --- a/plugins/modules/ovirt_template_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_template_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_user_facts.py b/plugins/modules/ovirt_user_facts.py deleted file mode 120000 index ecfe67d2f8..0000000000 --- a/plugins/modules/ovirt_user_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_user_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_vm_facts.py b/plugins/modules/ovirt_vm_facts.py deleted file mode 120000 index 03c6bed61a..0000000000 --- a/plugins/modules/ovirt_vm_facts.py +++ /dev/null @@ -1 +0,0 @@ 
-./cloud/ovirt/ovirt_vm_facts.py \ No newline at end of file diff --git a/plugins/modules/ovirt_vmpool_facts.py b/plugins/modules/ovirt_vmpool_facts.py deleted file mode 120000 index 82936451da..0000000000 --- a/plugins/modules/ovirt_vmpool_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/ovirt/ovirt_vmpool_facts.py \ No newline at end of file diff --git a/plugins/modules/purefa_facts.py b/plugins/modules/purefa_facts.py deleted file mode 120000 index f4234d9410..0000000000 --- a/plugins/modules/purefa_facts.py +++ /dev/null @@ -1 +0,0 @@ -./storage/purestorage/purefa_facts.py \ No newline at end of file diff --git a/plugins/modules/purefb_facts.py b/plugins/modules/purefb_facts.py deleted file mode 120000 index 49975e2332..0000000000 --- a/plugins/modules/purefb_facts.py +++ /dev/null @@ -1 +0,0 @@ -./storage/purestorage/purefb_facts.py \ No newline at end of file diff --git a/plugins/modules/python_requirements_facts.py b/plugins/modules/python_requirements_facts.py deleted file mode 120000 index 6631804417..0000000000 --- a/plugins/modules/python_requirements_facts.py +++ /dev/null @@ -1 +0,0 @@ -system/python_requirements_facts.py \ No newline at end of file diff --git a/plugins/modules/redfish_facts.py b/plugins/modules/redfish_facts.py deleted file mode 120000 index c317efc6c2..0000000000 --- a/plugins/modules/redfish_facts.py +++ /dev/null @@ -1 +0,0 @@ -remote_management/redfish/redfish_facts.py \ No newline at end of file diff --git a/plugins/modules/remote_management/hpilo/hpilo_facts.py b/plugins/modules/remote_management/hpilo/hpilo_facts.py deleted file mode 120000 index 792c1a7fbc..0000000000 --- a/plugins/modules/remote_management/hpilo/hpilo_facts.py +++ /dev/null @@ -1 +0,0 @@ -hpilo_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/hpilo/hpilo_info.py b/plugins/modules/remote_management/hpilo/hpilo_info.py index af43ca195e..0f204b4a15 100644 --- a/plugins/modules/remote_management/hpilo/hpilo_info.py +++ 
b/plugins/modules/remote_management/hpilo/hpilo_info.py @@ -159,11 +159,6 @@ def main(): ), supports_check_mode=True, ) - is_old_facts = module._name in ('hpilo_facts', 'community.general.hpilo_facts') - if is_old_facts: - module.deprecate("The 'hpilo_facts' module has been renamed to 'hpilo_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 if not HAS_HPILO: module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR) @@ -248,10 +243,7 @@ def main(): # reformat into a text friendly format info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total']) - if is_old_facts: - module.exit_json(ansible_facts=info) - else: - module.exit_json(**info) + module.exit_json(**info) if __name__ == '__main__': diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py b/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py deleted file mode 120000 index 290e891ee9..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py +++ /dev/null @@ -1 +0,0 @@ -oneview_datacenter_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py index 35c2e7acf4..13ab883330 100644 --- a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py +++ b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py @@ -117,11 +117,6 @@ class DatacenterInfoModule(OneViewModuleBase): def __init__(self): super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec) - self.is_old_facts = self.module._name in ('oneview_datacenter_facts', 'community.general.oneview_datacenter_facts') - if self.is_old_facts: - self.module.deprecate("The 'oneview_datacenter_facts' module has been renamed to 'oneview_datacenter_info', " - "and the renamed one no longer returns 
ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 def execute_module(self): @@ -141,11 +136,7 @@ class DatacenterInfoModule(OneViewModuleBase): else: info['datacenters'] = client.get_all(**self.facts_params) - if self.is_old_facts: - return dict(changed=False, - ansible_facts=info) - else: - return dict(changed=False, **info) + return dict(changed=False, **info) def main(): diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py b/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py deleted file mode 120000 index 98e325454c..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py +++ /dev/null @@ -1 +0,0 @@ -oneview_enclosure_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py index 8ee92c689f..1889dc1a4f 100644 --- a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py +++ b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py @@ -164,11 +164,6 @@ class EnclosureInfoModule(OneViewModuleBase): def __init__(self): super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec) - self.is_old_facts = self.module._name in ('oneview_enclosure_facts', 'community.general.oneview_enclosure_facts') - if self.is_old_facts: - self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 def execute_module(self): @@ -184,11 +179,7 @@ class EnclosureInfoModule(OneViewModuleBase): info['enclosures'] = enclosures - if self.is_old_facts: - return dict(changed=False, - ansible_facts=info) - else: - return dict(changed=False, **info) + return dict(changed=False, **info) def _gather_optional_info(self, options, 
enclosure): diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py deleted file mode 120000 index b6dd1014c6..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py +++ /dev/null @@ -1 +0,0 @@ -oneview_ethernet_network_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py index bc8765c352..4021b768f9 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py @@ -115,11 +115,6 @@ class EthernetNetworkInfoModule(OneViewModuleBase): def __init__(self): super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec) - self.is_old_facts = self.module._name in ('oneview_ethernet_network_facts', 'community.general.oneview_ethernet_network_facts') - if self.is_old_facts: - self.module.deprecate("The 'oneview_ethernet_network_facts' module has been renamed to 'oneview_ethernet_network_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 self.resource_client = self.oneview_client.ethernet_networks @@ -135,10 +130,7 @@ class EthernetNetworkInfoModule(OneViewModuleBase): info['ethernet_networks'] = ethernet_networks - if self.is_old_facts: - return dict(changed=False, ansible_facts=info) - else: - return dict(changed=False, **info) + return dict(changed=False, **info) def __gather_optional_info(self, ethernet_network): diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py b/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py deleted file mode 120000 index f7739f3bab..0000000000 --- 
a/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py +++ /dev/null @@ -1 +0,0 @@ -oneview_fc_network_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py index db48f19f84..21d9673b51 100644 --- a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py @@ -84,11 +84,6 @@ class FcNetworkInfoModule(OneViewModuleBase): ) super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec) - self.is_old_facts = self.module._name in ('oneview_fc_network_facts', 'community.general.oneview_fc_network_facts') - if self.is_old_facts: - self.module.deprecate("The 'oneview_fc_network_facts' module has been renamed to 'oneview_fc_network_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 def execute_module(self): @@ -97,10 +92,7 @@ class FcNetworkInfoModule(OneViewModuleBase): else: fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params) - if self.is_old_facts: - return dict(changed=False, ansible_facts=dict(fc_networks=fc_networks)) - else: - return dict(changed=False, fc_networks=fc_networks) + return dict(changed=False, fc_networks=fc_networks) def main(): diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py deleted file mode 120000 index 3ff2b0deaa..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py +++ /dev/null @@ -1 +0,0 @@ -oneview_fcoe_network_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py index e5e1bc08e4..e207670a9a 100644 --- 
a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py @@ -83,11 +83,6 @@ class FcoeNetworkInfoModule(OneViewModuleBase): ) super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec) - self.is_old_facts = self.module._name in ('oneview_fcoe_network_facts', 'community.general.oneview_fcoe_network_facts') - if self.is_old_facts: - self.module.deprecate("The 'oneview_fcoe_network_facts' module has been renamed to 'oneview_fcoe_network_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 def execute_module(self): @@ -96,11 +91,7 @@ class FcoeNetworkInfoModule(OneViewModuleBase): else: fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params) - if self.is_old_facts: - return dict(changed=False, - ansible_facts=dict(fcoe_networks=fcoe_networks)) - else: - return dict(changed=False, fcoe_networks=fcoe_networks) + return dict(changed=False, fcoe_networks=fcoe_networks) def main(): diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py deleted file mode 120000 index 2539865f5f..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py +++ /dev/null @@ -1 +0,0 @@ -oneview_logical_interconnect_group_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py index 3488be92a6..1f7f3c9613 100644 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py @@ -97,11 +97,6 @@ class 
LogicalInterconnectGroupInfoModule(OneViewModuleBase): ) super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec) - self.is_old_facts = self.module._name in ('oneview_logical_interconnect_group_facts', 'community.general.oneview_logical_interconnect_group_facts') - if self.is_old_facts: - self.module.deprecate("The 'oneview_logical_interconnect_group_facts' module has been renamed to 'oneview_logical_interconnect_group_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 def execute_module(self): if self.module.params.get('name'): @@ -109,10 +104,7 @@ class LogicalInterconnectGroupInfoModule(OneViewModuleBase): else: ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params) - if self.is_old_facts: - return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs)) - else: - return dict(changed=False, logical_interconnect_groups=ligs) + return dict(changed=False, logical_interconnect_groups=ligs) def main(): diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_facts.py b/plugins/modules/remote_management/oneview/oneview_network_set_facts.py deleted file mode 120000 index 78f61020d5..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_network_set_facts.py +++ /dev/null @@ -1 +0,0 @@ -oneview_network_set_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/plugins/modules/remote_management/oneview/oneview_network_set_info.py index bfc212d40c..bc76cb36b1 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set_info.py +++ b/plugins/modules/remote_management/oneview/oneview_network_set_info.py @@ -136,11 +136,6 @@ class NetworkSetInfoModule(OneViewModuleBase): def __init__(self): super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec) - self.is_old_facts = 
self.module._name in ('oneview_network_set_facts', 'community.general.oneview_network_set_facts') - if self.is_old_facts: - self.module.deprecate("The 'oneview_network_set_facts' module has been renamed to 'oneview_network_set_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 def execute_module(self): @@ -154,11 +149,7 @@ class NetworkSetInfoModule(OneViewModuleBase): else: network_sets = self.oneview_client.network_sets.get_all(**self.facts_params) - if self.is_old_facts: - return dict(changed=False, - ansible_facts=dict(network_sets=network_sets)) - else: - return dict(changed=False, network_sets=network_sets) + return dict(changed=False, network_sets=network_sets) def main(): diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py b/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py deleted file mode 120000 index d64693b48e..0000000000 --- a/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py +++ /dev/null @@ -1 +0,0 @@ -oneview_san_manager_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py index 2e462b966d..5dbc28afc2 100644 --- a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py +++ b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py @@ -92,11 +92,6 @@ class SanManagerInfoModule(OneViewModuleBase): def __init__(self): super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec) self.resource_client = self.oneview_client.san_managers - self.is_old_facts = self.module._name in ('oneview_san_manager_facts', 'community.general.oneview_san_manager_facts') - if self.is_old_facts: - self.module.deprecate("The 'oneview_san_manager_facts' module has been renamed to 'oneview_san_manager_info', " - "and the renamed 
one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 def execute_module(self): if self.module.params.get('provider_display_name'): @@ -109,10 +104,7 @@ class SanManagerInfoModule(OneViewModuleBase): else: resources = self.oneview_client.san_managers.get_all(**self.facts_params) - if self.is_old_facts: - return dict(changed=False, ansible_facts=dict(san_managers=resources)) - else: - return dict(changed=False, san_managers=resources) + return dict(changed=False, san_managers=resources) def main(): diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_facts.py b/plugins/modules/remote_management/redfish/idrac_redfish_facts.py deleted file mode 120000 index 794ab40206..0000000000 --- a/plugins/modules/remote_management/redfish/idrac_redfish_facts.py +++ /dev/null @@ -1 +0,0 @@ -idrac_redfish_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/plugins/modules/remote_management/redfish/idrac_redfish_info.py index 65fbd5a58b..0033db7384 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_info.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_info.py @@ -193,11 +193,6 @@ def main(): ], supports_check_mode=False ) - is_old_facts = module._name in ('idrac_redfish_facts', 'community.general.idrac_redfish_facts') - if is_old_facts: - module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 category = module.params['category'] command_list = module.params['command'] @@ -239,10 +234,7 @@ def main(): # Return data back or fail with proper message if result['ret'] is True: del result['ret'] - if is_old_facts: - module.exit_json(ansible_facts=dict(redfish_facts=result)) - else: - module.exit_json(redfish_facts=result) + 
module.exit_json(redfish_facts=result) else: module.fail_json(msg=to_native(result['msg'])) diff --git a/plugins/modules/remote_management/redfish/redfish_facts.py b/plugins/modules/remote_management/redfish/redfish_facts.py deleted file mode 120000 index ef039d9a5d..0000000000 --- a/plugins/modules/remote_management/redfish/redfish_facts.py +++ /dev/null @@ -1 +0,0 @@ -redfish_info.py \ No newline at end of file diff --git a/plugins/modules/remote_management/redfish/redfish_info.py b/plugins/modules/remote_management/redfish/redfish_info.py index 782115d464..41d5bfb04a 100644 --- a/plugins/modules/remote_management/redfish/redfish_info.py +++ b/plugins/modules/remote_management/redfish/redfish_info.py @@ -320,11 +320,6 @@ def main(): ], supports_check_mode=False ) - is_old_facts = module._name in ('redfish_facts', 'community.general.redfish_facts') - if is_old_facts: - module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 # admin credentials used for authentication creds = {'user': module.params['username'], @@ -472,10 +467,7 @@ def main(): result["health_report"] = rf_utils.get_multi_manager_health_report() # Return data back - if is_old_facts: - module.exit_json(ansible_facts=dict(redfish_facts=result)) - else: - module.exit_json(redfish_facts=result) + module.exit_json(redfish_facts=result) if __name__ == '__main__': diff --git a/plugins/modules/scaleway_image_facts.py b/plugins/modules/scaleway_image_facts.py deleted file mode 120000 index eea032e70a..0000000000 --- a/plugins/modules/scaleway_image_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_image_facts.py \ No newline at end of file diff --git a/plugins/modules/scaleway_ip_facts.py b/plugins/modules/scaleway_ip_facts.py deleted file mode 120000 index 08fc50d3b9..0000000000 --- a/plugins/modules/scaleway_ip_facts.py +++ /dev/null @@ 
-1 +0,0 @@ -./cloud/scaleway/scaleway_ip_facts.py \ No newline at end of file diff --git a/plugins/modules/scaleway_organization_facts.py b/plugins/modules/scaleway_organization_facts.py deleted file mode 120000 index 71092e2777..0000000000 --- a/plugins/modules/scaleway_organization_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_organization_facts.py \ No newline at end of file diff --git a/plugins/modules/scaleway_security_group_facts.py b/plugins/modules/scaleway_security_group_facts.py deleted file mode 120000 index 992b352e94..0000000000 --- a/plugins/modules/scaleway_security_group_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_security_group_facts.py \ No newline at end of file diff --git a/plugins/modules/scaleway_server_facts.py b/plugins/modules/scaleway_server_facts.py deleted file mode 120000 index e7b44b1f7a..0000000000 --- a/plugins/modules/scaleway_server_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_server_facts.py \ No newline at end of file diff --git a/plugins/modules/scaleway_snapshot_facts.py b/plugins/modules/scaleway_snapshot_facts.py deleted file mode 120000 index f0ea21055c..0000000000 --- a/plugins/modules/scaleway_snapshot_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_snapshot_facts.py \ No newline at end of file diff --git a/plugins/modules/scaleway_volume_facts.py b/plugins/modules/scaleway_volume_facts.py deleted file mode 120000 index 9338d01799..0000000000 --- a/plugins/modules/scaleway_volume_facts.py +++ /dev/null @@ -1 +0,0 @@ -./cloud/scaleway/scaleway_volume_facts.py \ No newline at end of file diff --git a/plugins/modules/smartos_image_facts.py b/plugins/modules/smartos_image_facts.py deleted file mode 120000 index 5c350d8dc8..0000000000 --- a/plugins/modules/smartos_image_facts.py +++ /dev/null @@ -1 +0,0 @@ -cloud/smartos/smartos_image_facts.py \ No newline at end of file diff --git a/plugins/modules/source_control/github/github_webhook_info.py 
b/plugins/modules/source_control/github/github_webhook_info.py index 0fd0b97bc2..2e7012e631 100644 --- a/plugins/modules/source_control/github/github_webhook_info.py +++ b/plugins/modules/source_control/github/github_webhook_info.py @@ -125,9 +125,6 @@ def main(): mutually_exclusive=(('password', 'token'), ), required_one_of=(("password", "token"), ), supports_check_mode=True) - if module._name in ('github_webhook_facts', 'community.general.github_webhook_facts'): - module.deprecate("The 'github_webhook_facts' module has been renamed to 'github_webhook_info'", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 if not HAS_GITHUB: module.fail_json(msg=missing_required_lib('PyGithub'), diff --git a/plugins/modules/storage/glusterfs/gluster_heal_info.py b/plugins/modules/storage/glusterfs/gluster_heal_info.py deleted file mode 100644 index 1ba16121e0..0000000000 --- a/plugins/modules/storage/glusterfs/gluster_heal_info.py +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright: (c) 2016, Red Hat, Inc. -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: gluster_heal_info -short_description: Gather information on self-heal or rebalance status -deprecated: - removed_in: 3.0.0 - why: The gluster modules have migrated to the gluster.gluster collection. - alternative: Use M(gluster.gluster.gluster_heal_info) instead. -author: "Devyani Kota (@devyanikota)" -description: - - Gather facts about either self-heal or rebalance status. - - This module was called C(gluster_heal_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.gluster_heal_info) module no longer returns C(ansible_facts)! -options: - name: - description: - - The volume name. 
- required: true - aliases: ['volume'] - status_filter: - default: "self-heal" - choices: ["self-heal", "rebalance"] - description: - - Determines which facts are to be returned. - - If the C(status_filter) is C(self-heal), status of self-heal, along with the number of files still in process are returned. - - If the C(status_filter) is C(rebalance), rebalance status is returned. -requirements: - - GlusterFS > 3.2 -''' - -EXAMPLES = ''' -- name: Gather self-heal facts about all gluster hosts in the cluster - community.general.gluster_heal_info: - name: test_volume - status_filter: self-heal - register: self_heal_status -- ansible.builtin.debug: - var: self_heal_status - -- name: Gather rebalance facts about all gluster hosts in the cluster - community.general.gluster_heal_info: - name: test_volume - status_filter: rebalance - register: rebalance_status -- ansible.builtin.debug: - var: rebalance_status -''' - -RETURN = ''' -name: - description: GlusterFS volume name - returned: always - type: str -status_filter: - description: Whether self-heal or rebalance status is to be returned - returned: always - type: str -heal_info: - description: List of files that still need healing process - returned: On success - type: list -rebalance_status: - description: Status of rebalance operation - returned: On success - type: list -''' - -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native -from distutils.version import LooseVersion - -glusterbin = '' - - -def run_gluster(gargs, **kwargs): - global glusterbin - global module - args = [glusterbin, '--mode=script'] - args.extend(gargs) - try: - rc, out, err = module.run_command(args, **kwargs) - if rc != 0: - module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % - (' '.join(args), rc, out or err), exception=traceback.format_exc()) - except Exception as e: - module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), - 
to_native(e)), exception=traceback.format_exc()) - return out - - -def get_self_heal_status(name): - out = run_gluster(['volume', 'heal', name, 'info'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')) - raw_out = out.split("\n") - heal_info = [] - # return files that still need healing. - for line in raw_out: - if 'Brick' in line: - br_dict = {} - br_dict['brick'] = line.strip().strip("Brick") - elif 'Status' in line: - br_dict['status'] = line.split(":")[1].strip() - elif 'Number' in line: - br_dict['no_of_entries'] = line.split(":")[1].strip() - elif line.startswith('/') or line.startswith('<') or '\n' in line: - continue - else: - br_dict and heal_info.append(br_dict) - br_dict = {} - return heal_info - - -def get_rebalance_status(name): - out = run_gluster(['volume', 'rebalance', name, 'status'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')) - raw_out = out.split("\n") - rebalance_status = [] - # return the files that are either still 'in progress' state or 'completed'. 
- for line in raw_out: - line = " ".join(line.split()) - line_vals = line.split(" ") - if line_vals[0].startswith('-') or line_vals[0].startswith('Node'): - continue - node_dict = {} - if len(line_vals) == 1 or len(line_vals) == 4: - continue - node_dict['node'] = line_vals[0] - node_dict['rebalanced_files'] = line_vals[1] - node_dict['failures'] = line_vals[4] - if 'in progress' in line: - node_dict['status'] = line_vals[5] + line_vals[6] - rebalance_status.append(node_dict) - elif 'completed' in line: - node_dict['status'] = line_vals[5] - rebalance_status.append(node_dict) - return rebalance_status - - -def is_invalid_gluster_version(module, required_version): - cmd = module.get_bin_path('gluster', True) + ' --version' - result = module.run_command(cmd) - ver_line = result[1].split('\n')[0] - version = ver_line.split(' ')[1] - # If the installed version is less than 3.2, it is an invalid version - # return True - return LooseVersion(version) < LooseVersion(required_version) - - -def main(): - global module - global glusterbin - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True, aliases=['volume']), - status_filter=dict(type='str', default='self-heal', choices=['self-heal', 'rebalance']), - ), - ) - is_old_facts = module._name in ('gluster_heal_facts', 'community.general.gluster_heal_facts') - if is_old_facts: - module.deprecate("The 'gluster_heal_facts' module has been renamed to 'gluster_heal_info', " - "and the renamed one no longer returns ansible_facts", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 - - glusterbin = module.get_bin_path('gluster', True) - required_version = "3.2" - status_filter = module.params['status_filter'] - volume_name = module.params['name'] - heal_info = '' - rebalance_status = '' - - # Verify if required GlusterFS version is installed - if is_invalid_gluster_version(module, required_version): - module.fail_json(msg="GlusterFS version > %s is required" % - 
required_version) - - try: - if status_filter == "self-heal": - heal_info = get_self_heal_status(volume_name) - elif status_filter == "rebalance": - rebalance_status = get_rebalance_status(volume_name) - except Exception as e: - module.fail_json(msg='Error retrieving status: %s' % e, exception=traceback.format_exc()) - - facts = {} - facts['glusterfs'] = {'volume': volume_name, 'status_filter': status_filter, 'heal_info': heal_info, 'rebalance': rebalance_status} - - if is_old_facts: - module.exit_json(ansible_facts=facts) - else: - module.exit_json(**facts) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/glusterfs/gluster_peer.py b/plugins/modules/storage/glusterfs/gluster_peer.py deleted file mode 100644 index 29134abd29..0000000000 --- a/plugins/modules/storage/glusterfs/gluster_peer.py +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright 2015 Nandaja Varma -# Copyright 2018 Red Hat, Inc. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: gluster_peer -short_description: Attach/Detach peers to/from the cluster -deprecated: - removed_in: 3.0.0 - why: The gluster modules have migrated to the gluster.gluster collection. - alternative: Use M(gluster.gluster.gluster_peer) instead. -description: - - Create or diminish a GlusterFS trusted storage pool. A set of nodes can be - added into an existing trusted storage pool or a new storage pool can be - formed. Or, nodes can be removed from an existing trusted storage pool. -author: Sachidananda Urs (@sac) -options: - state: - choices: ["present", "absent"] - default: "present" - description: - - Determines whether the nodes should be attached to the pool or - removed from the pool. If the state is present, nodes will be - attached to the pool. 
If state is absent, nodes will be detached - from the pool. - type: str - nodes: - description: - - List of nodes that have to be probed into the pool. - required: true - type: list - force: - type: bool - default: false - description: - - Applicable only while removing the nodes from the pool. gluster - will refuse to detach a node from the pool if any one of the node - is down, in such cases force can be used. -requirements: - - GlusterFS > 3.2 -notes: - - This module does not support check mode. -''' - -EXAMPLES = ''' -- name: Create a trusted storage pool - community.general.gluster_peer: - state: present - nodes: - - 10.0.1.5 - - 10.0.1.10 - -- name: Delete a node from the trusted storage pool - community.general.gluster_peer: - state: absent - nodes: - - 10.0.1.10 - -- name: Delete a node from the trusted storage pool by force - community.general.gluster_peer: - state: absent - nodes: - - 10.0.0.1 - force: true -''' - -RETURN = ''' -''' - -from ansible.module_utils.basic import AnsibleModule -from distutils.version import LooseVersion - - -class Peer(object): - def __init__(self, module): - self.module = module - self.state = self.module.params['state'] - self.nodes = self.module.params['nodes'] - self.glustercmd = self.module.get_bin_path('gluster', True) - self.lang = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - self.action = '' - self.force = '' - - def gluster_peer_ops(self): - if not self.nodes: - self.module.fail_json(msg="nodes list cannot be empty") - self.force = 'force' if self.module.params.get('force') else '' - if self.state == 'present': - self.nodes = self.get_to_be_probed_hosts(self.nodes) - self.action = 'probe' - # In case of peer probe, we do not need `force' - self.force = '' - else: - self.action = 'detach' - self.call_peer_commands() - - def get_to_be_probed_hosts(self, hosts): - peercmd = [self.glustercmd, 'pool', 'list', '--mode=script'] - rc, output, err = self.module.run_command(peercmd, - environ_update=self.lang) - 
peers_in_cluster = [line.split('\t')[1].strip() for - line in filter(None, output.split('\n')[1:])] - try: - peers_in_cluster.remove('localhost') - except ValueError: - # It is ok not to have localhost in list - pass - hosts_to_be_probed = [host for host in hosts if host not in - peers_in_cluster] - return hosts_to_be_probed - - def call_peer_commands(self): - result = {} - result['msg'] = '' - result['changed'] = False - - for node in self.nodes: - peercmd = [self.glustercmd, 'peer', self.action, node, '--mode=script'] - if self.force: - peercmd.append(self.force) - rc, out, err = self.module.run_command(peercmd, - environ_update=self.lang) - if rc: - result['rc'] = rc - result['msg'] = err - # Fail early, do not wait for the loop to finish - self.module.fail_json(**result) - else: - if 'already in peer' in out or \ - 'localhost not needed' in out: - result['changed'] |= False - else: - result['changed'] = True - self.module.exit_json(**result) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - force=dict(type='bool', required=False, default=False), - nodes=dict(type='list', required=True), - state=dict(type='str', choices=['absent', 'present'], - default='present'), - ), - supports_check_mode=False - ) - pops = Peer(module) - required_version = "3.2" - # Verify if required GlusterFS version is installed - if is_invalid_gluster_version(module, required_version): - module.fail_json(msg="GlusterFS version > %s is required" % - required_version) - pops.gluster_peer_ops() - - -def is_invalid_gluster_version(module, required_version): - cmd = module.get_bin_path('gluster', True) + ' --version' - result = module.run_command(cmd) - ver_line = result[1].split('\n')[0] - version = ver_line.split(' ')[1] - # If the installed version is less than 3.2, it is an invalid version - # return True - return LooseVersion(version) < LooseVersion(required_version) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/storage/glusterfs/gluster_volume.py 
b/plugins/modules/storage/glusterfs/gluster_volume.py deleted file mode 100644 index 1a7f4cfce4..0000000000 --- a/plugins/modules/storage/glusterfs/gluster_volume.py +++ /dev/null @@ -1,608 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright: (c) 2014, Taneli Leppä -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: gluster_volume -short_description: Manage GlusterFS volumes -deprecated: - removed_in: 3.0.0 - why: The gluster modules have migrated to the gluster.gluster collection. - alternative: Use M(gluster.gluster.gluster_volume) instead. -description: - - Create, remove, start, stop and tune GlusterFS volumes -options: - name: - description: - - The volume name. - required: true - aliases: ['volume'] - state: - description: - - Use present/absent ensure if a volume exists or not. - Use started/stopped to control its availability. - required: true - choices: ['absent', 'present', 'started', 'stopped'] - cluster: - description: - - List of hosts to use for probing and brick setup. - host: - description: - - Override local hostname (for peer probing purposes). - replicas: - description: - - Replica count for volume. - arbiters: - description: - - Arbiter count for volume. - stripes: - description: - - Stripe count for volume. - disperses: - description: - - Disperse count for volume. - redundancies: - description: - - Redundancy count for volume. - transport: - description: - - Transport type for volume. - default: tcp - choices: [ tcp, rdma, 'tcp,rdma' ] - bricks: - description: - - Brick paths on servers. Multiple brick paths can be separated by commas. - aliases: [ brick ] - start_on_create: - description: - - Controls whether the volume is started after creation or not. 
- type: bool - default: 'yes' - rebalance: - description: - - Controls whether the cluster is rebalanced after changes. - type: bool - default: 'no' - directory: - description: - - Directory for limit-usage. - options: - description: - - A dictionary/hash with options/settings for the volume. - quota: - description: - - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list). - force: - description: - - If brick is being created in the root partition, module will fail. - Set force to true to override this behaviour. - type: bool - default: false -notes: - - Requires cli tools for GlusterFS on servers. - - Will add new bricks, but not remove them. -author: -- Taneli Leppä (@rosmo) -''' - -EXAMPLES = """ -- name: Create gluster volume - community.general.gluster_volume: - state: present - name: test1 - bricks: /bricks/brick1/g1 - rebalance: yes - cluster: - - 192.0.2.10 - - 192.0.2.11 - run_once: true - -- name: Tune - community.general.gluster_volume: - state: present - name: test1 - options: - performance.cache-size: 256MB - -- name: Set multiple options on GlusterFS volume - community.general.gluster_volume: - state: present - name: test1 - options: - { performance.cache-size: 128MB, - write-behind: 'off', - quick-read: 'on' - } - -- name: Start gluster volume - community.general.gluster_volume: - state: started - name: test1 - -- name: Limit usage - community.general.gluster_volume: - state: present - name: test1 - directory: /foo - quota: 20.0MB - -- name: Stop gluster volume - community.general.gluster_volume: - state: stopped - name: test1 - -- name: Remove gluster volume - community.general.gluster_volume: - state: absent - name: test1 - -- name: Create gluster volume with multiple bricks - community.general.gluster_volume: - state: present - name: test2 - bricks: /bricks/brick1/g2,/bricks/brick2/g2 - cluster: - - 192.0.2.10 - - 192.0.2.11 - run_once: true - -- name: Remove the bricks from gluster volume - 
community.general.gluster_volume: - state: present - name: testvol - bricks: /bricks/brick1/b1,/bricks/brick2/b2 - cluster: - - 10.70.42.85 - force: true - run_once: true - -- name: Reduce cluster configuration - community.general.gluster_volume: - state: present - name: testvol - bricks: /bricks/brick3/b1,/bricks/brick4/b2 - replicas: 2 - cluster: - - 10.70.42.85 - force: true - run_once: true -""" - -import re -import socket -import time -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native - -glusterbin = '' - - -def run_gluster(gargs, **kwargs): - global glusterbin - global module - args = [glusterbin, '--mode=script'] - args.extend(gargs) - try: - rc, out, err = module.run_command(args, **kwargs) - if rc != 0: - module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % - (' '.join(args), rc, out or err), exception=traceback.format_exc()) - except Exception as e: - module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), - to_native(e)), exception=traceback.format_exc()) - return out - - -def run_gluster_nofail(gargs, **kwargs): - global glusterbin - global module - args = [glusterbin] - args.extend(gargs) - rc, out, err = module.run_command(args, **kwargs) - if rc != 0: - return None - return out - - -def get_peers(): - out = run_gluster(['peer', 'status']) - peers = {} - hostname = None - uuid = None - state = None - shortNames = False - for row in out.split('\n'): - if ': ' in row: - key, value = row.split(': ') - if key.lower() == 'hostname': - hostname = value - shortNames = False - if key.lower() == 'uuid': - uuid = value - if key.lower() == 'state': - state = value - peers[hostname] = [uuid, state] - elif row.lower() == 'other names:': - shortNames = True - elif row != '' and shortNames is True: - peers[row] = [uuid, state] - elif row == '': - shortNames = False - return peers - - -def get_volumes(): - out = run_gluster(['volume', 'info']) - - 
volumes = {} - volume = {} - for row in out.split('\n'): - if ': ' in row: - key, value = row.split(': ') - if key.lower() == 'volume name': - volume['name'] = value - volume['options'] = {} - volume['quota'] = False - if key.lower() == 'volume id': - volume['id'] = value - if key.lower() == 'status': - volume['status'] = value - if key.lower() == 'transport-type': - volume['transport'] = value - if value.lower().endswith(' (arbiter)'): - if 'arbiters' not in volume: - volume['arbiters'] = [] - value = value[:-10] - volume['arbiters'].append(value) - elif key.lower() == 'number of bricks': - volume['replicas'] = value[-1:] - if key.lower() != 'bricks' and key.lower()[:5] == 'brick': - if 'bricks' not in volume: - volume['bricks'] = [] - volume['bricks'].append(value) - # Volume options - if '.' in key: - if 'options' not in volume: - volume['options'] = {} - volume['options'][key] = value - if key == 'features.quota' and value == 'on': - volume['quota'] = True - else: - if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:': - if len(volume) > 0: - volumes[volume['name']] = volume - volume = {} - return volumes - - -def get_quotas(name, nofail): - quotas = {} - if nofail: - out = run_gluster_nofail(['volume', 'quota', name, 'list']) - if not out: - return quotas - else: - out = run_gluster(['volume', 'quota', name, 'list']) - for row in out.split('\n'): - if row[:1] == '/': - q = re.split(r'\s+', row) - quotas[q[0]] = q[1] - return quotas - - -def wait_for_peer(host): - for x in range(0, 4): - peers = get_peers() - if host in peers and peers[host][1].lower().find('peer in cluster') != -1: - return True - time.sleep(1) - return False - - -def probe(host, myhostname): - global module - out = run_gluster(['peer', 'probe', host]) - if out.find('localhost') == -1 and not wait_for_peer(host): - module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname)) - - -def probe_all_peers(hosts, peers, myhostname): - for host in hosts: - host = 
host.strip() # Clean up any extra space for exact comparison - if host not in peers: - probe(host, myhostname) - - -def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force): - args = ['volume', 'create'] - args.append(name) - if stripe: - args.append('stripe') - args.append(str(stripe)) - if replica: - args.append('replica') - args.append(str(replica)) - if arbiter: - args.append('arbiter') - args.append(str(arbiter)) - if disperse: - args.append('disperse') - args.append(str(disperse)) - if redundancy: - args.append('redundancy') - args.append(str(redundancy)) - args.append('transport') - args.append(transport) - for brick in bricks: - for host in hosts: - args.append(('%s:%s' % (host, brick))) - if force: - args.append('force') - run_gluster(args) - - -def start_volume(name): - run_gluster(['volume', 'start', name]) - - -def stop_volume(name): - run_gluster(['volume', 'stop', name]) - - -def set_volume_option(name, option, parameter): - run_gluster(['volume', 'set', name, option, parameter]) - - -def add_bricks(name, new_bricks, stripe, replica, force): - args = ['volume', 'add-brick', name] - if stripe: - args.append('stripe') - args.append(str(stripe)) - if replica: - args.append('replica') - args.append(str(replica)) - args.extend(new_bricks) - if force: - args.append('force') - run_gluster(args) - - -def remove_bricks(name, removed_bricks, force): - # max-tries=12 with default_interval=10 secs - max_tries = 12 - retries = 0 - success = False - args = ['volume', 'remove-brick', name] - args.extend(removed_bricks) - # create a copy of args to use for commit operation - args_c = args[:] - args.append('start') - run_gluster(args) - # remove-brick operation needs to be followed by commit operation. 
- if not force: - module.fail_json(msg="Force option is mandatory.") - else: - while retries < max_tries: - last_brick = removed_bricks[-1] - out = run_gluster(['volume', 'remove-brick', name, last_brick, 'status']) - for row in out.split('\n')[1:]: - if 'completed' in row: - # remove-brick successful, call commit operation. - args_c.append('commit') - out = run_gluster(args_c) - success = True - break - else: - time.sleep(10) - if success: - break - retries += 1 - if not success: - # remove-brick still in process, needs to be committed after completion. - module.fail_json(msg="Exceeded number of tries, check remove-brick status.\n" - "Commit operation needs to be followed.") - - -def reduce_config(name, removed_bricks, replicas, force): - out = run_gluster(['volume', 'heal', name, 'info']) - summary = out.split("\n") - for line in summary: - if 'Number' in line and int(line.split(":")[1].strip()) != 0: - module.fail_json(msg="Operation aborted, self-heal in progress.") - args = ['volume', 'remove-brick', name, 'replica', replicas] - args.extend(removed_bricks) - if force: - args.append('force') - else: - module.fail_json(msg="Force option is mandatory") - run_gluster(args) - - -def do_rebalance(name): - run_gluster(['volume', 'rebalance', name, 'start']) - - -def enable_quota(name): - run_gluster(['volume', 'quota', name, 'enable']) - - -def set_quota(name, directory, value): - run_gluster(['volume', 'quota', name, 'limit-usage', directory, value]) - - -def main(): - # MAIN - - global module - module = AnsibleModule( - argument_spec=dict( - name=dict(type='str', required=True, aliases=['volume']), - state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']), - cluster=dict(type='list'), - host=dict(type='str'), - stripes=dict(type='int'), - replicas=dict(type='int'), - arbiters=dict(type='int'), - disperses=dict(type='int'), - redundancies=dict(type='int'), - transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 
'tcp,rdma']), - bricks=dict(type='str', aliases=['brick']), - start_on_create=dict(type='bool', default=True), - rebalance=dict(type='bool', default=False), - options=dict(type='dict', default={}), - quota=dict(type='str'), - directory=dict(type='str'), - force=dict(type='bool', default=False), - ), - ) - - global glusterbin - glusterbin = module.get_bin_path('gluster', True) - - changed = False - - action = module.params['state'] - volume_name = module.params['name'] - cluster = module.params['cluster'] - brick_paths = module.params['bricks'] - stripes = module.params['stripes'] - replicas = module.params['replicas'] - arbiters = module.params['arbiters'] - disperses = module.params['disperses'] - redundancies = module.params['redundancies'] - transport = module.params['transport'] - myhostname = module.params['host'] - start_on_create = module.boolean(module.params['start_on_create']) - rebalance = module.boolean(module.params['rebalance']) - force = module.boolean(module.params['force']) - - if not myhostname: - myhostname = socket.gethostname() - - # Clean up if last element is empty. Consider that yml can look like this: - # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}" - if cluster is not None and len(cluster) > 1 and cluster[-1] == '': - cluster = cluster[0:-1] - - if cluster is None: - cluster = [] - - if brick_paths is not None and "," in brick_paths: - brick_paths = brick_paths.split(",") - else: - brick_paths = [brick_paths] - - options = module.params['options'] - quota = module.params['quota'] - directory = module.params['directory'] - - # get current state info - peers = get_peers() - volumes = get_volumes() - quotas = {} - if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started': - quotas = get_quotas(volume_name, True) - - # do the work! 
- if action == 'absent': - if volume_name in volumes: - if volumes[volume_name]['status'].lower() != 'stopped': - stop_volume(volume_name) - run_gluster(['volume', 'delete', volume_name]) - changed = True - - if action == 'present': - probe_all_peers(cluster, peers, myhostname) - - # create if it doesn't exist - if volume_name not in volumes: - create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force) - volumes = get_volumes() - changed = True - - if volume_name in volumes: - if volumes[volume_name]['status'].lower() != 'started' and start_on_create: - start_volume(volume_name) - changed = True - - # switch bricks - new_bricks = [] - removed_bricks = [] - all_bricks = [] - bricks_in_volume = volumes[volume_name]['bricks'] - - for node in cluster: - for brick_path in brick_paths: - brick = '%s:%s' % (node, brick_path) - all_bricks.append(brick) - if brick not in bricks_in_volume: - new_bricks.append(brick) - - if not new_bricks and len(all_bricks) > 0 and \ - len(all_bricks) < len(bricks_in_volume): - for brick in bricks_in_volume: - if brick not in all_bricks: - removed_bricks.append(brick) - - if new_bricks: - add_bricks(volume_name, new_bricks, stripes, replicas, force) - changed = True - - if removed_bricks: - if replicas and int(replicas) < int(volumes[volume_name]['replicas']): - reduce_config(volume_name, removed_bricks, str(replicas), force) - else: - remove_bricks(volume_name, removed_bricks, force) - changed = True - - # handle quotas - if quota: - if not volumes[volume_name]['quota']: - enable_quota(volume_name) - quotas = get_quotas(volume_name, False) - if directory not in quotas or quotas[directory] != quota: - set_quota(volume_name, directory, quota) - changed = True - - # set options - for option in options.keys(): - if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]: - set_volume_option(volume_name, option, options[option]) - 
changed = True - - else: - module.fail_json(msg='failed to create volume %s' % volume_name) - - if action != 'absent' and volume_name not in volumes: - module.fail_json(msg='volume not found %s' % volume_name) - - if action == 'started': - if volumes[volume_name]['status'].lower() != 'started': - start_volume(volume_name) - changed = True - - if action == 'stopped': - if volumes[volume_name]['status'].lower() != 'stopped': - stop_volume(volume_name) - changed = True - - if changed: - volumes = get_volumes() - if rebalance: - do_rebalance(volume_name) - - facts = {} - facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas} - - module.exit_json(changed=changed, ansible_facts=facts) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/netapp/na_ontap_gather_facts.py b/plugins/modules/storage/netapp/na_ontap_gather_facts.py deleted file mode 100644 index c7b541ff08..0000000000 --- a/plugins/modules/storage/netapp/na_ontap_gather_facts.py +++ /dev/null @@ -1,613 +0,0 @@ -#!/usr/bin/python - -# (c) 2018 Piotr Olczak -# (c) 2018-2019, NetApp, Inc -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: na_ontap_gather_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favour of C(_info) module. - alternative: Use M(netapp.ontap.na_ontap_info) instead. -author: Piotr Olczak (@dprts) -extends_documentation_fragment: -- community.general._netapp.na_ontap - -short_description: NetApp information gatherer -description: - - This module allows you to gather various information about ONTAP configuration -requirements: - - netapp_lib -options: - state: - description: - - Returns "info" - default: info - choices: [info] - type: str - gather_subset: - description: - - When supplied, this argument will restrict the facts collected - to a given subset. 
Possible values for this argument include - C(aggregate_info), C(cluster_node_info), C(igroup_info), C(lun_info), C(net_dns_info), - C(net_ifgrp_info), - C(net_interface_info), C(net_port_info), C(nvme_info), C(nvme_interface_info), - C(nvme_namespace_info), C(nvme_subsystem_info), C(ontap_version), - C(qos_adaptive_policy_info), C(qos_policy_info), C(security_key_manager_key_info), - C(security_login_account_info), C(storage_failover_info), C(volume_info), - C(vserver_info), C(vserver_login_banner_info), C(vserver_motd_info), C(vserver_nfs_info) - Can specify a list of values to include a larger subset. Values can also be used - with an initial C(M(!)) to specify that a specific subset should - not be collected. - - nvme is supported with ONTAP 9.4 onwards. - - use C(help) to get a list of supported facts for your system. - default: all - type: list - elements: str -''' - -EXAMPLES = ''' -- name: Get NetApp info (Password Authentication) - community.general.na_ontap_gather_facts: - state: info - hostname: "na-vsim" - username: "admin" - password: "admins_password" -- ansible.builtin.debug: - var: ontap_facts -- name: Limit Fact Gathering to Aggregate Information - community.general.na_ontap_gather_facts: - state: info - hostname: "na-vsim" - username: "admin" - password: "admins_password" - gather_subset: "aggregate_info" -- name: Limit Fact Gathering to Volume and Lun Information - community.general.na_ontap_gather_facts: - state: info - hostname: "na-vsim" - username: "admin" - password: "admins_password" - gather_subset: - - volume_info - - lun_info -- name: Gather all facts except for volume and lun information - community.general.na_ontap_gather_facts: - state: info - hostname: "na-vsim" - username: "admin" - password: "admins_password" - gather_subset: - - "!volume_info" - - "!lun_info" -''' - -RETURN = ''' -ontap_facts: - description: Returns various information about NetApp cluster configuration - returned: always - type: dict - sample: '{ - "ontap_facts": 
{ - "aggregate_info": {...}, - "cluster_node_info": {...}, - "net_dns_info": {...}, - "net_ifgrp_info": {...}, - "net_interface_info": {...}, - "net_port_info": {...}, - "security_key_manager_key_info": {...}, - "security_login_account_info": {...}, - "volume_info": {...}, - "lun_info": {...}, - "storage_failover_info": {...}, - "vserver_login_banner_info": {...}, - "vserver_motd_info": {...}, - "vserver_info": {...}, - "vserver_nfs_info": {...}, - "ontap_version": {...}, - "igroup_info": {...}, - "qos_policy_info": {...}, - "qos_adaptive_policy_info": {...} - }' -''' - -import traceback -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native -import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils - -try: - import xmltodict - HAS_XMLTODICT = True -except ImportError: - HAS_XMLTODICT = False - -try: - import json - HAS_JSON = True -except ImportError: - HAS_JSON = False - -HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() - - -class NetAppONTAPGatherFacts(object): - '''Class with gather facts methods''' - - def __init__(self, module): - self.module = module - self.netapp_info = dict() - - # thanks to coreywan (https://github.com/ansible/ansible/pull/47016) - # for starting this - # min_version identifies the ontapi version which supports this ZAPI - # use 0 if it is supported since 9.1 - self.fact_subsets = { - 'net_dns_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'net-dns-get-iter', - 'attribute': 'net-dns-info', - 'field': 'vserver-name', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'net_interface_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'net-interface-get-iter', - 'attribute': 'net-interface-info', - 'field': 'interface-name', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'net_port_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'net-port-get-iter', - 
'attribute': 'net-port-info', - 'field': ('node', 'port'), - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'cluster_node_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'cluster-node-get-iter', - 'attribute': 'cluster-node-info', - 'field': 'node-name', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'security_login_account_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'security-login-get-iter', - 'attribute': 'security-login-account-info', - 'field': ('vserver', 'user-name', 'application', 'authentication-method'), - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'aggregate_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'aggr-get-iter', - 'attribute': 'aggr-attributes', - 'field': 'aggregate-name', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'volume_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'volume-get-iter', - 'attribute': 'volume-attributes', - 'field': ('name', 'owning-vserver-name'), - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'lun_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'lun-get-iter', - 'attribute': 'lun-info', - 'field': 'path', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'storage_failover_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'cf-get-iter', - 'attribute': 'storage-failover-info', - 'field': 'node', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'vserver_motd_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'vserver-motd-get-iter', - 'attribute': 'vserver-motd-info', - 'field': 'vserver', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'vserver_login_banner_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'vserver-login-banner-get-iter', - 'attribute': 
'vserver-login-banner-info', - 'field': 'vserver', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'security_key_manager_key_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'security-key-manager-key-get-iter', - 'attribute': 'security-key-manager-key-info', - 'field': ('node', 'key-id'), - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'vserver_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'vserver-get-iter', - 'attribute': 'vserver-info', - 'field': 'vserver-name', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'vserver_nfs_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'nfs-service-get-iter', - 'attribute': 'nfs-info', - 'field': 'vserver', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'net_ifgrp_info': { - 'method': self.get_ifgrp_info, - 'kwargs': {}, - 'min_version': '0', - }, - 'ontap_version': { - 'method': self.ontapi, - 'kwargs': {}, - 'min_version': '0', - }, - 'system_node_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'system-node-get-iter', - 'attribute': 'node-details-info', - 'field': 'node', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'igroup_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'igroup-get-iter', - 'attribute': 'initiator-group-info', - 'field': ('vserver', 'initiator-group-name'), - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - 'qos_policy_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'qos-policy-group-get-iter', - 'attribute': 'qos-policy-group-info', - 'field': 'policy-group', - 'query': {'max-records': '1024'}, - }, - 'min_version': '0', - }, - # supported in ONTAP 9.3 and onwards - 'qos_adaptive_policy_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'qos-adaptive-policy-group-get-iter', - 'attribute': 
'qos-adaptive-policy-group-info', - 'field': 'policy-group', - 'query': {'max-records': '1024'}, - }, - 'min_version': '130', - }, - # supported in ONTAP 9.4 and onwards - 'nvme_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'nvme-get-iter', - 'attribute': 'nvme-target-service-info', - 'field': 'vserver', - 'query': {'max-records': '1024'}, - }, - 'min_version': '140', - }, - 'nvme_interface_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'nvme-interface-get-iter', - 'attribute': 'nvme-interface-info', - 'field': 'vserver', - 'query': {'max-records': '1024'}, - }, - 'min_version': '140', - }, - 'nvme_subsystem_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'nvme-subsystem-get-iter', - 'attribute': 'nvme-subsystem-info', - 'field': 'subsystem', - 'query': {'max-records': '1024'}, - }, - 'min_version': '140', - }, - 'nvme_namespace_info': { - 'method': self.get_generic_get_iter, - 'kwargs': { - 'call': 'nvme-namespace-get-iter', - 'attribute': 'nvme-namespace-info', - 'field': 'path', - 'query': {'max-records': '1024'}, - }, - 'min_version': '140', - }, - } - - if HAS_NETAPP_LIB is False: - self.module.fail_json(msg="the python NetApp-Lib module is required") - else: - self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) - - def ontapi(self): - '''Method to get ontapi version''' - - api = 'system-get-ontapi-version' - api_call = netapp_utils.zapi.NaElement(api) - try: - results = self.server.invoke_successfully(api_call, enable_tunneling=False) - ontapi_version = results.get_child_content('minor-version') - return ontapi_version if ontapi_version is not None else '0' - except netapp_utils.zapi.NaApiError as error: - self.module.fail_json(msg="Error calling API %s: %s" % - (api, to_native(error)), exception=traceback.format_exc()) - - def call_api(self, call, query=None): - '''Main method to run an API call''' - - api_call = netapp_utils.zapi.NaElement(call) - result = None - - if 
query: - for key, val in query.items(): - # Can val be nested? - api_call.add_new_child(key, val) - try: - result = self.server.invoke_successfully(api_call, enable_tunneling=False) - return result - except netapp_utils.zapi.NaApiError as error: - if call in ['security-key-manager-key-get-iter']: - return result - else: - self.module.fail_json(msg="Error calling API %s: %s" - % (call, to_native(error)), exception=traceback.format_exc()) - - def get_ifgrp_info(self): - '''Method to get network port ifgroups info''' - - try: - net_port_info = self.netapp_info['net_port_info'] - except KeyError: - net_port_info_calls = self.fact_subsets['net_port_info'] - net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs']) - interfaces = net_port_info.keys() - - ifgrps = [] - for ifn in interfaces: - if net_port_info[ifn]['port_type'] == 'if_group': - ifgrps.append(ifn) - - net_ifgrp_info = dict() - for ifgrp in ifgrps: - query = dict() - query['node'], query['ifgrp-name'] = ifgrp.split(':') - - tmp = self.get_generic_get_iter('net-port-ifgrp-get', field=('node', 'ifgrp-name'), - attribute='net-ifgrp-info', query=query) - net_ifgrp_info = net_ifgrp_info.copy() - net_ifgrp_info.update(tmp) - return net_ifgrp_info - - def get_generic_get_iter(self, call, attribute=None, field=None, query=None): - '''Method to run a generic get-iter call''' - - generic_call = self.call_api(call, query) - - if call == 'net-port-ifgrp-get': - children = 'attributes' - else: - children = 'attributes-list' - - if generic_call is None: - return None - - if field is None: - out = [] - else: - out = {} - - attributes_list = generic_call.get_child_by_name(children) - - if attributes_list is None: - return None - - for child in attributes_list.get_children(): - dic = xmltodict.parse(child.to_string(), xml_attribs=False) - - if attribute is not None: - dic = dic[attribute] - - if isinstance(field, str): - unique_key = _finditem(dic, field) - out = out.copy() - out.update({unique_key: 
convert_keys(json.loads(json.dumps(dic)))}) - elif isinstance(field, tuple): - unique_key = ':'.join([_finditem(dic, el) for el in field]) - out = out.copy() - out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))}) - else: - out.append(convert_keys(json.loads(json.dumps(dic)))) - - return out - - def get_all(self, gather_subset): - '''Method to get all subsets''' - - results = netapp_utils.get_cserver(self.server) - cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results) - netapp_utils.ems_log_event("na_ontap_gather_facts", cserver) - - self.netapp_info['ontap_version'] = self.ontapi() - - run_subset = self.get_subset(gather_subset, self.netapp_info['ontap_version']) - if 'help' in gather_subset: - self.netapp_info['help'] = sorted(run_subset) - else: - for subset in run_subset: - call = self.fact_subsets[subset] - self.netapp_info[subset] = call['method'](**call['kwargs']) - - return self.netapp_info - - def get_subset(self, gather_subset, version): - '''Method to get a single subset''' - - runable_subsets = set() - exclude_subsets = set() - usable_subsets = [key for key in self.fact_subsets.keys() if version >= self.fact_subsets[key]['min_version']] - if 'help' in gather_subset: - return usable_subsets - for subset in gather_subset: - if subset == 'all': - runable_subsets.update(usable_subsets) - return runable_subsets - if subset.startswith('!'): - subset = subset[1:] - if subset == 'all': - return set() - exclude = True - else: - exclude = False - - if subset not in usable_subsets: - if subset not in self.fact_subsets.keys(): - self.module.fail_json(msg='Bad subset: %s' % subset) - self.module.fail_json(msg='Remote system at version %s does not support %s' % - (version, subset)) - - if exclude: - exclude_subsets.add(subset) - else: - runable_subsets.add(subset) - - if not runable_subsets: - runable_subsets.update(usable_subsets) - - runable_subsets.difference_update(exclude_subsets) - - return runable_subsets - - -# 
https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary -def __finditem(obj, key): - - if key in obj: - return obj[key] - for dummy, val in obj.items(): - if isinstance(val, dict): - item = __finditem(val, key) - if item is not None: - return item - return None - - -def _finditem(obj, key): - - value = __finditem(obj, key) - if value is not None: - return value - raise KeyError(key) - - -def convert_keys(d_param): - '''Method to convert hyphen to underscore''' - - out = {} - if isinstance(d_param, dict): - for key, val in d_param.items(): - val = convert_keys(val) - out[key.replace('-', '_')] = val - else: - return d_param - return out - - -def main(): - '''Execute action''' - - argument_spec = netapp_utils.na_ontap_host_argument_spec() - argument_spec.update(dict( - state=dict(default='info', choices=['info']), - gather_subset=dict(default=['all'], type='list', elements='str'), - )) - - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True - ) - - if not HAS_XMLTODICT: - module.fail_json(msg="xmltodict missing") - - if not HAS_JSON: - module.fail_json(msg="json missing") - - state = module.params['state'] - gather_subset = module.params['gather_subset'] - if gather_subset is None: - gather_subset = ['all'] - gf_obj = NetAppONTAPGatherFacts(module) - gf_all = gf_obj.get_all(gather_subset) - result = {'state': state, 'changed': False} - module.exit_json(ansible_facts={'ontap_facts': gf_all}, **result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/purestorage/purefa_facts.py b/plugins/modules/storage/purestorage/purefa_facts.py deleted file mode 100644 index 5e8b593260..0000000000 --- a/plugins/modules/storage/purestorage/purefa_facts.py +++ /dev/null @@ -1,858 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2018, Simon Dodsley (simon@purestorage.com) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ 
import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: purefa_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favor of C(_info) module. - alternative: Use M(purestorage.flasharray.purefa_info) instead. -short_description: Collect facts from Pure Storage FlashArray -description: - - Collect facts information from a Pure Storage Flasharray running the - Purity//FA operating system. By default, the module will collect basic - fact information including hosts, host groups, protection - groups and volume counts. Additional fact information can be collected - based on the configured set of arguments. -author: - - Pure Storage ansible Team (@sdodsley) -options: - gather_subset: - description: - - When supplied, this argument will define the facts to be collected. - Possible values for this include all, minimum, config, performance, - capacity, network, subnet, interfaces, hgroups, pgroups, hosts, - admins, volumes, snapshots, pods, vgroups, offload, apps and arrays. 
- type: list - required: false - default: minimum -extends_documentation_fragment: -- community.general.purestorage.fa - -''' - -EXAMPLES = r''' -- name: Collect default set of facts - community.general.purefa_facts: - fa_url: 10.10.10.2 - api_token: e31060a7-21fc-e277-6240-25983c6c4592 - -- name: Collect configuration and capacity facts - community.general.purefa_facts: - gather_subset: - - config - - capacity - fa_url: 10.10.10.2 - api_token: e31060a7-21fc-e277-6240-25983c6c4592 - -- name: Collect all facts - community.general.purefa_facts: - gather_subset: - - all - fa_url: 10.10.10.2 - api_token: e31060a7-21fc-e277-6240-25983c6c4592 -''' - -RETURN = r''' -ansible_facts: - description: Returns the facts collected from the FlashArray - returned: always - type: complex - sample: { - "capacity": {}, - "config": { - "directory_service": { - "array_admin_group": null, - "base_dn": null, - "bind_password": null, - "bind_user": null, - "check_peer": false, - "enabled": false, - "group_base": null, - "readonly_group": null, - "storage_admin_group": null, - "uri": [] - }, - "dns": { - "domain": "domain.com", - "nameservers": [ - "8.8.8.8", - "8.8.4.4" - ] - }, - "ntp": [ - "0.ntp.pool.org", - "1.ntp.pool.org", - "2.ntp.pool.org", - "3.ntp.pool.org" - ], - "smtp": [ - { - "enabled": true, - "name": "alerts@acme.com" - }, - { - "enabled": true, - "name": "user@acme.com" - } - ], - "snmp": [ - { - "auth_passphrase": null, - "auth_protocol": null, - "community": null, - "host": "localhost", - "name": "localhost", - "privacy_passphrase": null, - "privacy_protocol": null, - "user": null, - "version": "v2c" - } - ], - "ssl_certs": { - "country": null, - "email": null, - "issued_by": "", - "issued_to": "", - "key_size": 2048, - "locality": null, - "organization": "Acme Storage, Inc.", - "organizational_unit": "Acme Storage, Inc.", - "state": null, - "status": "self-signed", - "valid_from": "2017-08-11T23:09:06Z", - "valid_to": "2027-08-09T23:09:06Z" - }, - "syslog": [] - }, - 
"default": { - "array_name": "flasharray1", - "connected_arrays": 1, - "hostgroups": 0, - "hosts": 10, - "pods": 3, - "protection_groups": 1, - "purity_version": "5.0.4", - "snapshots": 1, - "volume_groups": 2 - }, - "hgroups": {}, - "hosts": { - "host1": { - "hgroup": null, - "iqn": [ - "iqn.1994-05.com.redhat:2f6f5715a533" - ], - "wwn": [] - }, - "host2": { - "hgroup": null, - "iqn": [ - "iqn.1994-05.com.redhat:d17fb13fe0b" - ], - "wwn": [] - }, - "host3": { - "hgroup": null, - "iqn": [ - "iqn.1994-05.com.redhat:97b1351bfb2" - ], - "wwn": [] - }, - "host4": { - "hgroup": null, - "iqn": [ - "iqn.1994-05.com.redhat:dd84e9a7b2cb" - ], - "wwn": [ - "10000000C96C48D1", - "10000000C96C48D2" - ] - } - }, - "interfaces": { - "CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682", - "CT0.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682", - "CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682", - "CT1.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682" - }, - "network": { - "ct0.eth0": { - "address": "10.10.10.10", - "gateway": "10.10.10.1", - "hwaddr": "ec:f4:bb:c8:8a:04", - "mtu": 1500, - "netmask": "255.255.255.0", - "services": [ - "management" - ], - "speed": 1000000000 - }, - "ct0.eth2": { - "address": "10.10.10.11", - "gateway": null, - "hwaddr": "ec:f4:bb:c8:8a:00", - "mtu": 1500, - "netmask": "255.255.255.0", - "services": [ - "replication" - ], - "speed": 10000000000 - }, - "ct0.eth3": { - "address": "10.10.10.12", - "gateway": null, - "hwaddr": "ec:f4:bb:c8:8a:02", - "mtu": 1500, - "netmask": "255.255.255.0", - "services": [ - "replication" - ], - "speed": 10000000000 - }, - "ct0.eth4": { - "address": "10.10.10.13", - "gateway": null, - "hwaddr": "90:e2:ba:83:79:0c", - "mtu": 1500, - "netmask": "255.255.255.0", - "services": [ - "iscsi" - ], - "speed": 10000000000 - }, - "ct0.eth5": { - "address": "10.10.10.14", - "gateway": null, - "hwaddr": "90:e2:ba:83:79:0d", - "mtu": 1500, - "netmask": 
"255.255.255.0", - "services": [ - "iscsi" - ], - "speed": 10000000000 - }, - "vir0": { - "address": "10.10.10.20", - "gateway": "10.10.10.1", - "hwaddr": "fe:ba:e9:e7:6b:0f", - "mtu": 1500, - "netmask": "255.255.255.0", - "services": [ - "management" - ], - "speed": 1000000000 - } - }, - "offload": { - "nfstarget": { - "address": "10.0.2.53", - "mount_options": null, - "mount_point": "/offload", - "protocol": "nfs", - "status": "scanning" - } - }, - "performance": { - "input_per_sec": 8191, - "output_per_sec": 0, - "queue_depth": 1, - "reads_per_sec": 0, - "san_usec_per_write_op": 15, - "usec_per_read_op": 0, - "usec_per_write_op": 642, - "writes_per_sec": 2 - }, - "pgroups": { - "consisgroup-07b6b983-986e-46f5-bdc3-deaa3dbb299e-cinder": { - "hgroups": null, - "hosts": null, - "source": "host1", - "targets": null, - "volumes": [ - "volume-1" - ] - } - }, - "pods": { - "srm-pod": { - "arrays": [ - { - "array_id": "52595f7e-b460-4b46-8851-a5defd2ac192", - "mediator_status": "online", - "name": "sn1-405-c09-37", - "status": "online" - }, - { - "array_id": "a2c32301-f8a0-4382-949b-e69b552ce8ca", - "mediator_status": "online", - "name": "sn1-420-c11-31", - "status": "online" - } - ], - "source": null - } - }, - "snapshots": { - "consisgroup.cgsnapshot": { - "created": "2018-03-28T09:34:02Z", - "size": 13958643712, - "source": "volume-1" - } - }, - "subnet": {}, - "vgroups": { - "vvol--vSphere-HA-0ffc7dd1-vg": { - "volumes": [ - "vvol--vSphere-HA-0ffc7dd1-vg/Config-aad5d7c6" - ] - } - }, - "volumes": { - "ansible_data": { - "bandwidth": null, - "hosts": [ - [ - "host1", - 1 - ] - ], - "serial": "43BE47C12334399B000114A6", - "size": 1099511627776, - "source": null - } - } - } -''' - - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.pure import get_system, purefa_argument_spec - - -ADMIN_API_VERSION = '1.14' -S3_REQUIRED_API_VERSION = '1.16' -LATENCY_REQUIRED_API_VERSION = '1.16' 
-AC_REQUIRED_API_VERSION = '1.14' -CAP_REQUIRED_API_VERSION = '1.6' -SAN_REQUIRED_API_VERSION = '1.10' -NVME_API_VERSION = '1.16' -PREFERRED_API_VERSION = '1.15' -CONN_STATUS_API_VERSION = '1.17' - - -def generate_default_dict(array): - default_facts = {} - defaults = array.get() - api_version = array._list_available_rest_versions() - if AC_REQUIRED_API_VERSION in api_version: - default_facts['volume_groups'] = len(array.list_vgroups()) - default_facts['connected_arrays'] = len(array.list_array_connections()) - default_facts['pods'] = len(array.list_pods()) - default_facts['connection_key'] = array.get(connection_key=True)['connection_key'] - hosts = array.list_hosts() - admins = array.list_admins() - snaps = array.list_volumes(snap=True, pending=True) - pgroups = array.list_pgroups(pending=True) - hgroups = array.list_hgroups() - # Old FA arrays only report model from the primary controller - ct0_model = array.get_hardware('CT0')['model'] - if ct0_model: - model = ct0_model - else: - ct1_model = array.get_hardware('CT1')['model'] - model = ct1_model - default_facts['array_model'] = model - default_facts['array_name'] = defaults['array_name'] - default_facts['purity_version'] = defaults['version'] - default_facts['hosts'] = len(hosts) - default_facts['snapshots'] = len(snaps) - default_facts['protection_groups'] = len(pgroups) - default_facts['hostgroups'] = len(hgroups) - default_facts['admins'] = len(admins) - return default_facts - - -def generate_perf_dict(array): - perf_facts = {} - api_version = array._list_available_rest_versions() - if LATENCY_REQUIRED_API_VERSION in api_version: - latency_info = array.get(action='monitor', latency=True)[0] - perf_info = array.get(action='monitor')[0] - # IOPS - perf_facts['writes_per_sec'] = perf_info['writes_per_sec'] - perf_facts['reads_per_sec'] = perf_info['reads_per_sec'] - - # Bandwidth - perf_facts['input_per_sec'] = perf_info['input_per_sec'] - perf_facts['output_per_sec'] = perf_info['output_per_sec'] - - # 
Latency - if LATENCY_REQUIRED_API_VERSION in api_version: - perf_facts['san_usec_per_read_op'] = latency_info['san_usec_per_read_op'] - perf_facts['san_usec_per_write_op'] = latency_info['san_usec_per_write_op'] - perf_facts['queue_usec_per_read_op'] = latency_info['queue_usec_per_read_op'] - perf_facts['queue_usec_per_write_op'] = latency_info['queue_usec_per_write_op'] - perf_facts['qos_rate_limit_usec_per_read_op'] = latency_info['qos_rate_limit_usec_per_read_op'] - perf_facts['qos_rate_limit_usec_per_write_op'] = latency_info['qos_rate_limit_usec_per_write_op'] - perf_facts['local_queue_usec_per_op'] = perf_info['local_queue_usec_per_op'] - perf_facts['usec_per_read_op'] = perf_info['usec_per_read_op'] - perf_facts['usec_per_write_op'] = perf_info['usec_per_write_op'] - perf_facts['queue_depth'] = perf_info['queue_depth'] - return perf_facts - - -def generate_config_dict(array): - config_facts = {} - api_version = array._list_available_rest_versions() - # DNS - config_facts['dns'] = array.get_dns() - # SMTP - config_facts['smtp'] = array.list_alert_recipients() - # SNMP - config_facts['snmp'] = array.list_snmp_managers() - config_facts['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id'] - # DS - config_facts['directory_service'] = array.get_directory_service() - if S3_REQUIRED_API_VERSION in api_version: - config_facts['directory_service_roles'] = {} - roles = array.list_directory_service_roles() - for role in range(0, len(roles)): - role_name = roles[role]['name'] - config_facts['directory_service_roles'][role_name] = { - 'group': roles[role]['group'], - 'group_base': roles[role]['group_base'], - } - else: - config_facts['directory_service'].update(array.get_directory_service(groups=True)) - # NTP - config_facts['ntp'] = array.get(ntpserver=True)['ntpserver'] - # SYSLOG - config_facts['syslog'] = array.get(syslogserver=True)['syslogserver'] - # Phonehome - config_facts['phonehome'] = array.get(phonehome=True)['phonehome'] - # Proxy - 
config_facts['proxy'] = array.get(proxy=True)['proxy'] - # Relay Host - config_facts['relayhost'] = array.get(relayhost=True)['relayhost'] - # Sender Domain - config_facts['senderdomain'] = array.get(senderdomain=True)['senderdomain'] - # SYSLOG - config_facts['syslog'] = array.get(syslogserver=True)['syslogserver'] - # Idle Timeout - config_facts['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout'] - # SCSI Timeout - config_facts['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout'] - # SSL - config_facts['ssl_certs'] = array.get_certificate() - # Global Admin settings - if S3_REQUIRED_API_VERSION in api_version: - config_facts['global_admin'] = array.get_global_admin_attributes() - return config_facts - - -def generate_admin_dict(array): - api_version = array._list_available_rest_versions() - admin_facts = {} - if ADMIN_API_VERSION in api_version: - admins = array.list_admins() - for admin in range(0, len(admins)): - admin_name = admins[admin]['name'] - admin_facts[admin_name] = { - 'type': admins[admin]['type'], - 'role': admins[admin]['role'], - } - return admin_facts - - -def generate_subnet_dict(array): - sub_facts = {} - subnets = array.list_subnets() - for sub in range(0, len(subnets)): - sub_name = subnets[sub]['name'] - if subnets[sub]['enabled']: - sub_facts[sub_name] = { - 'gateway': subnets[sub]['gateway'], - 'mtu': subnets[sub]['mtu'], - 'vlan': subnets[sub]['vlan'], - 'prefix': subnets[sub]['prefix'], - 'interfaces': subnets[sub]['interfaces'], - 'services': subnets[sub]['services'], - } - return sub_facts - - -def generate_network_dict(array): - net_facts = {} - ports = array.list_network_interfaces() - for port in range(0, len(ports)): - int_name = ports[port]['name'] - net_facts[int_name] = { - 'hwaddr': ports[port]['hwaddr'], - 'mtu': ports[port]['mtu'], - 'enabled': ports[port]['enabled'], - 'speed': ports[port]['speed'], - 'address': ports[port]['address'], - 'slaves': ports[port]['slaves'], - 'services': 
ports[port]['services'], - 'gateway': ports[port]['gateway'], - 'netmask': ports[port]['netmask'], - } - if ports[port]['subnet']: - subnets = array.get_subnet(ports[port]['subnet']) - if subnets['enabled']: - net_facts[int_name]['subnet'] = { - 'name': subnets['name'], - 'prefix': subnets['prefix'], - 'vlan': subnets['vlan'], - } - return net_facts - - -def generate_capacity_dict(array): - capacity_facts = {} - api_version = array._list_available_rest_versions() - if CAP_REQUIRED_API_VERSION in api_version: - volumes = array.list_volumes(pending=True) - capacity_facts['provisioned_space'] = sum(item['size'] for item in volumes) - capacity = array.get(space=True) - total_capacity = capacity[0]['capacity'] - used_space = capacity[0]["total"] - capacity_facts['free_space'] = total_capacity - used_space - capacity_facts['total_capacity'] = total_capacity - capacity_facts['data_reduction'] = capacity[0]['data_reduction'] - capacity_facts['system_space'] = capacity[0]['system'] - capacity_facts['volume_space'] = capacity[0]['volumes'] - capacity_facts['shared_space'] = capacity[0]['shared_space'] - capacity_facts['snapshot_space'] = capacity[0]['snapshots'] - capacity_facts['thin_provisioning'] = capacity[0]['thin_provisioning'] - capacity_facts['total_reduction'] = capacity[0]['total_reduction'] - - return capacity_facts - - -def generate_snap_dict(array): - snap_facts = {} - snaps = array.list_volumes(snap=True) - for snap in range(0, len(snaps)): - snapshot = snaps[snap]['name'] - snap_facts[snapshot] = { - 'size': snaps[snap]['size'], - 'source': snaps[snap]['source'], - 'created': snaps[snap]['created'], - } - return snap_facts - - -def generate_vol_dict(array): - volume_facts = {} - vols = array.list_volumes() - for vol in range(0, len(vols)): - volume = vols[vol]['name'] - volume_facts[volume] = { - 'source': vols[vol]['source'], - 'size': vols[vol]['size'], - 'serial': vols[vol]['serial'], - 'hosts': [], - 'bandwidth': "" - } - api_version = 
array._list_available_rest_versions() - if AC_REQUIRED_API_VERSION in api_version: - qvols = array.list_volumes(qos=True) - for qvol in range(0, len(qvols)): - volume = qvols[qvol]['name'] - qos = qvols[qvol]['bandwidth_limit'] - volume_facts[volume]['bandwidth'] = qos - vvols = array.list_volumes(protocol_endpoint=True) - for vvol in range(0, len(vvols)): - volume = vvols[vvol]['name'] - volume_facts[volume] = { - 'source': vvols[vvol]['source'], - 'serial': vvols[vvol]['serial'], - 'hosts': [] - } - cvols = array.list_volumes(connect=True) - for cvol in range(0, len(cvols)): - volume = cvols[cvol]['name'] - voldict = [cvols[cvol]['host'], cvols[cvol]['lun']] - volume_facts[volume]['hosts'].append(voldict) - return volume_facts - - -def generate_host_dict(array): - api_version = array._list_available_rest_versions() - host_facts = {} - hosts = array.list_hosts() - for host in range(0, len(hosts)): - hostname = hosts[host]['name'] - tports = [] - host_all_info = array.get_host(hostname, all=True) - if host_all_info: - tports = host_all_info[0]['target_port'] - host_facts[hostname] = { - 'hgroup': hosts[host]['hgroup'], - 'iqn': hosts[host]['iqn'], - 'wwn': hosts[host]['wwn'], - 'personality': array.get_host(hostname, - personality=True)['personality'], - 'target_port': tports - } - if NVME_API_VERSION in api_version: - host_facts[hostname]['nqn'] = hosts[host]['nqn'] - if PREFERRED_API_VERSION in api_version: - hosts = array.list_hosts(preferred_array=True) - for host in range(0, len(hosts)): - hostname = hosts[host]['name'] - host_facts[hostname]['preferred_array'] = hosts[host]['preferred_array'] - return host_facts - - -def generate_pgroups_dict(array): - pgroups_facts = {} - pgroups = array.list_pgroups() - for pgroup in range(0, len(pgroups)): - protgroup = pgroups[pgroup]['name'] - pgroups_facts[protgroup] = { - 'hgroups': pgroups[pgroup]['hgroups'], - 'hosts': pgroups[pgroup]['hosts'], - 'source': pgroups[pgroup]['source'], - 'targets': 
pgroups[pgroup]['targets'], - 'volumes': pgroups[pgroup]['volumes'], - } - prot_sched = array.get_pgroup(protgroup, schedule=True) - prot_reten = array.get_pgroup(protgroup, retention=True) - if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']: - pgroups_facts[protgroup]['snap_freqyency'] = prot_sched['snap_frequency'] - pgroups_facts[protgroup]['replicate_freqyency'] = prot_sched['replicate_frequency'] - pgroups_facts[protgroup]['snap_enabled'] = prot_sched['snap_enabled'] - pgroups_facts[protgroup]['replicate_enabled'] = prot_sched['replicate_enabled'] - pgroups_facts[protgroup]['snap_at'] = prot_sched['snap_at'] - pgroups_facts[protgroup]['replicate_at'] = prot_sched['replicate_at'] - pgroups_facts[protgroup]['replicate_blackout'] = prot_sched['replicate_blackout'] - pgroups_facts[protgroup]['per_day'] = prot_reten['per_day'] - pgroups_facts[protgroup]['target_per_day'] = prot_reten['target_per_day'] - pgroups_facts[protgroup]['target_days'] = prot_reten['target_days'] - pgroups_facts[protgroup]['days'] = prot_reten['days'] - pgroups_facts[protgroup]['all_for'] = prot_reten['all_for'] - pgroups_facts[protgroup]['target_all_for'] = prot_reten['target_all_for'] - if ":" in protgroup: - snap_transfers = array.get_pgroup(protgroup, snap=True, transfer=True) - pgroups_facts[protgroup]['snaps'] = {} - for snap_transfer in range(0, len(snap_transfers)): - snap = snap_transfers[snap_transfer]['name'] - pgroups_facts[protgroup]['snaps'][snap] = { - 'created': snap_transfers[snap_transfer]['created'], - 'started': snap_transfers[snap_transfer]['started'], - 'completed': snap_transfers[snap_transfer]['completed'], - 'physical_bytes_written': snap_transfers[snap_transfer]['physical_bytes_written'], - 'data_transferred': snap_transfers[snap_transfer]['data_transferred'], - 'progress': snap_transfers[snap_transfer]['progress'], - } - return pgroups_facts - - -def generate_pods_dict(array): - pods_facts = {} - api_version = array._list_available_rest_versions() - 
if AC_REQUIRED_API_VERSION in api_version: - pods = array.list_pods() - for pod in range(0, len(pods)): - acpod = pods[pod]['name'] - pods_facts[acpod] = { - 'source': pods[pod]['source'], - 'arrays': pods[pod]['arrays'], - } - return pods_facts - - -def generate_conn_array_dict(array): - conn_array_facts = {} - api_version = array._list_available_rest_versions() - if CONN_STATUS_API_VERSION in api_version: - carrays = array.list_connected_arrays() - for carray in range(0, len(carrays)): - arrayname = carrays[carray]['array_name'] - conn_array_facts[arrayname] = { - 'array_id': carrays[carray]['id'], - 'throtled': carrays[carray]['throtled'], - 'version': carrays[carray]['version'], - 'type': carrays[carray]['type'], - 'mgmt_ip': carrays[carray]['management_address'], - 'repl_ip': carrays[carray]['replication_address'], - } - if CONN_STATUS_API_VERSION in api_version: - conn_array_facts[arrayname]['status'] = carrays[carray]['status'] - return conn_array_facts - - -def generate_apps_dict(array): - apps_facts = {} - api_version = array._list_available_rest_versions() - if SAN_REQUIRED_API_VERSION in api_version: - apps = array.list_apps() - for app in range(0, len(apps)): - appname = apps[app]['name'] - apps_facts[appname] = { - 'version': apps[app]['version'], - 'status': apps[app]['status'], - 'description': apps[app]['description'], - } - return apps_facts - - -def generate_vgroups_dict(array): - vgroups_facts = {} - api_version = array._list_available_rest_versions() - if AC_REQUIRED_API_VERSION in api_version: - vgroups = array.list_vgroups() - for vgroup in range(0, len(vgroups)): - virtgroup = vgroups[vgroup]['name'] - vgroups_facts[virtgroup] = { - 'volumes': vgroups[vgroup]['volumes'], - } - return vgroups_facts - - -def generate_nfs_offload_dict(array): - offload_facts = {} - api_version = array._list_available_rest_versions() - if AC_REQUIRED_API_VERSION in api_version: - offload = array.list_nfs_offload() - for target in range(0, len(offload)): - 
offloadt = offload[target]['name'] - offload_facts[offloadt] = { - 'status': offload[target]['status'], - 'mount_point': offload[target]['mount_point'], - 'protocol': offload[target]['protocol'], - 'mount_options': offload[target]['mount_options'], - 'address': offload[target]['address'], - } - return offload_facts - - -def generate_s3_offload_dict(array): - offload_facts = {} - api_version = array._list_available_rest_versions() - if S3_REQUIRED_API_VERSION in api_version: - offload = array.list_s3_offload() - for target in range(0, len(offload)): - offloadt = offload[target]['name'] - offload_facts[offloadt] = { - 'status': offload[target]['status'], - 'bucket': offload[target]['bucket'], - 'protocol': offload[target]['protocol'], - 'access_key_id': offload[target]['access_key_id'], - } - return offload_facts - - -def generate_hgroups_dict(array): - hgroups_facts = {} - hgroups = array.list_hgroups() - for hgroup in range(0, len(hgroups)): - hostgroup = hgroups[hgroup]['name'] - hgroups_facts[hostgroup] = { - 'hosts': hgroups[hgroup]['hosts'], - 'pgs': [], - 'vols': [], - } - pghgroups = array.list_hgroups(protect=True) - for pghg in range(0, len(pghgroups)): - pgname = pghgroups[pghg]['name'] - hgroups_facts[pgname]['pgs'].append(pghgroups[pghg]['protection_group']) - volhgroups = array.list_hgroups(connect=True) - for pgvol in range(0, len(volhgroups)): - pgname = volhgroups[pgvol]['name'] - volpgdict = [volhgroups[pgvol]['vol'], volhgroups[pgvol]['lun']] - hgroups_facts[pgname]['vols'].append(volpgdict) - return hgroups_facts - - -def generate_interfaces_dict(array): - api_version = array._list_available_rest_versions() - int_facts = {} - ports = array.list_ports() - for port in range(0, len(ports)): - int_name = ports[port]['name'] - if ports[port]['wwn']: - int_facts[int_name] = ports[port]['wwn'] - if ports[port]['iqn']: - int_facts[int_name] = ports[port]['iqn'] - if NVME_API_VERSION in api_version: - if ports[port]['nqn']: - int_facts[int_name] = 
ports[port]['nqn'] - return int_facts - - -def main(): - argument_spec = purefa_argument_spec() - argument_spec.update(dict( - gather_subset=dict(default='minimum', type='list',) - )) - - module = AnsibleModule(argument_spec, supports_check_mode=False) - - array = get_system(module) - - subset = [test.lower() for test in module.params['gather_subset']] - valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity', - 'network', 'subnet', 'interfaces', 'hgroups', 'pgroups', - 'hosts', 'admins', 'volumes', 'snapshots', 'pods', - 'vgroups', 'offload', 'apps', 'arrays') - subset_test = (test in valid_subsets for test in subset) - if not all(subset_test): - module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s" - % (",".join(valid_subsets), ",".join(subset))) - - facts = {} - - if 'minimum' in subset or 'all' in subset: - facts['default'] = generate_default_dict(array) - if 'performance' in subset or 'all' in subset: - facts['performance'] = generate_perf_dict(array) - if 'config' in subset or 'all' in subset: - facts['config'] = generate_config_dict(array) - if 'capacity' in subset or 'all' in subset: - facts['capacity'] = generate_capacity_dict(array) - if 'network' in subset or 'all' in subset: - facts['network'] = generate_network_dict(array) - if 'subnet' in subset or 'all' in subset: - facts['subnet'] = generate_subnet_dict(array) - if 'interfaces' in subset or 'all' in subset: - facts['interfaces'] = generate_interfaces_dict(array) - if 'hosts' in subset or 'all' in subset: - facts['hosts'] = generate_host_dict(array) - if 'volumes' in subset or 'all' in subset: - facts['volumes'] = generate_vol_dict(array) - if 'snapshots' in subset or 'all' in subset: - facts['snapshots'] = generate_snap_dict(array) - if 'hgroups' in subset or 'all' in subset: - facts['hgroups'] = generate_hgroups_dict(array) - if 'pgroups' in subset or 'all' in subset: - facts['pgroups'] = generate_pgroups_dict(array) - if 'pods' in subset or 'all' in 
subset: - facts['pods'] = generate_pods_dict(array) - if 'admins' in subset or 'all' in subset: - facts['admins'] = generate_admin_dict(array) - if 'vgroups' in subset or 'all' in subset: - facts['vgroups'] = generate_vgroups_dict(array) - if 'offload' in subset or 'all' in subset: - facts['nfs_offload'] = generate_nfs_offload_dict(array) - facts['s3_offload'] = generate_s3_offload_dict(array) - if 'apps' in subset or 'all' in subset: - facts['apps'] = generate_apps_dict(array) - if 'arrays' in subset or 'all' in subset: - facts['arrays'] = generate_conn_array_dict(array) - - module.exit_json(ansible_facts={'ansible_purefa_facts': facts}) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/storage/purestorage/purefb_facts.py b/plugins/modules/storage/purestorage/purefb_facts.py deleted file mode 100644 index 8c5a40c0dc..0000000000 --- a/plugins/modules/storage/purestorage/purefb_facts.py +++ /dev/null @@ -1,652 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2018, Simon Dodsley (simon@purestorage.com) -# GNU General Public License v3.0+ (see COPYING or -# https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: purefb_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favor of C(_info) module. - alternative: Use M(purestorage.flashblade.purefb_info) instead. -short_description: Collect facts from Pure Storage FlashBlade -description: - - Collect facts information from a Pure Storage FlashBlade running the - Purity//FB operating system. By default, the module will collect basic - fact information including hosts, host groups, protection - groups and volume counts. Additional fact information can be collected - based on the configured set of arguments. 
-author: - - Pure Storage Ansible Team (@sdodsley) -options: - gather_subset: - description: - - When supplied, this argument will define the facts to be collected. - Possible values for this include all, minimum, config, performance, - capacity, network, subnets, lags, filesystems and snapshots. - required: false - type: list - default: minimum -extends_documentation_fragment: -- community.general.purestorage.fb - -''' - -EXAMPLES = r''' -- name: Collect default set of facts - community.general.purefb_facts: - fb_url: 10.10.10.2 - api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641 - -- name: Collect configuration and capacity facts - community.general.purefb_facts: - gather_subset: - - config - - capacity - fb_url: 10.10.10.2 - api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641 - -- name: Collect all facts - community.general.purefb_facts: - gather_subset: - - all - fb_url: 10.10.10.2 - api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641 -''' - -RETURN = r''' -ansible_facts: - description: Returns the facts collected from the FlashBlade - returned: always - type: complex - sample: { - "capacity": { - "aggregate": { - "data_reduction": 1.1179228, - "snapshots": 0, - "total_physical": 17519748439, - "unique": 17519748439, - "virtual": 19585726464 - }, - "file-system": { - "data_reduction": 1.3642412, - "snapshots": 0, - "total_physical": 4748219708, - "unique": 4748219708, - "virtual": 6477716992 - }, - "object-store": { - "data_reduction": 1.0263462, - "snapshots": 0, - "total_physical": 12771528731, - "unique": 12771528731, - "virtual": 6477716992 - }, - "total": 83359896948925 - }, - "config": { - "alert_watchers": { - "enabled": true, - "name": "notify@acmestorage.com" - }, - "array_management": { - "base_dn": null, - "bind_password": null, - "bind_user": null, - "enabled": false, - "name": "management", - "services": [ - "management" - ], - "uris": [] - }, - "directory_service_roles": { - "array_admin": { - "group": null, - "group_base": null - }, - "ops_admin": { 
- "group": null, - "group_base": null - }, - "readonly": { - "group": null, - "group_base": null - }, - "storage_admin": { - "group": null, - "group_base": null - } - }, - "dns": { - "domain": "demo.acmestorage.com", - "name": "demo-fb-1", - "nameservers": [ - "8.8.8.8" - ], - "search": [ - "demo.acmestorage.com" - ] - }, - "nfs_directory_service": { - "base_dn": null, - "bind_password": null, - "bind_user": null, - "enabled": false, - "name": "nfs", - "services": [ - "nfs" - ], - "uris": [] - }, - "ntp": [ - "0.ntp.pool.org" - ], - "smb_directory_service": { - "base_dn": null, - "bind_password": null, - "bind_user": null, - "enabled": false, - "name": "smb", - "services": [ - "smb" - ], - "uris": [] - }, - "smtp": { - "name": "demo-fb-1", - "relay_host": null, - "sender_domain": "acmestorage.com" - }, - "ssl_certs": { - "certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----", - "common_name": "Acme Storage", - "country": "US", - "email": null, - "intermediate_certificate": null, - "issued_by": "Acme Storage", - "issued_to": "Acme Storage", - "key_size": 4096, - "locality": null, - "name": "global", - "organization": "Acme Storage", - "organizational_unit": "Acme Storage", - "passphrase": null, - "private_key": null, - "state": null, - "status": "self-signed", - "valid_from": "1508433967000", - "valid_to": "2458833967000" - } - }, - "default": { - "blades": 15, - "buckets": 7, - "filesystems": 2, - "flashblade_name": "demo-fb-1", - "object_store_accounts": 1, - "object_store_users": 1, - "purity_version": "2.2.0", - "snapshots": 1, - "total_capacity": 83359896948925 - }, - "filesystems": { - "k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": { - "destroyed": false, - "fast_remove": false, - "hard_limit": true, - "nfs_rules": "*(rw,no_root_squash)", - "provisioned": 21474836480, - "snapshot_enabled": false - }, - "z": { - "destroyed": false, - "fast_remove": false, - "hard_limit": false, - "provisioned": 1073741824, - "snapshot_enabled": false - } - 
}, - "lag": { - "uplink": { - "lag_speed": 0, - "port_speed": 40000000000, - "ports": [ - { - "name": "CH1.FM1.ETH1.1" - }, - { - "name": "CH1.FM1.ETH1.2" - }, - ], - "status": "healthy" - } - }, - "network": { - "fm1.admin0": { - "address": "10.10.100.6", - "gateway": "10.10.100.1", - "mtu": 1500, - "netmask": "255.255.255.0", - "services": [ - "support" - ], - "type": "vip", - "vlan": 2200 - }, - "fm2.admin0": { - "address": "10.10.100.7", - "gateway": "10.10.100.1", - "mtu": 1500, - "netmask": "255.255.255.0", - "services": [ - "support" - ], - "type": "vip", - "vlan": 2200 - }, - "nfs1": { - "address": "10.10.100.4", - "gateway": "10.10.100.1", - "mtu": 1500, - "netmask": "255.255.255.0", - "services": [ - "data" - ], - "type": "vip", - "vlan": 2200 - }, - "vir0": { - "address": "10.10.100.5", - "gateway": "10.10.100.1", - "mtu": 1500, - "netmask": "255.255.255.0", - "services": [ - "management" - ], - "type": "vip", - "vlan": 2200 - } - }, - "performance": { - "aggregate": { - "bytes_per_op": 0, - "bytes_per_read": 0, - "bytes_per_write": 0, - "read_bytes_per_sec": 0, - "reads_per_sec": 0, - "usec_per_other_op": 0, - "usec_per_read_op": 0, - "usec_per_write_op": 0, - "write_bytes_per_sec": 0, - "writes_per_sec": 0 - }, - "http": { - "bytes_per_op": 0, - "bytes_per_read": 0, - "bytes_per_write": 0, - "read_bytes_per_sec": 0, - "reads_per_sec": 0, - "usec_per_other_op": 0, - "usec_per_read_op": 0, - "usec_per_write_op": 0, - "write_bytes_per_sec": 0, - "writes_per_sec": 0 - }, - "nfs": { - "bytes_per_op": 0, - "bytes_per_read": 0, - "bytes_per_write": 0, - "read_bytes_per_sec": 0, - "reads_per_sec": 0, - "usec_per_other_op": 0, - "usec_per_read_op": 0, - "usec_per_write_op": 0, - "write_bytes_per_sec": 0, - "writes_per_sec": 0 - }, - "s3": { - "bytes_per_op": 0, - "bytes_per_read": 0, - "bytes_per_write": 0, - "read_bytes_per_sec": 0, - "reads_per_sec": 0, - "usec_per_other_op": 0, - "usec_per_read_op": 0, - "usec_per_write_op": 0, - "write_bytes_per_sec": 0, - 
"writes_per_sec": 0 - } - }, - "snapshots": { - "z.188": { - "destroyed": false, - "source": "z", - "source_destroyed": false, - "suffix": "188" - } - }, - "subnet": { - "new-mgmt": { - "gateway": "10.10.100.1", - "interfaces": [ - { - "name": "fm1.admin0" - }, - { - "name": "fm2.admin0" - }, - { - "name": "nfs1" - }, - { - "name": "vir0" - } - ], - "lag": "uplink", - "mtu": 1500, - "prefix": "10.10.100.0/24", - "services": [ - "data", - "management", - "support" - ], - "vlan": 2200 - } - } - } -''' - - -from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.pure import get_blade, purefb_argument_spec - - -MIN_REQUIRED_API_VERSION = '1.3' -HARD_LIMIT_API_VERSION = '1.4' - - -def generate_default_dict(blade): - default_facts = {} - defaults = blade.arrays.list_arrays().items[0] - default_facts['flashblade_name'] = defaults.name - default_facts['purity_version'] = defaults.version - default_facts['filesystems'] = \ - len(blade.file_systems.list_file_systems().items) - default_facts['snapshots'] = \ - len(blade.file_system_snapshots.list_file_system_snapshots().items) - default_facts['buckets'] = len(blade.buckets.list_buckets().items) - default_facts['object_store_users'] = \ - len(blade.object_store_users.list_object_store_users().items) - default_facts['object_store_accounts'] = \ - len(blade.object_store_accounts.list_object_store_accounts().items) - default_facts['blades'] = len(blade.blade.list_blades().items) - default_facts['total_capacity'] = \ - blade.arrays.list_arrays_space().items[0].capacity - return default_facts - - -def generate_perf_dict(blade): - perf_facts = {} - total_perf = blade.arrays.list_arrays_performance() - http_perf = blade.arrays.list_arrays_performance(protocol='http') - s3_perf = blade.arrays.list_arrays_performance(protocol='s3') - nfs_perf = blade.arrays.list_arrays_performance(protocol='nfs') - perf_facts['aggregate'] = { - 'bytes_per_op': 
total_perf.items[0].bytes_per_op, - 'bytes_per_read': total_perf.items[0].bytes_per_read, - 'bytes_per_write': total_perf.items[0].bytes_per_write, - 'read_bytes_per_sec': total_perf.items[0].read_bytes_per_sec, - 'reads_per_sec': total_perf.items[0].reads_per_sec, - 'usec_per_other_op': total_perf.items[0].usec_per_other_op, - 'usec_per_read_op': total_perf.items[0].usec_per_read_op, - 'usec_per_write_op': total_perf.items[0].usec_per_write_op, - 'write_bytes_per_sec': total_perf.items[0].write_bytes_per_sec, - 'writes_per_sec': total_perf.items[0].writes_per_sec, - } - perf_facts['http'] = { - 'bytes_per_op': http_perf.items[0].bytes_per_op, - 'bytes_per_read': http_perf.items[0].bytes_per_read, - 'bytes_per_write': http_perf.items[0].bytes_per_write, - 'read_bytes_per_sec': http_perf.items[0].read_bytes_per_sec, - 'reads_per_sec': http_perf.items[0].reads_per_sec, - 'usec_per_other_op': http_perf.items[0].usec_per_other_op, - 'usec_per_read_op': http_perf.items[0].usec_per_read_op, - 'usec_per_write_op': http_perf.items[0].usec_per_write_op, - 'write_bytes_per_sec': http_perf.items[0].write_bytes_per_sec, - 'writes_per_sec': http_perf.items[0].writes_per_sec, - } - perf_facts['s3'] = { - 'bytes_per_op': s3_perf.items[0].bytes_per_op, - 'bytes_per_read': s3_perf.items[0].bytes_per_read, - 'bytes_per_write': s3_perf.items[0].bytes_per_write, - 'read_bytes_per_sec': s3_perf.items[0].read_bytes_per_sec, - 'reads_per_sec': s3_perf.items[0].reads_per_sec, - 'usec_per_other_op': s3_perf.items[0].usec_per_other_op, - 'usec_per_read_op': s3_perf.items[0].usec_per_read_op, - 'usec_per_write_op': s3_perf.items[0].usec_per_write_op, - 'write_bytes_per_sec': s3_perf.items[0].write_bytes_per_sec, - 'writes_per_sec': s3_perf.items[0].writes_per_sec, - } - perf_facts['nfs'] = { - 'bytes_per_op': nfs_perf.items[0].bytes_per_op, - 'bytes_per_read': nfs_perf.items[0].bytes_per_read, - 'bytes_per_write': nfs_perf.items[0].bytes_per_write, - 'read_bytes_per_sec': 
nfs_perf.items[0].read_bytes_per_sec, - 'reads_per_sec': nfs_perf.items[0].reads_per_sec, - 'usec_per_other_op': nfs_perf.items[0].usec_per_other_op, - 'usec_per_read_op': nfs_perf.items[0].usec_per_read_op, - 'usec_per_write_op': nfs_perf.items[0].usec_per_write_op, - 'write_bytes_per_sec': nfs_perf.items[0].write_bytes_per_sec, - 'writes_per_sec': nfs_perf.items[0].writes_per_sec, - } - - return perf_facts - - -def generate_config_dict(blade): - config_facts = {} - config_facts['dns'] = blade.dns.list_dns().items[0].to_dict() - config_facts['smtp'] = blade.smtp.list_smtp().items[0].to_dict() - config_facts['alert_watchers'] = \ - blade.alert_watchers.list_alert_watchers().items[0].to_dict() - api_version = blade.api_version.list_versions().versions - if HARD_LIMIT_API_VERSION in api_version: - config_facts['array_management'] = \ - blade.directory_services.list_directory_services(names=['management']).items[0].to_dict() - config_facts['directory_service_roles'] = {} - roles = blade.directory_services.list_directory_services_roles() - for role in range(0, len(roles.items)): - role_name = roles.items[role].name - config_facts['directory_service_roles'][role_name] = { - 'group': roles.items[role].group, - 'group_base': roles.items[role].group_base - } - config_facts['nfs_directory_service'] = \ - blade.directory_services.list_directory_services(names=['nfs']).items[0].to_dict() - config_facts['smb_directory_service'] = \ - blade.directory_services.list_directory_services(names=['smb']).items[0].to_dict() - config_facts['ntp'] = blade.arrays.list_arrays().items[0].ntp_servers - config_facts['ssl_certs'] = \ - blade.certificates.list_certificates().items[0].to_dict() - return config_facts - - -def generate_subnet_dict(blade): - sub_facts = {} - subnets = blade.subnets.list_subnets() - for sub in range(0, len(subnets.items)): - sub_name = subnets.items[sub].name - if subnets.items[sub].enabled: - sub_facts[sub_name] = { - 'gateway': subnets.items[sub].gateway, - 'mtu': 
subnets.items[sub].mtu, - 'vlan': subnets.items[sub].vlan, - 'prefix': subnets.items[sub].prefix, - 'services': subnets.items[sub].services, - } - sub_facts[sub_name]['lag'] = subnets.items[sub].link_aggregation_group.name - sub_facts[sub_name]['interfaces'] = [] - for iface in range(0, len(subnets.items[sub].interfaces)): - sub_facts[sub_name]['interfaces'].append({'name': subnets.items[sub].interfaces[iface].name}) - return sub_facts - - -def generate_lag_dict(blade): - lag_facts = {} - groups = blade.link_aggregation_groups.list_link_aggregation_groups() - for groupcnt in range(0, len(groups.items)): - lag_name = groups.items[groupcnt].name - lag_facts[lag_name] = { - 'lag_speed': groups.items[groupcnt].lag_speed, - 'port_speed': groups.items[groupcnt].port_speed, - 'status': groups.items[groupcnt].status, - } - lag_facts[lag_name]['ports'] = [] - for port in range(0, len(groups.items[groupcnt].ports)): - lag_facts[lag_name]['ports'].append({'name': groups.items[groupcnt].ports[port].name}) - return lag_facts - - -def generate_network_dict(blade): - net_facts = {} - ports = blade.network_interfaces.list_network_interfaces() - for portcnt in range(0, len(ports.items)): - int_name = ports.items[portcnt].name - if ports.items[portcnt].enabled: - net_facts[int_name] = { - 'type': ports.items[portcnt].type, - 'mtu': ports.items[portcnt].mtu, - 'vlan': ports.items[portcnt].vlan, - 'address': ports.items[portcnt].address, - 'services': ports.items[portcnt].services, - 'gateway': ports.items[portcnt].gateway, - 'netmask': ports.items[portcnt].netmask, - } - return net_facts - - -def generate_capacity_dict(blade): - capacity_facts = {} - total_cap = blade.arrays.list_arrays_space() - file_cap = blade.arrays.list_arrays_space(type='file-system') - object_cap = blade.arrays.list_arrays_space(type='object-store') - capacity_facts['total'] = total_cap.items[0].capacity - capacity_facts['aggregate'] = { - 'data_reduction': total_cap.items[0].space.data_reduction, - 
'snapshots': total_cap.items[0].space.snapshots, - 'total_physical': total_cap.items[0].space.total_physical, - 'unique': total_cap.items[0].space.unique, - 'virtual': total_cap.items[0].space.virtual, - } - capacity_facts['file-system'] = { - 'data_reduction': file_cap.items[0].space.data_reduction, - 'snapshots': file_cap.items[0].space.snapshots, - 'total_physical': file_cap.items[0].space.total_physical, - 'unique': file_cap.items[0].space.unique, - 'virtual': file_cap.items[0].space.virtual, - } - capacity_facts['object-store'] = { - 'data_reduction': object_cap.items[0].space.data_reduction, - 'snapshots': object_cap.items[0].space.snapshots, - 'total_physical': object_cap.items[0].space.total_physical, - 'unique': object_cap.items[0].space.unique, - 'virtual': file_cap.items[0].space.virtual, - } - - return capacity_facts - - -def generate_snap_dict(blade): - snap_facts = {} - snaps = blade.file_system_snapshots.list_file_system_snapshots() - for snap in range(0, len(snaps.items)): - snapshot = snaps.items[snap].name - snap_facts[snapshot] = { - 'destroyed': snaps.items[snap].destroyed, - 'source': snaps.items[snap].source, - 'suffix': snaps.items[snap].suffix, - 'source_destroyed': snaps.items[snap].source_destroyed, - } - return snap_facts - - -def generate_fs_dict(blade): - fs_facts = {} - fsys = blade.file_systems.list_file_systems() - for fsystem in range(0, len(fsys.items)): - share = fsys.items[fsystem].name - fs_facts[share] = { - 'fast_remove': fsys.items[fsystem].fast_remove_directory_enabled, - 'snapshot_enabled': fsys.items[fsystem].snapshot_directory_enabled, - 'provisioned': fsys.items[fsystem].provisioned, - 'destroyed': fsys.items[fsystem].destroyed, - } - if fsys.items[fsystem].http.enabled: - fs_facts[share]['http'] = fsys.items[fsystem].http.enabled - if fsys.items[fsystem].smb.enabled: - fs_facts[share]['smb_mode'] = fsys.items[fsystem].smb.acl_mode - if fsys.items[fsystem].nfs.enabled: - fs_facts[share]['nfs_rules'] = 
fsys.items[fsystem].nfs.rules - api_version = blade.api_version.list_versions().versions - if HARD_LIMIT_API_VERSION in api_version: - fs_facts[share]['hard_limit'] = fsys.items[fsystem].hard_limit_enabled - - return fs_facts - - -def main(): - argument_spec = purefb_argument_spec() - argument_spec.update(dict( - gather_subset=dict(default='minimum', type='list',) - )) - - module = AnsibleModule(argument_spec, supports_check_mode=True) - - blade = get_blade(module) - versions = blade.api_version.list_versions().versions - - if MIN_REQUIRED_API_VERSION not in versions: - module.fail_json(msg='FlashBlade REST version not supported. Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION)) - - subset = [test.lower() for test in module.params['gather_subset']] - valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity', - 'network', 'subnets', 'lags', - 'filesystems', 'snapshots') - subset_test = (test in valid_subsets for test in subset) - if not all(subset_test): - module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s" - % (",".join(valid_subsets), ",".join(subset))) - - facts = {} - - if 'minimum' in subset or 'all' in subset: - facts['default'] = generate_default_dict(blade) - if 'performance' in subset or 'all' in subset: - facts['performance'] = generate_perf_dict(blade) - if 'config' in subset or 'all' in subset: - facts['config'] = generate_config_dict(blade) - if 'capacity' in subset or 'all' in subset: - facts['capacity'] = generate_capacity_dict(blade) - if 'lags' in subset or 'all' in subset: - facts['lag'] = generate_lag_dict(blade) - if 'network' in subset or 'all' in subset: - facts['network'] = generate_network_dict(blade) - if 'subnets' in subset or 'all' in subset: - facts['subnet'] = generate_subnet_dict(blade) - if 'filesystems' in subset or 'all' in subset: - facts['filesystems'] = generate_fs_dict(blade) - if 'snapshots' in subset or 'all' in subset: - facts['snapshots'] = generate_snap_dict(blade) 
- - module.exit_json(ansible_facts={'ansible_purefb_facts': facts}) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/system/python_requirements_facts.py b/plugins/modules/system/python_requirements_facts.py deleted file mode 120000 index d816829034..0000000000 --- a/plugins/modules/system/python_requirements_facts.py +++ /dev/null @@ -1 +0,0 @@ -python_requirements_info.py \ No newline at end of file diff --git a/plugins/modules/system/python_requirements_info.py b/plugins/modules/system/python_requirements_info.py index 5ffb277612..08a9ddd64e 100644 --- a/plugins/modules/system/python_requirements_info.py +++ b/plugins/modules/system/python_requirements_info.py @@ -113,9 +113,6 @@ def main(): ), supports_check_mode=True, ) - if module._name in ('python_requirements_facts', 'community.general.python_requirements_facts'): - module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 if not HAS_DISTUTILS: module.fail_json( msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.', diff --git a/plugins/modules/vertica_facts.py b/plugins/modules/vertica_facts.py deleted file mode 120000 index f98801c7ad..0000000000 --- a/plugins/modules/vertica_facts.py +++ /dev/null @@ -1 +0,0 @@ -database/vertica/vertica_facts.py \ No newline at end of file diff --git a/plugins/modules/web_infrastructure/jenkins_job_facts.py b/plugins/modules/web_infrastructure/jenkins_job_facts.py deleted file mode 120000 index 7a78b2faee..0000000000 --- a/plugins/modules/web_infrastructure/jenkins_job_facts.py +++ /dev/null @@ -1 +0,0 @@ -jenkins_job_info.py \ No newline at end of file diff --git a/plugins/modules/web_infrastructure/jenkins_job_info.py b/plugins/modules/web_infrastructure/jenkins_job_info.py index f0d13262b0..c927e5b954 100644 --- a/plugins/modules/web_infrastructure/jenkins_job_info.py +++ 
b/plugins/modules/web_infrastructure/jenkins_job_info.py @@ -237,9 +237,6 @@ def main(): ], supports_check_mode=True, ) - if module._name in ('jenkins_job_facts', 'community.general.jenkins_job_facts'): - module.deprecate("The 'jenkins_job_facts' module has been renamed to 'jenkins_job_info'", - version='3.0.0', collection_name='community.general') # was Ansible 2.13 test_dependencies(module) jobs = list() diff --git a/plugins/modules/web_infrastructure/nginx_status_facts.py b/plugins/modules/web_infrastructure/nginx_status_facts.py deleted file mode 100644 index 3a68f8da99..0000000000 --- a/plugins/modules/web_infrastructure/nginx_status_facts.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# (c) 2016, René Moser -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: nginx_status_facts -deprecated: - removed_in: 3.0.0 # was Ansible 2.13 - why: Deprecated in favour of C(_info) module. - alternative: Use M(community.general.nginx_status_info) instead. -short_description: Retrieve nginx status facts. -description: - - Gathers facts from nginx from an URL having C(stub_status) enabled. -author: "René Moser (@resmo)" -options: - url: - type: str - description: - - URL of the nginx status. - required: true - timeout: - type: int - description: - - HTTP connection timeout in seconds. - required: false - default: 10 - -notes: - - See http://nginx.org/en/docs/http/ngx_http_stub_status_module.html for more information. 
-''' - -EXAMPLES = ''' -# Gather status facts from nginx on localhost -- name: Get current http stats - community.general.nginx_status_facts: - url: http://localhost/nginx_status - -# Gather status facts from nginx on localhost with a custom timeout of 20 seconds -- name: Get current http stats - community.general.nginx_status_facts: - url: http://localhost/nginx_status - timeout: 20 -''' - -RETURN = ''' ---- -nginx_status_facts.active_connections: - description: Active connections. - returned: success - type: int - sample: 2340 -nginx_status_facts.accepts: - description: The total number of accepted client connections. - returned: success - type: int - sample: 81769947 -nginx_status_facts.handled: - description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached. - returned: success - type: int - sample: 81769947 -nginx_status_facts.requests: - description: The total number of client requests. - returned: success - type: int - sample: 144332345 -nginx_status_facts.reading: - description: The current number of connections where nginx is reading the request header. - returned: success - type: int - sample: 0 -nginx_status_facts.writing: - description: The current number of connections where nginx is writing the response back to the client. - returned: success - type: int - sample: 241 -nginx_status_facts.waiting: - description: The current number of idle client connections waiting for a request. - returned: success - type: int - sample: 2092 -nginx_status_facts.data: - description: HTTP response as is. 
- returned: success - type: str - sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n" -''' - -import re -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_text - - -class NginxStatusFacts(object): - - def __init__(self): - self.url = module.params.get('url') - self.timeout = module.params.get('timeout') - - def run(self): - result = { - 'nginx_status_facts': { - 'active_connections': None, - 'accepts': None, - 'handled': None, - 'requests': None, - 'reading': None, - 'writing': None, - 'waiting': None, - 'data': None, - } - } - (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout) - if not response: - module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout)) - - data = to_text(response.read(), errors='surrogate_or_strict') - if not data: - return result - - result['nginx_status_facts']['data'] = data - expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \ - r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)' - match = re.match(expr, data, re.S) - if match: - result['nginx_status_facts']['active_connections'] = int(match.group(1)) - result['nginx_status_facts']['accepts'] = int(match.group(2)) - result['nginx_status_facts']['handled'] = int(match.group(3)) - result['nginx_status_facts']['requests'] = int(match.group(4)) - result['nginx_status_facts']['reading'] = int(match.group(5)) - result['nginx_status_facts']['writing'] = int(match.group(6)) - result['nginx_status_facts']['waiting'] = int(match.group(7)) - return result - - -def main(): - global module - module = AnsibleModule( - argument_spec=dict( - url=dict(required=True), - timeout=dict(type='int', default=10), - ), - supports_check_mode=True, - ) - - nginx_status_facts = 
NginxStatusFacts().run() - result = dict(changed=False, ansible_facts=nginx_status_facts) - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/xenserver_guest_facts.py b/plugins/modules/xenserver_guest_facts.py deleted file mode 120000 index 23d0dde0aa..0000000000 --- a/plugins/modules/xenserver_guest_facts.py +++ /dev/null @@ -1 +0,0 @@ -cloud/xenserver/xenserver_guest_facts.py \ No newline at end of file diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index e3f5531991..582f2dc2fb 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -9,78 +9,19 @@ plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/cloud/online/online_server_facts.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error -plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:parameter-list-no-elements 
-plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_event_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:undocumented-parameter -plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:doc-missing-type 
-plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type 
-plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc 
plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter @@ -101,8 +42,6 @@ plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented -plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code @@ -130,16 +69,6 @@ plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-def plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid -plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:parameter-list-no-elements -plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-list-no-elements -plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-state-invalid-choice -plugins/modules/storage/purestorage/purefa_facts.py 
validate-modules:doc-required-mismatch -plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-list-no-elements -plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error -plugins/modules/storage/purestorage/purefb_facts.py validate-modules:parameter-list-no-elements -plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter plugins/modules/system/launchd.py use-argspec-type-path # False positive diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 547a788be0..51fe901ba5 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -8,78 +8,19 @@ plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/cloud/online/online_server_facts.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error -plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:parameter-list-no-elements 
-plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_event_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:undocumented-parameter -plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_quota_facts.py 
validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_vm_facts.py 
validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_volume_info.py 
validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter @@ -100,8 +41,6 @@ plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented -plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code @@ -129,16 +68,6 @@ plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-def plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid -plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:parameter-list-no-elements -plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-list-no-elements -plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-type-not-in-doc 
-plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-state-invalid-choice -plugins/modules/storage/purestorage/purefa_facts.py validate-modules:doc-required-mismatch -plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-list-no-elements -plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error -plugins/modules/storage/purestorage/purefb_facts.py validate-modules:parameter-list-no-elements -plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter plugins/modules/system/launchd.py use-argspec-type-path # False positive diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 547a788be0..51fe901ba5 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -8,78 +8,19 @@ plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/cloud/online/online_server_facts.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error -plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:parameter-list-no-elements 
-plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_event_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:undocumented-parameter -plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_permission_facts.py 
validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_vm_facts.py 
validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_snapshot_info.py 
validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter @@ -100,8 +41,6 @@ plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented -plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code @@ -129,16 +68,6 @@ plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-def plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid -plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:parameter-list-no-elements -plugins/modules/storage/glusterfs/gluster_volume.py 
validate-modules:parameter-list-no-elements -plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-state-invalid-choice -plugins/modules/storage/purestorage/purefa_facts.py validate-modules:doc-required-mismatch -plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-list-no-elements -plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error -plugins/modules/storage/purestorage/purefb_facts.py validate-modules:parameter-list-no-elements -plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter plugins/modules/system/launchd.py use-argspec-type-path # False positive diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index f5bbfa704d..c10f7be552 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -7,122 +7,17 @@ plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-do plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen -plugins/modules/cloud/misc/helm.py validate-modules:deprecation-mismatch -plugins/modules/cloud/misc/helm.py validate-modules:invalid-documentation -plugins/modules/cloud/misc/ovirt.py validate-modules:deprecation-mismatch -plugins/modules/cloud/misc/ovirt.py validate-modules:invalid-documentation -plugins/modules/cloud/online/online_server_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/online/online_server_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/online/online_server_facts.py 
validate-modules:return-syntax-error plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/online/online_user_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error -plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_event_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_event_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py 
validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:undocumented-parameter -plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_quota_facts.py 
validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_tag_facts.py 
validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type -plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:invalid-documentation plugins/modules/cloud/rackspace/rax.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:deprecation-mismatch 
-plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:invalid-documentation -plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:deprecation-mismatch -plugins/modules/cloud/scaleway/scaleway_volume_facts.py 
validate-modules:invalid-documentation -plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter @@ -137,10 +32,6 @@ plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-ty plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter -plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:deprecation-mismatch -plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:invalid-documentation -plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc -plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code @@ -165,22 +56,6 @@ plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-def plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid -plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:deprecation-mismatch 
-plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:invalid-documentation -plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:deprecation-mismatch -plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:invalid-documentation -plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:deprecation-mismatch -plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:invalid-documentation -plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-type-not-in-doc -plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:deprecation-mismatch -plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:invalid-documentation -plugins/modules/storage/purestorage/purefa_facts.py validate-modules:deprecation-mismatch -plugins/modules/storage/purestorage/purefa_facts.py validate-modules:invalid-documentation -plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error -plugins/modules/storage/purestorage/purefb_facts.py validate-modules:deprecation-mismatch -plugins/modules/storage/purestorage/purefb_facts.py validate-modules:invalid-documentation -plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/puppet.py use-argspec-type-path @@ -189,8 +64,6 @@ plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path 
-plugins/modules/web_infrastructure/nginx_status_facts.py validate-modules:deprecation-mismatch -plugins/modules/web_infrastructure/nginx_status_facts.py validate-modules:invalid-documentation tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code tests/utils/shippable/check_matrix.py replace-urlopen From bfd6d2b3aa962cd22956804071bac70a624ec620 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 14 Apr 2021 19:11:31 +1200 Subject: [PATCH 0183/3093] jira - fixed isinstance error (#2236) * fixed isinstance error * added changelog fragment --- changelogs/fragments/2236-jira-isinstance.yml | 2 ++ plugins/modules/web_infrastructure/jira.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2236-jira-isinstance.yml diff --git a/changelogs/fragments/2236-jira-isinstance.yml b/changelogs/fragments/2236-jira-isinstance.yml new file mode 100644 index 0000000000..e80cbacdf9 --- /dev/null +++ b/changelogs/fragments/2236-jira-isinstance.yml @@ -0,0 +1,2 @@ +bugfixes: + - jira - fixed calling of ``isinstance`` (https://github.com/ansible-collections/community.general/issues/2234). 
diff --git a/plugins/modules/web_infrastructure/jira.py b/plugins/modules/web_infrastructure/jira.py index d4ddf53015..d7c88c01b8 100644 --- a/plugins/modules/web_infrastructure/jira.py +++ b/plugins/modules/web_infrastructure/jira.py @@ -389,7 +389,7 @@ def request( auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(user, passwd), errors='surrogate_or_strict'))) headers = {} - if isinstance(additional_headers) == dict: + if isinstance(additional_headers, dict): headers = additional_headers.copy() headers.update({ "Content-Type": content_type, From f97d5ca70157c6d567e56279243f9aa4e0fffec8 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Wed, 14 Apr 2021 11:55:40 +0430 Subject: [PATCH 0184/3093] Use nomad_job_info in nomad_job_info examples (#2233) --- plugins/modules/clustering/nomad/nomad_job_info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py index d10c0a0438..5e9455f77b 100644 --- a/plugins/modules/clustering/nomad/nomad_job_info.py +++ b/plugins/modules/clustering/nomad/nomad_job_info.py @@ -36,13 +36,13 @@ seealso: EXAMPLES = ''' - name: Get info for job awx - community.general.nomad_job: + community.general.nomad_job_info: host: localhost name: awx register: result - name: List Nomad jobs - community.general.nomad_job: + community.general.nomad_job_info: host: localhost register: result From f4858d64f46e9599dad0f03ed75e0288506018cb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 14 Apr 2021 03:25:57 -0400 Subject: [PATCH 0185/3093] funcd connection plugin is now usable/loadable (#2235) * funcd connection plugin is now usable/loadable * Update changelogs/fragments/allow_funcd_to_load.yml Co-authored-by: Felix Fontein --- changelogs/fragments/allow_funcd_to_load.yml | 2 ++ plugins/connection/funcd.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/allow_funcd_to_load.yml diff 
--git a/changelogs/fragments/allow_funcd_to_load.yml b/changelogs/fragments/allow_funcd_to_load.yml new file mode 100644 index 0000000000..3336b0aaf4 --- /dev/null +++ b/changelogs/fragments/allow_funcd_to_load.yml @@ -0,0 +1,2 @@ +bugfixes: + - funcd connection plugin - can now load (https://github.com/ansible-collections/community.general/pull/2235). diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 193e298fe2..3aed7145cb 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -37,12 +37,13 @@ import tempfile import shutil from ansible.errors import AnsibleError +from ansible.plugins.connection import ConnectionBase from ansible.utils.display import Display display = Display() -class Connection(object): +class Connection(ConnectionBase): ''' Func-based connections ''' has_pipelining = False From 69a9a77b65dee8e91600de05950447b5d534ce67 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 17 Apr 2021 19:32:13 +1200 Subject: [PATCH 0186/3093] xenserver_guest - fixed validations (#2243) * fixed cdrom parameter * fixed networks and custom_params parameters * fixed disks parameter * fixed remaining parameters * removed plugins/modules/cloud/xenserver/xenserver_guest.py from ignore list * comments from PR * comments from PR * more adjustments from the PR --- plugins/module_utils/xenserver.py | 1 - .../cloud/xenserver/xenserver_guest.py | 191 +++++++++++++----- tests/sanity/ignore-2.10.txt | 5 - tests/sanity/ignore-2.11.txt | 5 - tests/sanity/ignore-2.12.txt | 5 - tests/sanity/ignore-2.9.txt | 4 - 6 files changed, 142 insertions(+), 69 deletions(-) diff --git a/plugins/module_utils/xenserver.py b/plugins/module_utils/xenserver.py index dbc6a0adbe..015b10215e 100644 --- a/plugins/module_utils/xenserver.py +++ b/plugins/module_utils/xenserver.py @@ -20,7 +20,6 @@ except ImportError: XENAPI_IMP_ERR = traceback.format_exc() from ansible.module_utils.basic import env_fallback, 
missing_required_lib -from ansible.module_utils.common.network import is_mac from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION diff --git a/plugins/modules/cloud/xenserver/xenserver_guest.py b/plugins/modules/cloud/xenserver/xenserver_guest.py index 2316168e9d..b90b380c3f 100644 --- a/plugins/modules/cloud/xenserver/xenserver_guest.py +++ b/plugins/modules/cloud/xenserver/xenserver_guest.py @@ -4,7 +4,7 @@ # Copyright: (c) 2018, Bojan Vitnik # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function +from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = r''' @@ -24,14 +24,14 @@ notes: Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub: U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)' -- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are +- 'If no scheme is specified in I(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are accessing XenServer host in trusted environment or use C(https://) scheme explicitly.' -- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no) +- 'To use C(https://) scheme for I(hostname) you have to either import host certificate to your OS certificate store or use I(validate_certs): C(no) which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.' -- 'Network configuration inside a guest OS, by using C(networks.type), C(networks.ip), C(networks.gateway) etc. 
parameters, is supported on +- 'Network configuration inside a guest OS, by using I(networks.type), I(networks.ip), I(networks.gateway) etc. parameters, is supported on XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest - agent only support None and Static types of network configuration, where None means DHCP configured interface, C(networks.type) and C(networks.type6) + agent only support None and Static types of network configuration, where None means DHCP configured interface, I(networks.type) and I(networks.type6) values C(none) and C(dhcp) have same effect. More info here: U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)' - 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore @@ -49,10 +49,10 @@ options: state: description: - Specify the state VM should be in. - - If C(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters. - - If C(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters. - - If C(state) is set to C(absent) and VM exists, then VM is removed with its associated components. - - If C(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically. + - If I(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters. + - If I(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters. + - If I(state) is set to C(absent) and VM exists, then VM is removed with its associated components. 
+ - If I(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically. type: str default: present choices: [ present, absent, poweredon ] @@ -60,10 +60,9 @@ options: description: - Name of the VM to work with. - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found. - - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage. + - In case of multiple VMs with same name, use I(uuid) to uniquely specify VM to manage. - This parameter is case sensitive. type: str - required: yes aliases: [ name_label ] name_desc: description: @@ -79,7 +78,7 @@ options: description: - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM. - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with same name are found. - - In case of multiple templates/VMs/snapshots with same name, use C(template_uuid) to uniquely specify source template. + - In case of multiple templates/VMs/snapshots with same name, use I(template_uuid) to uniquely specify source template. - If VM already exists, this setting will be ignored. - This parameter is case sensitive. type: str @@ -104,56 +103,138 @@ options: hardware: description: - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters. - - 'Valid parameters are:' - - ' - C(num_cpus) (integer): Number of CPUs.' - - ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. C(num_cpus) has to be a multiple of C(num_cpu_cores_per_socket).' - - ' - C(memory_mb) (integer): Amount of memory in MB.' type: dict + suboptions: + num_cpus: + description: + - Number of CPUs. + type: int + num_cpu_cores_per_socket: + description: + - Number of Cores Per Socket. I(num_cpus) has to be a multiple of I(num_cpu_cores_per_socket). 
+ type: int + memory_mb: + description: + - Amount of memory in MB. + type: int disks: description: - A list of disks to add to VM. - All parameters are case sensitive. - Removing or detaching existing disks of VM is not supported. - - 'Required parameters per entry:' - - ' - C(size_[tb,gb,mb,kb,b]) (integer): Disk storage size in specified unit. VM needs to be shut down to reconfigure this parameter.' - - 'Optional parameters per entry:' - - ' - C(name) (string): Disk name. You can also use C(name_label) as an alias.' - - ' - C(name_desc) (string): Disk description.' - - ' - C(sr) (string): Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.' - - ' - C(sr_uuid) (string): UUID of a SR to create disk on. Use if SR name is not unique.' + - New disks are required to have either a I(size) or one of I(size_[tb,gb,mb,kb,b]) parameters specified. + - VM needs to be shut down to reconfigure disk size. type: list elements: dict aliases: [ disk ] + suboptions: + size: + description: + - 'Disk size with unit. Unit must be: C(b), C(kb), C(mb), C(gb), C(tb). VM needs to be shut down to reconfigure this parameter.' + - If no unit is specified, size is assumed to be in bytes. + type: str + size_b: + description: + - Disk size in bytes. + type: str + size_kb: + description: + - Disk size in kilobytes. + type: str + size_mb: + description: + - Disk size in megabytes. + type: str + size_gb: + description: + - Disk size in gigabytes. + type: str + size_tb: + description: + - Disk size in terabytes. + type: str + name: + description: + - Disk name. + type: str + aliases: [ name_label ] + name_desc: + description: + - Disk description. + type: str + sr: + description: + - Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR. + type: str + sr_uuid: + description: + - UUID of a SR to create disk on. Use if SR name is not unique. 
+ type: str cdrom: description: - A CD-ROM configuration for the VM. - All parameters are case sensitive. - - 'Valid parameters are:' - - ' - C(type) (string): The type of CD-ROM, valid options are C(none) or C(iso). With C(none) the CD-ROM device will be present but empty.' - - ' - C(iso_name) (string): The file name of an ISO image from one of the XenServer ISO Libraries (implies C(type: iso)). - Required if C(type) is set to C(iso).' type: dict + suboptions: + type: + description: + - The type of CD-ROM. With C(none) the CD-ROM device will be present but empty. + type: str + choices: [ none, iso ] + iso_name: + description: + - 'The file name of an ISO image from one of the XenServer ISO Libraries (implies I(type): C(iso)).' + - Required if I(type) is set to C(iso). + type: str networks: description: - A list of networks (in the order of the NICs). - All parameters are case sensitive. - - 'Required parameters per entry:' - - ' - C(name) (string): Name of a XenServer network to attach the network interface to. You can also use C(name_label) as an alias.' - - 'Optional parameters per entry (used for VM hardware):' - - ' - C(mac) (string): Customize MAC address of the interface.' - - 'Optional parameters per entry (used for OS customization):' - - ' - C(type) (string): Type of IPv4 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS. - On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).' - - ' - C(ip) (string): Static IPv4 address (implies C(type: static)). Can include prefix in format / instead of using C(netmask).' - - ' - C(netmask) (string): Static IPv4 netmask required for C(ip) if prefix is not specified.' - - ' - C(gateway) (string): Static IPv4 gateway.' - - ' - C(type6) (string): Type of IPv6 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS. 
- On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).' - - ' - C(ip6) (string): Static IPv6 address (implies C(type6: static)) with prefix in format /.' - - ' - C(gateway6) (string): Static IPv6 gateway.' + - Name is required for new NICs. Other parameters are optional in all cases. type: list elements: dict aliases: [ network ] + suboptions: + name: + description: + - Name of a XenServer network to attach the network interface to. + type: str + aliases: [ name_label ] + mac: + description: + - Customize MAC address of the interface. + type: str + type: + description: + - Type of IPv4 assignment. Value C(none) means whatever is default for OS. + - On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux). + type: str + choices: [ none, dhcp, static ] + ip: + description: + - 'Static IPv4 address (implies I(type): C(static)). Can include prefix in format C(/) instead of using C(netmask).' + type: str + netmask: + description: + - Static IPv4 netmask required for I(ip) if prefix is not specified. + type: str + gateway: + description: + - Static IPv4 gateway. + type: str + type6: + description: + - Type of IPv6 assignment. Value C(none) means whatever is default for OS. + type: str + choices: [ none, dhcp, static ] + ip6: + description: + - 'Static IPv6 address (implies I(type6): C(static)) with prefix in format C(/).' + type: str + gateway6: + description: + - Static IPv6 gateway. + type: str home_server: description: - Name of a XenServer host that will be a Home Server for the VM. @@ -163,18 +244,29 @@ options: description: - Define a list of custom VM params to set on VM. - Useful for advanced users familiar with managing VM params trough xe CLI. - - A custom value object takes two fields C(key) and C(value) (see example below). + - A custom value object takes two fields I(key) and I(value) (see example below). 
type: list elements: dict + suboptions: + key: + description: + - VM param name. + type: str + required: yes + value: + description: + - VM param value. + type: raw + required: yes wait_for_ip_address: description: - - Wait until XenServer detects an IP address for the VM. If C(state) is set to C(absent), this parameter is ignored. + - Wait until XenServer detects an IP address for the VM. If I(state) is set to C(absent), this parameter is ignored. - This requires XenServer Tools to be preinstalled on the VM to work properly. type: bool default: no state_change_timeout: description: - - 'By default, module will wait indefinitely for VM to accquire an IP address if C(wait_for_ip_address: yes).' + - 'By default, module will wait indefinitely for VM to accquire an IP address if I(wait_for_ip_address): C(yes).' - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change. - In case of timeout, module will generate an error message. 
type: int @@ -441,11 +533,12 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.network import is_mac from ansible.module_utils import six -from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref, - gather_vm_params, gather_vm_facts, set_vm_power_state, - wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask, - is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix, - is_valid_ip6_addr, is_valid_ip6_prefix) +from ansible_collections.community.general.plugins.module_utils.xenserver import ( + xenserver_common_argument_spec, XenServerObject, get_object_ref, + gather_vm_params, gather_vm_facts, set_vm_power_state, + wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask, + is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix, + is_valid_ip6_addr, is_valid_ip6_prefix) class XenServerVM(XenServerObject): diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 582f2dc2fb..c098f7dbfe 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -34,11 +34,6 @@ plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-required-mismatch # missing docs on suboptions -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs # missing docs on suboptions -plugins/modules/cloud/xenserver/xenserver_guest.py 
validate-modules:parameter-type-not-in-doc # missing docs on suboptions -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 51fe901ba5..52512a444c 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -33,11 +33,6 @@ plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-required-mismatch -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 51fe901ba5..52512a444c 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -33,11 +33,6 @@ plugins/modules/cloud/univention/udm_dns_zone.py 
validate-modules:parameter-type plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-required-mismatch -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index c10f7be552..748e340741 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -26,10 +26,6 @@ plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undo plugins/modules/cloud/univention/udm_dns_record.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter 
plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type From 085c43b76b5712e8102278a82bf51a0f0d41627f Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Sat, 17 Apr 2021 12:21:03 +0430 Subject: [PATCH 0187/3093] Remove unnecessary required=False s in linode_v4 module (#2251) --- plugins/modules/cloud/linode/linode_v4.py | 25 ++++++++--------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/plugins/modules/cloud/linode/linode_v4.py b/plugins/modules/cloud/linode/linode_v4.py index aec7704c25..34d33871bd 100644 --- a/plugins/modules/cloud/linode/linode_v4.py +++ b/plugins/modules/cloud/linode/linode_v4.py @@ -28,7 +28,6 @@ options: - The region of the instance. This is a required parameter only when creating Linode instances. See U(https://www.linode.com/docs/api/regions/). - required: false type: str image: description: @@ -36,14 +35,12 @@ options: creating Linode instances. See U(https://www.linode.com/docs/api/images/). type: str - required: false type: description: - The type of the instance. This is a required parameter only when creating Linode instances. See U(https://www.linode.com/docs/api/linode-types/). type: str - required: false label: description: - The instance label. This label is used as the main determiner for @@ -56,12 +53,10 @@ options: group labelling is deprecated but still supported. The encouraged method for marking instances is to use tags. type: str - required: false tags: description: - The tags that the instance should be marked under. See U(https://www.linode.com/docs/api/tags/). - required: false type: list elements: str root_pass: @@ -69,12 +64,10 @@ options: - The password for the root user. If not specified, one will be generated. This generated password will be available in the task success JSON. 
- required: false type: str authorized_keys: description: - A list of SSH public key parts to deploy for the root user. - required: false type: list elements: str state: @@ -242,15 +235,15 @@ def initialise_module(): no_log=True, fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']), ), - authorized_keys=dict(type='list', elements='str', required=False, no_log=False), - group=dict(type='str', required=False), - image=dict(type='str', required=False), - region=dict(type='str', required=False), - root_pass=dict(type='str', required=False, no_log=True), - tags=dict(type='list', elements='str', required=False), - type=dict(type='str', required=False), - stackscript_id=dict(type='int', required=False), - stackscript_data=dict(type='dict', required=False), + authorized_keys=dict(type='list', elements='str', no_log=False), + group=dict(type='str'), + image=dict(type='str'), + region=dict(type='str'), + root_pass=dict(type='str', no_log=True), + tags=dict(type='list', elements='str'), + type=dict(type='str'), + stackscript_id=dict(type='int'), + stackscript_data=dict(type='dict'), ), supports_check_mode=False, required_one_of=( From 123b5a9a3cbf992420d4836aeac3b612af4d51d9 Mon Sep 17 00:00:00 2001 From: Jeffrey van Pelt Date: Sat, 17 Apr 2021 10:08:41 +0200 Subject: [PATCH 0188/3093] Proxmox inv fix agent string parsing (#2245) * Added handling for commas in the agent field for agent configuration * Removed test statement * Added changelog fragment * Fixed spelling on fragment :-) --- .../fragments/2245-proxmox_fix_agent_string_handling.yml | 3 +++ plugins/inventory/proxmox.py | 4 +++- tests/unit/plugins/inventory/test_proxmox.py | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml diff --git a/changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml b/changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml new file mode 100644 index 0000000000..3eae94f4ea --- /dev/null +++ 
b/changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - proxmox inventory - added handling of commas in KVM agent configuration string (https://github.com/ansible-collections/community.general/pull/2245). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 036c3dc7bf..44b807f230 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -281,7 +281,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): parsed_value = [tag.strip() for tag in value.split(",")] self.inventory.set_variable(name, parsed_key, parsed_value) - if config == 'agent' and int(value): + # The first field in the agent string tells you whether the agent is enabled + # the rest of the comma separated string is extra config for the agent + if config == 'agent' and int(value.split(',')[0]): agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces")) agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype) if agent_iface_value: diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index ee6c0e2963..a68203b725 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ b/tests/unit/plugins/inventory/test_proxmox.py @@ -205,7 +205,7 @@ def get_json(url): "hotplug": "network,disk,usb", "scsi0": "local-lvm:vm-101-disk-0,size=8G", "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0,firewall=1", - "agent": "1", + "agent": "1,fstrim_cloned_disks=1", "bios": "seabios", "ide0": "local-lvm:vm-101-cloudinit,media=cdrom,size=4M", "boot": "cdn", From f77aa51ab8853442a968d30881d5a9c9afcbde5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?= Date: Sat, 17 Apr 2021 04:33:40 -0400 Subject: [PATCH 0189/3093] nmcli: do not set IP configuration bond slaves (#2223) The master interface holds the IP configuration. If we try to update the IP configuration of the slaves, NetworkManager raises an error. 
--- changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml | 3 +++ plugins/modules/net_tools/nmcli.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml diff --git a/changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml b/changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml new file mode 100644 index 0000000000..4d98b62922 --- /dev/null +++ b/changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: +- "nmcli - do not set IP configuration on slave connection (https://github.com/ansible-collections/community.general/pull/2223)." diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 2967996f3c..d5b329fe03 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -695,7 +695,7 @@ class Nmcli(object): } # IP address options. - if self.ip_conn_type: + if self.ip_conn_type and not self.master: options.update({ 'ipv4.addresses': self.ip4, 'ipv4.dhcp-client-id': self.dhcp_client_id, From 3a8206fe62bee5cce842041204ca25387050fefb Mon Sep 17 00:00:00 2001 From: quidame Date: Sat, 17 Apr 2021 18:17:53 +0200 Subject: [PATCH 0190/3093] java_keystore: add `certificate_path` and `private_key_path` options (#2230) * java_keystore: add `certificate_path` and `private_key_path` options * Update DOCUMENTATION and EXAMPLES accordingly. * Refactor integration tests to play the same tasks twice. * Add a changelog fragment (minor_changes). refactor DOCUMENTATION * Add useful info for better understanding of what options allow keystore regeneration on the fly, and what other options lead the module to fail, if their values change. * Fix indentation and tenses. * Add myself as author. 
* readability-related stuff + changelog fragment --- ..._keystore-1669-ssl-input-files-by-path.yml | 6 + plugins/modules/system/java_keystore.py | 209 +++++++++++------- .../targets/java_keystore/defaults/main.yml | 16 ++ .../targets/java_keystore/tasks/main.yml | 142 ++---------- .../targets/java_keystore/tasks/prepare.yml | 33 +++ .../targets/java_keystore/tasks/tests.yml | 123 +++++++++++ 6 files changed, 328 insertions(+), 201 deletions(-) create mode 100644 changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml create mode 100644 tests/integration/targets/java_keystore/defaults/main.yml create mode 100644 tests/integration/targets/java_keystore/tasks/prepare.yml create mode 100644 tests/integration/targets/java_keystore/tasks/tests.yml diff --git a/changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml b/changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml new file mode 100644 index 0000000000..0622e93c31 --- /dev/null +++ b/changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml @@ -0,0 +1,6 @@ +--- +minor_changes: + - "java_keystore - add options ``certificate_path`` and ``private_key_path``, + mutually exclusive with ``certificate`` and ``private_key`` respectively, and + targetting files on remote hosts rather than their contents on the controller. + (https://github.com/ansible-collections/community.general/issues/1669)." 
diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py index 8143d1d4ef..2a34175552 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/system/java_keystore.py @@ -1,7 +1,8 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2016, Guillaume Grossetie +# Copyright: (c) 2016, Guillaume Grossetie +# Copyright: (c) 2021, quidame # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) @@ -11,68 +12,98 @@ __metaclass__ = type DOCUMENTATION = ''' --- module: java_keystore -short_description: Create or delete a Java keystore in JKS format. +short_description: Create a Java keystore in JKS format description: - - Create or delete a Java keystore in JKS format for a given certificate. + - Bundle a x509 certificate and its private key into a Java Keystore in JKS format. options: - name: - type: str - description: - - Name of the certificate. - required: true - certificate: - type: str - description: - - Certificate that should be used to create the key store. - required: true - private_key: - type: str - description: - - Private key that should be used to create the key store. - required: true - private_key_passphrase: - description: - - Pass phrase for reading the private key, if required. - type: str - required: false - version_added: '0.2.0' - password: - type: str - description: - - Password that should be used to secure the key store. - required: true - dest: - type: path - description: - - Absolute path where the jks should be generated. - required: true - owner: - description: - - Name of the user that should own jks file. - required: false - group: - description: - - Name of the group that should own jks file. - required: false - mode: - description: - - Mode the file should be. - required: false - force: - description: - - Key store will be created even if it already exists. 
- required: false - type: bool - default: 'no' -requirements: [openssl, keytool] -author: Guillaume Grossetie (@Mogztter) + name: + description: + - Name of the certificate in the keystore. + - If the provided name does not exist in the keystore, the module fails. + This behavior will change in a next release. + type: str + required: true + certificate: + description: + - Content of the certificate used to create the keystore. + - If the fingerprint of the provided certificate does not match the + fingerprint of the certificate bundled in the keystore, the keystore + is regenerated with the provided certificate. + - Exactly one of I(certificate) or I(certificate_path) is required. + type: str + certificate_path: + description: + - Location of the certificate used to create the keystore. + - If the fingerprint of the provided certificate does not match the + fingerprint of the certificate bundled in the keystore, the keystore + is regenerated with the provided certificate. + - Exactly one of I(certificate) or I(certificate_path) is required. + type: path + version_added: '3.0.0' + private_key: + description: + - Content of the private key used to create the keystore. + - Exactly one of I(private_key) or I(private_key_path) is required. + type: str + private_key_path: + description: + - Location of the private key used to create the keystore. + - Exactly one of I(private_key) or I(private_key_path) is required. + type: path + version_added: '3.0.0' + private_key_passphrase: + description: + - Passphrase used to read the private key, if required. + type: str + version_added: '0.2.0' + password: + description: + - Password that should be used to secure the keystore. + - If the provided password fails to unlock the keystore, the module + fails. This behavior will change in a next release. + type: str + required: true + dest: + description: + - Absolute path of the generated keystore. 
+ type: path + required: true + force: + description: + - Keystore is created even if it already exists. + type: bool + default: 'no' + owner: + description: + - Name of the user that should own jks file. + required: false + group: + description: + - Name of the group that should own jks file. + required: false + mode: + description: + - Mode the file should be. + required: false +requirements: + - openssl in PATH + - keytool in PATH +author: + - Guillaume Grossetie (@Mogztter) + - quidame (@quidame) extends_documentation_fragment: -- files - + - files +seealso: + - module: community.general.java_cert +notes: + - I(certificate) and I(private_key) require that their contents are available + on the controller (either inline in a playbook, or with the C(file) lookup), + while I(certificate_path) and I(private_key_path) require that the files are + available on the target host. ''' EXAMPLES = ''' -- name: Create a key store for the given certificate (inline) +- name: Create a keystore for the given certificate/private key pair (inline) community.general.java_keystore: name: example certificate: | @@ -88,11 +119,19 @@ EXAMPLES = ''' password: changeit dest: /etc/security/keystore.jks -- name: Create a key store for the given certificate (lookup) +- name: Create a keystore for the given certificate/private key pair (with files on controller) community.general.java_keystore: name: example - certificate: "{{lookup('file', '/path/to/certificate.crt') }}" - private_key: "{{lookup('file', '/path/to/private.key') }}" + certificate: "{{ lookup('file', '/path/to/certificate.crt') }}" + private_key: "{{ lookup('file', '/path/to/private.key') }}" + password: changeit + dest: /etc/security/keystore.jks + +- name: Create a keystore for the given certificate/private key pair (with files on target host) + community.general.java_keystore: + name: snakeoil + certificate_path: /etc/ssl/certs/ssl-cert-snakeoil.pem + private_key_path: /etc/ssl/private/ssl-cert-snakeoil.key password: 
changeit dest: /etc/security/keystore.jks ''' @@ -198,22 +237,32 @@ def create_tmp_private_key(module): def cert_changed(module, openssl_bin, keytool_bin, keystore_path, keystore_pass, alias): - certificate_path = create_tmp_certificate(module) + certificate_path = module.params['certificate_path'] + if certificate_path is None: + certificate_path = create_tmp_certificate(module) try: current_certificate_fingerprint = read_certificate_fingerprint(module, openssl_bin, certificate_path) stored_certificate_fingerprint = read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_pass) return current_certificate_fingerprint != stored_certificate_fingerprint finally: - os.remove(certificate_path) + if module.params['certificate_path'] is None: + os.remove(certificate_path) def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass): if module.check_mode: return module.exit_json(changed=True) - certificate_path = create_tmp_certificate(module) - private_key_path = create_tmp_private_key(module) + certificate_path = module.params['certificate_path'] + if certificate_path is None: + certificate_path = create_tmp_certificate(module) + + private_key_path = module.params['private_key_path'] + if private_key_path is None: + private_key_path = create_tmp_private_key(module) + keystore_p12_path = create_path() + try: if os.path.exists(keystore_path): os.remove(keystore_path) @@ -257,8 +306,10 @@ def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, cmd=import_keystore_cmd, rc=rc) finally: - os.remove(certificate_path) - os.remove(private_key_path) + if module.params['certificate_path'] is None: + os.remove(certificate_path) + if module.params['private_key_path'] is None: + os.remove(private_key_path) os.remove(keystore_p12_path) @@ -301,23 +352,33 @@ class ArgumentSpec(object): self.supports_check_mode = True self.add_file_common_args = True argument_spec = dict( - name=dict(required=True), - 
certificate=dict(required=True, no_log=True), - private_key=dict(required=True, no_log=True), - password=dict(required=True, no_log=True), - dest=dict(required=True, type='path'), - force=dict(required=False, default=False, type='bool'), - private_key_passphrase=dict(required=False, no_log=True, type='str') + name=dict(type='str', required=True), + dest=dict(type='path', required=True), + certificate=dict(type='str', no_log=True), + certificate_path=dict(type='path'), + private_key=dict(type='str', no_log=True), + private_key_path=dict(type='path', no_log=False), + private_key_passphrase=dict(type='str', no_log=True), + password=dict(type='str', required=True, no_log=True), + force=dict(type='bool', default=False), + ) + choose_between = ( + ['certificate', 'certificate_path'], + ['private_key', 'private_key_path'], ) self.argument_spec = argument_spec + self.required_one_of = choose_between + self.mutually_exclusive = choose_between def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, + required_one_of=spec.required_one_of, + mutually_exclusive=spec.mutually_exclusive, + supports_check_mode=spec.supports_check_mode, add_file_common_args=spec.add_file_common_args, - supports_check_mode=spec.supports_check_mode ) module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') process_jks(module) diff --git a/tests/integration/targets/java_keystore/defaults/main.yml b/tests/integration/targets/java_keystore/defaults/main.yml new file mode 100644 index 0000000000..1fce6b4601 --- /dev/null +++ b/tests/integration/targets/java_keystore/defaults/main.yml @@ -0,0 +1,16 @@ +--- +java_keystore_certs: + - name: cert + commonName: example.com + - name: cert-pw + passphrase: hunter2 + commonName: example.com + +java_keystore_new_certs: + - name: cert2 + keyname: cert + commonName: example.org + - name: cert2-pw + keyname: cert-pw + passphrase: hunter2 + commonName: example.org diff --git 
a/tests/integration/targets/java_keystore/tasks/main.yml b/tests/integration/targets/java_keystore/tasks/main.yml index bba7a4facd..358222aea8 100644 --- a/tests/integration/targets/java_keystore/tasks/main.yml +++ b/tests/integration/targets/java_keystore/tasks/main.yml @@ -4,134 +4,22 @@ # and should not be used as examples of how to write Ansible roles # #################################################################### - when: has_java_keytool + connection: local block: - - name: Create private keys - community.crypto.openssl_privatekey: - path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" - size: 2048 # this should work everywhere - # The following is more efficient, but might not work everywhere: - # type: ECC - # curve: secp384r1 - cipher: "{{ 'auto' if item.passphrase is defined else omit }}" - passphrase: "{{ item.passphrase | default(omit) }}" - loop: - - name: cert - - name: cert-pw - passphrase: hunter2 + - name: Include tasks to create ssl materials on the controller + include_tasks: prepare.yml - - name: Create CSRs - community.crypto.openssl_csr: - path: "{{ output_dir ~ '/' ~ item.name ~ '.csr' }}" - privatekey_path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" - privatekey_passphrase: "{{ item.passphrase | default(omit) }}" - commonName: "{{ item.commonName }}" - loop: - - name: cert - commonName: example.com - - name: cert-pw - passphrase: hunter2 - commonName: example.com - - name: cert2 - keyname: cert - commonName: example.org - - name: cert2-pw - keyname: cert-pw - passphrase: hunter2 - commonName: example.org +- when: has_java_keytool + block: + - name: Include tasks to play with 'certificate' and 'private_key' contents + include_tasks: tests.yml + vars: + remote_cert: false - - name: Create certificates - community.crypto.x509_certificate: - path: "{{ output_dir ~ '/' ~ item.name ~ '.pem' }}" - csr_path: "{{ output_dir ~ '/' ~ item.name ~ '.csr' }}" - privatekey_path: "{{ output_dir ~ 
'/' ~ (item.keyname | default(item.name)) ~ '.key' }}" - privatekey_passphrase: "{{ item.passphrase | default(omit) }}" - provider: selfsigned - loop: - - name: cert - commonName: example.com - - name: cert-pw - passphrase: hunter2 - commonName: example.com - - name: cert2 - keyname: cert - commonName: example.org - - name: cert2-pw - keyname: cert-pw - passphrase: hunter2 - commonName: example.org + - name: Include tasks to create ssl materials on the remote host + include_tasks: prepare.yml - - name: Create a Java key store for the given certificates (check mode) - community.general.java_keystore: &create_key_store_data - name: example - certificate: "{{ lookup('file', output_dir ~ '/' ~ item.name ~ '.pem') }}" - private_key: "{{ lookup('file', output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key') }}" - private_key_passphrase: "{{ item.passphrase | default(omit) }}" - password: changeit - dest: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.jks' }}" - loop: &create_key_store_loop - - name: cert - - name: cert-pw - passphrase: hunter2 - check_mode: yes - register: result_check - - - name: Create a Java key store for the given certificates - community.general.java_keystore: *create_key_store_data - loop: *create_key_store_loop - register: result - - - name: Create a Java key store for the given certificates (idempotency, check mode) - community.general.java_keystore: *create_key_store_data - loop: *create_key_store_loop - check_mode: yes - register: result_idem_check - - - name: Create a Java key store for the given certificates (idempotency) - community.general.java_keystore: *create_key_store_data - loop: *create_key_store_loop - register: result_idem - - - name: Create a Java key store for the given certificates (certificate changed, check mode) - community.general.java_keystore: *create_key_store_data - loop: &create_key_store_loop_new_certs - - name: cert2 - keyname: cert - - name: cert2-pw - keyname: cert-pw - passphrase: hunter2 - 
check_mode: yes - register: result_change_check - - - name: Create a Java key store for the given certificates (certificate changed) - community.general.java_keystore: *create_key_store_data - loop: *create_key_store_loop_new_certs - register: result_change - - - name: Create a Java key store for the given certificates (password changed, check mode) - community.general.java_keystore: - <<: *create_key_store_data - password: hunter2 - loop: *create_key_store_loop_new_certs - check_mode: yes - register: result_pw_change_check - when: false # FIXME: module currently crashes - - - name: Create a Java key store for the given certificates (password changed) - community.general.java_keystore: - <<: *create_key_store_data - password: hunter2 - loop: *create_key_store_loop_new_certs - register: result_pw_change - when: false # FIXME: module currently crashes - - - name: Validate results - assert: - that: - - result is changed - - result_check is changed - - result_idem is not changed - - result_idem_check is not changed - - result_change is changed - - result_change_check is changed - # - result_pw_change is changed # FIXME: module currently crashes - # - result_pw_change_check is changed # FIXME: module currently crashes + - name: Include tasks to play with 'certificate_path' and 'private_key_path' locations + include_tasks: tests.yml + vars: + remote_cert: true diff --git a/tests/integration/targets/java_keystore/tasks/prepare.yml b/tests/integration/targets/java_keystore/tasks/prepare.yml new file mode 100644 index 0000000000..f8811c03ed --- /dev/null +++ b/tests/integration/targets/java_keystore/tasks/prepare.yml @@ -0,0 +1,33 @@ +--- +- name: Create test directory + ansible.builtin.file: + path: "{{ output_dir }}" + state: directory + +- name: Create private keys + community.crypto.openssl_privatekey: + path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" + size: 2048 # this should work everywhere + # The following is more efficient, but might 
not work everywhere: + # type: ECC + # curve: secp384r1 + cipher: "{{ 'auto' if item.passphrase is defined else omit }}" + passphrase: "{{ item.passphrase | default(omit) }}" + loop: "{{ java_keystore_certs }}" + +- name: Create CSRs + community.crypto.openssl_csr: + path: "{{ output_dir ~ '/' ~ item.name ~ '.csr' }}" + privatekey_path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" + privatekey_passphrase: "{{ item.passphrase | default(omit) }}" + commonName: "{{ item.commonName }}" + loop: "{{ java_keystore_certs + java_keystore_new_certs }}" + +- name: Create certificates + community.crypto.x509_certificate: + path: "{{ output_dir ~ '/' ~ item.name ~ '.pem' }}" + csr_path: "{{ output_dir ~ '/' ~ item.name ~ '.csr' }}" + privatekey_path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" + privatekey_passphrase: "{{ item.passphrase | default(omit) }}" + provider: selfsigned + loop: "{{ java_keystore_certs + java_keystore_new_certs }}" diff --git a/tests/integration/targets/java_keystore/tasks/tests.yml b/tests/integration/targets/java_keystore/tasks/tests.yml new file mode 100644 index 0000000000..4511af033d --- /dev/null +++ b/tests/integration/targets/java_keystore/tasks/tests.yml @@ -0,0 +1,123 @@ +--- +- name: Create test directory + ansible.builtin.file: + path: "{{ output_dir }}" + state: directory + +- name: Ensure the Java keystore does not exist (cleanup between tests) + ansible.builtin.file: + path: "{{ output_dir ~ '/' ~ item.name ~ '.jks' }}" + state: absent + loop: "{{ java_keystore_certs }}" + loop_control: + label: "{{ output_dir ~ '/' ~ item.name ~ '.jks' }}" + + +- name: Create a Java keystore for the given ({{ 'remote' if remote_cert else 'local' }}) certificates (check mode) + community.general.java_keystore: &java_keystore_params + name: example + dest: "{{ output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + certificate: "{{ omit if remote_cert else lookup('file', output_dir ~ '/' ~ 
item.name ~ '.pem') }}" + private_key: "{{ omit if remote_cert else lookup('file', output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key') }}" + certificate_path: "{{ omit if not remote_cert else output_dir ~ '/' ~ item.name ~ '.pem' }}" + private_key_path: "{{ omit if not remote_cert else output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" + private_key_passphrase: "{{ item.passphrase | d(omit) }}" + password: changeit + loop: "{{ java_keystore_certs }}" + check_mode: yes + register: result_check + +- name: Create a Java keystore for the given certificates + community.general.java_keystore: *java_keystore_params + loop: "{{ java_keystore_certs }}" + register: result + + +- name: Create a Java keystore for the given certificates (idempotency, check mode) + community.general.java_keystore: *java_keystore_params + loop: "{{ java_keystore_certs }}" + check_mode: yes + register: result_idem_check + +- name: Create a Java keystore for the given certificates (idempotency) + community.general.java_keystore: *java_keystore_params + loop: "{{ java_keystore_certs }}" + register: result_idem + + +- name: Create a Java keystore for the given certificates (certificate changed, check mode) + community.general.java_keystore: *java_keystore_params + loop: "{{ java_keystore_new_certs }}" + check_mode: yes + register: result_change_check + +- name: Create a Java keystore for the given certificates (certificate changed) + community.general.java_keystore: *java_keystore_params + loop: "{{ java_keystore_new_certs }}" + register: result_change + + +- name: Create a Java keystore for the given certificates (alias changed, check mode) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + loop: "{{ java_keystore_new_certs }}" + check_mode: yes + register: result_alias_change_check + when: false # FIXME: module currently crashes + +- name: Create a Java keystore for the given certificates (alias changed) + community.general.java_keystore: + <<: 
*java_keystore_params + name: foobar + loop: "{{ java_keystore_new_certs }}" + register: result_alias_change + when: false # FIXME: module currently crashes + + +- name: Create a Java keystore for the given certificates (password changed, check mode) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + loop: "{{ java_keystore_new_certs }}" + check_mode: yes + register: result_pw_change_check + when: false # FIXME: module currently crashes + +- name: Create a Java keystore for the given certificates (password changed) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + loop: "{{ java_keystore_new_certs }}" + register: result_pw_change + when: false # FIXME: module currently crashes + +- name: Check that the remote certificates have not been removed + ansible.builtin.file: + path: "{{ output_dir ~ '/' ~ item.name ~ '.pem' }}" + state: file + loop: "{{ java_keystore_certs + java_keystore_new_certs }}" + when: remote_cert + +- name: Check that the remote private keys have not been removed + ansible.builtin.file: + path: "{{ output_dir ~ '/' ~ item.name ~ '.key' }}" + state: file + loop: "{{ java_keystore_certs }}" + when: remote_cert + +- name: Validate results + assert: + that: + - result is changed + - result_check is changed + - result_idem is not changed + - result_idem_check is not changed + - result_change is changed + - result_change_check is changed + # - result_alias_change is changed # FIXME: module currently crashes + # - result_alias_change_check is changed # FIXME: module currently crashes + # - result_pw_change is changed # FIXME: module currently crashes + # - result_pw_change_check is changed # FIXME: module currently crashes From d09bc2525b90ff847a5306a7a653c81fa27a9f0b Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 17 Apr 2021 19:11:08 +0200 Subject: [PATCH 0191/3093] Fix problems with pip2.6 included in CentOS 6. 
(#2256) --- tests/utils/constraints.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/utils/constraints.txt b/tests/utils/constraints.txt index c5db3156ab..de18826818 100644 --- a/tests/utils/constraints.txt +++ b/tests/utils/constraints.txt @@ -1,8 +1,8 @@ coverage >= 4.2, < 5.0.0, != 4.3.2 ; python_version <= '3.7' # features in 4.2+ required, avoid known bug in 4.3.2 on python 2.6, coverage 5.0+ incompatible coverage >= 4.5.4, < 5.0.0 ; python_version > '3.7' # coverage had a bug in < 4.5.4 that would cause unit tests to hang in Python 3.8, coverage 5.0+ incompatible cryptography < 2.2 ; python_version < '2.7' # cryptography 2.2 drops support for python 2.6 -cryptography >= 3.0, < 3.4 ; python_version < '3.6' # cryptography 3.4 drops support for python 2.7 -cryptography >= 3.3, < 3.4 # FIXME: the upper limit is needed for RHEL8.2, CentOS 8, Ubuntu 18.04, and OpenSuSE 15 +cryptography >= 3.0, < 3.4 ; python_version < '3.6' and python_version >= '2.7' # cryptography 3.4 drops support for python 2.7 +cryptography >= 3.3, < 3.4 ; python_version >= '2.7' # FIXME: the upper limit is needed for RHEL8.2, CentOS 8, Ubuntu 18.04, and OpenSuSE 15 deepdiff < 4.0.0 ; python_version < '3' # deepdiff 4.0.0 and later require python 3 jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later From 118d903e7db7f80dc64c80e08e4c45113228f492 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sat, 17 Apr 2021 14:00:03 -0400 Subject: [PATCH 0192/3093] New filter plugins: hashids_encode and hashids_decode (#2244) * New filters hashids_encode and hashids_decode * Adding changelog * Correcting whitespace issue in vars file * Attempt to fix integration test failures * Correcting copyright * Addressing initial review comments * Updating decoded sequence return from tuple to list * Correcting capitilization and spelling --- 
changelogs/fragments/2244-hashids-filters.yml | 6 ++ plugins/filter/hashids.py | 97 +++++++++++++++++++ .../targets/filter_hashids/aliases | 2 + .../targets/filter_hashids/runme.sh | 13 +++ .../targets/filter_hashids/runme.yml | 3 + .../targets/filter_hashids/tasks/main.yml | 58 +++++++++++ .../targets/filter_hashids/vars/main.yml | 4 + 7 files changed, 183 insertions(+) create mode 100644 changelogs/fragments/2244-hashids-filters.yml create mode 100644 plugins/filter/hashids.py create mode 100644 tests/integration/targets/filter_hashids/aliases create mode 100755 tests/integration/targets/filter_hashids/runme.sh create mode 100644 tests/integration/targets/filter_hashids/runme.yml create mode 100644 tests/integration/targets/filter_hashids/tasks/main.yml create mode 100644 tests/integration/targets/filter_hashids/vars/main.yml diff --git a/changelogs/fragments/2244-hashids-filters.yml b/changelogs/fragments/2244-hashids-filters.yml new file mode 100644 index 0000000000..568119e890 --- /dev/null +++ b/changelogs/fragments/2244-hashids-filters.yml @@ -0,0 +1,6 @@ +--- +add plugin.filter: + - name: hashids_encode + description: Encodes YouTube-like hashes from a sequence of integers + - name: hashids_decode + description: Decodes a sequence of numbers from a YouTube-like hash diff --git a/plugins/filter/hashids.py b/plugins/filter/hashids.py new file mode 100644 index 0000000000..c4735afeae --- /dev/null +++ b/plugins/filter/hashids.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.errors import ( + AnsibleError, + AnsibleFilterError, + AnsibleFilterTypeError, +) + +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.collections import is_sequence + +try: + from hashids 
import Hashids + HAS_HASHIDS = True +except ImportError: + HAS_HASHIDS = False + + +def initialize_hashids(**kwargs): + if not HAS_HASHIDS: + raise AnsibleError("The hashids library must be installed in order to use this plugin") + + params = dict((k, v) for k, v in kwargs.items() if v) + + try: + return Hashids(**params) + except TypeError as e: + raise AnsibleFilterError( + "The provided parameters %s are invalid: %s" % ( + ', '.join(["%s=%s" % (k, v) for k, v in params.items()]), + to_native(e) + ) + ) + + +def hashids_encode(nums, salt=None, alphabet=None, min_length=None): + """Generates a YouTube-like hash from a sequence of ints + + :nums: Sequence of one or more ints to hash + :salt: String to use as salt when hashing + :alphabet: String of 16 or more unique characters to produce a hash + :min_length: Minimum length of hash produced + """ + + hashids = initialize_hashids( + salt=salt, + alphabet=alphabet, + min_length=min_length + ) + + # Handles the case where a single int is not encapsulated in a list or tuple. 
+ # User convenience seems preferable to strict typing in this case + # Also avoids obfuscated error messages related to single invalid inputs + if not is_sequence(nums): + nums = [nums] + + try: + hashid = hashids.encode(*nums) + except TypeError as e: + raise AnsibleFilterTypeError( + "Data to encode must by a tuple or list of ints: %s" % to_native(e) + ) + + return hashid + + +def hashids_decode(hashid, salt=None, alphabet=None, min_length=None): + """Decodes a YouTube-like hash to a sequence of ints + + :hashid: Hash string to decode + :salt: String to use as salt when hashing + :alphabet: String of 16 or more unique characters to produce a hash + :min_length: Minimum length of hash produced + """ + + hashids = initialize_hashids( + salt=salt, + alphabet=alphabet, + min_length=min_length + ) + nums = hashids.decode(hashid) + return list(nums) + + +class FilterModule(object): + + def filters(self): + return { + 'hashids_encode': hashids_encode, + 'hashids_decode': hashids_decode, + } diff --git a/tests/integration/targets/filter_hashids/aliases b/tests/integration/targets/filter_hashids/aliases new file mode 100644 index 0000000000..f04737b845 --- /dev/null +++ b/tests/integration/targets/filter_hashids/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/filter_hashids/runme.sh b/tests/integration/targets/filter_hashids/runme.sh new file mode 100755 index 0000000000..313ea4bb83 --- /dev/null +++ b/tests/integration/targets/filter_hashids/runme.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -eux + +export ANSIBLE_TEST_PREFER_VENV=1 # see https://github.com/ansible/ansible/pull/73000#issuecomment-757012395; can be removed once Ansible 2.9 and ansible-base 2.10 support has been dropped +source virtualenv.sh + +# Requirements have to be installed prior to running ansible-playbook +# because plugins and requirements are loaded 
before the task runs + +pip install hashids + +ANSIBLE_ROLES_PATH=../ ansible-playbook runme.yml "$@" diff --git a/tests/integration/targets/filter_hashids/runme.yml b/tests/integration/targets/filter_hashids/runme.yml new file mode 100644 index 0000000000..b2a39e27a6 --- /dev/null +++ b/tests/integration/targets/filter_hashids/runme.yml @@ -0,0 +1,3 @@ +- hosts: localhost + roles: + - { role: filter_hashids } diff --git a/tests/integration/targets/filter_hashids/tasks/main.yml b/tests/integration/targets/filter_hashids/tasks/main.yml new file mode 100644 index 0000000000..95bcc91346 --- /dev/null +++ b/tests/integration/targets/filter_hashids/tasks/main.yml @@ -0,0 +1,58 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Test valid hashable inputs + assert: + that: + - "single_int | community.general.hashids_encode | community.general.hashids_decode == [single_int]" + - "int_list | community.general.hashids_encode | community.general.hashids_decode | list == int_list" + - "(1,2,3) | community.general.hashids_encode | community.general.hashids_decode == [1,2,3]" + +- name: Test valid parameters + assert: + that: + - "single_int | community.general.hashids_encode(salt='test') | community.general.hashids_decode(salt='test') == [single_int]" + - "single_int | community.general.hashids_encode(alphabet='1234567890abcdef') | community.general.hashids_decode(alphabet='1234567890abcdef') == [single_int]" + - "single_int | community.general.hashids_encode(min_length=20) | community.general.hashids_decode(min_length=20) == [single_int]" + - "single_int | community.general.hashids_encode(min_length=20) | length == 20" + +- name: Test valid unhashable inputs + assert: + that: + - "single_float | community.general.hashids_encode | 
community.general.hashids_decode == []" + - "arbitrary_string | community.general.hashids_encode | community.general.hashids_decode == []" + +- name: Register result of invalid salt + debug: + var: "invalid_input | community.general.hashids_encode(salt=10)" + register: invalid_salt_message + ignore_errors: true + +- name: Test invalid salt fails + assert: + that: + - invalid_salt_message is failed + +- name: Register result of invalid alphabet + debug: + var: "invalid_input | community.general.hashids_encode(alphabet='abc')" + register: invalid_alphabet_message + ignore_errors: true + +- name: Test invalid alphabet fails + assert: + that: + - invalid_alphabet_message is failed + +- name: Register result of invalid min_length + debug: + var: "invalid_input | community.general.hashids_encode(min_length='foo')" + register: invalid_min_length_message + ignore_errors: true + +- name: Test invalid min_length fails + assert: + that: + - invalid_min_length_message is failed diff --git a/tests/integration/targets/filter_hashids/vars/main.yml b/tests/integration/targets/filter_hashids/vars/main.yml new file mode 100644 index 0000000000..3f2b0c5f98 --- /dev/null +++ b/tests/integration/targets/filter_hashids/vars/main.yml @@ -0,0 +1,4 @@ +single_int: 1 +int_list: [1, 2, 3] +single_float: [2.718] +arbitrary_string: "will not hash" From 1400051890cbf5674c2aa4939f9c716a0f3e4bd8 Mon Sep 17 00:00:00 2001 From: rainerleber <39616583+rainerleber@users.noreply.github.com> Date: Sat, 17 Apr 2021 22:32:54 +0200 Subject: [PATCH 0193/3093] Fix for Terraform 0.15 (#2246) * Fix for Terraform 0.15 removed the append of variables in terraform validate because this is deprecated in Terraform 0.15. See: https://github.com/hashicorp/terraform/blob/v0.15/CHANGELOG.md >> The -var and -var-file options are no longer available on terraform validate. 
These were deprecated and have had no effect since Terraform v0.12 * Create terraform-validate.yaml * Update and rename terraform-validate.yaml to 2246-terraform-validate.yaml * Update changelogs/fragments/2246-terraform-validate.yaml Co-authored-by: Amin Vakil * Update terraform.py remove `-force` add `-auto-approve` on destroy as described in issue #2247 * Update and rename 2246-terraform-validate.yaml to 2246-terraform.yaml * Update 2246-terraform.yaml * add a function which check the used tf version * add a function which check the used tf version * Update changelogs/fragments/2246-terraform.yaml Co-authored-by: Amin Vakil * Update changelogs/fragments/2246-terraform.yaml Co-authored-by: Amin Vakil * add version return to function * changed it to pass sanity check * change variable name * changed to a more specialized data types * remove use_unsafe_shell=True * Update changelogs/fragments/2246-terraform.yaml Co-authored-by: Felix Fontein * add description * Update changelogs/fragments/2246-terraform.yaml Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil Co-authored-by: Rainer Leber Co-authored-by: Felix Fontein --- changelogs/fragments/2246-terraform.yaml | 4 +++ plugins/modules/cloud/misc/terraform.py | 32 ++++++++++++++++++------ 2 files changed, 28 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2246-terraform.yaml diff --git a/changelogs/fragments/2246-terraform.yaml b/changelogs/fragments/2246-terraform.yaml new file mode 100644 index 0000000000..d2dd93e22e --- /dev/null +++ b/changelogs/fragments/2246-terraform.yaml @@ -0,0 +1,4 @@ +bugfixes: + - terraform - fix issue that cause the execution fail because from Terraform 0.15 on, the ``-var`` and ``-var-file`` options are no longer available on ``terraform validate`` (https://github.com/ansible-collections/community.general/pull/2246). 
+ - terraform - fix issue that cause the destroy to fail because from Terraform 0.15 on, the ``terraform destroy -force`` option is replaced with ``terraform destroy -auto-approve`` (https://github.com/ansible-collections/community.general/issues/2247). + - terraform - remove uses of ``use_unsafe_shell=True`` (https://github.com/ansible-collections/community.general/pull/2246). diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py index 680bab9aed..f395c8e278 100644 --- a/plugins/modules/cloud/misc/terraform.py +++ b/plugins/modules/cloud/misc/terraform.py @@ -8,7 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: terraform short_description: Manages a Terraform deployment (and plans) @@ -177,24 +177,31 @@ command: import os import json import tempfile +from distutils.version import LooseVersion from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils.basic import AnsibleModule -DESTROY_ARGS = ('destroy', '-no-color', '-force') -APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true') module = None -def preflight_validation(bin_path, project_path, variables_args=None, plan_file=None): +def get_version(bin_path): + extract_version = module.run_command([bin_path, 'version', '-json']) + terraform_version = (json.loads(extract_version[1]))['terraform_version'] + return terraform_version + + +def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None): if project_path in [None, ''] or '/' not in project_path: module.fail_json(msg="Path for Terraform project can not be None or ''.") if not os.path.exists(bin_path): module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path)) if not os.path.isdir(project_path): module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on 
this host - check the path and try again please.".format(project_path)) - - rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path, use_unsafe_shell=True) + if LooseVersion(version) < LooseVersion('0.15.0'): + rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path) + else: + rc, out, err = module.run_command([bin_path, 'validate'], check_rc=True, cwd=project_path) def _state_args(state_file): @@ -267,7 +274,7 @@ def build_plan(command, project_path, variables_args, state_file, targets, state plan_command.extend(_state_args(state_file)) - rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path, use_unsafe_shell=True) + rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path) if rc == 0: # no changes @@ -326,6 +333,15 @@ def main(): else: command = [module.get_bin_path('terraform', required=True)] + checked_version = get_version(command[0]) + + if LooseVersion(checked_version) < LooseVersion('0.15.0'): + DESTROY_ARGS = ('destroy', '-no-color', '-force') + APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true') + else: + DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve') + APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve') + if force_init: init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure) @@ -351,7 +367,7 @@ def main(): for f in variables_files: variables_args.extend(['-var-file', f]) - preflight_validation(command[0], project_path, variables_args) + preflight_validation(command[0], project_path, checked_version, variables_args) if module.params.get('lock') is not None: if module.params.get('lock'): From f9e3e229ddfc9ffca9871b292c3be9e825244446 Mon Sep 17 00:00:00 2001 From: Fabio Sangiovanni <4040184+sanjioh@users.noreply.github.com> Date: Sun, 18 Apr 2021 09:45:26 +0200 Subject: [PATCH 0194/3093] linode_v4: add support for 
`private_ip` option. (#2249) * linode_v4: add support for `private_ip` option. * linode_v4: remove `required` attribute from `private_ip` parameter. * linode_v4: add changelog fragment. * linode_v4: add PR link to changelog fragment. Co-authored-by: Amin Vakil * linode_v4: add the `version_added` attribute to the `private_ip` section of module documentation Co-authored-by: Felix Fontein * linode_v4: improve styling of `private_ip` docs Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein --- ...9-linode_v4-support-private_ip-option.yaml | 2 + plugins/modules/cloud/linode/linode_v4.py | 9 ++++ .../modules/cloud/linode/test_linode_v4.py | 49 +++++++++++++++++++ 3 files changed, 60 insertions(+) create mode 100644 changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml diff --git a/changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml b/changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml new file mode 100644 index 0000000000..e5d6ca02d7 --- /dev/null +++ b/changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml @@ -0,0 +1,2 @@ +minor_changes: + - linode_v4 - add support for ``private_ip`` option (https://github.com/ansible-collections/community.general/pull/2249). diff --git a/plugins/modules/cloud/linode/linode_v4.py b/plugins/modules/cloud/linode/linode_v4.py index 34d33871bd..0f1133bac0 100644 --- a/plugins/modules/cloud/linode/linode_v4.py +++ b/plugins/modules/cloud/linode/linode_v4.py @@ -53,6 +53,13 @@ options: group labelling is deprecated but still supported. The encouraged method for marking instances is to use tags. type: str + private_ip: + description: + - If C(true), the created Linode will have private networking enabled and + assigned a private IPv4 address. + type: bool + default: false + version_added: 3.0.0 tags: description: - The tags that the instance should be marked under. 
See @@ -238,6 +245,7 @@ def initialise_module(): authorized_keys=dict(type='list', elements='str', no_log=False), group=dict(type='str'), image=dict(type='str'), + private_ip=dict(type='bool', default=False), region=dict(type='str'), root_pass=dict(type='str', no_log=True), tags=dict(type='list', elements='str'), @@ -283,6 +291,7 @@ def main(): group=module.params['group'], image=module.params['image'], label=module.params['label'], + private_ip=module.params['private_ip'], region=module.params['region'], root_pass=module.params['root_pass'], tags=module.params['tags'], diff --git a/tests/unit/plugins/modules/cloud/linode/test_linode_v4.py b/tests/unit/plugins/modules/cloud/linode/test_linode_v4.py index fece341431..c966f79d5b 100644 --- a/tests/unit/plugins/modules/cloud/linode/test_linode_v4.py +++ b/tests/unit/plugins/modules/cloud/linode/test_linode_v4.py @@ -175,6 +175,55 @@ def test_optional_image_is_validated(default_args, capfd, access_token): )) +@pytest.mark.parametrize('value', [True, False]) +def test_private_ip_valid_values(default_args, access_token, value): + default_args.update({'private_ip': value}) + set_module_args(default_args) + + module = linode_v4.initialise_module() + + assert module.params['private_ip'] is value + + +@pytest.mark.parametrize('value', ['not-a-bool', 42]) +def test_private_ip_invalid_values(default_args, capfd, access_token, value): + default_args.update({'private_ip': value}) + set_module_args(default_args) + + with pytest.raises(SystemExit): + linode_v4.initialise_module() + + out, err = capfd.readouterr() + results = json.loads(out) + + assert results['failed'] is True + assert 'not a valid boolean' in results['msg'] + + +def test_private_ip_default_value(default_args, access_token): + default_args.pop('private_ip', None) + set_module_args(default_args) + + module = linode_v4.initialise_module() + + assert module.params['private_ip'] is False + + +def test_private_ip_is_forwarded_to_linode(default_args, mock_linode, 
access_token): + default_args.update({'private_ip': True}) + set_module_args(default_args) + + target = 'linode_api4.linode_client.LinodeGroup.instances' + with mock.patch(target, return_value=[]): + with pytest.raises(SystemExit): + target = 'linode_api4.linode_client.LinodeGroup.instance_create' + with mock.patch(target, return_value=(mock_linode, 'passw0rd')) as instance_create_mock: + linode_v4.main() + + args, kwargs = instance_create_mock.call_args + assert kwargs['private_ip'] is True + + def test_instance_already_created(default_args, mock_linode, capfd, From 721589827eb8b361a2b139aa37bfbce8aaf36165 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 18 Apr 2021 10:09:46 +0200 Subject: [PATCH 0195/3093] Remove ldap_entry's params option completely (#2257) * Remove ldap_entry's params option completely. * Regular sanity error fixing. --- changelogs/fragments/2257-ldap_entry-params.yml | 2 ++ plugins/modules/net_tools/ldap/ldap_entry.py | 8 +------- tests/sanity/ignore-2.10.txt | 3 --- tests/sanity/ignore-2.11.txt | 3 --- tests/sanity/ignore-2.12.txt | 3 --- tests/sanity/ignore-2.9.txt | 3 --- 6 files changed, 3 insertions(+), 19 deletions(-) create mode 100644 changelogs/fragments/2257-ldap_entry-params.yml diff --git a/changelogs/fragments/2257-ldap_entry-params.yml b/changelogs/fragments/2257-ldap_entry-params.yml new file mode 100644 index 0000000000..f5c92d0b9c --- /dev/null +++ b/changelogs/fragments/2257-ldap_entry-params.yml @@ -0,0 +1,2 @@ +removed_features: +- "ldap_entry - the ``params`` parameter is now completely removed. Using it already triggered an error since community.general 0.1.2 (https://github.com/ansible-collections/community.general/pull/2257)." 
diff --git a/plugins/modules/net_tools/ldap/ldap_entry.py b/plugins/modules/net_tools/ldap/ldap_entry.py index 093c49ad50..ac1d63ac0e 100644 --- a/plugins/modules/net_tools/ldap/ldap_entry.py +++ b/plugins/modules/net_tools/ldap/ldap_entry.py @@ -25,9 +25,6 @@ notes: rule allowing root to modify the server configuration. If you need to use a simple bind to access your server, pass the credentials in I(bind_dn) and I(bind_pw). - - "The I(params) parameter was removed due to circumventing Ansible's parameter - handling. The I(params) parameter started disallowing setting the I(bind_pw) parameter in - Ansible-2.7 as it was insecure to set the parameter that way." author: - Jiri Tyr (@jtyr) requirements: @@ -51,6 +48,7 @@ options: - The target state of the entry. choices: [present, absent] default: present + type: str extends_documentation_fragment: - community.general.ldap.documentation @@ -187,7 +185,6 @@ def main(): argument_spec=gen_specs( attributes=dict(default={}, type='dict'), objectClass=dict(type='list', elements='str'), - params=dict(type='dict'), state=dict(default='present', choices=['present', 'absent']), ), required_if=[('state', 'present', ['objectClass'])], @@ -198,9 +195,6 @@ def main(): module.fail_json(msg=missing_required_lib('python-ldap'), exception=LDAP_IMP_ERR) - if module.params['params']: - module.fail_json(msg="The `params` option to ldap_entry was removed since it circumvents Ansible's option handling") - state = module.params['state'] # Instantiate the LdapEntry object diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index c098f7dbfe..2ef37bd2c4 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -37,9 +37,6 @@ plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no- plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter 
plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/notification/grove.py validate-modules:invalid-argument-name plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 52512a444c..60387b1333 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -36,9 +36,6 @@ plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no- plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/notification/grove.py validate-modules:invalid-argument-name plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 52512a444c..60387b1333 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -36,9 +36,6 @@ plugins/modules/cloud/univention/udm_user.py 
validate-modules:parameter-list-no- plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/notification/grove.py validate-modules:invalid-argument-name plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 748e340741..a8ab8c457c 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -28,9 +28,6 @@ plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc -plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid From ec9c23437c6125ae0900fbe47b77676a1043b73b Mon Sep 17 00:00:00 
2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 18 Apr 2021 20:55:47 +1200 Subject: [PATCH 0196/3093] cpanm - revamp module (#2218) * copying from the previous branch * passing sanity - docs incomplete * adjusted parameter name * adjusted unit tests for mode=new * adjusted integration tests for mode=new * added 'russoz' to list of maintainers for cpanm * Update tests/integration/targets/cpanm/tasks/main.yml * Update tests/integration/targets/cpanm/tasks/main.yml * ensuring backward compatibility + tests * added changelog fragment * version for new parameter and adjusted example * typo and formatting * Update plugins/modules/packaging/language/cpanm.py Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/cpanm.py Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/cpanm.py Co-authored-by: Felix Fontein * multiple changes - some fixes from PR - supporting tests - integration is no longer unsupported => destructive, should run on apt- and rpm-based systems only * only run integration tests in redhat-family > v7 or debian-family Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 +- changelogs/fragments/2218-cpanm-revamp.yml | 5 + plugins/modules/packaging/language/cpanm.py | 242 +++++++++------ tests/integration/targets/cpanm/aliases | 6 + tests/integration/targets/cpanm/meta/main.yml | 2 + .../integration/targets/cpanm/tasks/main.yml | 64 ++++ .../modules/packaging/language/test_cpanm.py | 288 ++++++++++++++++++ 7 files changed, 508 insertions(+), 101 deletions(-) create mode 100644 changelogs/fragments/2218-cpanm-revamp.yml create mode 100644 tests/integration/targets/cpanm/aliases create mode 100644 tests/integration/targets/cpanm/meta/main.yml create mode 100644 tests/integration/targets/cpanm/tasks/main.yml create mode 100644 tests/unit/plugins/modules/packaging/language/test_cpanm.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 144eca81a7..850f2278ca 100644 --- 
a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -545,7 +545,7 @@ files: $modules/packaging/language/composer.py: maintainers: dmtrs resmo $modules/packaging/language/cpanm.py: - maintainers: fcuny + maintainers: fcuny russoz $modules/packaging/language/easy_install.py: maintainers: mattupstate $modules/packaging/language/gem.py: diff --git a/changelogs/fragments/2218-cpanm-revamp.yml b/changelogs/fragments/2218-cpanm-revamp.yml new file mode 100644 index 0000000000..668a84f06b --- /dev/null +++ b/changelogs/fragments/2218-cpanm-revamp.yml @@ -0,0 +1,5 @@ +minor_changes: + - cpanm - rewritten using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/2218). + - cpanm - honor and install specified version when running in ``new`` mode; that feature is not available in ``compatibility`` mode (https://github.com/ansible-collections/community.general/issues/208). +deprecated_features: + - cpanm - parameter ``system_lib`` deprecated in favor of using ``become`` (https://github.com/ansible-collections/community.general/pull/2218). diff --git a/plugins/modules/packaging/language/cpanm.py b/plugins/modules/packaging/language/cpanm.py index 3b43b44349..b8ab7e1a2f 100644 --- a/plugins/modules/packaging/language/cpanm.py +++ b/plugins/modules/packaging/language/cpanm.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # (c) 2012, Franck Cuny +# (c) 2021, Alexei Znamensky # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function @@ -13,58 +14,91 @@ DOCUMENTATION = ''' module: cpanm short_description: Manages Perl library dependencies. description: - - Manage Perl library dependencies. + - Manage Perl library dependencies using cpanminus. options: name: type: str description: - - The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz - aliases: ["pkg"] + - The Perl library to install. 
Valid values change according to the I(mode), see notes for more details. + - Note that for installing from a local path the parameter I(from_path) should be used. + aliases: [pkg] from_path: type: path description: - - The local directory from where to install + - The local directory or C(tar.gz) file to install from. notest: description: - - Do not run unit tests + - Do not run unit tests. type: bool default: no locallib: description: - - Specify the install base to install modules + - Specify the install base to install modules. type: path mirror: description: - - Specifies the base URL for the CPAN mirror to use + - Specifies the base URL for the CPAN mirror to use. type: str mirror_only: description: - - Use the mirror's index file instead of the CPAN Meta DB + - Use the mirror's index file instead of the CPAN Meta DB. type: bool default: no installdeps: description: - - Only install dependencies + - Only install dependencies. type: bool default: no version: description: - - minimum version of perl module to consider acceptable + - Version specification for the perl module. When I(mode) is C(new), C(cpanm) version operators are accepted. type: str system_lib: description: - - Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work. - - This uses the cpanm commandline option '--sudo', which has nothing to do with ansible privilege escalation. + - Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work. + - This uses the cpanm commandline option C(--sudo), which has nothing to do with ansible privilege escalation. + - > + This option is not recommended for use and it will be deprecated in the future. If you need to escalate privileges + please consider using any of the multiple mechanisms available in Ansible. 
type: bool default: no aliases: ['use_sudo'] executable: description: - - Override the path to the cpanm executable + - Override the path to the cpanm executable. type: path + mode: + description: + - Controls the module behavior. See notes below for more details. + type: str + choices: [compatibility, new] + default: compatibility + version_added: 3.0.0 + name_check: + description: + - When in C(new) mode, this parameter can be used to check if there is a module I(name) installed (at I(version), when specified). + type: str + version_added: 3.0.0 notes: - - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. -author: "Franck Cuny (@fcuny)" + - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. + - "This module now comes with a choice of execution I(mode): C(compatibility) or C(new)." + - "C(compatibility) mode:" + - When using C(compatibility) mode, the module will keep backward compatibility. This is the default mode. + - I(name) must be either a module name or a distribution file. + - > + If the perl module given by I(name) is installed (at the exact I(version) when specified), then nothing happens. + Otherwise, it will be installed using the C(cpanm) executable. + - I(name) cannot be an URL, or a git URL. + - C(cpanm) version specifiers do not work in this mode. + - "C(new) mode:" + - "When using C(new) mode, the module will behave differently" + - > + The I(name) parameter may refer to a module name, a distribution file, + a HTTP URL or a git repository URL as described in C(cpanminus) documentation. + - C(cpanm) version specifiers are recognized. 
+author: + - "Franck Cuny (@fcuny)" + - "Alexei Znamensky (@russoz)" ''' EXAMPLES = ''' @@ -97,9 +131,9 @@ EXAMPLES = ''' mirror: 'http://cpan.cpantesters.org/' - name: Install Dancer perl package into the system root path + become: yes community.general.cpanm: name: Dancer - system_lib: yes - name: Install Dancer if it is not already installed OR the installed version is older than version 1.0 community.general.cpanm: @@ -109,105 +143,113 @@ EXAMPLES = ''' import os -from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.module_helper import ( + ModuleHelper, CmdMixin, ArgFormat, ModuleHelperException +) -def _is_package_installed(module, name, locallib, cpanm, version): - cmd = "" - if locallib: - os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib - cmd = "%s perl -e ' use %s" % (cmd, name) - if version: - cmd = "%s %s;'" % (cmd, version) - else: - cmd = "%s;'" % cmd - res, stdout, stderr = module.run_command(cmd, check_rc=False) - return res == 0 +class CPANMinus(CmdMixin, ModuleHelper): + output_params = ['name', 'version'] + module = dict( + argument_spec=dict( + name=dict(type='str', aliases=['pkg']), + version=dict(type='str'), + from_path=dict(type='path'), + notest=dict(type='bool', default=False), + locallib=dict(type='path'), + mirror=dict(type='str'), + mirror_only=dict(type='bool', default=False), + installdeps=dict(type='bool', default=False), + system_lib=dict(type='bool', default=False, aliases=['use_sudo'], + removed_in_version="4.0.0", removed_from_collection="community.general"), + executable=dict(type='path'), + mode=dict(type='str', choices=['compatibility', 'new'], default='compatibility'), + name_check=dict(type='str') + ), + required_one_of=[('name', 'from_path')], + ) + command = 'cpanm' + command_args_formats = dict( + notest=dict(fmt="--notest", style=ArgFormat.BOOLEAN), + locallib=dict(fmt=('--local-lib', '{0}'),), + mirror=dict(fmt=('--mirror', '{0}'),), + 
mirror_only=dict(fmt="--mirror-only", style=ArgFormat.BOOLEAN), + installdeps=dict(fmt="--installdeps", style=ArgFormat.BOOLEAN), + system_lib=dict(fmt="--sudo", style=ArgFormat.BOOLEAN), + ) + check_rc = True -def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo): - # this code should use "%s" like everything else and just return early but not fixing all of it now. - # don't copy stuff like this - if from_path: - cmd = cpanm + " " + from_path - else: - cmd = cpanm + " " + name + def __init_module__(self): + v = self.vars + if v.mode == "compatibility": + if v.name_check: + raise ModuleHelperException("Parameter name_check can only be used with mode=new") + else: + if v.name and v.from_path: + raise ModuleHelperException("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'") + if v.system_lib: + raise ModuleHelperException("Parameter 'system_lib' is invalid when 'mode=new'") - if notest is True: - cmd = cmd + " -n" + self.command = self.module.get_bin_path(v.executable if v.executable else self.command) + self.vars.set("binary", self.command) - if locallib is not None: - cmd = cmd + " -l " + locallib + def _is_package_installed(self, name, locallib, version): + if name is None or name.endswith('.tar.gz'): + return False + version = "" if version is None else " " + version - if mirror is not None: - cmd = cmd + " --mirror " + mirror + env = {"PERL5LIB": "%s/lib/perl5" % locallib} if locallib else {} + cmd = ['perl', '-le', 'use %s%s;' % (name, version)] + rc, out, err = self.module.run_command(cmd, check_rc=False, environ_update=env) - if mirror_only is True: - cmd = cmd + " --mirror-only" + return rc == 0 - if installdeps is True: - cmd = cmd + " --installdeps" + @staticmethod + def sanitize_pkg_spec_version(pkg_spec, version): + if version is None: + return pkg_spec + if pkg_spec.endswith('.tar.gz'): + raise ModuleHelperException(msg="parameter 'version' must not be used when installing from 
a file") + if os.path.isdir(pkg_spec): + raise ModuleHelperException(msg="parameter 'version' must not be used when installing from a directory") + if pkg_spec.endswith('.git'): + if version.startswith('~'): + raise ModuleHelperException(msg="operator '~' not allowed in version parameter when installing from git repository") + version = version if version.startswith('@') else '@' + version + elif version[0] not in ('@', '~'): + version = '~' + version + return pkg_spec + version - if use_sudo is True: - cmd = cmd + " --sudo" + def __run__(self): + v = self.vars + pkg_param = 'from_path' if v.from_path else 'name' - return cmd + if v.mode == 'compatibility': + if self._is_package_installed(v.name, v.locallib, v.version): + return + pkg_spec = v[pkg_param] + self.changed = self.run_command( + params=['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'system_lib', {'name': pkg_spec}], + ) + else: + installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False + if installed: + return + pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version) + self.changed = self.run_command( + params=['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', {'name': pkg_spec}], + ) - -def _get_cpanm_path(module): - if module.params['executable']: - result = module.params['executable'] - else: - result = module.get_bin_path('cpanm', True) - return result + def process_command_output(self, rc, out, err): + if self.vars.mode == "compatibility" and rc != 0: + raise ModuleHelperException(msg=err, cmd=self.vars.cmd_args) + return 'is up to date' not in err and 'is up to date' not in out def main(): - arg_spec = dict( - name=dict(default=None, required=False, aliases=['pkg']), - from_path=dict(default=None, required=False, type='path'), - notest=dict(default=False, type='bool'), - locallib=dict(default=None, required=False, type='path'), - mirror=dict(default=None, required=False), - mirror_only=dict(default=False, 
type='bool'), - installdeps=dict(default=False, type='bool'), - system_lib=dict(default=False, type='bool', aliases=['use_sudo']), - version=dict(default=None, required=False), - executable=dict(required=False, type='path'), - ) - - module = AnsibleModule( - argument_spec=arg_spec, - required_one_of=[['name', 'from_path']], - ) - - cpanm = _get_cpanm_path(module) - name = module.params['name'] - from_path = module.params['from_path'] - notest = module.boolean(module.params.get('notest', False)) - locallib = module.params['locallib'] - mirror = module.params['mirror'] - mirror_only = module.params['mirror_only'] - installdeps = module.params['installdeps'] - use_sudo = module.params['system_lib'] - version = module.params['version'] - - changed = False - - installed = _is_package_installed(module, name, locallib, cpanm, version) - - if not installed: - cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo) - - rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False) - - if rc_cpanm != 0: - module.fail_json(msg=err_cpanm, cmd=cmd) - - if (err_cpanm.find('is up to date') == -1 and out_cpanm.find('is up to date') == -1): - changed = True - - module.exit_json(changed=changed, binary=cpanm, name=name) + cpanm = CPANMinus() + cpanm.run() if __name__ == '__main__': diff --git a/tests/integration/targets/cpanm/aliases b/tests/integration/targets/cpanm/aliases new file mode 100644 index 0000000000..d014dd3438 --- /dev/null +++ b/tests/integration/targets/cpanm/aliases @@ -0,0 +1,6 @@ +shippable/posix/group3 +destructive +skip/macos +skip/osx +skip/freebsd +skip/aix diff --git a/tests/integration/targets/cpanm/meta/main.yml b/tests/integration/targets/cpanm/meta/main.yml new file mode 100644 index 0000000000..5438ced5c3 --- /dev/null +++ b/tests/integration/targets/cpanm/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_pkg_mgr diff --git a/tests/integration/targets/cpanm/tasks/main.yml 
b/tests/integration/targets/cpanm/tasks/main.yml new file mode 100644 index 0000000000..66f4396685 --- /dev/null +++ b/tests/integration/targets/cpanm/tasks/main.yml @@ -0,0 +1,64 @@ +# (c) 2020, Berkhan Berkdemir +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: bail out for non-supported platforms + meta: end_play + when: + - (ansible_os_family != "RedHat" or ansible_distribution_major_version|int < 7) + - ansible_os_family != "Debian" + +- name: install perl development package for Red Hat family + package: + name: + - perl-devel + - perl-App-cpanminus + state: present + become: yes + when: ansible_os_family == "RedHat" + +- name: install perl development package for Debian family + package: + name: + - cpanminus + state: present + become: yes + when: ansible_os_family == "Debian" + +- name: install a Perl package + cpanm: + name: JSON + notest: yes + register: install_perl_package_result + +- name: assert package is installed + assert: + that: + - install_perl_package_result is changed + - install_perl_package_result is not failed + +- name: install same Perl package + cpanm: + name: JSON + notest: yes + register: install_same_perl_package_result + +- name: assert same package is installed + assert: + that: + - install_same_perl_package_result is not changed + - install_same_perl_package_result is not failed + +- name: install a Perl package with version operator + cpanm: + name: JSON + version: "@4.01" + notest: yes + mode: new + register: install_perl_package_with_version_op_result + +- name: assert package with version operator is installed + assert: + that: + - install_perl_package_with_version_op_result is changed + - install_perl_package_with_version_op_result is not failed diff --git a/tests/unit/plugins/modules/packaging/language/test_cpanm.py b/tests/unit/plugins/modules/packaging/language/test_cpanm.py new file mode 100644 index 0000000000..fd52fc1cc9 --- /dev/null 
+++ b/tests/unit/plugins/modules/packaging/language/test_cpanm.py @@ -0,0 +1,288 @@ +# Author: Alexei Znamensky (russoz@gmail.com) +# Largely adapted from test_redhat_subscription by +# Jiri Hnidek (jhnidek@redhat.com) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from ansible_collections.community.general.plugins.modules.packaging.language import cpanm + +import pytest + +TESTED_MODULE = cpanm.__name__ + + +@pytest.fixture +def patch_cpanm(mocker): + """ + Function used for mocking some parts of redhat_subscribtion module + """ + mocker.patch('ansible_collections.community.general.plugins.module_utils.module_helper.AnsibleModule.get_bin_path', + return_value='/testbin/cpanm') + + +TEST_CASES = [ + [ + {'name': 'Dancer'}, + { + 'id': 'install_dancer_compatibility', + 'run_command.calls': [ + ( + ['perl', '-le', 'use Dancer;'], + {'environ_update': {}, 'check_rc': False}, + (2, '', 'error, not installed',), # output rc, out, err + ), + ( + ['/testbin/cpanm', 'Dancer'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + ), + ], + 'changed': True, + } + ], + [ + {'name': 'Dancer'}, + { + 'id': 'install_dancer_already_installed_compatibility', + 'run_command.calls': [ + ( + ['perl', '-le', 'use Dancer;'], + {'environ_update': {}, 'check_rc': False}, + (0, '', '',), # output rc, out, err + ), + ], + 'changed': False, + } + ], + [ + {'name': 'Dancer', 'mode': 'new'}, + { + 'id': 'install_dancer', + 'run_command.calls': [( + ['/testbin/cpanm', 'Dancer'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + } + ], + [ + {'name': 'MIYAGAWA/Plack-0.99_05.tar.gz'}, + { + 'id': 'install_distribution_file_compatibility', + 'run_command.calls': [( + ['/testbin/cpanm', 'MIYAGAWA/Plack-0.99_05.tar.gz'], + 
{'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + } + ], + [ + {'name': 'MIYAGAWA/Plack-0.99_05.tar.gz', 'mode': 'new'}, + { + 'id': 'install_distribution_file', + 'run_command.calls': [( + ['/testbin/cpanm', 'MIYAGAWA/Plack-0.99_05.tar.gz'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + } + ], + [ + {'name': 'Dancer', 'locallib': '/srv/webapps/my_app/extlib', 'mode': 'new'}, + { + 'id': 'install_into_locallib', + 'run_command.calls': [( + ['/testbin/cpanm', '--local-lib', '/srv/webapps/my_app/extlib', 'Dancer'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + } + ], + [ + {'from_path': '/srv/webapps/my_app/src/', 'mode': 'new'}, + { + 'id': 'install_from_local_directory', + 'run_command.calls': [( + ['/testbin/cpanm', '/srv/webapps/my_app/src/'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + } + ], + [ + {'name': 'Dancer', 'locallib': '/srv/webapps/my_app/extlib', 'notest': True, 'mode': 'new'}, + { + 'id': 'install_into_locallib_no_unit_testing', + 'run_command.calls': [( + ['/testbin/cpanm', '--notest', '--local-lib', '/srv/webapps/my_app/extlib', 'Dancer'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + } + ], + [ + {'name': 'Dancer', 'mirror': 'http://cpan.cpantesters.org/', 'mode': 'new'}, + { + 'id': 'install_from_mirror', + 'run_command.calls': [( + ['/testbin/cpanm', '--mirror', 'http://cpan.cpantesters.org/', 'Dancer'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + } + ], + [ + {'name': 'Dancer', 'system_lib': True, 'mode': 'new'}, + { + 'id': 'install_into_system_lib', + 'run_command.calls': [], + 
'changed': False, + 'failed': True, + } + ], + [ + {'name': 'Dancer', 'version': '1.0', 'mode': 'new'}, + { + 'id': 'install_minversion_implicit', + 'run_command.calls': [( + ['/testbin/cpanm', 'Dancer~1.0'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + } + ], + [ + {'name': 'Dancer', 'version': '~1.5', 'mode': 'new'}, + { + 'id': 'install_minversion_explicit', + 'run_command.calls': [( + ['/testbin/cpanm', 'Dancer~1.5'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + } + ], + [ + {'name': 'Dancer', 'version': '@1.7', 'mode': 'new'}, + { + 'id': 'install_specific_version', + 'run_command.calls': [( + ['/testbin/cpanm', 'Dancer@1.7'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + 'failed': False, + } + ], + [ + {'name': 'MIYAGAWA/Plack-0.99_05.tar.gz', 'version': '@1.7', 'mode': 'new'}, + { + 'id': 'install_specific_version_from_file_error', + 'run_command.calls': [], + 'changed': False, + 'failed': True, + 'msg': "parameter 'version' must not be used when installing from a file", + } + ], + [ + {'from_path': '~/', 'version': '@1.7', 'mode': 'new'}, + { + 'id': 'install_specific_version_from_directory_error', + 'run_command.calls': [], + 'changed': False, + 'failed': True, + 'msg': "parameter 'version' must not be used when installing from a directory", + } + ], + [ + {'name': 'git://github.com/plack/Plack.git', 'version': '@1.7', 'mode': 'new'}, + { + 'id': 'install_specific_version_from_git_url_explicit', + 'run_command.calls': [( + ['/testbin/cpanm', 'git://github.com/plack/Plack.git@1.7'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + 'failed': False, + } + ], + [ + {'name': 'git://github.com/plack/Plack.git', 'version': '2.5', 'mode': 'new'}, + { + 'id': 
'install_specific_version_from_git_url_implicit', + 'run_command.calls': [( + ['/testbin/cpanm', 'git://github.com/plack/Plack.git@2.5'], + {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + (0, '', '',), # output rc, out, err + )], + 'changed': True, + 'failed': False, + } + ], + [ + {'name': 'git://github.com/plack/Plack.git', 'version': '~2.5', 'mode': 'new'}, + { + 'id': 'install_version_operator_from_git_url_error', + 'run_command.calls': [], + 'changed': False, + 'failed': True, + 'msg': "operator '~' not allowed in version parameter when installing from git repository", + } + ], +] +TEST_CASES_IDS = [item[1]['id'] for item in TEST_CASES] + + +@pytest.mark.parametrize('patch_ansible_module, testcase', + TEST_CASES, + ids=TEST_CASES_IDS, + indirect=['patch_ansible_module']) +@pytest.mark.usefixtures('patch_ansible_module') +def test_cpanm(mocker, capfd, patch_cpanm, testcase): + """ + Run unit tests for test cases listen in TEST_CASES + """ + + # Mock function used for running commands first + call_results = [item[2] for item in testcase['run_command.calls']] + mock_run_command = mocker.patch( + 'ansible_collections.community.general.plugins.module_utils.module_helper.AnsibleModule.run_command', + side_effect=call_results) + + # Try to run test case + with pytest.raises(SystemExit): + cpanm.main() + + out, err = capfd.readouterr() + results = json.loads(out) + print("results =\n%s" % results) + + assert mock_run_command.call_count == len(testcase['run_command.calls']) + if mock_run_command.call_count: + call_args_list = [(item[0][0], item[1]) for item in mock_run_command.call_args_list] + expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']] + print("call args list =\n%s" % call_args_list) + print("expected args list =\n%s" % expected_call_args_list) + assert call_args_list == expected_call_args_list + + assert results.get('changed', False) == testcase['changed'] + if 'failed' in testcase: + assert 
results.get('failed', False) == testcase['failed'] + if 'msg' in testcase: + assert results.get('msg', '') == testcase['msg'] From 6a8eb7b388c53acce4d8d701bda86fc9382df7b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?= Date: Sun, 18 Apr 2021 04:59:23 -0400 Subject: [PATCH 0197/3093] nmcli: add wifi support (#2220) * nmcli: add wifi support Ability to: - connect NetworkManager to a Wifi network - attach the connection to a master (bond). * target 3.0.0 --- .../fragments/2220_nmcli_wifi_support.yaml | 3 ++ plugins/modules/net_tools/nmcli.py | 52 +++++++++++++++++-- 2 files changed, 51 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2220_nmcli_wifi_support.yaml diff --git a/changelogs/fragments/2220_nmcli_wifi_support.yaml b/changelogs/fragments/2220_nmcli_wifi_support.yaml new file mode 100644 index 0000000000..224c4dc526 --- /dev/null +++ b/changelogs/fragments/2220_nmcli_wifi_support.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: +- "nmcli - add ability to connect to a Wifi network and also to attach it to a master (bond) (https://github.com/ansible-collections/community.general/pull/2220)." diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index d5b329fe03..2b402a2230 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -54,7 +54,7 @@ options: - Type C(generic) is added in Ansible 2.5. - Type C(infiniband) is added in community.general 2.0.0. type: str - choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, infiniband, ipip, sit, team, team-slave, vlan, vxlan ] + choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] mode: description: - This is the type of device or network connection that you wish to create for a bond, team or bridge. 
@@ -279,6 +279,19 @@ options: - When updating this property on a currently activated connection, the change takes effect immediately. type: str version_added: 2.0.0 + wifi_sec: + description: + - 'The security configuration of the Wifi connection. The valid attributes are listed on:' + - 'U(https://developer.gnome.org/NetworkManager/stable/settings-802-11-wireless-security.html)' + - 'For instance to use common WPA-PSK auth with a password:' + - '- C({key-mgmt: wpa-psk, psk: my_password})' + type: dict + version_added: 3.0.0 + ssid: + description: + - Name of the Wireless router or the access point. + type: str + version_added: 3.0.0 ''' EXAMPLES = r''' @@ -582,6 +595,19 @@ EXAMPLES = r''' # - 8 NetworkManager is not running # - 9 nmcli and NetworkManager versions mismatch # - 10 Connection, device, or access point does not exist. + +- name: Create the wifi connection + community.general.nmcli: + type: wifi + conn_name: Brittany + ifname: wlp4s0 + ssid: Brittany + wifi_sec: + key-mgmt: wpa-psk + psk: my_password + autoconnect: true + state: present + ''' RETURN = r"""# @@ -665,6 +691,8 @@ class Nmcli(object): self.nmcli_bin = self.module.get_bin_path('nmcli', True) self.dhcp_client_id = module.params['dhcp_client_id'] self.zone = module.params['zone'] + self.ssid = module.params['ssid'] + self.wifi_sec = module.params['wifi_sec'] if self.method4: self.ipv4_method = self.method4 @@ -774,7 +802,10 @@ class Nmcli(object): 'vxlan.local': self.vxlan_local, 'vxlan.remote': self.vxlan_remote, }) - + elif self.type == 'wifi': + options.update({ + 'connection.slave-type': 'bond' if self.master else None, + }) # Convert settings values based on the situation. 
for setting, value in options.items(): setting_type = self.settings_type(setting) @@ -808,6 +839,7 @@ class Nmcli(object): 'infiniband', 'team', 'vlan', + 'wifi' ) @property @@ -845,6 +877,7 @@ class Nmcli(object): 'bond-slave', 'bridge-slave', 'team-slave', + 'wifi', ) @property @@ -919,6 +952,13 @@ class Nmcli(object): else: ifname = self.ifname + if self.type == "wifi": + cmd.append('ssid') + cmd.append(self.ssid) + if self.wifi_sec: + for name, value in self.wifi_sec.items(): + cmd += ['wifi-sec.%s' % name, value] + options = { 'connection.interface-name': ifname, } @@ -940,7 +980,7 @@ class Nmcli(object): @property def create_connection_up(self): - if self.type in ('bond', 'ethernet', 'infiniband'): + if self.type in ('bond', 'ethernet', 'infiniband', 'wifi'): if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): return True elif self.type == 'team': @@ -1088,7 +1128,8 @@ def main(): 'team', 'team-slave', 'vlan', - 'vxlan' + 'vxlan', + 'wifi', ]), ip4=dict(type='str'), gw4=dict(type='str'), @@ -1141,8 +1182,11 @@ def main(): ip_tunnel_dev=dict(type='str'), ip_tunnel_local=dict(type='str'), ip_tunnel_remote=dict(type='str'), + ssid=dict(type='str'), + wifi_sec=dict(type='dict', no_log=True), ), mutually_exclusive=[['never_default4', 'gw4']], + required_if=[("type", "wifi", [("ssid")])], supports_check_mode=True, ) module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') From 91a0264f38baab1ccba537bd645b1d47c45eccf6 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 19 Apr 2021 06:59:52 +0200 Subject: [PATCH 0198/3093] java_keystore: overwrite instead of fail when password or alias does not match (#2262) * Overwrite instead of fail when password does not match. * Update documentation. * Fix tests. * Update plugins/modules/system/java_keystore.py Co-authored-by: Amin Vakil * Fix documentation. 
* Apply suggestions from code review Co-authored-by: quidame * Update tests/unit/plugins/modules/system/test_java_keystore.py * One more. Co-authored-by: Amin Vakil Co-authored-by: quidame --- .../2262-java_keystore-passphrase.yml | 8 +++++ plugins/modules/system/java_keystore.py | 24 +++++++------- .../targets/java_keystore/tasks/tests.yml | 12 +++---- .../modules/system/test_java_keystore.py | 32 +++++++++++++------ 4 files changed, 46 insertions(+), 30 deletions(-) create mode 100644 changelogs/fragments/2262-java_keystore-passphrase.yml diff --git a/changelogs/fragments/2262-java_keystore-passphrase.yml b/changelogs/fragments/2262-java_keystore-passphrase.yml new file mode 100644 index 0000000000..882ada97c3 --- /dev/null +++ b/changelogs/fragments/2262-java_keystore-passphrase.yml @@ -0,0 +1,8 @@ +breaking_changes: +- "java_keystore - instead of failing, now overwrites keystore if the alias (name) is changed. + This was originally the intended behavior, but did not work due to a logic error. Make sure + that your playbooks and roles do not depend on the old behavior of failing instead of + overwriting (https://github.com/ansible-collections/community.general/issues/1671)." +- "java_keystore - instead of failing, now overwrites keystore if the passphrase is changed. + Make sure that your playbooks and roles do not depend on the old behavior of failing instead + of overwriting (https://github.com/ansible-collections/community.general/issues/1671)." diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py index 2a34175552..ebfe6abdd7 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/system/java_keystore.py @@ -19,8 +19,9 @@ options: name: description: - Name of the certificate in the keystore. - - If the provided name does not exist in the keystore, the module fails. - This behavior will change in a next release. 
+ - If the provided name does not exist in the keystore, the module + will re-create the keystore. This behavior changed in community.general 3.0.0, + before that the module would fail when the name did not match. type: str required: true certificate: @@ -60,7 +61,9 @@ options: description: - Password that should be used to secure the keystore. - If the provided password fails to unlock the keystore, the module - fails. This behavior will change in a next release. + will re-create the keystore with the new passphrase. This behavior + changed in community.general 3.0.0, before that the module would fail + when the password did not match. type: str required: true dest: @@ -187,16 +190,11 @@ def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_pat (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = run_commands( module, stored_certificate_fingerprint_cmd, environ_update=dict(STOREPASS=keystore_password)) if rc != 0: - # First intention was to not fail, and overwrite the keystore instead, - # in case of alias mismatch; but an issue in error handling caused the - # module to fail anyway. - # See: https://github.com/ansible-collections/community.general/issues/1671 - # And: https://github.com/ansible-collections/community.general/pull/2183 - # if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias in stored_certificate_fingerprint_out: - # return "alias mismatch" - # if re.match(r'keytool error: java\.io\.IOException: [Kk]eystore( was tampered with, or)? password was incorrect', - # stored_certificate_fingerprint_out): - # return "password mismatch" + if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias in stored_certificate_fingerprint_out: + return "alias mismatch" + if re.match(r'keytool error: java\.io\.IOException: [Kk]eystore( was tampered with, or)? 
password was incorrect', + stored_certificate_fingerprint_out): + return "password mismatch" return module.fail_json(msg=stored_certificate_fingerprint_out, err=stored_certificate_fingerprint_err, cmd=stored_certificate_fingerprint_cmd, diff --git a/tests/integration/targets/java_keystore/tasks/tests.yml b/tests/integration/targets/java_keystore/tasks/tests.yml index 4511af033d..e0de1c6836 100644 --- a/tests/integration/targets/java_keystore/tasks/tests.yml +++ b/tests/integration/targets/java_keystore/tasks/tests.yml @@ -64,7 +64,6 @@ loop: "{{ java_keystore_new_certs }}" check_mode: yes register: result_alias_change_check - when: false # FIXME: module currently crashes - name: Create a Java keystore for the given certificates (alias changed) community.general.java_keystore: @@ -72,7 +71,6 @@ name: foobar loop: "{{ java_keystore_new_certs }}" register: result_alias_change - when: false # FIXME: module currently crashes - name: Create a Java keystore for the given certificates (password changed, check mode) @@ -83,7 +81,6 @@ loop: "{{ java_keystore_new_certs }}" check_mode: yes register: result_pw_change_check - when: false # FIXME: module currently crashes - name: Create a Java keystore for the given certificates (password changed) community.general.java_keystore: @@ -92,7 +89,6 @@ password: hunter2 loop: "{{ java_keystore_new_certs }}" register: result_pw_change - when: false # FIXME: module currently crashes - name: Check that the remote certificates have not been removed ansible.builtin.file: @@ -117,7 +113,7 @@ - result_idem_check is not changed - result_change is changed - result_change_check is changed - # - result_alias_change is changed # FIXME: module currently crashes - # - result_alias_change_check is changed # FIXME: module currently crashes - # - result_pw_change is changed # FIXME: module currently crashes - # - result_pw_change_check is changed # FIXME: module currently crashes + - result_alias_change is changed + - result_alias_change_check is 
changed + - result_pw_change is changed + - result_pw_change_check is changed diff --git a/tests/unit/plugins/modules/system/test_java_keystore.py b/tests/unit/plugins/modules/system/test_java_keystore.py index 94332d6192..ec14b3734d 100644 --- a/tests/unit/plugins/modules/system/test_java_keystore.py +++ b/tests/unit/plugins/modules/system/test_java_keystore.py @@ -250,19 +250,33 @@ class TestCertChanged(ModuleTestCase): supports_check_mode=self.spec.supports_check_mode ) - module.fail_json = Mock() - with patch('os.remove', return_value=True): self.create_file.side_effect = ['/tmp/placeholder'] self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (1, 'keytool error: java.lang.Exception: Alias does not exist', '')] - cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') - module.fail_json.assert_called_once_with( - cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-storepass:env", "STOREPASS", "-v"], - msg='keytool error: java.lang.Exception: Alias does not exist', - err='', - rc=1 - ) + result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.assertTrue(result, 'Alias mismatch detected') + + def test_cert_changed_password_mismatch(self): + set_module_args(dict( + certificate='cert-foo', + private_key='private-foo', + dest='/path/to/keystore.jks', + name='foo', + password='changeit' + )) + + module = AnsibleModule( + argument_spec=self.spec.argument_spec, + supports_check_mode=self.spec.supports_check_mode + ) + + with patch('os.remove', return_value=True): + self.create_file.side_effect = ['/tmp/placeholder'] + self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), + (1, 'keytool error: java.io.IOException: Keystore password was incorrect', '')] + result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.assertTrue(result, 'Password mismatch detected') def 
test_cert_changed_fail_read_cert(self): set_module_args(dict( From f87a39b21dd3113af976b179600903f64a54cfc6 Mon Sep 17 00:00:00 2001 From: quidame Date: Mon, 19 Apr 2021 07:04:29 +0200 Subject: [PATCH 0199/3093] new module: filesize - create or resize a file, given its size (#2232) * new module: filesize * description: create or resize a file, given its size * with integration tests * Update plugins/modules/files/filesize.py (version_added) Co-authored-by: Andrew Klychkov * Update filesize.py (extends_documentation_fragment: use fqcn) Co-authored-by: Amin Vakil * doc: use strict lowercase booleans (true/false) rather than other variants * use *raw* type to manage size values * drop 'miB' unit family * Apply suggestions from code review Co-authored-by: Felix Fontein * add more details Co-authored-by: Andrew Klychkov Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein --- plugins/modules/files/filesize.py | 483 ++++++++++++++++++ plugins/modules/filesize.py | 1 + tests/integration/targets/filesize/aliases | 1 + .../targets/filesize/defaults/main.yml | 4 + .../targets/filesize/tasks/basics.yml | 407 +++++++++++++++ .../targets/filesize/tasks/errors.yml | 129 +++++ .../targets/filesize/tasks/floats.yml | 245 +++++++++ .../targets/filesize/tasks/main.yml | 40 ++ .../targets/filesize/tasks/sparse.yml | 282 ++++++++++ .../targets/filesize/tasks/symlinks.yml | 93 ++++ 10 files changed, 1685 insertions(+) create mode 100644 plugins/modules/files/filesize.py create mode 120000 plugins/modules/filesize.py create mode 100644 tests/integration/targets/filesize/aliases create mode 100644 tests/integration/targets/filesize/defaults/main.yml create mode 100644 tests/integration/targets/filesize/tasks/basics.yml create mode 100644 tests/integration/targets/filesize/tasks/errors.yml create mode 100644 tests/integration/targets/filesize/tasks/floats.yml create mode 100644 tests/integration/targets/filesize/tasks/main.yml create mode 100644 
tests/integration/targets/filesize/tasks/sparse.yml create mode 100644 tests/integration/targets/filesize/tasks/symlinks.yml diff --git a/plugins/modules/files/filesize.py b/plugins/modules/files/filesize.py new file mode 100644 index 0000000000..5b22fb4512 --- /dev/null +++ b/plugins/modules/files/filesize.py @@ -0,0 +1,483 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, quidame +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = r''' +--- +module: filesize + +short_description: Create a file with a given size, or resize it if it exists + +description: + - This module is a simple wrapper around C(dd) to create, extend or truncate + a file, given its size. It can be used to manage swap files (that require + contiguous blocks) or alternatively, huge sparse files. + +author: + - quidame (@quidame) + +version_added: "3.0.0" + +options: + path: + description: + - Path of the regular file to create or resize. + type: path + required: true + size: + description: + - Requested size of the file. + - The value is a number (either C(int) or C(float)) optionally followed + by a multiplicative suffix, that can be one of C(B) (bytes), C(KB) or + C(kB) (= 1000B), C(MB) or C(mB) (= 1000kB), C(GB) or C(gB) (= 1000MB), + and so on for C(T), C(P), C(E), C(Z) and C(Y); or alternatively one of + C(K), C(k) or C(KiB) (= 1024B); C(M), C(m) or C(MiB) (= 1024KiB); + C(G), C(g) or C(GiB) (= 1024MiB); and so on. + - If the multiplicative suffix is not provided, the value is treated as + an integer number of blocks of I(blocksize) bytes each (float values + are rounded to the closest integer). + - When the I(size) value is equal to the current file size, does nothing. 
+ - When the I(size) value is bigger than the current file size, bytes from + I(source) (if I(sparse) is not C(false)) are appended to the file + without truncating it, in other words, without modifying the existing + bytes of the file. + - When the I(size) value is smaller than the current file size, it is + truncated to the requested value without modifying bytes before this + value. + - That means that a file of any arbitrary size can be grown to any other + arbitrary size, and then resized down to its initial size without + modifying its initial content. + type: raw + required: true + blocksize: + description: + - Size of blocks, in bytes if not followed by a multiplicative suffix. + - The numeric value (before the unit) C(MUST) be an integer (or a C(float) + if it equals an integer). + - If not set, the size of blocks is guessed from the OS and commonly + results in C(512) or C(4096) bytes, that is used internally by the + module or when I(size) has no unit. + type: raw + source: + description: + - Device or file that provides input data to provision the file. + - This parameter is ignored when I(sparse=true). + type: path + default: /dev/zero + force: + description: + - Whether or not to overwrite the file if it exists, in other words, to + truncate it from 0. When C(true), the module is not idempotent, that + means it always reports I(changed=true). + - I(force=true) and I(sparse=true) are mutually exclusive. + type: bool + default: false + sparse: + description: + - Whether or not the file to create should be a sparse file. + - This option is effective only on newly created files, or when growing a + file, only for the bytes to append. + - This option is not supported on OpenBSD, Solaris and AIX. + - I(force=true) and I(sparse=true) are mutually exclusive. + type: bool + default: false + +notes: + - This module supports C(check_mode) and C(diff). 
+ +requirements: + - dd (Data Duplicator) in PATH + +extends_documentation_fragment: + - ansible.builtin.files + +seealso: + - name: dd(1) manpage for Linux + description: Manual page of the GNU/Linux's dd implementation (from GNU coreutils). + link: https://man7.org/linux/man-pages/man1/dd.1.html + + - name: dd(1) manpage for IBM AIX + description: Manual page of the IBM AIX's dd implementation. + link: https://www.ibm.com/support/knowledgecenter/ssw_aix_72/d_commands/dd.html + + - name: dd(1) manpage for Mac OSX + description: Manual page of the Mac OSX's dd implementation. + link: https://www.unix.com/man-page/osx/1/dd/ + + - name: dd(1M) manpage for Solaris + description: Manual page of the Oracle Solaris's dd implementation. + link: https://docs.oracle.com/cd/E36784_01/html/E36871/dd-1m.html + + - name: dd(1) manpage for FreeBSD + description: Manual page of the FreeBSD's dd implementation. + link: https://www.freebsd.org/cgi/man.cgi?dd(1) + + - name: dd(1) manpage for OpenBSD + description: Manual page of the OpenBSD's dd implementation. + link: https://man.openbsd.org/dd + + - name: dd(1) manpage for NetBSD + description: Manual page of the NetBSD's dd implementation. 
+ link: https://man.netbsd.org/dd.1 +''' + +EXAMPLES = r''' +- name: Create a file of 1G filled with null bytes + community.general.filesize: + path: /var/bigfile + size: 1G + +- name: Extend the file to 2G (2*1024^3) + community.general.filesize: + path: /var/bigfile + size: 2G + +- name: Reduce the file to 2GB (2*1000^3) + community.general.filesize: + path: /var/bigfile + size: 2GB + +- name: Fill a file with random bytes for backing a LUKS device + community.general.filesize: + path: ~/diskimage.luks + size: 512.0 MiB + source: /dev/urandom + +- name: Take a backup of MBR boot code into a file, overwriting it if it exists + community.general.filesize: + path: /media/sdb1/mbr.bin + size: 440B + source: /dev/sda + force: true + +- name: Create/resize a sparse file of/to 8TB + community.general.filesize: + path: /var/local/sparsefile + size: 8TB + sparse: true + +- name: Create a file with specific size and attributes, to be used as swap space + community.general.filesize: + path: /var/swapfile + size: 2G + blocksize: 512B + mode: u=rw,go= + owner: root + group: root +''' + +RETURN = r''' +cmd: + description: Command executed to create or resize the file. + type: str + returned: when changed or failed + sample: /usr/bin/dd if=/dev/zero of=/var/swapfile bs=1048576 seek=3072 count=1024 + +filesize: + description: Dictionary of sizes related to the file. + type: dict + returned: always + contains: + blocks: + description: Number of blocks in the file. + type: int + sample: 500 + blocksize: + description: Size of the blocks in bytes. + type: int + sample: 1024 + bytes: + description: Size of the file, in bytes, as the product of C(blocks) and C(blocksize). + type: int + sample: 512000 + iec: + description: Size of the file, in human-readable format, following IEC standard. + type: str + sample: 500.0 KiB + si: + description: Size of the file, in human-readable format, following SI standard. 
+ type: str + sample: 512.0 kB + +size_diff: + description: Difference (positive or negative) between old size and new size, in bytes. + type: int + sample: -1234567890 + returned: always + +path: + description: Realpath of the file if it is a symlink, otherwise the same than module's param. + type: str + sample: /var/swap0 + returned: always +''' + + +import re +import os +import math + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + + +# These are the multiplicative suffixes understood (or returned) by dd and +# others (ls, df, lvresize, lsblk...). +SIZE_UNITS = dict( + B=1, + kB=1000**1, KB=1000**1, KiB=1024**1, K=1024**1, k=1024**1, + MB=1000**2, mB=1000**2, MiB=1024**2, M=1024**2, m=1024**2, + GB=1000**3, gB=1000**3, GiB=1024**3, G=1024**3, g=1024**3, + TB=1000**4, tB=1000**4, TiB=1024**4, T=1024**4, t=1024**4, + PB=1000**5, pB=1000**5, PiB=1024**5, P=1024**5, p=1024**5, + EB=1000**6, eB=1000**6, EiB=1024**6, E=1024**6, e=1024**6, + ZB=1000**7, zB=1000**7, ZiB=1024**7, Z=1024**7, z=1024**7, + YB=1000**8, yB=1000**8, YiB=1024**8, Y=1024**8, y=1024**8, +) + + +def bytes_to_human(size, iec=False): + """Return human-readable size (with SI or IEC suffix) from bytes. This is + only to populate the returned result of the module, not to handle the + file itself (we only rely on bytes for that). + """ + unit = 'B' + for (u, v) in SIZE_UNITS.items(): + if size < v: + continue + if iec: + if 'i' not in u or size / v >= 1024: + continue + else: + if v % 5 or size / v >= 1000: + continue + unit = u + + hsize = round(size / SIZE_UNITS[unit], 2) + if unit == 'B': + hsize = int(hsize) + + unit = re.sub(r'^(.)', lambda m: m.expand(r'\1').upper(), unit) + if unit == 'KB': + unit = 'kB' + + return '%s %s' % (str(hsize), unit) + + +def smart_blocksize(size, unit, product, bsize): + """Ensure the total size can be written as blocks*blocksize, with blocks + and blocksize being integers. 
+ """ + if not product % bsize: + return bsize + + # Basically, for a file of 8kB (=8000B), system's block size of 4096 bytes + # is not usable. The smallest integer number of kB to work with 512B blocks + # is 64, the nexts are 128, 192, 256, and so on. + + unit_size = SIZE_UNITS[unit] + + if size == int(size): + if unit_size > SIZE_UNITS['MiB']: + if unit_size % 5: + return SIZE_UNITS['MiB'] + return SIZE_UNITS['MB'] + return unit_size + + if unit == 'B': + raise AssertionError("byte is the smallest unit and requires an integer value") + + if 0 < product < bsize: + return product + + for bsz in (1024, 1000, 512, 256, 128, 100, 64, 32, 16, 10, 8, 4, 2): + if not product % bsz: + return bsz + return 1 + + +def split_size_unit(string, isint=False): + """Split a string between the size value (int or float) and the unit. + Support optional space(s) between the numeric value and the unit. + """ + unit = re.sub(r'(\d|\.)', r'', string).strip() + value = float(re.sub(r'%s' % unit, r'', string).strip()) + if isint and unit in ('B', ''): + if int(value) != value: + raise AssertionError("invalid blocksize value: bytes require an integer value") + + if not unit: + unit = None + product = int(round(value)) + else: + if unit not in SIZE_UNITS.keys(): + raise AssertionError("invalid size unit (%s): unit must be one of %s, or none." % + (unit, ', '.join(sorted(SIZE_UNITS, key=SIZE_UNITS.get)))) + product = int(round(value * SIZE_UNITS[unit])) + return value, unit, product + + +def size_string(value): + """Convert a raw value to a string, but only if it is an integer, a float + or a string itself. + """ + if not isinstance(value, (int, float, str)): + raise AssertionError("invalid value type (%s): size must be integer, float or string" % type(value)) + return str(value) + + +def size_spec(args): + """Return a dictionary with size specifications, especially the size in + bytes (after rounding it to an integer number of blocks). 
+ """ + blocksize_in_bytes = split_size_unit(args['blocksize'], True)[2] + if blocksize_in_bytes == 0: + raise AssertionError("block size cannot be equal to zero") + + size_value, size_unit, size_result = split_size_unit(args['size']) + if not size_unit: + blocks = int(math.ceil(size_value)) + else: + blocksize_in_bytes = smart_blocksize(size_value, size_unit, size_result, blocksize_in_bytes) + blocks = int(math.ceil(size_result / blocksize_in_bytes)) + + args['size_diff'] = round_bytes = int(blocks * blocksize_in_bytes) + args['size_spec'] = dict(blocks=blocks, blocksize=blocksize_in_bytes, bytes=round_bytes, + iec=bytes_to_human(round_bytes, True), + si=bytes_to_human(round_bytes)) + return args['size_spec'] + + +def current_size(args): + """Return the size of the file at the given location if it exists, or None.""" + path = args['path'] + if os.path.exists(path): + if not os.path.isfile(path): + raise AssertionError("%s exists but is not a regular file" % path) + args['file_size'] = os.stat(path).st_size + else: + args['file_size'] = None + return args['file_size'] + + +def complete_dd_cmdline(args, dd_cmd): + """Compute dd options to grow or truncate a file.""" + if args['file_size'] == args['size_spec']['bytes'] and not args['force']: + # Nothing to do. + return list() + + bs = args['size_spec']['blocksize'] + conv = list() + + # For sparse files (create, truncate, grow): write count=0 block. 
+ if args['sparse']: + seek = args['size_spec']['blocks'] + conv += ['sparse'] + elif args['force'] or not os.path.exists(args['path']): # Create file + seek = 0 + elif args['size_diff'] < 0: # Truncate file + seek = args['size_spec']['blocks'] + elif args['size_diff'] % bs: # Grow file + seek = int(args['file_size'] / bs) + 1 + else: + seek = int(args['file_size'] / bs) + + count = args['size_spec']['blocks'] - seek + dd_cmd += ['bs=%s' % str(bs), 'seek=%s' % str(seek), 'count=%s' % str(count)] + if conv: + dd_cmd += ['conv=%s' % ','.join(conv)] + + return dd_cmd + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True), + size=dict(type='raw', required=True), + blocksize=dict(type='raw'), + source=dict(type='path', default='/dev/zero'), + sparse=dict(type='bool', default=False), + force=dict(type='bool', default=False), + ), + supports_check_mode=True, + add_file_common_args=True, + ) + args = dict(**module.params) + diff = dict(before=dict(), after=dict()) + + if args['sparse'] and args['force']: + module.fail_json(msg='parameters values are mutually exclusive: force=true|sparse=true') + if not os.path.exists(os.path.dirname(args['path'])): + module.fail_json(msg='parent directory of the file must exist prior to run this module') + if not args['blocksize']: + args['blocksize'] = str(os.statvfs(os.path.dirname(args['path'])).f_frsize) + + try: + args['size'] = size_string(args['size']) + args['blocksize'] = size_string(args['blocksize']) + initial_filesize = current_size(args) + size_descriptors = size_spec(args) + except AssertionError as err: + module.fail_json(msg=to_native(err)) + + expected_filesize = size_descriptors['bytes'] + if initial_filesize: + args['size_diff'] = expected_filesize - initial_filesize + diff['after']['size'] = expected_filesize + diff['before']['size'] = initial_filesize + + result = dict( + changed=args['force'], + size_diff=args['size_diff'], + path=args['path'], + 
filesize=size_descriptors) + + dd_bin = module.get_bin_path('dd', True) + dd_cmd = [dd_bin, 'if=%s' % args['source'], 'of=%s' % args['path']] + + if expected_filesize != initial_filesize or args['force']: + result['cmd'] = ' '.join(complete_dd_cmdline(args, dd_cmd)) + if module.check_mode: + result['changed'] = True + else: + result['rc'], dummy, result['stderr'] = module.run_command(dd_cmd) + + diff['after']['size'] = result_filesize = result['size_diff'] = current_size(args) + if initial_filesize: + result['size_diff'] = result_filesize - initial_filesize + if not args['force']: + result['changed'] = result_filesize != initial_filesize + + if result['rc']: + msg = "dd error while creating file %s with size %s from source %s: see stderr for details" % ( + args['path'], args['size'], args['source']) + module.fail_json(msg=msg, **result) + if result_filesize != expected_filesize: + msg = "module error while creating file %s with size %s from source %s: file is %s bytes long" % ( + args['path'], args['size'], args['source'], result_filesize) + module.fail_json(msg=msg, **result) + + # dd follows symlinks, and so does this module, while file module doesn't. + # If we call it, this is to manage file's mode, owner and so on, not the + # symlink's ones. 
+ file_params = dict(**module.params) + if os.path.islink(args['path']): + file_params['path'] = result['path'] = os.path.realpath(args['path']) + + if args['file_size'] is not None: + file_args = module.load_file_common_arguments(file_params) + result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff) + result['diff'] = diff + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/filesize.py b/plugins/modules/filesize.py new file mode 120000 index 0000000000..fc4a211c87 --- /dev/null +++ b/plugins/modules/filesize.py @@ -0,0 +1 @@ +files/filesize.py \ No newline at end of file diff --git a/tests/integration/targets/filesize/aliases b/tests/integration/targets/filesize/aliases new file mode 100644 index 0000000000..a6dafcf8cd --- /dev/null +++ b/tests/integration/targets/filesize/aliases @@ -0,0 +1 @@ +shippable/posix/group1 diff --git a/tests/integration/targets/filesize/defaults/main.yml b/tests/integration/targets/filesize/defaults/main.yml new file mode 100644 index 0000000000..b575e029f0 --- /dev/null +++ b/tests/integration/targets/filesize/defaults/main.yml @@ -0,0 +1,4 @@ +--- +filesize_testdir: "/tmp/testdir" +filesize_testfile: "{{ filesize_testdir }}/testfile" +filesize_testlink: "{{ filesize_testdir }}/testlink" diff --git a/tests/integration/targets/filesize/tasks/basics.yml b/tests/integration/targets/filesize/tasks/basics.yml new file mode 100644 index 0000000000..1d5281b7e1 --- /dev/null +++ b/tests/integration/targets/filesize/tasks/basics.yml @@ -0,0 +1,407 @@ +--- +# Test module with basic parameters. +# Create a file, grow it, reduce it to its initial size and check the match +# between initial and final checksums. Also check size formats consistency +# (as 57001B == 57001 B == 57.001 kB, for example, or 0 block or 0 unit is +# zero, etc). 
+ +- name: Create an empty file (check mode) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 0 + register: filesize_test_basic_01 + check_mode: yes + +- name: Stat the file (should not exist) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_01 + + +- name: Create an empty file + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 0 + register: filesize_test_basic_02 + +- name: Stat the file (should exist now) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_02 + + +- name: Create an empty file (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 0G + register: filesize_test_basic_03 + check_mode: yes + +- name: Create an empty file (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 0G + register: filesize_test_basic_04 + +- name: Stat the file (should still exist, unchanged) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_04 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + # check_mode & idempotency are in good shape. + - filesize_test_basic_01 is changed + - filesize_test_basic_02 is changed + - filesize_test_basic_03 is not changed + - filesize_test_basic_04 is not changed + + # check_mode returns the same command than actual mode. + - filesize_test_basic_02.cmd == filesize_test_basic_01.cmd + - filesize_test_basic_03.cmd is undefined + - filesize_test_basic_04.cmd is undefined + + # Module's specific return results are consistent with user input, that + # means: with *expected* results. 
+ - filesize_test_basic_01.filesize.bytes == 0 + - filesize_test_basic_02.filesize.bytes == 0 + - filesize_test_basic_03.filesize.bytes == 0 + - filesize_test_basic_04.filesize.bytes == 0 + + - filesize_test_basic_01.size_diff == 0 + - filesize_test_basic_02.size_diff == 0 + - filesize_test_basic_03.size_diff == 0 + - filesize_test_basic_04.size_diff == 0 + + # Results populated by module.set_fs_attributes_if_different() are still + # consistent with current state of the file. + - filesize_test_basic_01.state is undefined + - filesize_test_basic_02.state in ["file"] + - filesize_test_basic_01.size is undefined + - filesize_test_basic_02.size == 0 + - filesize_test_basic_03.size == 0 + - filesize_test_basic_04.size == 0 + + # Cross results with those retrieved by another module. + - not filesize_stat_basic_01.stat.exists + - filesize_stat_basic_02.stat.exists + - filesize_stat_basic_02.stat.isreg + - filesize_stat_basic_02.stat.size == 0 + - filesize_stat_basic_04.stat.size == 0 + + +- name: Fill the file up to 57kB (57000B) with random data (check mode) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 57kB + source: /dev/urandom + register: filesize_test_basic_11 + check_mode: yes + +- name: Stat the file (should still be unchanged) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_11 + + +- name: Fill the file up to 57kB (57000B) with random data + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 57kB + source: /dev/urandom + register: filesize_test_basic_12 + +- name: Stat the resulting file (and get its checksum) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_12 + +- name: Store checksum as fact + ansible.builtin.set_fact: + filesize_test_checksum: "{{ filesize_stat_basic_12.stat.checksum }}" + + +- name: Fill the file up to 57000B (57kB) with random data (check mode, idempotency) + community.general.filesize: + path: "{{ 
filesize_testfile }}" + size: 57000B + source: /dev/urandom + register: filesize_test_basic_13 + check_mode: yes + +- name: Fill the file up to 57000B (57kB) with random data (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 57000B + source: /dev/urandom + register: filesize_test_basic_14 + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_14 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + - filesize_test_basic_11 is changed + - filesize_test_basic_12 is changed + - filesize_test_basic_13 is not changed + - filesize_test_basic_14 is not changed + + - filesize_test_basic_12.cmd == filesize_test_basic_11.cmd + - filesize_test_basic_13.cmd is undefined + - filesize_test_basic_14.cmd is undefined + + - filesize_test_basic_11.filesize.bytes == 57000 + - filesize_test_basic_12.filesize.bytes == 57000 + - filesize_test_basic_13.filesize.bytes == 57000 + - filesize_test_basic_14.filesize.bytes == 57000 + + - filesize_test_basic_11.size_diff == 57000 + - filesize_test_basic_12.size_diff == 57000 + - filesize_test_basic_13.size_diff == 0 + - filesize_test_basic_14.size_diff == 0 + + - filesize_stat_basic_11.stat.size == 0 + - filesize_stat_basic_12.stat.size == 57000 + - filesize_stat_basic_14.stat.size == 57000 + + - filesize_stat_basic_14.stat.checksum == filesize_test_checksum + + + +- name: Expand the file with 1 byte (57001B) (check mode) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 57001B + register: filesize_test_basic_21 + check_mode: yes + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_21 + + +- name: Expand the file with 1 byte (57001B) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 57001B + register: filesize_test_basic_22 + +- name: Stat the file 
(should have grown of 1 byte) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_22 + + +- name: Expand the file with 1 byte (57.001 kB) (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 57.001 kB + register: filesize_test_basic_23 + check_mode: yes + +- name: Expand the file with 1 byte (57.001 kB) (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 57.001 kB + register: filesize_test_basic_24 + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_24 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + - filesize_test_basic_21 is changed + - filesize_test_basic_22 is changed + - filesize_test_basic_23 is not changed + - filesize_test_basic_24 is not changed + + - filesize_test_basic_22.cmd == filesize_test_basic_21.cmd + - filesize_test_basic_23.cmd is undefined + - filesize_test_basic_24.cmd is undefined + + - filesize_test_basic_21.filesize.bytes == 57001 + - filesize_test_basic_22.filesize.bytes == 57001 + - filesize_test_basic_23.filesize.bytes == 57001 + - filesize_test_basic_24.filesize.bytes == 57001 + + - filesize_test_basic_21.size_diff == 1 + - filesize_test_basic_22.size_diff == 1 + - filesize_test_basic_23.size_diff == 0 + - filesize_test_basic_24.size_diff == 0 + + - filesize_stat_basic_21.stat.size == 57000 + - filesize_stat_basic_22.stat.size == 57001 + - filesize_stat_basic_24.stat.size == 57001 + + - filesize_stat_basic_21.stat.checksum == filesize_test_checksum + - filesize_stat_basic_22.stat.checksum != filesize_test_checksum + - filesize_stat_basic_24.stat.checksum != filesize_test_checksum + + + +- name: Expand the file up to 2 MiB (2*1024*1024 bytes) (check mode) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 2 MiB + register: filesize_test_basic_31 + check_mode: yes + +- name: 
Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_31 + + +- name: Expand the file up to 2 MiB (2*1024*1024 bytes) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 2 MiB + register: filesize_test_basic_32 + +- name: Stat the file again (should have grown to 2MiB) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_32 + + +- name: Expand the file up to 2×1M (2*1024*1024 bytes) (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 2 + blocksize: 1M + register: filesize_test_basic_33 + check_mode: yes + +- name: Expand the file up to 2×1M (2*1024*1024 bytes) (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 2 + blocksize: 1M + register: filesize_test_basic_34 + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_34 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + - filesize_test_basic_31 is changed + - filesize_test_basic_32 is changed + - filesize_test_basic_33 is not changed + - filesize_test_basic_34 is not changed + + - filesize_test_basic_32.cmd == filesize_test_basic_31.cmd + - filesize_test_basic_33.cmd is undefined + - filesize_test_basic_34.cmd is undefined + + - filesize_test_basic_31.filesize.bytes == 2*1024**2 + - filesize_test_basic_32.filesize.bytes == 2*1024**2 + - filesize_test_basic_33.filesize.bytes == 2*1024**2 + - filesize_test_basic_34.filesize.bytes == 2*1024**2 + + - filesize_test_basic_31.size_diff == 2*1024**2 - 57001 + - filesize_test_basic_32.size_diff == 2*1024**2 - 57001 + - filesize_test_basic_33.size_diff == 0 + - filesize_test_basic_34.size_diff == 0 + + - filesize_stat_basic_31.stat.size == 57001 + - filesize_stat_basic_32.stat.size == 2*1024**2 + - filesize_stat_basic_34.stat.size == 
2*1024**2 + + + +- name: Truncate the file to 57kB (57000B) (check mode) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 57kB + register: filesize_test_basic_41 + check_mode: yes + +- name: Stat the resulting file (should be unchanged) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_41 + + +- name: Truncate the file to 57kB (57000B) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 57kB + register: filesize_test_basic_42 + +- name: Stat the resulting file (and get its checksum) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_42 + + +- name: Truncate the file to 57000 B (57kB) (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 57000 B + register: filesize_test_basic_43 + check_mode: yes + +- name: Truncate the file to 57000 B (57kB) (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 57000 B + register: filesize_test_basic_44 + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_basic_44 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + - filesize_test_basic_41 is changed + - filesize_test_basic_42 is changed + - filesize_test_basic_43 is not changed + - filesize_test_basic_44 is not changed + + - filesize_test_basic_42.cmd == filesize_test_basic_41.cmd + - filesize_test_basic_43.cmd is undefined + - filesize_test_basic_44.cmd is undefined + + - filesize_test_basic_41.filesize.bytes == 57000 + - filesize_test_basic_42.filesize.bytes == 57000 + - filesize_test_basic_43.filesize.bytes == 57000 + - filesize_test_basic_44.filesize.bytes == 57000 + + - filesize_test_basic_41.size_diff == 57000 - 2*1024**2 + - filesize_test_basic_42.size_diff == 57000 - 2*1024**2 + - filesize_test_basic_43.size_diff == 0 + - filesize_test_basic_44.size_diff == 0 
+ + - filesize_stat_basic_41.stat.size == 2*1024**2 + - filesize_stat_basic_42.stat.size == 57000 + - filesize_stat_basic_44.stat.size == 57000 + + # The original random file is back. + - filesize_stat_basic_41.stat.checksum != filesize_test_checksum + - filesize_stat_basic_42.stat.checksum == filesize_test_checksum + - filesize_stat_basic_44.stat.checksum == filesize_test_checksum + + + +- name: Remove test file + ansible.builtin.file: + path: "{{ filesize_testfile }}" + state: absent diff --git a/tests/integration/targets/filesize/tasks/errors.yml b/tests/integration/targets/filesize/tasks/errors.yml new file mode 100644 index 0000000000..ffb17d6187 --- /dev/null +++ b/tests/integration/targets/filesize/tasks/errors.yml @@ -0,0 +1,129 @@ +--- +# Check error handling of the module. +# 1. Missing or unknown parameters +# 2. Wrong values (missing source device, invalid size...) + +- name: Trigger an error due to missing parameter (path) + community.general.filesize: + size: 1kB + register: filesize_test_error_01 + ignore_errors: yes + + +- name: Trigger an error due to missing parameter (size) + community.general.filesize: + path: "{{ filesize_testfile }}" + register: filesize_test_error_02 + ignore_errors: yes + + +- name: Trigger an error due to conflicting parameters (force|sparse) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 1MB + force: yes + sparse: yes + register: filesize_test_error_03 + ignore_errors: yes + + +- name: Trigger an error due to invalid file path (not a file) + community.general.filesize: + path: "{{ filesize_testdir }}" + size: 4096B + register: filesize_test_error_04 + ignore_errors: yes + + +- name: Trigger an error due to invalid file path (unexisting parent dir) + community.general.filesize: + path: "/unexistent/{{ filesize_testfile }}" + size: 4096B + register: filesize_test_error_05 + ignore_errors: yes + + +- name: Trigger an error due to invalid size unit (b)" + community.general.filesize: + path: "{{ 
filesize_testfile }}" + size: 4096b + register: filesize_test_error_06 + ignore_errors: yes + + +- name: Trigger an error due to invalid size value (bytes require integer) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 1000.5B + register: filesize_test_error_07 + ignore_errors: yes + + +- name: Trigger an error due to invalid blocksize value (not an integer) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 1M + blocksize: "12.5" + register: filesize_test_error_08 + ignore_errors: yes + + +- name: Trigger an error due to invalid blocksize value type (dict) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 1M + blocksize: + bytes: 512 + register: filesize_test_error_09 + ignore_errors: yes + + +- name: Trigger an error due to invalid source device (/dev/unexistent) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 1M + source: /dev/unexistent + register: filesize_test_error_10 + ignore_errors: yes + + +- name: Trigger an error due to invalid source device (/dev/null) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 1M + source: /dev/null + register: filesize_test_error_11 + ignore_errors: yes + + +- name: Assert that expected errors have been triggered + ansible.builtin.assert: + that: + - "filesize_test_error_01 is failed" + - "filesize_test_error_01.msg == 'missing required arguments: path'" + - "filesize_test_error_02 is failed" + - "filesize_test_error_02.msg == 'missing required arguments: size'" + - "filesize_test_error_03 is failed" + - "filesize_test_error_03.msg == 'parameters values are mutually exclusive: force=true|sparse=true'" + - "filesize_test_error_04 is failed" + - "filesize_test_error_04.msg == '%s exists but is not a regular file' % filesize_testdir" + - "filesize_test_error_05 is failed" + - "filesize_test_error_05.msg == 'parent directory of the file must exist prior to run this module'" + - "filesize_test_error_06 is failed" + - 
"filesize_test_error_06.msg is match('invalid size unit')" + - "filesize_test_error_07 is failed" + - "filesize_test_error_07.msg == 'byte is the smallest unit and requires an integer value'" + - "filesize_test_error_08 is failed" + - "filesize_test_error_08.msg == 'invalid blocksize value: bytes require an integer value'" + - "filesize_test_error_09 is failed" + - "filesize_test_error_09.msg is match('invalid value type')" + - "filesize_test_error_10 is failed" + - "filesize_test_error_10.msg == 'dd error while creating file %s with size 1M from source /dev/unexistent: see stderr for details' % filesize_testfile" + - "filesize_test_error_11 is failed" + - "filesize_test_error_11.msg == 'module error while creating file %s with size 1M from source /dev/null: file is 0 bytes long' % filesize_testfile" + + +- name: Remove test file + ansible.builtin.file: + path: "{{ filesize_testfile }}" + state: absent diff --git a/tests/integration/targets/filesize/tasks/floats.yml b/tests/integration/targets/filesize/tasks/floats.yml new file mode 100644 index 0000000000..cf24b1b845 --- /dev/null +++ b/tests/integration/targets/filesize/tasks/floats.yml @@ -0,0 +1,245 @@ +--- +# Test module with floating point numbers (ensure they're not rounded too +# wrongly), since in python floats are tricky: +# 256.256 * 1000 == 256255.9999999997 +# 512.512 * 1000 == 512511.9999999994 +# 512.513 * 1000 == 512513.0000000006 != .512513 * 1000000 + +- name: Create a file with a size of 512.512kB (check mode) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 512.512kB + register: filesize_test_float_01 + check_mode: yes + +- name: Stat the file (should not exist) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_float_01 + + +- name: Create a file with a size of 512.512kB + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 512.512kB + register: filesize_test_float_02 + +- name: Stat the file (should exist now) + 
ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_float_02 + + +- name: Create a file with a size of 0.512512MB (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 0.512512MB + register: filesize_test_float_03 + check_mode: yes + +- name: Create a file with a size of 0.512512MB (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 0.512512MB + register: filesize_test_float_04 + +- name: Stat the file (should still exist, unchanged) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_float_04 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + - filesize_test_float_01 is changed + - filesize_test_float_02 is changed + - filesize_test_float_03 is not changed + - filesize_test_float_04 is not changed + + - filesize_test_float_02.cmd == filesize_test_float_01.cmd + - filesize_test_float_03.cmd is undefined + - filesize_test_float_04.cmd is undefined + + - filesize_test_float_01.filesize.bytes == 512512 + - filesize_test_float_02.filesize.bytes == 512512 + - filesize_test_float_03.filesize.bytes == 512512 + - filesize_test_float_04.filesize.bytes == 512512 + + - filesize_test_float_01.size_diff == 512512 + - filesize_test_float_02.size_diff == 512512 + - filesize_test_float_03.size_diff == 0 + - filesize_test_float_04.size_diff == 0 + + - filesize_test_float_01.state is undefined + - filesize_test_float_02.state in ["file"] + - filesize_test_float_01.size is undefined + - filesize_test_float_02.size == 512512 + - filesize_test_float_03.size == 512512 + - filesize_test_float_04.size == 512512 + + - not filesize_stat_float_01.stat.exists + - filesize_stat_float_02.stat.exists + - filesize_stat_float_02.stat.isreg + - filesize_stat_float_02.stat.size == 512512 + - filesize_stat_float_04.stat.size == 512512 + + + +- name: Create a file with a size of 512.513kB (check mode) + community.general.filesize: 
+ path: "{{ filesize_testfile }}" + size: 512.513kB + register: filesize_test_float_11 + check_mode: yes + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_float_11 + + +- name: Create a file with a size of 512.513kB + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 512.513kB + register: filesize_test_float_12 + +- name: Stat the file (should have grown of 1 byte) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_float_12 + + +- name: Create a file with a size of 0.512513MB (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 0.512513MB + register: filesize_test_float_13 + check_mode: yes + +- name: Create a file with a size of 0.512513MB (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 0.512513MB + register: filesize_test_float_14 + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_float_14 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + - filesize_test_float_11 is changed + - filesize_test_float_12 is changed + - filesize_test_float_13 is not changed + - filesize_test_float_14 is not changed + + - filesize_test_float_12.cmd == filesize_test_float_11.cmd + - filesize_test_float_13.cmd is undefined + - filesize_test_float_14.cmd is undefined + + - filesize_test_float_11.filesize.bytes == 512513 + - filesize_test_float_12.filesize.bytes == 512513 + - filesize_test_float_13.filesize.bytes == 512513 + - filesize_test_float_14.filesize.bytes == 512513 + + - filesize_test_float_11.size_diff == 1 + - filesize_test_float_12.size_diff == 1 + - filesize_test_float_13.size_diff == 0 + - filesize_test_float_14.size_diff == 0 + + - filesize_test_float_11.size == 512512 + - filesize_test_float_12.size == 512513 + - 
filesize_test_float_13.size == 512513 + - filesize_test_float_14.size == 512513 + + - filesize_stat_float_11.stat.size == 512512 + - filesize_stat_float_12.stat.size == 512513 + - filesize_stat_float_14.stat.size == 512513 + + + +- name: Create a file with a size of 4.004MB (check mode) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4.004MB + register: filesize_test_float_21 + check_mode: yes + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_float_21 + + +- name: Create a file with a size of 4.004MB + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4.004MB + register: filesize_test_float_22 + +- name: Stat the file (should have grown to 4.004MB) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_float_22 + + +- name: Create a file with a size of 4.004MB (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4.004MB + register: filesize_test_float_23 + check_mode: yes + +- name: Create a file with a size of 4.004MB (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4.004MB + register: filesize_test_float_24 + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + register: filesize_stat_float_24 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + - filesize_test_float_21 is changed + - filesize_test_float_22 is changed + - filesize_test_float_23 is not changed + - filesize_test_float_24 is not changed + + - filesize_test_float_22.cmd == filesize_test_float_21.cmd + - filesize_test_float_23.cmd is undefined + - filesize_test_float_24.cmd is undefined + + - filesize_test_float_21.filesize.bytes == 4004000 + - filesize_test_float_22.filesize.bytes == 4004000 + - filesize_test_float_23.filesize.bytes == 4004000 + - 
filesize_test_float_24.filesize.bytes == 4004000 + + - filesize_test_float_21.size_diff == 4004000 - 512513 + - filesize_test_float_22.size_diff == 4004000 - 512513 + - filesize_test_float_23.size_diff == 0 + - filesize_test_float_24.size_diff == 0 + + - filesize_test_float_21.size == 512513 + - filesize_test_float_22.size == 4004000 + - filesize_test_float_23.size == 4004000 + - filesize_test_float_24.size == 4004000 + + - filesize_stat_float_21.stat.size == 512513 + - filesize_stat_float_22.stat.size == 4004000 + - filesize_stat_float_24.stat.size == 4004000 + + +- name: Remove test file + ansible.builtin.file: + path: "{{ filesize_testfile }}" + state: absent diff --git a/tests/integration/targets/filesize/tasks/main.yml b/tests/integration/targets/filesize/tasks/main.yml new file mode 100644 index 0000000000..14415dac9a --- /dev/null +++ b/tests/integration/targets/filesize/tasks/main.yml @@ -0,0 +1,40 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Ensure the test dir is present + ansible.builtin.file: + path: "{{ filesize_testdir }}" + state: directory + +- name: Ensure the test file is absent + ansible.builtin.file: + path: "{{ filesize_testfile }}" + state: absent + +- name: Run all tests and remove the workspace anyway + block: + - name: Include tasks to test error handling + include_tasks: errors.yml + + - name: Include tasks to test basic behaviours + include_tasks: basics.yml + + - name: Include tasks to test playing with floating point numbers + include_tasks: floats.yml + + - name: Include tasks to test playing with sparse files + include_tasks: sparse.yml + when: + - not (ansible_os_family == 'Darwin' and ansible_distribution_version is version('11', '<')) + + - name: Include tasks to test playing with 
symlinks + include_tasks: symlinks.yml + + always: + - name: Remove test dir + ansible.builtin.file: + path: "{{ filesize_testdir }}" + state: absent diff --git a/tests/integration/targets/filesize/tasks/sparse.yml b/tests/integration/targets/filesize/tasks/sparse.yml new file mode 100644 index 0000000000..6f864c2d15 --- /dev/null +++ b/tests/integration/targets/filesize/tasks/sparse.yml @@ -0,0 +1,282 @@ +--- +# Test module with sparse files + +- name: Create a huge sparse file of 4TB (check mode) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4TB + sparse: yes + register: filesize_test_sparse_01 + check_mode: yes + +- name: Stat the file (should not exist) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + get_checksum: no + register: filesize_stat_sparse_01 + + +- name: Create a huge sparse file of 4TB + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4TB + sparse: yes + register: filesize_test_sparse_02 + +- name: Stat the resulting file (should exist now) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + get_checksum: no + register: filesize_stat_sparse_02 + + +- name: Create a huge sparse file of 4TB (4000GB) (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4000GB + sparse: yes + register: filesize_test_sparse_03 + check_mode: yes + +- name: Create a huge sparse file of 4TB (4000GB) (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4000GB + sparse: yes + register: filesize_test_sparse_04 + +- name: Create a huge sparse file of 4TB (4000000 × 1MB) (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4000000 + blocksize: 1MB + sparse: yes + register: filesize_test_sparse_05 + check_mode: yes + +- name: Create a huge sparse file of 4TB (4000000 × 1MB) (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4000000 + blocksize: 1MB + sparse: 
yes + register: filesize_test_sparse_06 + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + get_checksum: no + register: filesize_stat_sparse_06 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + - filesize_test_sparse_01 is changed + - filesize_test_sparse_02 is changed + - filesize_test_sparse_03 is not changed + - filesize_test_sparse_04 is not changed + - filesize_test_sparse_05 is not changed + - filesize_test_sparse_06 is not changed + + - filesize_test_sparse_02.cmd == filesize_test_sparse_01.cmd + - filesize_test_sparse_03.cmd is undefined + - filesize_test_sparse_04.cmd is undefined + - filesize_test_sparse_05.cmd is undefined + - filesize_test_sparse_06.cmd is undefined + + - filesize_test_sparse_01.filesize.bytes == 4*1000**4 + - filesize_test_sparse_02.filesize.bytes == 4*1000**4 + - filesize_test_sparse_03.filesize.bytes == 4*1000**4 + - filesize_test_sparse_04.filesize.bytes == 4*1000**4 + - filesize_test_sparse_05.filesize.bytes == 4*1000**4 + - filesize_test_sparse_06.filesize.bytes == 4*1000**4 + + - filesize_test_sparse_01.size_diff == 4*1000**4 + - filesize_test_sparse_02.size_diff == 4*1000**4 + - filesize_test_sparse_03.size_diff == 0 + - filesize_test_sparse_04.size_diff == 0 + - filesize_test_sparse_05.size_diff == 0 + - filesize_test_sparse_06.size_diff == 0 + + - filesize_test_sparse_01.state is undefined + - filesize_test_sparse_02.state in ["file"] + - filesize_test_sparse_01.size is undefined + - filesize_test_sparse_02.size == 4*1000**4 + - filesize_test_sparse_03.size == 4*1000**4 + - filesize_test_sparse_04.size == 4*1000**4 + - filesize_test_sparse_05.size == 4*1000**4 + - filesize_test_sparse_06.size == 4*1000**4 + + - not filesize_stat_sparse_01.stat.exists + - filesize_stat_sparse_02.stat.exists + - filesize_stat_sparse_02.stat.isreg + - filesize_stat_sparse_02.stat.size == 4*1000**4 + - filesize_stat_sparse_06.stat.size == 4*1000**4 
+ + + +- name: Change sparse file size to 4TiB (check mode) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4TiB + sparse: yes + register: filesize_test_sparse_11 + check_mode: yes + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + get_checksum: no + register: filesize_stat_sparse_11 + + +- name: Change sparse file size to 4TiB + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4TiB + sparse: yes + register: filesize_test_sparse_12 + +- name: Stat the file again (should have grown) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + get_checksum: no + register: filesize_stat_sparse_12 + + +- name: Change sparse file size to 4TiB (4096GiB) (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4096GiB + sparse: yes + register: filesize_test_sparse_13 + check_mode: yes + +- name: Change sparse file size to 4TiB (4096GiB) (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4096GiB + sparse: yes + register: filesize_test_sparse_14 + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + get_checksum: no + register: filesize_stat_sparse_14 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + - filesize_test_sparse_11 is changed + - filesize_test_sparse_12 is changed + - filesize_test_sparse_13 is not changed + - filesize_test_sparse_14 is not changed + + - filesize_test_sparse_12.cmd == filesize_test_sparse_11.cmd + - filesize_test_sparse_13.cmd is undefined + - filesize_test_sparse_14.cmd is undefined + + - filesize_test_sparse_11.size_diff == 398046511104 + - filesize_test_sparse_12.size_diff == 398046511104 + - filesize_test_sparse_13.size_diff == 0 + - filesize_test_sparse_14.size_diff == 0 + + - filesize_test_sparse_11.size == 4000000000000 + - filesize_test_sparse_12.size == 
4398046511104 + - filesize_test_sparse_13.size == 4398046511104 + - filesize_test_sparse_14.size == 4398046511104 + + - filesize_stat_sparse_11.stat.size == 4000000000000 + - filesize_stat_sparse_12.stat.size == 4398046511104 + - filesize_stat_sparse_14.stat.size == 4398046511104 + + + +- name: Change sparse file size to 4.321TB (check mode) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4.321TB + sparse: yes + register: filesize_test_sparse_21 + check_mode: yes + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + get_checksum: no + register: filesize_stat_sparse_21 + + +- name: Change sparse file size to 4.321TB + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4.321TB + sparse: yes + register: filesize_test_sparse_22 + +- name: Stat the file again (should have been reduced) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + get_checksum: no + register: filesize_stat_sparse_22 + + +- name: Change sparse file size to 4321×1GB (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4321 + blocksize: 1GB + sparse: yes + register: filesize_test_sparse_23 + check_mode: yes + +- name: Change sparse file size to 4321×1GB (idempotency) + community.general.filesize: + path: "{{ filesize_testfile }}" + size: 4321 + blocksize: 1GB + sparse: yes + register: filesize_test_sparse_24 + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_testfile }}" + get_checksum: no + register: filesize_stat_sparse_24 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + - filesize_test_sparse_21 is changed + - filesize_test_sparse_22 is changed + - filesize_test_sparse_23 is not changed + - filesize_test_sparse_24 is not changed + + - filesize_test_sparse_22.cmd == filesize_test_sparse_21.cmd + - filesize_test_sparse_23.cmd is undefined + - 
filesize_test_sparse_24.cmd is undefined + + - filesize_test_sparse_21.size_diff == 4321*1000**3 - 4*1024**4 + - filesize_test_sparse_22.size_diff == 4321*1000**3 - 4*1024**4 + - filesize_test_sparse_23.size_diff == 0 + - filesize_test_sparse_24.size_diff == 0 + + - filesize_test_sparse_21.size == 4398046511104 + - filesize_test_sparse_22.size == 4321000000000 + - filesize_test_sparse_23.size == 4321000000000 + - filesize_test_sparse_24.size == 4321000000000 + + - filesize_stat_sparse_21.stat.size == 4398046511104 + - filesize_stat_sparse_22.stat.size == 4321000000000 + - filesize_stat_sparse_24.stat.size == 4321000000000 + + + +- name: Remove test file + ansible.builtin.file: + path: "{{ filesize_testfile }}" + state: absent diff --git a/tests/integration/targets/filesize/tasks/symlinks.yml b/tests/integration/targets/filesize/tasks/symlinks.yml new file mode 100644 index 0000000000..61666497ff --- /dev/null +++ b/tests/integration/targets/filesize/tasks/symlinks.yml @@ -0,0 +1,93 @@ +--- +# Check that the module works with symlinks, as expected, i.e. as dd does: +# follow symlinks. 
+ +- name: Ensure the test file is absent + ansible.builtin.file: + path: "{{ filesize_testfile }}" + state: absent + +- name: Create a broken symlink in the same directory + ansible.builtin.file: + src: "{{ filesize_testfile | basename }}" + dest: "{{ filesize_testlink }}" + state: link + force: yes + follow: no + + + +- name: Create a file with a size of 512 kB (512000 bytes) (check mode) + community.general.filesize: + path: "{{ filesize_testlink }}" + size: "512 kB" + register: filesize_test_symlink_01 + check_mode: yes + +- name: Create a file with a size of 512 kB (512000 bytes) + community.general.filesize: + path: "{{ filesize_testlink }}" + size: "512 kB" + register: filesize_test_symlink_02 + +- name: Stat the resulting file (not the symlink) + ansible.builtin.stat: + path: "{{ filesize_test_symlink_02.path }}" + register: filesize_stat_symlink_02 + + +- name: Create a file with a size of 500 KiB (512000 bytes) (check mode, idempotency) + community.general.filesize: + path: "{{ filesize_testlink }}" + size: "500 KiB" + register: filesize_test_symlink_03 + check_mode: yes + +- name: Create a file with a size of 500 KiB (512000 bytes) (idempotency) + community.general.filesize: + path: "{{ filesize_testlink }}" + size: "500 KiB" + register: filesize_test_symlink_04 + +- name: Stat the file again (should remain the same) + ansible.builtin.stat: + path: "{{ filesize_test_symlink_04.path }}" + register: filesize_stat_symlink_04 + + +- name: Assert that results are as expected + ansible.builtin.assert: + that: + - filesize_test_symlink_01 is changed + - filesize_test_symlink_02 is changed + - filesize_test_symlink_03 is not changed + - filesize_test_symlink_04 is not changed + + - filesize_test_symlink_02.cmd == filesize_test_symlink_01.cmd + - filesize_test_symlink_03.cmd is undefined + - filesize_test_symlink_04.cmd is undefined + + - filesize_test_symlink_01.state is undefined + - filesize_test_symlink_02.state in ["file"] + - filesize_test_symlink_01.size 
is undefined + - filesize_test_symlink_02.size == 512000 + - filesize_test_symlink_03.size == 512000 + - filesize_test_symlink_04.size == 512000 + + - filesize_stat_symlink_02.stat.size == 512000 + - filesize_stat_symlink_04.stat.size == 512000 + + - filesize_test_symlink_04.path == filesize_test_symlink_02.path + - filesize_test_symlink_04.path != filesize_testlink + + + +- name: Remove test file + ansible.builtin.file: + path: "{{ filesize_testfile }}" + state: absent + +- name: Remove test link + ansible.builtin.file: + path: "{{ filesize_testlink }}" + state: absent From d5c26b6f70f415e8a6e990c9ef62ca69b1a0490c Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Mon, 19 Apr 2021 08:14:21 +0200 Subject: [PATCH 0200/3093] filesize: add maintainer (#2264) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 850f2278ca..0b30789aba 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -333,6 +333,8 @@ files: maintainers: dareko $modules/files/archive.py: maintainers: bendoh + $modules/files/filesize.py: + maintainers: quidame $modules/files/ini_file.py: maintainers: jpmens noseka1 $modules/files/iso_extract.py: From fbbd8ecd6ff4aaf6445e1ed610f83424fb3d1c0f Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 20 Apr 2021 04:48:16 +1200 Subject: [PATCH 0201/3093] Fixed validations in a couple of modules (#2268) * fixed udm_dns_zone * fixed udm_dns_record * removed lines from ignore files * added changelog fragment * moved the list elements fix to minor_changes --- .../fragments/2268-validation-univetion.yml | 4 +++ .../cloud/univention/udm_dns_record.py | 11 ++++--- .../modules/cloud/univention/udm_dns_zone.py | 33 ++++++++++++------- tests/sanity/ignore-2.10.txt | 4 --- tests/sanity/ignore-2.11.txt | 4 --- tests/sanity/ignore-2.12.txt | 4 --- tests/sanity/ignore-2.9.txt | 3 -- 7 files changed, 32 insertions(+), 31 deletions(-) create mode 
100644 changelogs/fragments/2268-validation-univetion.yml diff --git a/changelogs/fragments/2268-validation-univetion.yml b/changelogs/fragments/2268-validation-univetion.yml new file mode 100644 index 0000000000..f245380441 --- /dev/null +++ b/changelogs/fragments/2268-validation-univetion.yml @@ -0,0 +1,4 @@ +bugfixes: + - udm_dns_record - fixed default value of parameter ``data`` to match its type (https://github.com/ansible-collections/community.general/pull/2268). +minor_changes: + - udm_dns_zone - elements of list parameters ``nameserver``, ``interfaces``, and ``mx`` are now validated (https://github.com/ansible-collections/community.general/pull/2268). diff --git a/plugins/modules/cloud/univention/udm_dns_record.py b/plugins/modules/cloud/univention/udm_dns_record.py index db89bd46b6..90654bee3c 100644 --- a/plugins/modules/cloud/univention/udm_dns_record.py +++ b/plugins/modules/cloud/univention/udm_dns_record.py @@ -23,21 +23,24 @@ requirements: - Univention options: state: - required: false + type: str default: "present" choices: [ present, absent ] description: - Whether the dns record is present or not. name: + type: str required: true description: - "Name of the record, this is also the DNS record. E.g. www for www.example.com." zone: + type: str required: true description: - Corresponding DNS zone for this record, e.g. example.com. type: + type: str required: true description: - "Define the record type. C(host_record) is a A or AAAA record, @@ -45,8 +48,8 @@ options: is a SRV record and C(txt_record) is a TXT record." - "The available choices are: C(host_record), C(alias), C(ptr_record), C(srv_record), C(txt_record)." data: - required: false - default: [] + type: dict + default: {} description: - "Additional data for this record, e.g. ['a': '192.0.2.1']. Required if C(state=present)." 
@@ -98,7 +101,7 @@ def main(): type='str'), name=dict(required=True, type='str'), - data=dict(default=[], + data=dict(default={}, type='dict'), state=dict(default='present', choices=['present', 'absent'], diff --git a/plugins/modules/cloud/univention/udm_dns_zone.py b/plugins/modules/cloud/univention/udm_dns_zone.py index 2428650e90..3e0cae523d 100644 --- a/plugins/modules/cloud/univention/udm_dns_zone.py +++ b/plugins/modules/cloud/univention/udm_dns_zone.py @@ -22,58 +22,64 @@ requirements: - Python >= 2.6 options: state: - required: false + type: str default: "present" choices: [ present, absent ] description: - Whether the dns zone is present or not. type: + type: str required: true description: - Define if the zone is a forward or reverse DNS zone. - "The available choices are: C(forward_zone), C(reverse_zone)." zone: + type: str required: true description: - DNS zone name, e.g. C(example.com). + aliases: [name] nameserver: - required: false + type: list + elements: str description: - List of appropriate name servers. Required if C(state=present). interfaces: - required: false + type: list + elements: str description: - List of interface IP addresses, on which the server should response this zone. Required if C(state=present). refresh: - required: false + type: int default: 3600 description: - Interval before the zone should be refreshed. retry: - required: false + type: int default: 1800 description: - Interval that should elapse before a failed refresh should be retried. expire: - required: false + type: int default: 604800 description: - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative. ttl: - required: false + type: int default: 600 description: - Minimum TTL field that should be exported with any RR from this zone. contact: - required: false + type: str default: '' description: - Contact person in the SOA record. 
mx: - required: false + type: list + elements: str default: [] description: - List of MX servers. (Must declared as A or AAAA records). @@ -128,9 +134,11 @@ def main(): aliases=['name'], type='str'), nameserver=dict(default=[], - type='list'), + type='list', + elements='str'), interfaces=dict(default=[], - type='list'), + type='list', + elements='str'), refresh=dict(default=3600, type='int'), retry=dict(default=1800, @@ -142,7 +150,8 @@ def main(): contact=dict(default='', type='str'), mx=dict(default=[], - type='list'), + type='list', + elements='str'), state=dict(default='present', choices=['present', 'absent'], type='str') diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 2ef37bd2c4..7beedfa206 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -28,10 +28,6 @@ plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter -plugins/modules/cloud/univention/udm_dns_record.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 60387b1333..80975cf389 100644 --- a/tests/sanity/ignore-2.11.txt +++ 
b/tests/sanity/ignore-2.11.txt @@ -27,10 +27,6 @@ plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter -plugins/modules/cloud/univention/udm_dns_record.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 60387b1333..80975cf389 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -27,10 +27,6 @@ plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter -plugins/modules/cloud/univention/udm_dns_record.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/univention/udm_dns_zone.py 
validate-modules:undocumented-parameter plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index a8ab8c457c..36a0c3e08e 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -23,9 +23,6 @@ plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-do plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter -plugins/modules/cloud/univention/udm_dns_record.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid From 93046e03507fbebbb2bd95b4cce78f9f00f929dd Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Mon, 19 Apr 2021 18:53:08 +0200 Subject: [PATCH 0202/3093] BOTMETA.yml: monitoring/spectrum_model_attrs - add a maintainer (#2274) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 0b30789aba..3d4e34e21f 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -421,6 +421,8 @@ files: maintainers: andsens $modules/monitoring/spectrum_device.py: maintainers: orgito + $modules/monitoring/spectrum_model_attrs.py: + maintainers: tgates81 
$modules/monitoring/stackdriver.py: maintainers: bwhaley $modules/monitoring/statsd.py: From eb3ee8314663b99d2fb99e6a5259cd1ffcb62812 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Mon, 19 Apr 2021 18:53:21 +0200 Subject: [PATCH 0203/3093] BOTMETA.yml: filter/version_sort - add a maintainer (#2273) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 3d4e34e21f..a3c089b1eb 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -67,6 +67,8 @@ files: maintainers: felixfontein $filters/time.py: maintainers: resmo + $filters/version_sort.py: + maintainers: ericzolf $httpapis/: maintainers: $team_networking labels: networking From 20401c63cd67bdd2de33415a76ba0c63f465493d Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Mon, 19 Apr 2021 18:53:36 +0200 Subject: [PATCH 0204/3093] BOTMETA.yml: filter/from_csv - add a maintainer (#2272) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index a3c089b1eb..01608864d5 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -59,6 +59,8 @@ files: maintainers: felixfontein $filters/dict_kv.py: maintainers: giner + $filters/from_csv.py: + maintainers: Ajpantuso $filters/jc.py: maintainers: kellyjonbrazil $filters/list.py: From 04bf8137fa9ddcb2b25ff39fed0ae8cfcc5d20aa Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Mon, 19 Apr 2021 18:53:48 +0200 Subject: [PATCH 0205/3093] BOTMETA.yml: callback/loganalytics - add a maintainer (#2271) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 01608864d5..63a389d65a 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -17,6 +17,8 @@ files: labels: become $callbacks/: labels: callbacks + $callbacks/loganalytics.py: + maintainers: zhcli $callbacks/logstash.py: maintainers: ujenmr $callbacks/say.py: From b10d707a8baa2530deb98a218c820762a2ce879a Mon Sep 
17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?= Date: Mon, 19 Apr 2021 13:02:48 -0400 Subject: [PATCH 0206/3093] nmcli: manual overwrite MAC address for any devices (#2224) * nmcli: manual overwrite MAC address for any devices Don't restrict the ability to manually set the MAC address to the bridge. NetworkManager is able to set a static MAC address to the vaste majority of the device types. * Update changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml | 3 +++ plugins/modules/net_tools/nmcli.py | 8 ++------ 2 files changed, 5 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml diff --git a/changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml b/changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml new file mode 100644 index 0000000000..98852463d8 --- /dev/null +++ b/changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: +- "nmcli - don't restrict the ability to manually set the MAC address to the bridge (https://github.com/ansible-collections/community.general/pull/2224)." diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 2b402a2230..4ae5a1dac9 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -210,7 +210,7 @@ options: default: 300 mac: description: - - This is only used with bridge - MAC address of the bridge. + - MAC address of the connection. - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel. type: str slavepriority: @@ -742,7 +742,7 @@ class Nmcli(object): }) # Layer 2 options. 
- if self.mac_conn_type: + if self.mac: options.update({self.mac_setting: self.mac}) if self.mtu_conn_type: @@ -842,10 +842,6 @@ class Nmcli(object): 'wifi' ) - @property - def mac_conn_type(self): - return self.type == 'bridge' - @property def mac_setting(self): if self.type == 'bridge': From 412b4711c3838133eede34722a2e7dcee45a6655 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 20 Apr 2021 08:03:28 +0200 Subject: [PATCH 0207/3093] BOTMETA: hashids plugin - add a maintainer (#2270) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 63a389d65a..1f44f022f0 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -63,6 +63,8 @@ files: maintainers: giner $filters/from_csv.py: maintainers: Ajpantuso + $filters/hashids: + maintainers: Ajpantuso $filters/jc.py: maintainers: kellyjonbrazil $filters/list.py: From 7aaa26b591df23c119f18a40488bcb7d4cca2897 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 20 Apr 2021 12:55:58 +0200 Subject: [PATCH 0208/3093] BOTMETA.yml: update team_gitlab (#2287) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 1f44f022f0..33a5ad9a20 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1000,7 +1000,7 @@ macros: team_cyberark_conjur: jvanderhoof ryanprior team_e_spirit: MatrixCrawler getjack team_flatpak: JayKayy oolongbrothers - team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman + team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii team_hpux: bcoca davx8342 team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2 team_ipa: Akasurde Nosmoht fxfitz From 49c07dc18ba601cc4d38c11067f516af14af84d9 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 20 Apr 2021 12:58:56 +0200 Subject: [PATCH 0209/3093] BOTMETA.yml: java_cert - add a new 
maintainer (#2288) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 33a5ad9a20..751c3f19f5 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -830,7 +830,7 @@ files: $modules/system/iptables_state.py: maintainers: quidame $modules/system/java_cert.py: - maintainers: haad + maintainers: haad absynth76 $modules/system/java_keystore.py: maintainers: Mogztter $modules/system/kernel_blacklist.py: From 84a79c3da433673637e306577a6045856b8a2e69 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 20 Apr 2021 13:01:29 +0200 Subject: [PATCH 0210/3093] BOTMETA.yml: inventory/proxmox - add a new maintainer (#2291) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 751c3f19f5..f01cbc7c5a 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -88,6 +88,8 @@ files: maintainers: $team_linode labels: cloud linode keywords: linode dynamic inventory script + $inventories/proxmox.py: + maintainers: $team_virt ilijamt $inventories/scaleway.py: maintainers: $team_scaleway labels: cloud scaleway From 5b4fab80e2b34aa1a4c020ad244540bcf1b02ded Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 20 Apr 2021 13:03:01 +0200 Subject: [PATCH 0211/3093] BOTMETA.yml: expand team_opennebula (#2292) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index f01cbc7c5a..87cfed765b 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1013,7 +1013,7 @@ macros: team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1 team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip - team_opennebula: ilicmilan meerkampdvv rsmontero xorel + team_opennebula: ilicmilan meerkampdvv rsmontero xorel 
nilsding team_oracle: manojmeda mross22 nalsaber team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16 team_redfish: mraineri tomasg2012 xmadsen renxulei From 6ab9b05da39344475b93238b3fc59fdbab62e73f Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Tue, 20 Apr 2021 13:20:46 +0200 Subject: [PATCH 0212/3093] Allow keycloak modules to take token as parameter for the auth. (#2250) * Allow keycloak_group.py to take token as parameter for the authentification * Fix some pep8 issues * Add changelog fragment * Refactor get_token to pass module.params + Documentation * Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein * Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein * Fix unit test and add new one for token as param * Fix identation * Check base_url format also if token is given * Update plugins/doc_fragments/keycloak.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_clienttemplate.py Co-authored-by: Felix Fontein * Allow keycloak_group.py to take token as parameter for the authentification * Refactor get_token to pass module.params + Documentation * Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_group.py Co-authored-by: Felix Fontein * Check if base_url is None before to check format * Fix unit test: rename base_url parameter to auth_keycloak_url * Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein * Update changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml Co-authored-by: Amin Vakil * Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Amin Vakil * Update plugins/modules/identity/keycloak/keycloak_client.py 
Co-authored-by: Amin Vakil * Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Amin Vakil * Update plugins/modules/identity/keycloak/keycloak_clienttemplate.py Co-authored-by: Amin Vakil * Update changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml Co-authored-by: Amin Vakil * Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Amin Vakil * Update plugins/modules/identity/keycloak/keycloak_clienttemplate.py Co-authored-by: Amin Vakil * Update plugins/modules/identity/keycloak/keycloak_group.py Co-authored-by: Amin Vakil * Update plugins/modules/identity/keycloak/keycloak_group.py Co-authored-by: Amin Vakil * Switch to modern syntax for the documentation (e.g. community.general.keycloak_client) * Add check either creds or token as argument of all keyloak_* modules * Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- ...eycloak-modules-to-take-token-as-param.yml | 5 ++ plugins/doc_fragments/keycloak.py | 9 +- .../identity/keycloak/keycloak.py | 88 +++++++++++-------- .../identity/keycloak/keycloak_client.py | 42 +++++---- .../keycloak/keycloak_clienttemplate.py | 38 ++++---- .../identity/keycloak/keycloak_group.py | 29 +++--- .../keycloak/test_keycloak_connect.py | 66 ++++++-------- 7 files changed, 155 insertions(+), 122 deletions(-) create mode 100644 changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml diff --git a/changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml b/changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml new file mode 100644 index 0000000000..5b8deb2a03 --- /dev/null +++ b/changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml @@ -0,0 +1,5 @@ +--- +minor_changes: + - keycloak_* modules - allow the keycloak modules to use a token for the + authentication, the modules can take either a token or the credentials + 
(https://github.com/ansible-collections/community.general/pull/2250). diff --git a/plugins/doc_fragments/keycloak.py b/plugins/doc_fragments/keycloak.py index e664d7ec89..72e0b71d50 100644 --- a/plugins/doc_fragments/keycloak.py +++ b/plugins/doc_fragments/keycloak.py @@ -30,7 +30,6 @@ options: description: - Keycloak realm name to authenticate to for API access. type: str - required: true auth_client_secret: description: @@ -41,7 +40,6 @@ options: description: - Username to authenticate for API access with. type: str - required: true aliases: - username @@ -49,10 +47,15 @@ options: description: - Password to authenticate for API access with. type: str - required: true aliases: - password + token: + description: + - Authentication token for Keycloak API. + type: str + version_added: 3.0.0 + validate_certs: description: - Verify TLS certificates (do not disable this in production). diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 58a39645e4..0f73b729cc 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -57,11 +57,12 @@ def keycloak_argument_spec(): return dict( auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False), auth_client_id=dict(type='str', default='admin-cli'), - auth_realm=dict(type='str', required=True), + auth_realm=dict(type='str'), auth_client_secret=dict(type='str', default=None, no_log=True), - auth_username=dict(type='str', aliases=['username'], required=True), - auth_password=dict(type='str', aliases=['password'], required=True, no_log=True), - validate_certs=dict(type='bool', default=True) + auth_username=dict(type='str', aliases=['username']), + auth_password=dict(type='str', aliases=['password'], no_log=True), + validate_certs=dict(type='bool', default=True), + token=dict(type='str', no_log=True), ) @@ -73,41 +74,58 @@ class KeycloakError(Exception): pass -def 
get_token(base_url, validate_certs, auth_realm, client_id, - auth_username, auth_password, client_secret): +def get_token(module_params): + """ Obtains connection header with token for the authentication, + token already given or obtained from credentials + :param module_params: parameters of the module + :return: connection header + """ + token = module_params.get('token') + base_url = module_params.get('auth_keycloak_url') + if not base_url.lower().startswith(('http', 'https')): raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." % base_url) - auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm) - temp_payload = { - 'grant_type': 'password', - 'client_id': client_id, - 'client_secret': client_secret, - 'username': auth_username, - 'password': auth_password, - } - # Remove empty items, for instance missing client_secret - payload = dict( - (k, v) for k, v in temp_payload.items() if v is not None) - try: - r = json.loads(to_native(open_url(auth_url, method='POST', - validate_certs=validate_certs, - data=urlencode(payload)).read())) - except ValueError as e: - raise KeycloakError( - 'API returned invalid JSON when trying to obtain access token from %s: %s' - % (auth_url, str(e))) - except Exception as e: - raise KeycloakError('Could not obtain access token from %s: %s' - % (auth_url, str(e))) - try: - return { - 'Authorization': 'Bearer ' + r['access_token'], - 'Content-Type': 'application/json' + if token is None: + base_url = module_params.get('auth_keycloak_url') + validate_certs = module_params.get('validate_certs') + auth_realm = module_params.get('auth_realm') + client_id = module_params.get('auth_client_id') + auth_username = module_params.get('auth_username') + auth_password = module_params.get('auth_password') + client_secret = module_params.get('auth_client_secret') + auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm) + temp_payload = { + 'grant_type': 'password', + 'client_id': client_id, + 'client_secret': 
client_secret, + 'username': auth_username, + 'password': auth_password, } - except KeyError: - raise KeycloakError( - 'Could not obtain access token from %s' % auth_url) + # Remove empty items, for instance missing client_secret + payload = dict( + (k, v) for k, v in temp_payload.items() if v is not None) + try: + r = json.loads(to_native(open_url(auth_url, method='POST', + validate_certs=validate_certs, + data=urlencode(payload)).read())) + except ValueError as e: + raise KeycloakError( + 'API returned invalid JSON when trying to obtain access token from %s: %s' + % (auth_url, str(e))) + except Exception as e: + raise KeycloakError('Could not obtain access token from %s: %s' + % (auth_url, str(e))) + + try: + token = r['access_token'] + except KeyError: + raise KeycloakError( + 'Could not obtain access token from %s' % auth_url) + return { + 'Authorization': 'Bearer ' + token, + 'Content-Type': 'application/json' + } class KeycloakAPI(object): diff --git a/plugins/modules/identity/keycloak/keycloak_client.py b/plugins/modules/identity/keycloak/keycloak_client.py index e49edcf1d2..e3e39fc173 100644 --- a/plugins/modules/identity/keycloak/keycloak_client.py +++ b/plugins/modules/identity/keycloak/keycloak_client.py @@ -511,20 +511,30 @@ author: ''' EXAMPLES = ''' -- name: Create or update Keycloak client (minimal example) - local_action: - module: keycloak_client - auth_client_id: admin-cli +- name: Create or update Keycloak client (minimal example), authentication with credentials + community.general.keycloak_client: auth_keycloak_url: https://auth.example.com/auth auth_realm: master auth_username: USERNAME auth_password: PASSWORD client_id: test state: present + delegate_to: localhost + + +- name: Create or update Keycloak client (minimal example), authentication with token + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + token: TOKEN + client_id: test + state: present + 
delegate_to: localhost + - name: Delete a Keycloak client - local_action: - module: keycloak_client + community.general.keycloak_client: auth_client_id: admin-cli auth_keycloak_url: https://auth.example.com/auth auth_realm: master @@ -532,10 +542,11 @@ EXAMPLES = ''' auth_password: PASSWORD client_id: test state: absent + delegate_to: localhost + - name: Create or update a Keycloak client (with all the bells and whistles) - local_action: - module: keycloak_client + community.general.keycloak_client: auth_client_id: admin-cli auth_keycloak_url: https://auth.example.com/auth auth_realm: master @@ -619,6 +630,7 @@ EXAMPLES = ''' use.jwks.url: true jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH + delegate_to: localhost ''' RETURN = ''' @@ -740,21 +752,15 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['client_id', 'id']])) + required_one_of=([['client_id', 'id'], + ['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) # Obtain access token, initialize API try: - connection_header = get_token( - base_url=module.params.get('auth_keycloak_url'), - validate_certs=module.params.get('validate_certs'), - auth_realm=module.params.get('auth_realm'), - client_id=module.params.get('auth_client_id'), - auth_username=module.params.get('auth_username'), - auth_password=module.params.get('auth_password'), - client_secret=module.params.get('auth_client_secret'), - ) + connection_header = get_token(module.params) except KeycloakError as e: module.fail_json(msg=str(e)) diff --git a/plugins/modules/identity/keycloak/keycloak_clienttemplate.py b/plugins/modules/identity/keycloak/keycloak_clienttemplate.py index d68198d570..82991aea85 100644 --- 
a/plugins/modules/identity/keycloak/keycloak_clienttemplate.py +++ b/plugins/modules/identity/keycloak/keycloak_clienttemplate.py @@ -169,9 +169,8 @@ author: ''' EXAMPLES = ''' -- name: Create or update Keycloak client template (minimal) - local_action: - module: keycloak_clienttemplate +- name: Create or update Keycloak client template (minimal), authentication with credentials + community.general.keycloak_client: auth_client_id: admin-cli auth_keycloak_url: https://auth.example.com/auth auth_realm: master @@ -179,10 +178,20 @@ EXAMPLES = ''' auth_password: PASSWORD realm: master name: this_is_a_test + delegate_to: localhost + +- name: Create or update Keycloak client template (minimal), authentication with token + community.general.keycloak_clienttemplate: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + token: TOKEN + realm: master + name: this_is_a_test + delegate_to: localhost - name: Delete Keycloak client template - local_action: - module: keycloak_clienttemplate + community.general.keycloak_client: auth_client_id: admin-cli auth_keycloak_url: https://auth.example.com/auth auth_realm: master @@ -191,10 +200,10 @@ EXAMPLES = ''' realm: master state: absent name: test01 + delegate_to: localhost - name: Create or update Keycloak client template (with a protocol mapper) - local_action: - module: keycloak_clienttemplate + community.general.keycloak_client: auth_client_id: admin-cli auth_keycloak_url: https://auth.example.com/auth auth_realm: master @@ -217,6 +226,7 @@ EXAMPLES = ''' protocolMapper: oidc-usermodel-property-mapper full_scope_allowed: false id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f + delegate_to: localhost ''' RETURN = ''' @@ -296,21 +306,15 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - required_one_of=([['id', 'name']])) + required_one_of=([['id', 'name'], + ['token', 'auth_realm', 'auth_username', 'auth_password']]), + 
required_together=([['auth_realm', 'auth_username', 'auth_password']])) result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) # Obtain access token, initialize API try: - connection_header = get_token( - base_url=module.params.get('auth_keycloak_url'), - validate_certs=module.params.get('validate_certs'), - auth_realm=module.params.get('auth_realm'), - client_id=module.params.get('auth_client_id'), - auth_username=module.params.get('auth_username'), - auth_password=module.params.get('auth_password'), - client_secret=module.params.get('auth_client_secret'), - ) + connection_header = get_token(module.params) except KeycloakError as e: module.fail_json(msg=str(e)) kc = KeycloakAPI(module, connection_header) diff --git a/plugins/modules/identity/keycloak/keycloak_group.py b/plugins/modules/identity/keycloak/keycloak_group.py index 45b5c2905b..56e72fcb94 100644 --- a/plugins/modules/identity/keycloak/keycloak_group.py +++ b/plugins/modules/identity/keycloak/keycloak_group.py @@ -81,7 +81,7 @@ author: ''' EXAMPLES = ''' -- name: Create a Keycloak group +- name: Create a Keycloak group, authentication with credentials community.general.keycloak_group: name: my-new-kc-group realm: MyCustomRealm @@ -93,6 +93,16 @@ EXAMPLES = ''' auth_password: PASSWORD delegate_to: localhost +- name: Create a Keycloak group, authentication with token + community.general.keycloak_group: + name: my-new-kc-group + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + - name: Delete a keycloak group community.general.keycloak_group: id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd' @@ -217,30 +227,25 @@ def main(): realm=dict(default='master'), id=dict(type='str'), name=dict(type='str'), - attributes=dict(type='dict') + attributes=dict(type='dict'), ) argument_spec.update(meta_args) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, - 
required_one_of=([['id', 'name']])) + required_one_of=([['id', 'name'], + ['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) result = dict(changed=False, msg='', diff={}, group='') # Obtain access token, initialize API try: - connection_header = get_token( - base_url=module.params.get('auth_keycloak_url'), - validate_certs=module.params.get('validate_certs'), - auth_realm=module.params.get('auth_realm'), - client_id=module.params.get('auth_client_id'), - auth_username=module.params.get('auth_username'), - auth_password=module.params.get('auth_password'), - client_secret=module.params.get('auth_client_secret'), - ) + connection_header = get_token(module.params) except KeycloakError as e: module.fail_json(msg=str(e)) + kc = KeycloakAPI(module, connection_header) realm = module.params.get('realm') diff --git a/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py b/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py index a929382abb..49692a412e 100644 --- a/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py +++ b/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py @@ -11,6 +11,16 @@ from ansible_collections.community.general.plugins.module_utils.identity.keycloa from ansible.module_utils.six import StringIO from ansible.module_utils.six.moves.urllib.error import HTTPError +module_params_creds = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'validate_certs': True, + 'auth_realm': 'master', + 'client_id': 'admin-cli', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'client_secret': None, +} + def build_mocked_request(get_id_user_count, response_dict): def _mocked_requests(*args, **kwargs): @@ -58,16 +68,22 @@ def mock_good_connection(mocker): ) -def test_connect_to_keycloak(mock_good_connection): - keycloak_header = get_token( - base_url='http://keycloak.url/auth', - 
validate_certs=True, - auth_realm='master', - client_id='admin-cli', - auth_username='admin', - auth_password='admin', - client_secret=None - ) +def test_connect_to_keycloak_with_creds(mock_good_connection): + keycloak_header = get_token(module_params_creds) + assert keycloak_header == { + 'Authorization': 'Bearer alongtoken', + 'Content-Type': 'application/json' + } + + +def test_connect_to_keycloak_with_token(mock_good_connection): + module_params_token = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'validate_certs': True, + 'client_id': 'admin-cli', + 'token': "alongtoken" + } + keycloak_header = get_token(module_params_token) assert keycloak_header == { 'Authorization': 'Bearer alongtoken', 'Content-Type': 'application/json' @@ -87,15 +103,7 @@ def mock_bad_json_returned(mocker): def test_bad_json_returned(mock_bad_json_returned): with pytest.raises(KeycloakError) as raised_error: - get_token( - base_url='http://keycloak.url/auth', - validate_certs=True, - auth_realm='master', - client_id='admin-cli', - auth_username='admin', - auth_password='admin', - client_secret=None - ) + get_token(module_params_creds) # cannot check all the message, different errors message for the value # error in python 2.6, 2.7 and 3.*. 
assert ( @@ -125,15 +133,7 @@ def mock_401_returned(mocker): def test_error_returned(mock_401_returned): with pytest.raises(KeycloakError) as raised_error: - get_token( - base_url='http://keycloak.url/auth', - validate_certs=True, - auth_realm='master', - client_id='admin-cli', - auth_username='notadminuser', - auth_password='notadminpassword', - client_secret=None - ) + get_token(module_params_creds) assert str(raised_error.value) == ( 'Could not obtain access token from http://keycloak.url' '/auth/realms/master/protocol/openid-connect/token: ' @@ -154,15 +154,7 @@ def mock_json_without_token_returned(mocker): def test_json_without_token_returned(mock_json_without_token_returned): with pytest.raises(KeycloakError) as raised_error: - get_token( - base_url='http://keycloak.url/auth', - validate_certs=True, - auth_realm='master', - client_id='admin-cli', - auth_username='admin', - auth_password='admin', - client_secret=None - ) + get_token(module_params_creds) assert str(raised_error.value) == ( 'Could not obtain access token from http://keycloak.url' '/auth/realms/master/protocol/openid-connect/token' From 0231dad3e8eb8166bc4487914fd346fb3cf99313 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 20 Apr 2021 14:36:25 +0200 Subject: [PATCH 0213/3093] BOTMETA.yml: update team_ipa (#2289) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 87cfed765b..14d78cfc33 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1005,7 +1005,7 @@ macros: team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii team_hpux: bcoca davx8342 team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2 - team_ipa: Akasurde Nosmoht fxfitz + team_ipa: Akasurde Nosmoht fxfitz justchris1 team_jboss: Wolfant jairojunior wbrefvem team_keycloak: eikef ndclt team_linode: InTheCloudDan decentral1se displague rmcintosh 
Charliekenney23 LBGarber From fc12eca65da7d1321932167b6c09f5ede3434edd Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 20 Apr 2021 15:15:10 +0200 Subject: [PATCH 0214/3093] BOTMETA.yml: update team_virt (#2286) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 14d78cfc33..cf9e3ad92b 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1021,4 +1021,4 @@ macros: team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom - team_virt: joshainglis karmab Aversiste Thulium-Drake + team_virt: joshainglis karmab Aversiste Thulium-Drake Ajpantuso From f7928d3eb712792abaeadf3f7fcb2e91eac3a622 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 20 Apr 2021 22:31:59 +0200 Subject: [PATCH 0215/3093] Update BOTMETA.yml (#2304) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index cf9e3ad92b..399dfbc7a0 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1021,4 +1021,4 @@ macros: team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom - team_virt: joshainglis karmab Aversiste Thulium-Drake Ajpantuso + team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso From 8ab19fc50b875e97fb896df894fe9073c3879002 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Wed, 21 Apr 2021 11:22:06 +0200 Subject: [PATCH 0216/3093] BOTMETA.yml: github/github_repo - add a new maintainer (#2309) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 
399dfbc7a0..b2aed3ca5c 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -758,6 +758,8 @@ files: ignore: erydo $modules/source_control/github/github_release.py: maintainers: adrianmoisey + $modules/source_control/github/github_repo.py: + maintainers: atorrescogollo $modules/source_control/github/: maintainers: stpierre $modules/source_control/gitlab/: From fe2757f0570affc2bd92ee2f060aa10d55d4189d Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Wed, 21 Apr 2021 12:40:45 +0200 Subject: [PATCH 0217/3093] BOTMETA.yml: lxd/lxd_profile - add a new maintainer (#2310) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index b2aed3ca5c..424bad19fd 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -190,6 +190,8 @@ files: maintainers: zbal $modules/cloud/lxc/lxc_container.py: maintainers: cloudnull + $modules/cloud/lxc/lxc_profile.py: + maintainers: conloos $modules/cloud/lxd/: ignore: hnakamur $modules/cloud/memset/: From cecbc2be2d9a85d5bbccc9851fbb52f2ef4f39f4 Mon Sep 17 00:00:00 2001 From: kris2kris <36845394+kris2kris@users.noreply.github.com> Date: Wed, 21 Apr 2021 12:42:28 +0200 Subject: [PATCH 0218/3093] Keycloak: add realm management (#2155) * Keycloak: add realm management * Minor fixes * Review fixes * Remove sort of list because condition on list cannot be true * Add exception in fail_json and update test * Change output for secret and version in comments * Update copyright Co-authored-by: Felix Fontein * Add missing traceback * Apply suggestions from code review * Update plugins/modules/identity/keycloak/keycloak_realm.py Co-authored-by: Felix Fontein --- .../identity/keycloak/keycloak.py | 73 ++ .../identity/keycloak/keycloak_realm.py | 787 ++++++++++++++++++ plugins/modules/keycloak_realm.py | 1 + .../modules/identity/keycloak/__init__.py | 0 .../identity/keycloak/test_keycloak_realm.py | 310 +++++++ 5 files changed, 1171 insertions(+) create mode 100644 
plugins/modules/identity/keycloak/keycloak_realm.py create mode 120000 plugins/modules/keycloak_realm.py create mode 100644 tests/unit/plugins/modules/identity/keycloak/__init__.py create mode 100644 tests/unit/plugins/modules/identity/keycloak/test_keycloak_realm.py diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 0f73b729cc..082e0af391 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -30,12 +30,16 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type import json +import traceback from ansible.module_utils.urls import open_url from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.six.moves.urllib.error import HTTPError from ansible.module_utils._text import to_native +URL_REALMS = "{url}/realms" +URL_REALM = "{url}/realms/{realm}" + URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token" URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}" URL_CLIENTS = "{url}/admin/realms/{realm}/clients" @@ -138,6 +142,75 @@ class KeycloakAPI(object): self.validate_certs = self.module.params.get('validate_certs') self.restheaders = connection_header + def get_realm_by_id(self, realm='master'): + """ Obtain realm representation by id + + :param realm: realm id + :return: dict of real, representation or None if none matching exist + """ + realm_url = URL_REALM.format(url=self.baseurl, realm=realm) + + try: + return json.loads(to_native(open_url(realm_url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, 
str(e)), + exception=traceback.format_exc()) + except Exception as e: + self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + + def update_realm(self, realmrep, realm="master"): + """ Update an existing realm + :param realmrep: corresponding (partial/full) realm representation with updates + :param realm: realm to be updated in Keycloak + :return: HTTPResponse object on success + """ + realm_url = URL_REALM.format(url=self.baseurl, realm=realm) + + try: + return open_url(realm_url, method='PUT', headers=self.restheaders, + data=json.dumps(realmrep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not update realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + + def create_realm(self, realmrep): + """ Create a realm in keycloak + :param realmrep: Realm representation of realm to be created. + :return: HTTPResponse object on success + """ + realm_url = URL_REALMS.format(url=self.baseurl) + + try: + return open_url(realm_url, method='POST', headers=self.restheaders, + data=json.dumps(realmrep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not create realm %s: %s' % (realmrep['id'], str(e)), + exception=traceback.format_exc()) + + def delete_realm(self, realm="master"): + """ Delete a realm from Keycloak + + :param realm: realm to be deleted + :return: HTTPResponse object on success + """ + realm_url = URL_REALM.format(url=self.baseurl, realm=realm) + + try: + return open_url(realm_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not delete realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + def get_clients(self, realm='master', filter=None): """ Obtains client representations for clients in a realm diff --git a/plugins/modules/identity/keycloak/keycloak_realm.py 
b/plugins/modules/identity/keycloak/keycloak_realm.py new file mode 100644 index 0000000000..7e80bd3d3d --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_realm.py @@ -0,0 +1,787 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017, Eike Frost +# Copyright (c) 2021, Christophe Gilles +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_realm + +short_description: Allows administration of Keycloak realm via Keycloak API +version_added: 3.0.0 + + +description: + - This module allows the administration of Keycloak realm via the Keycloak REST API. It + requires access to the REST API via OpenID Connect; the user connecting and the realm being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate realm definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + Aliases are provided so camelCased versions can be used as well. + + - The Keycloak API does not always sanity check inputs e.g. you can set + SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful. + If you do not specify a setting, usually a sensible default is chosen. + +options: + state: + description: + - State of the realm. + - On C(present), the realm will be created (or updated if it exists already). + - On C(absent), the realm will be removed if it exists. + choices: ['present', 'absent'] + default: 'present' + type: str + + id: + description: + - The realm to create. + type: str + realm: + description: + - The realm name. 
+ type: str + access_code_lifespan: + description: + - The realm access code lifespan. + aliases: + - accessCodeLifespan + type: int + access_code_lifespan_login: + description: + - The realm access code lifespan login. + aliases: + - accessCodeLifespanLogin + type: int + access_code_lifespan_user_action: + description: + - The realm access code lifespan user action. + aliases: + - accessCodeLifespanUserAction + type: int + access_token_lifespan: + description: + - The realm access token lifespan. + aliases: + - accessTokenLifespan + type: int + access_token_lifespan_for_implicit_flow: + description: + - The realm access token lifespan for implicit flow. + aliases: + - accessTokenLifespanForImplicitFlow + type: int + account_theme: + description: + - The realm account theme. + aliases: + - accountTheme + type: str + action_token_generated_by_admin_lifespan: + description: + - The realm action token generated by admin lifespan. + aliases: + - actionTokenGeneratedByAdminLifespan + type: int + action_token_generated_by_user_lifespan: + description: + - The realm action token generated by user lifespan. + aliases: + - actionTokenGeneratedByUserLifespan + type: int + admin_events_details_enabled: + description: + - The realm admin events details enabled. + aliases: + - adminEventsDetailsEnabled + type: bool + admin_events_enabled: + description: + - The realm admin events enabled. + aliases: + - adminEventsEnabled + type: bool + admin_theme: + description: + - The realm admin theme. + aliases: + - adminTheme + type: str + attributes: + description: + - The realm attributes. + type: dict + browser_flow: + description: + - The realm browser flow. + aliases: + - browserFlow + type: str + browser_security_headers: + description: + - The realm browser security headers. + aliases: + - browserSecurityHeaders + type: dict + brute_force_protected: + description: + - The realm brute force protected. 
+ aliases: + - bruteForceProtected + type: bool + client_authentication_flow: + description: + - The realm client authentication flow. + aliases: + - clientAuthenticationFlow + type: str + client_scope_mappings: + description: + - The realm client scope mappings. + aliases: + - clientScopeMappings + type: dict + default_default_client_scopes: + description: + - The realm default default client scopes. + aliases: + - defaultDefaultClientScopes + type: list + elements: dict + default_groups: + description: + - The realm default groups. + aliases: + - defaultGroups + type: list + elements: dict + default_locale: + description: + - The realm default locale. + aliases: + - defaultLocale + type: str + default_optional_client_scopes: + description: + - The realm default optional client scopes. + aliases: + - defaultOptionalClientScopes + type: list + elements: dict + default_roles: + description: + - The realm default roles. + aliases: + - defaultRoles + type: list + elements: dict + default_signature_algorithm: + description: + - The realm default signature algorithm. + aliases: + - defaultSignatureAlgorithm + type: str + direct_grant_flow: + description: + - The realm direct grant flow. + aliases: + - directGrantFlow + type: str + display_name: + description: + - The realm display name. + aliases: + - displayName + type: str + display_name_html: + description: + - The realm display name HTML. + aliases: + - displayNameHtml + type: str + docker_authentication_flow: + description: + - The realm docker authentication flow. + aliases: + - dockerAuthenticationFlow + type: str + duplicate_emails_allowed: + description: + - The realm duplicate emails allowed option. + aliases: + - duplicateEmailsAllowed + type: bool + edit_username_allowed: + description: + - The realm edit username allowed option. + aliases: + - editUsernameAllowed + type: bool + email_theme: + description: + - The realm email theme. 
+ aliases: + - emailTheme + type: str + enabled: + description: + - The realm enabled option. + type: bool + enabled_event_types: + description: + - The realm enabled event types. + aliases: + - enabledEventTypes + type: list + elements: str + events_expiration: + description: + - The realm events expiration. + aliases: + - eventsExpiration + type: int + events_listeners: + description: + - The realm events listeners. + aliases: + - eventsListeners + type: list + elements: dict + failure_factor: + description: + - The realm failure factor. + aliases: + - failureFactor + type: int + internationalization_enabled: + description: + - The realm internationalization enabled option. + aliases: + - internationalizationEnabled + type: bool + login_theme: + description: + - The realm login theme. + aliases: + - loginTheme + type: str + login_with_email_allowed: + description: + - The realm login with email allowed option. + aliases: + - loginWithEmailAllowed + type: bool + max_delta_time_seconds: + description: + - The realm max delta time in seconds. + aliases: + - maxDeltaTimeSeconds + type: int + max_failure_wait_seconds: + description: + - The realm max failure wait in seconds. + aliases: + - maxFailureWaitSeconds + type: int + minimum_quick_login_wait_seconds: + description: + - The realm minimum quick login wait in seconds. + aliases: + - minimumQuickLoginWaitSeconds + type: int + not_before: + description: + - The realm not before. + aliases: + - notBefore + type: int + offline_session_idle_timeout: + description: + - The realm offline session idle timeout. + aliases: + - offlineSessionIdleTimeout + type: int + offline_session_max_lifespan: + description: + - The realm offline session max lifespan. + aliases: + - offlineSessionMaxLifespan + type: int + offline_session_max_lifespan_enabled: + description: + - The realm offline session max lifespan enabled option. 
+ aliases: + - offlineSessionMaxLifespanEnabled + type: bool + otp_policy_algorithm: + description: + - The realm otp policy algorithm. + aliases: + - otpPolicyAlgorithm + type: str + otp_policy_digits: + description: + - The realm otp policy digits. + aliases: + - otpPolicyDigits + type: int + otp_policy_initial_counter: + description: + - The realm otp policy initial counter. + aliases: + - otpPolicyInitialCounter + type: int + otp_policy_look_ahead_window: + description: + - The realm otp policy look ahead window. + aliases: + - otpPolicyLookAheadWindow + type: int + otp_policy_period: + description: + - The realm otp policy period. + aliases: + - otpPolicyPeriod + type: int + otp_policy_type: + description: + - The realm otp policy type. + aliases: + - otpPolicyType + type: str + otp_supported_applications: + description: + - The realm otp supported applications. + aliases: + - otpSupportedApplications + type: list + elements: str + password_policy: + description: + - The realm password policy. + aliases: + - passwordPolicy + type: str + permanent_lockout: + description: + - The realm permanent lockout. + aliases: + - permanentLockout + type: bool + quick_login_check_milli_seconds: + description: + - The realm quick login check in milliseconds. + aliases: + - quickLoginCheckMilliSeconds + type: int + refresh_token_max_reuse: + description: + - The realm refresh token max reuse. + aliases: + - refreshTokenMaxReuse + type: int + registration_allowed: + description: + - The realm registration allowed option. + aliases: + - registrationAllowed + type: bool + registration_email_as_username: + description: + - The realm registration email as username option. + aliases: + - registrationEmailAsUsername + type: bool + registration_flow: + description: + - The realm registration flow. + aliases: + - registrationFlow + type: str + remember_me: + description: + - The realm remember me option. 
+ aliases: + - rememberMe + type: bool + reset_credentials_flow: + description: + - The realm reset credentials flow. + aliases: + - resetCredentialsFlow + type: str + reset_password_allowed: + description: + - The realm reset password allowed option. + aliases: + - resetPasswordAllowed + type: bool + revoke_refresh_token: + description: + - The realm revoke refresh token option. + aliases: + - revokeRefreshToken + type: bool + smtp_server: + description: + - The realm smtp server. + aliases: + - smtpServer + type: dict + ssl_required: + description: + - The realm ssl required option. + aliases: + - sslRequired + type: bool + sso_session_idle_timeout: + description: + - The realm sso session idle timeout. + aliases: + - ssoSessionIdleTimeout + type: int + sso_session_idle_timeout_remember_me: + description: + - The realm sso session idle timeout remember me. + aliases: + - ssoSessionIdleTimeoutRememberMe + type: int + sso_session_max_lifespan: + description: + - The realm sso session max lifespan. + aliases: + - ssoSessionMaxLifespan + type: int + sso_session_max_lifespan_remember_me: + description: + - The realm sso session max lifespan remember me. + aliases: + - ssoSessionMaxLifespanRememberMe + type: int + supported_locales: + description: + - The realm supported locales. + aliases: + - supportedLocales + type: list + elements: str + user_managed_access_allowed: + description: + - The realm user managed access allowed option. + aliases: + - userManagedAccessAllowed + type: bool + verify_email: + description: + - The realm verify email option. + aliases: + - verifyEmail + type: bool + wait_increment_seconds: + description: + - The realm wait increment in seconds. 
+ aliases: + - waitIncrementSeconds + type: int + +extends_documentation_fragment: +- community.general.keycloak + + +author: + - Christophe Gilles (@kris2kris) +''' + +EXAMPLES = ''' +- name: Create or update Keycloak realm (minimal example) + community.general.keycloak_realm: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + id: realm + state: present + +- name: Delete a Keycloak realm + community.general.keycloak_realm: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + id: test + state: absent + +''' + +RETURN = ''' +msg: + description: Message as to what action was taken + returned: always + type: str + sample: "Realm testrealm has been updated" + +proposed: + description: realm representation of proposed changes to realm + returned: always + type: dict + sample: { + id: "test" + } +existing: + description: realm representation of existing realm (sample is truncated) + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +end_state: + description: realm representation of realm after module execution (sample is truncated) + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def sanitize_cr(realmrep): + """ Removes probably sensitive details from a realm representation + + :param realmrep: the realmrep dict to be sanitized + :return: sanitized realmrep dict + """ + result = realmrep.copy() + if 'secret' in 
result: + result['secret'] = '********' + if 'attributes' in result: + if 'saml.signing.private.key' in result['attributes']: + result['attributes'] = result['attributes'].copy() + result['attributes']['saml.signing.private.key'] = '********' + return result + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + + id=dict(type='str'), + realm=dict(type='str'), + access_code_lifespan=dict(type='int', aliases=['accessCodeLifespan']), + access_code_lifespan_login=dict(type='int', aliases=['accessCodeLifespanLogin']), + access_code_lifespan_user_action=dict(type='int', aliases=['accessCodeLifespanUserAction']), + access_token_lifespan=dict(type='int', aliases=['accessTokenLifespan'], no_log=False), + access_token_lifespan_for_implicit_flow=dict(type='int', aliases=['accessTokenLifespanForImplicitFlow'], no_log=False), + account_theme=dict(type='str', aliases=['accountTheme']), + action_token_generated_by_admin_lifespan=dict(type='int', aliases=['actionTokenGeneratedByAdminLifespan'], no_log=False), + action_token_generated_by_user_lifespan=dict(type='int', aliases=['actionTokenGeneratedByUserLifespan'], no_log=False), + admin_events_details_enabled=dict(type='bool', aliases=['adminEventsDetailsEnabled']), + admin_events_enabled=dict(type='bool', aliases=['adminEventsEnabled']), + admin_theme=dict(type='str', aliases=['adminTheme']), + attributes=dict(type='dict'), + browser_flow=dict(type='str', aliases=['browserFlow']), + browser_security_headers=dict(type='dict', aliases=['browserSecurityHeaders']), + brute_force_protected=dict(type='bool', aliases=['bruteForceProtected']), + client_authentication_flow=dict(type='str', aliases=['clientAuthenticationFlow']), + client_scope_mappings=dict(type='dict', aliases=['clientScopeMappings']), + default_default_client_scopes=dict(type='list', elements='dict', 
aliases=['defaultDefaultClientScopes']), + default_groups=dict(type='list', elements='dict', aliases=['defaultGroups']), + default_locale=dict(type='str', aliases=['defaultLocale']), + default_optional_client_scopes=dict(type='list', elements='dict', aliases=['defaultOptionalClientScopes']), + default_roles=dict(type='list', elements='dict', aliases=['defaultRoles']), + default_signature_algorithm=dict(type='str', aliases=['defaultSignatureAlgorithm']), + direct_grant_flow=dict(type='str', aliases=['directGrantFlow']), + display_name=dict(type='str', aliases=['displayName']), + display_name_html=dict(type='str', aliases=['displayNameHtml']), + docker_authentication_flow=dict(type='str', aliases=['dockerAuthenticationFlow']), + duplicate_emails_allowed=dict(type='bool', aliases=['duplicateEmailsAllowed']), + edit_username_allowed=dict(type='bool', aliases=['editUsernameAllowed']), + email_theme=dict(type='str', aliases=['emailTheme']), + enabled=dict(type='bool'), + enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']), + events_expiration=dict(type='int', aliases=['eventsExpiration']), + events_listeners=dict(type='list', elements='dict', aliases=['eventsListeners']), + failure_factor=dict(type='int', aliases=['failureFactor']), + internationalization_enabled=dict(type='bool', aliases=['internationalizationEnabled']), + login_theme=dict(type='str', aliases=['loginTheme']), + login_with_email_allowed=dict(type='bool', aliases=['loginWithEmailAllowed']), + max_delta_time_seconds=dict(type='int', aliases=['maxDeltaTimeSeconds']), + max_failure_wait_seconds=dict(type='int', aliases=['maxFailureWaitSeconds']), + minimum_quick_login_wait_seconds=dict(type='int', aliases=['minimumQuickLoginWaitSeconds']), + not_before=dict(type='int', aliases=['notBefore']), + offline_session_idle_timeout=dict(type='int', aliases=['offlineSessionIdleTimeout']), + offline_session_max_lifespan=dict(type='int', aliases=['offlineSessionMaxLifespan']), + 
offline_session_max_lifespan_enabled=dict(type='bool', aliases=['offlineSessionMaxLifespanEnabled']), + otp_policy_algorithm=dict(type='str', aliases=['otpPolicyAlgorithm']), + otp_policy_digits=dict(type='int', aliases=['otpPolicyDigits']), + otp_policy_initial_counter=dict(type='int', aliases=['otpPolicyInitialCounter']), + otp_policy_look_ahead_window=dict(type='int', aliases=['otpPolicyLookAheadWindow']), + otp_policy_period=dict(type='int', aliases=['otpPolicyPeriod']), + otp_policy_type=dict(type='str', aliases=['otpPolicyType']), + otp_supported_applications=dict(type='list', elements='str', aliases=['otpSupportedApplications']), + password_policy=dict(type='str', aliases=['passwordPolicy'], no_log=False), + permanent_lockout=dict(type='bool', aliases=['permanentLockout']), + quick_login_check_milli_seconds=dict(type='int', aliases=['quickLoginCheckMilliSeconds']), + refresh_token_max_reuse=dict(type='int', aliases=['refreshTokenMaxReuse'], no_log=False), + registration_allowed=dict(type='bool', aliases=['registrationAllowed']), + registration_email_as_username=dict(type='bool', aliases=['registrationEmailAsUsername']), + registration_flow=dict(type='str', aliases=['registrationFlow']), + remember_me=dict(type='bool', aliases=['rememberMe']), + reset_credentials_flow=dict(type='str', aliases=['resetCredentialsFlow']), + reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed']), + revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), + smtp_server=dict(type='dict', aliases=['smtpServer']), + ssl_required=dict(type='bool', aliases=['sslRequired']), + sso_session_idle_timeout=dict(type='int', aliases=['ssoSessionIdleTimeout']), + sso_session_idle_timeout_remember_me=dict(type='int', aliases=['ssoSessionIdleTimeoutRememberMe']), + sso_session_max_lifespan=dict(type='int', aliases=['ssoSessionMaxLifespan']), + sso_session_max_lifespan_remember_me=dict(type='int', aliases=['ssoSessionMaxLifespanRememberMe']), + 
supported_locales=dict(type='list', elements='str', aliases=['supportedLocales']), + user_managed_access_allowed=dict(type='bool', aliases=['userManagedAccessAllowed']), + verify_email=dict(type='bool', aliases=['verifyEmail']), + wait_increment_seconds=dict(type='int', aliases=['waitIncrementSeconds']), + ) + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['id', 'realm', 'enabled'], + ['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + state = module.params.get('state') + + # convert module parameters to realm representation parameters (if they belong in there) + params_to_ignore = list(keycloak_argument_spec().keys()) + ['state'] + realm_params = [x for x in module.params + if x not in params_to_ignore and + module.params.get(x) is not None] + + # See whether the realm already exists in Keycloak + before_realm = kc.get_realm_by_id(realm=realm) or {} + + # Build a proposed changeset from parameters given to this module + changeset = dict() + + for realm_param in realm_params: + new_param_value = module.params.get(realm_param) + changeset[camel(realm_param)] = new_param_value + + # Whether creating or updating a realm, take the before-state and merge the changeset into it + updated_realm = before_realm.copy() + updated_realm.update(changeset) + + result['proposed'] = sanitize_cr(changeset) + before_realm_sanitized = sanitize_cr(before_realm) + result['existing'] = before_realm_sanitized + + # If the realm does not exist yet, before_realm is still empty + if not 
before_realm: + if state == 'absent': + # do nothing and exit + if module._diff: + result['diff'] = dict(before='', after='') + result['msg'] = 'Realm does not exist, doing nothing.' + module.exit_json(**result) + + # create new realm + result['changed'] = True + if 'id' not in updated_realm: + module.fail_json(msg='id needs to be specified when creating a new realm') + + if module._diff: + result['diff'] = dict(before='', after=sanitize_cr(updated_realm)) + + if module.check_mode: + module.exit_json(**result) + + kc.create_realm(updated_realm) + after_realm = kc.get_realm_by_id(updated_realm['id']) + + result['end_state'] = sanitize_cr(after_realm) + + result['msg'] = 'Realm %s has been created.' % updated_realm['id'] + module.exit_json(**result) + else: + if state == 'present': + # update existing realm + result['changed'] = True + if module.check_mode: + # We can only compare the current realm with the proposed updates we have + if module._diff: + result['diff'] = dict(before=before_realm_sanitized, + after=sanitize_cr(updated_realm)) + result['changed'] = (before_realm != updated_realm) + + module.exit_json(**result) + + kc.update_realm(updated_realm, realm=realm) + + after_realm = kc.get_realm_by_id(realm=realm) + if before_realm == after_realm: + result['changed'] = False + if module._diff: + result['diff'] = dict(before=before_realm_sanitized, + after=sanitize_cr(after_realm)) + result['end_state'] = sanitize_cr(after_realm) + + result['msg'] = 'Realm %s has been updated.' % updated_realm['id'] + module.exit_json(**result) + else: + # Delete existing realm + result['changed'] = True + if module._diff: + result['diff']['before'] = before_realm_sanitized + result['diff']['after'] = '' + + if module.check_mode: + module.exit_json(**result) + + kc.delete_realm(realm=realm) + result['proposed'] = dict() + result['end_state'] = dict() + result['msg'] = 'Realm %s has been deleted.' 
% before_realm['id'] + module.exit_json(**result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py new file mode 120000 index 0000000000..1cdeb46a81 --- /dev/null +++ b/plugins/modules/keycloak_realm.py @@ -0,0 +1 @@ +./identity/keycloak/keycloak_realm.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/identity/keycloak/__init__.py b/tests/unit/plugins/modules/identity/keycloak/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_realm.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_realm.py new file mode 100644 index 0000000000..06548ad3e6 --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_realm.py @@ -0,0 +1,310 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_realm + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_realm_by_id, create_realm=None, update_realm=None, delete_realm=None): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches 
`_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + obj = keycloak_realm.KeycloakAPI + with patch.object(obj, 'get_realm_by_id', side_effect=get_realm_by_id) as mock_get_realm_by_id: + with patch.object(obj, 'create_realm', side_effect=create_realm) as mock_create_realm: + with patch.object(obj, 'update_realm', side_effect=update_realm) as mock_update_realm: + with patch.object(obj, 'delete_realm', side_effect=delete_realm) as mock_delete_realm: + yield mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. 
+ """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakRealm(ModuleTestCase): + def setUp(self): + super(TestKeycloakRealm, self).setUp() + self.module = keycloak_realm + + def test_create_when_absent(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True + } + return_value_absent = [None, {'id': 'realm-name', 'realm': 'realm-name', 'enabled': True}] + return_value_created = [{ + 'code': 201, + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True + }] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_by_id=return_value_absent, create_realm=return_value_created) \ + as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 2) + self.assertEqual(len(mock_create_realm.mock_calls), 1) + self.assertEqual(len(mock_update_realm.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present_with_change(self): + """Update with change a realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 
'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': False + } + return_value_absent = [ + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True + }, + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': False + } + ] + return_value_updated = [{ + 'code': 201, + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': False + }] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_by_id=return_value_absent, update_realm=return_value_updated) \ + as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 2) + self.assertEqual(len(mock_create_realm.mock_calls), 0) + self.assertEqual(len(mock_update_realm.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present_no_change(self): + """Update without change a realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True + } + return_value_absent = [ + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True + }, + { + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True + } + ] + return_value_updated = [{ + 'code': 201, + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True + }] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with 
patch_keycloak_api(get_realm_by_id=return_value_absent, update_realm=return_value_updated) \ + as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 2) + self.assertEqual(len(mock_create_realm.mock_calls), 0) + self.assertEqual(len(mock_update_realm.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_absent(self): + """Remove an absent realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'state': 'absent' + } + return_value_absent = [None] + return_value_deleted = [None] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_by_id=return_value_absent, delete_realm=return_value_deleted) \ + as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1) + self.assertEqual(len(mock_delete_realm.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_present(self): + """Remove a present realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'id': 'realm-name', + 'realm': 'realm-name', + 'enabled': True, + 'state': 'absent' + } 
+ return_value_absent = [ + { + 'id': 'realm-name', + 'realm': 'realm-name' + }] + return_value_deleted = [None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_by_id=return_value_absent, delete_realm=return_value_deleted) \ + as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1) + self.assertEqual(len(mock_delete_realm.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 68243063d1f8537bea75a15d86d099d1eab89111 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Wed, 21 Apr 2021 06:45:09 -0400 Subject: [PATCH 0219/3093] pids module: Adding pattern and ignore_case options (#2280) * Adding pattern and ignore_case options * Adding changelog fragment * Fixing changelog fragment * Addressing FreeBSD 11.4/python 3 errors with explicit conversion * Correcting descriptions * Reverting back to regex input * Fixing test syntax errors --- .../2280-pids-new-pattern-option.yml | 3 + plugins/modules/system/pids.py | 68 +++++++++++++++++-- tests/integration/targets/pids/tasks/main.yml | 30 ++++++++ 3 files changed, 94 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/2280-pids-new-pattern-option.yml diff --git a/changelogs/fragments/2280-pids-new-pattern-option.yml b/changelogs/fragments/2280-pids-new-pattern-option.yml new file mode 100644 index 0000000000..fb9f07e744 --- /dev/null +++ b/changelogs/fragments/2280-pids-new-pattern-option.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- pids - new options ``pattern`` and `ignore_case`` for retrieving PIDs of processes matching a supplied pattern 
(https://github.com/ansible-collections/community.general/pull/2280). diff --git a/plugins/modules/system/pids.py b/plugins/modules/system/pids.py index 1bee180b08..e7312465f1 100644 --- a/plugins/modules/system/pids.py +++ b/plugins/modules/system/pids.py @@ -2,6 +2,7 @@ # Copyright: (c) 2019, Saranya Sridharan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) + __metaclass__ = type DOCUMENTATION = ''' @@ -14,12 +15,20 @@ requirements: - psutil(python module) options: name: - description: the name of the process you want to get PID for. - required: true + description: The name of the process(es) you want to get PID(s) for. type: str + pattern: + description: The pattern (regular expression) to match the process(es) you want to get PID(s) for. + type: str + version_added: 3.0.0 + ignore_case: + description: Ignore case in pattern if using the I(pattern) option. + type: bool + default: false + version_added: 3.0.0 ''' -EXAMPLES = ''' +EXAMPLES = r''' # Pass the process name - name: Getting process IDs of the process community.general.pids: @@ -29,6 +38,11 @@ EXAMPLES = ''' - name: Printing the process IDs obtained ansible.builtin.debug: msg: "PIDS of python:{{pids_of_python.pids|join(',')}}" + +- name: Getting process IDs of processes matching pattern + community.general.pids: + pattern: python(2(\.7)?|3(\.6)?)?\s+myapp\.py + register: myapp_pids ''' RETURN = ''' @@ -39,9 +53,15 @@ pids: sample: [100,200] ''' -from ansible.module_utils.basic import AnsibleModule +import re +from os.path import basename + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_native + try: import psutil + HAS_PSUTIL = True except ImportError: HAS_PSUTIL = False @@ -66,17 +86,51 @@ def get_pid(name): return pids +def get_matching_command_pids(pattern, ignore_case): + flags = 0 + if ignore_case: + 
flags |= re.I + + regex = re.compile(pattern, flags) + # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information + return [p.pid for p in psutil.process_iter(["name", "exe", "cmdline"]) + if regex.search(to_native(p.info["name"])) + or (p.info["exe"] and regex.search(basename(to_native(p.info["exe"])))) + or (p.info["cmdline"] and regex.search(to_native(' '.join(p.cmdline())))) + ] + + def main(): module = AnsibleModule( argument_spec=dict( - name=dict(required=True, type="str"), + name=dict(type="str"), + pattern=dict(type="str"), + ignore_case=dict(type="bool", default=False), ), + required_one_of=[ + ('name', 'pattern') + ], + mutually_exclusive=[ + ('name', 'pattern') + ], supports_check_mode=True, ) + if not HAS_PSUTIL: - module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil") + module.fail_json(msg=missing_required_lib('psutil')) + name = module.params["name"] - response = dict(pids=get_pid(name)) + pattern = module.params["pattern"] + ignore_case = module.params["ignore_case"] + + if name: + response = dict(pids=get_pid(name)) + else: + try: + response = dict(pids=get_matching_command_pids(pattern, ignore_case)) + except re.error as e: + module.fail_json(msg="'%s' is not a valid regular expression: %s" % (pattern, to_native(e))) + module.exit_json(**response) diff --git a/tests/integration/targets/pids/tasks/main.yml b/tests/integration/targets/pids/tasks/main.yml index 4cc691633c..b56093cf0c 100644 --- a/tests/integration/targets/pids/tasks/main.yml +++ b/tests/integration/targets/pids/tasks/main.yml @@ -56,6 +56,22 @@ name: "{{ random_name[0:5] }}" register: exactpidmatch +- name: "Checking that patterns can be used with the pattern option" + pids: + pattern: "{{ random_name[0:5] }}" + register: pattern_pid_match + +- name: "Checking that case-insensitive patterns can be used with the pattern option" + pids: + pattern: "{{ random_name[0:5] | upper }}" + ignore_case: true + 
register: caseinsensitive_pattern_pid_match + +- name: "Checking that .* includes test pid" + pids: + pattern: .* + register: match_all + - name: "Reading pid from the file" slurp: src: "{{ output_dir }}/obtainpid.txt" @@ -67,3 +83,17 @@ - "pids.pids | join(' ') == newpid.content | b64decode | trim" - "pids.pids | length > 0" - "exactpidmatch.pids == []" + - "pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim" + - "caseinsensitive_pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim" + - newpid.content | b64decode | trim | int in match_all.pids + +- name: "Register output of bad input pattern" + pids: + pattern: (unterminated + register: bad_pattern_result + ignore_errors: true + +- name: "Verify that bad input pattern result is failed" + assert: + that: + - bad_pattern_result is failed \ No newline at end of file From c768060d956185ce31fb0a326ee8d5023e32c33b Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 21 Apr 2021 12:54:35 +0200 Subject: [PATCH 0220/3093] More renames. (#2307) --- plugins/modules/cloud/misc/proxmox_domain_info.py | 4 ++-- plugins/modules/cloud/misc/proxmox_group_info.py | 2 +- plugins/modules/cloud/misc/proxmox_storage_info.py | 4 ++-- plugins/modules/cloud/misc/proxmox_user_info.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/modules/cloud/misc/proxmox_domain_info.py b/plugins/modules/cloud/misc/proxmox_domain_info.py index fc7c37c613..1034bc8d30 100644 --- a/plugins/modules/cloud/misc/proxmox_domain_info.py +++ b/plugins/modules/cloud/misc/proxmox_domain_info.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- # -# Copyright: Tristan Le Guern (@Aversiste) +# Copyright: Tristan Le Guern (@tleguern) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function @@ -21,7 +21,7 @@ options: - Restrict results to a specific authentication realm. 
aliases: ['realm', 'name'] type: str -author: Tristan Le Guern (@Aversiste) +author: Tristan Le Guern (@tleguern) extends_documentation_fragment: community.general.proxmox.documentation ''' diff --git a/plugins/modules/cloud/misc/proxmox_group_info.py b/plugins/modules/cloud/misc/proxmox_group_info.py index 063d28e559..bf88659656 100644 --- a/plugins/modules/cloud/misc/proxmox_group_info.py +++ b/plugins/modules/cloud/misc/proxmox_group_info.py @@ -21,7 +21,7 @@ options: - Restrict results to a specific group. aliases: ['groupid', 'name'] type: str -author: Tristan Le Guern (@Aversiste) +author: Tristan Le Guern (@tleguern) extends_documentation_fragment: community.general.proxmox.documentation ''' diff --git a/plugins/modules/cloud/misc/proxmox_storage_info.py b/plugins/modules/cloud/misc/proxmox_storage_info.py index fb495435e0..d06c9be8c1 100644 --- a/plugins/modules/cloud/misc/proxmox_storage_info.py +++ b/plugins/modules/cloud/misc/proxmox_storage_info.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- # -# Copyright: Tristan Le Guern (@Aversiste) +# Copyright: Tristan Le Guern (@tleguern) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function @@ -25,7 +25,7 @@ options: description: - Filter on a specifc storage type. type: str -author: Tristan Le Guern (@Aversiste) +author: Tristan Le Guern (@tleguern) extends_documentation_fragment: community.general.proxmox.documentation notes: - Storage specific options can be returned by this module, please look at the documentation at U(https://pve.proxmox.com/wiki/Storage). 
diff --git a/plugins/modules/cloud/misc/proxmox_user_info.py b/plugins/modules/cloud/misc/proxmox_user_info.py index 1de93e6009..49a890b9f6 100644 --- a/plugins/modules/cloud/misc/proxmox_user_info.py +++ b/plugins/modules/cloud/misc/proxmox_user_info.py @@ -30,7 +30,7 @@ options: description: - Restrict results to a specific user ID, which is a concatenation of a user and domain parts. type: str -author: Tristan Le Guern (@Aversiste) +author: Tristan Le Guern (@tleguern) extends_documentation_fragment: community.general.proxmox.documentation ''' From e1d28cf052be642fc34a72b83a56a4f25057c5c0 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 21 Apr 2021 13:37:10 +0200 Subject: [PATCH 0221/3093] Temporarily disable sysrc tests since they fail often. (#2318) --- tests/integration/targets/sysrc/aliases | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/targets/sysrc/aliases b/tests/integration/targets/sysrc/aliases index 360849e61b..c7d183fb65 100644 --- a/tests/integration/targets/sysrc/aliases +++ b/tests/integration/targets/sysrc/aliases @@ -3,3 +3,4 @@ needs/root skip/docker skip/osx skip/rhel +disabled # FIXME From 8db59ff02dcf8f07929b283d90da0b889edb6c0e Mon Sep 17 00:00:00 2001 From: Nick Date: Wed, 21 Apr 2021 08:04:46 -0400 Subject: [PATCH 0222/3093] `with_filetree:`: use `splitext` for compatibility with `template:` (#2285) * with_filetree: use splitext for compatibility with template The example code given deploys files with their .j2 extensions intact, which is probably not what you want. * Explain how templates interact with splitext|first * Update plugins/lookup/filetree.py Co-authored-by: Felix Fontein * Don't encourage setting the mode of symlinks On ext4, maybe most filesystems, symlinks always have the artificial mode of 0777, and `chmod $mode $symlink` *writes through* the symlink to its target file. An effect of this is that if you deploy a file and a symlink to it (e.g. 
this common situation: /etc/nginx/sites-available/default and /etc/nginx/sites-enabled/default -> ../sites-available/default) then `with_filetree` will forever first deploy the file with the right mode, then corrupt its mode to 0777, and every redeploy will see a change to fix, forever in a loop. Probably `file:` should refuse `mode:` on `state: link`s, but in the meantime, avoid recommending it in `filetree` * Use `follow: false` instead of just the mode. This should be more cross-compatible. https://github.com/ansible-collections/community.general/pull/2285#discussion_r616571873 * Update plugins/lookup/filetree.py Co-authored-by: Felix Fontein --- plugins/lookup/filetree.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index fe93cd240d..40e449e600 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -31,7 +31,9 @@ EXAMPLES = r""" - name: Template files (explicitly skip directories in order to use the 'src' attribute) ansible.builtin.template: src: '{{ item.src }}' - dest: /web/{{ item.path }} + # Your template files should be stored with a .j2 file extension, + # but should not be deployed with it. splitext|first removes it. 
+ dest: /web/{{ item.path | splitext | first }} mode: '{{ item.mode }}' with_community.general.filetree: web/ when: item.state == 'file' @@ -41,6 +43,7 @@ EXAMPLES = r""" src: '{{ item.src }}' dest: /web/{{ item.path }} state: link + follow: false # avoid corrupting target files if the link already exists force: yes mode: '{{ item.mode }}' with_community.general.filetree: web/ From 32d071e349fc92c7ff82ac215bfa5f8aaa52a37e Mon Sep 17 00:00:00 2001 From: John R Barker Date: Wed, 21 Apr 2021 13:33:43 +0100 Subject: [PATCH 0223/3093] Update bug_report.yml --- .github/ISSUE_TEMPLATE/bug_report.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 4ebff09890..8f4a16e93b 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,7 +1,6 @@ --- name: Bug report description: Create a report to help us improve -issue_body: false # default: true, adds a classic WSYWIG textarea, if on body: - type: markdown From 96ad40ac1cada1aff53ed040524fc164d1f3e040 Mon Sep 17 00:00:00 2001 From: John R Barker Date: Wed, 21 Apr 2021 13:34:13 +0100 Subject: [PATCH 0224/3093] Update documentation_report.yml --- .github/ISSUE_TEMPLATE/documentation_report.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml index 2d30534840..c24265acb1 100644 --- a/.github/ISSUE_TEMPLATE/documentation_report.yml +++ b/.github/ISSUE_TEMPLATE/documentation_report.yml @@ -2,7 +2,6 @@ name: Documentation Report description: Ask us about docs # NOTE: issue body is enabled to allow screenshots -issue_body: true # default: true, adds a classic WSYWIG textarea, if on body: - type: markdown From 5ca19086a474b41b348fa19149bc9162fda1a397 Mon Sep 17 00:00:00 2001 From: John R Barker Date: Wed, 21 Apr 2021 13:34:30 +0100 Subject: [PATCH 0225/3093] Update feature_request.yml --- 
.github/ISSUE_TEMPLATE/feature_request.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 0116d94d45..5f89dec77a 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -1,7 +1,6 @@ --- name: Feature request description: Suggest an idea for this project -issue_body: false # default: true, adds a classic WSYWIG textarea, if on body: - type: markdown From ca48917b4f6f49bda043e71e3edaae20f1eec2c9 Mon Sep 17 00:00:00 2001 From: Ilija Matoski Date: Wed, 21 Apr 2021 19:52:45 +0200 Subject: [PATCH 0226/3093] inventory/proxmox: Added some cases for unsupported network interface and multi-nic and unsupported guest error (#2263) * added some cases for unsupported network interface and multi-nic without IP addresses * added changelog fragment * lint fixes and added option for ansible_host setting * added description about the new option * lint fix too long line * Update changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml Co-authored-by: Felix Fontein * Update plugins/inventory/proxmox.py Co-authored-by: Felix Fontein * Update plugins/inventory/proxmox.py Co-authored-by: Felix Fontein * Update plugins/inventory/proxmox.py Co-authored-by: Felix Fontein * Added CommandDisabled * refactored to code and added a test case where an interfaces doesnt have a mac address or is invalid to reset it to 00:00:00:00:00:00 * Update tests/unit/plugins/inventory/test_proxmox.py Co-authored-by: Ajpantuso * Update tests/unit/plugins/inventory/test_proxmox.py Co-authored-by: Ajpantuso * mac-address is set to None instead of 00:00:... 
when not defined * changed None to empty string for mac-address Co-authored-by: Felix Fontein Co-authored-by: Ajpantuso --- ...2259-proxmox-multi-nic-and-unsupported.yml | 5 + plugins/inventory/proxmox.py | 29 +- tests/unit/plugins/inventory/test_proxmox.py | 247 ++++++++++++++++++ 3 files changed, 275 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml diff --git a/changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml b/changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml new file mode 100644 index 0000000000..d8f6f80385 --- /dev/null +++ b/changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml @@ -0,0 +1,5 @@ +--- +bugfixes: + - proxmox inventory plugin - support network interfaces without IP addresses, multiple network interfaces and unsupported/commanddisabled guest error (https://github.com/ansible-collections/community.general/pull/2263). +minor_changes: + - proxmox inventory plugin - allow to select whether ``ansible_host`` should be set for the proxmox nodes (https://github.com/ansible-collections/community.general/pull/2263). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 44b807f230..be3ecd4365 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -70,6 +70,13 @@ DOCUMENTATION = ''' description: Gather LXC/QEMU configuration facts. default: no type: bool + want_proxmox_nodes_ansible_host: + version_added: 3.0.0 + description: + - Whether to set C(ansible_host) for proxmox nodes. + - When set to C(true) (default), will use the first available interface. This can be different from what you expect.
+ default: true + type: bool strict: version_added: 2.5.0 compose: @@ -234,13 +241,22 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): ) )['result'] + if "error" in ifaces: + if "class" in ifaces["error"]: + # This happens on Windows, even though qemu agent is running, the IP address + # cannot be fetched, as it's unsupported, also a command disabled can happen. + errorClass = ifaces["error"]["class"] + if errorClass in ["Unsupported"]: + self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported") + elif errorClass in ["CommandDisabled"]: + self.display.v("Retrieving network interfaces from guest agents has been disabled") + return result + for iface in ifaces: result.append({ 'name': iface['name'], - 'mac-address': iface['hardware-address'], - 'ip-addresses': [ - "%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses'] - ] + 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '', + 'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else [] }) except requests.HTTPError: pass @@ -354,8 +370,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self.inventory.add_child(nodes_group, node['node']) # get node IP address - ip = self._get_node_ip(node['node']) - self.inventory.set_variable(node['node'], 'ansible_host', ip) + if self.get_option("want_proxmox_nodes_ansible_host"): + ip = self._get_node_ip(node['node']) + self.inventory.set_variable(node['node'], 'ansible_host', ip) # get LXC containers for this node node_lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_lxc' % node['node']).lower())) diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index a68203b725..e248fb05e3 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ 
b/tests/unit/plugins/inventory/test_proxmox.py @@ -90,6 +90,38 @@ def get_json(url): "uptime": 1000, "disk": 0, "status": "running"}, + {"name": "test-qemu-windows", + "cpus": 1, + "mem": 1000, + "template": "", + "diskread": 0, + "cpu": 0.01, + "maxmem": 1000, + "diskwrite": 0, + "netout": 1000, + "pid": "1001", + "netin": 1000, + "maxdisk": 1000, + "vmid": "102", + "uptime": 1000, + "disk": 0, + "status": "running"}, + {"name": "test-qemu-multi-nic", + "cpus": 1, + "mem": 1000, + "template": "", + "diskread": 0, + "cpu": 0.01, + "maxmem": 1000, + "diskwrite": 0, + "netout": 1000, + "pid": "1001", + "netin": 1000, + "maxdisk": 1000, + "vmid": "103", + "uptime": 1000, + "disk": 0, + "status": "running"}, {"name": "test-qemu-template", "cpus": 1, "mem": 0, @@ -212,6 +244,54 @@ def get_json(url): "scsihw": "virtio-scsi-pci", "smbios1": "uuid=ffffffff-ffff-ffff-ffff-ffffffffffff" } + elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/102/config": + # _get_vm_config (qemu) + return { + "numa": 0, + "digest": "460add1531a7068d2ae62d54f67e8fb9493dece9", + "ide2": "none,media=cdrom", + "bootdisk": "sata0", + "name": "test-qemu-windows", + "balloon": 0, + "cpulimit": "4", + "agent": "1", + "cores": 6, + "sata0": "storage:vm-102-disk-0,size=100G", + "memory": 10240, + "smbios1": "uuid=127301fc-0122-48d5-8fc5-c04fa78d8146", + "scsihw": "virtio-scsi-pci", + "sockets": 1, + "ostype": "win8", + "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0", + "onboot": 1 + } + elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/103/config": + # _get_vm_config (qemu) + return { + 'scsi1': 'storage:vm-103-disk-3,size=30G', + 'sockets': 1, + 'memory': 8192, + 'ostype': 'l26', + 'scsihw': 'virtio-scsi-pci', + "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0", + "net1": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr1", + 'bootdisk': 'scsi0', + 'scsi0': 'storage:vm-103-disk-0,size=10G', + 'name': 'test-qemu-multi-nic', + 'cores': 4, + 'digest': 
'51b7599f869b9a3f564804a0aed290f3de803292', + 'smbios1': 'uuid=863b31c3-42ca-4a92-aed7-4111f342f70a', + 'agent': '1,type=virtio', + 'ide2': 'none,media=cdrom', + 'balloon': 0, + 'numa': 0, + 'scsi2': 'storage:vm-103-disk-2,size=10G', + 'serial0': 'socket', + 'vmgenid': 'ddfb79b2-b484-4d66-88e7-6e76f2d1be77', + 'onboot': 1, + 'tablet': 0 + } + elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/101/agent/network-get-interfaces": # _get_agent_network_interfaces return {"result": [ @@ -281,6 +361,155 @@ def get_json(url): "tx-errs": 0, "tx-bytes": 0 }}]} + elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/102/agent/network-get-interfaces": + # _get_agent_network_interfaces + return {"result": {'error': {'desc': 'this feature or command is not currently supported', 'class': 'Unsupported'}}} + elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/103/agent/network-get-interfaces": + # _get_agent_network_interfaces + return { + "result": [ + { + "statistics": { + "tx-errs": 0, + "rx-errs": 0, + "rx-dropped": 0, + "tx-bytes": 48132932372, + "tx-dropped": 0, + "rx-bytes": 48132932372, + "tx-packets": 178578980, + "rx-packets": 178578980 + }, + "hardware-address": "ff:ff:ff:ff:ff:ff", + "ip-addresses": [ + { + "ip-address-type": "ipv4", + "prefix": 8, + "ip-address": "127.0.0.1" + } + ], + "name": "lo" + }, + { + "name": "eth0", + "ip-addresses": [ + { + "ip-address-type": "ipv4", + "prefix": 24, + "ip-address": "172.16.0.143" + } + ], + "statistics": { + "rx-errs": 0, + "tx-errs": 0, + "rx-packets": 660028, + "tx-packets": 304599, + "tx-dropped": 0, + "rx-bytes": 1846743499, + "tx-bytes": 1287844926, + "rx-dropped": 0 + }, + "hardware-address": "ff:ff:ff:ff:ff:ff" + }, + { + "name": "eth1", + "hardware-address": "ff:ff:ff:ff:ff:ff", + "statistics": { + "rx-bytes": 235717091946, + "tx-dropped": 0, + "rx-dropped": 0, + "tx-bytes": 123411636251, + "rx-packets": 540431277, + "tx-packets": 468411864, + "rx-errs": 0, + "tx-errs": 0 + }, 
+ "ip-addresses": [ + { + "ip-address": "10.0.0.133", + "prefix": 24, + "ip-address-type": "ipv4" + } + ] + }, + { + "name": "docker0", + "ip-addresses": [ + { + "ip-address": "172.17.0.1", + "prefix": 16, + "ip-address-type": "ipv4" + } + ], + "hardware-address": "ff:ff:ff:ff:ff:ff", + "statistics": { + "rx-errs": 0, + "tx-errs": 0, + "rx-packets": 0, + "tx-packets": 0, + "tx-dropped": 0, + "rx-bytes": 0, + "rx-dropped": 0, + "tx-bytes": 0 + } + }, + { + "hardware-address": "ff:ff:ff:ff:ff:ff", + "name": "datapath" + }, + { + "name": "weave", + "ip-addresses": [ + { + "ip-address": "10.42.0.1", + "ip-address-type": "ipv4", + "prefix": 16 + } + ], + "hardware-address": "ff:ff:ff:ff:ff:ff", + "statistics": { + "rx-bytes": 127289123306, + "tx-dropped": 0, + "rx-dropped": 0, + "tx-bytes": 43827573343, + "rx-packets": 132750542, + "tx-packets": 74218762, + "rx-errs": 0, + "tx-errs": 0 + } + }, + { + "name": "vethwe-datapath", + "hardware-address": "ff:ff:ff:ff:ff:ff" + }, + { + "name": "vethwe-bridge", + "hardware-address": "ff:ff:ff:ff:ff:ff" + }, + { + "hardware-address": "ff:ff:ff:ff:ff:ff", + "name": "vxlan-6784" + }, + { + "name": "vethwepl0dfe1fe", + "hardware-address": "ff:ff:ff:ff:ff:ff" + }, + { + "name": "vethweplf1e7715", + "hardware-address": "ff:ff:ff:ff:ff:ff" + }, + { + "hardware-address": "ff:ff:ff:ff:ff:ff", + "name": "vethwepl9d244a1" + }, + { + "hardware-address": "ff:ff:ff:ff:ff:ff", + "name": "vethwepl2ca477b" + }, + { + "name": "nomacorip", + } + ] + } def get_vm_status(node, vmtype, vmid, name): @@ -294,6 +523,8 @@ def get_option(option): return 'proxmox_' elif option == 'want_facts': return True + elif option == 'want_proxmox_nodes_ansible_host': + return True else: return False @@ -313,6 +544,8 @@ def test_populate(inventory, mocker): # get different hosts host_qemu = inventory.inventory.get_host('test-qemu') + host_qemu_windows = inventory.inventory.get_host('test-qemu-windows') + host_qemu_multi_nic = 
inventory.inventory.get_host('test-qemu-multi-nic') host_qemu_template = inventory.inventory.get_host('test-qemu-template') host_lxc = inventory.inventory.get_host('test-lxc') host_node = inventory.inventory.get_host('testnode') @@ -325,6 +558,20 @@ def test_populate(inventory, mocker): # check if qemu-test has eth0 interface in agent_interfaces fact assert 'eth0' in [d['name'] for d in host_qemu.get_vars()['proxmox_agent_interfaces']] + # check if qemu-multi-nic has multiple network interfaces + for iface_name in ['eth0', 'eth1', 'weave']: + assert iface_name in [d['name'] for d in host_qemu_multi_nic.get_vars()['proxmox_agent_interfaces']] + + # check if interface with no mac-address or ip-address defaults correctly + assert [iface for iface in host_qemu_multi_nic.get_vars()['proxmox_agent_interfaces'] + if iface['name'] == 'nomacorip' + and iface['mac-address'] == '' + and iface['ip-addresses'] == [] + ] + + # check to make sure qemu-windows doesn't have proxmox_agent_interfaces + assert "proxmox_agent_interfaces" not in host_qemu_windows.get_vars() + # check if lxc-test has been discovered correctly group_lxc = inventory.inventory.groups['proxmox_all_lxc'] assert group_lxc.hosts == [host_lxc] From 6f6b80fd896d4928d1fa2e68c80742db589769f1 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 21 Apr 2021 22:41:55 +0200 Subject: [PATCH 0227/3093] Add PR URLs to changelog fragment. --- .../fragments/remove-deprecated-features.yml | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/changelogs/fragments/remove-deprecated-features.yml b/changelogs/fragments/remove-deprecated-features.yml index 9d202e9cec..e728ce62d3 100644 --- a/changelogs/fragments/remove-deprecated-features.yml +++ b/changelogs/fragments/remove-deprecated-features.yml @@ -1,16 +1,16 @@ removed_features: -- "airbrake_deployment - removed deprecated ``token`` parameter. Use ``project_id`` and ``project_key`` instead." 
-- "bigpanda - the alias ``message`` has been removed. Use ``deployment_message`` instead." -- "cisco_spark, cisco_webex - the alias ``message`` has been removed. Use ``msg`` instead." -- "clc_aa_policy - the ``wait`` parameter has been removed. It did not have any effect." -- "datadog_monitor - the alias ``message`` has been removed. Use ``notification_message`` instead." -- "django_manage - the parameter ``liveserver`` has been removed." -- "idrac_redfish_config - the parameters ``manager_attribute_name`` and ``manager_attribute_value`` have been removed. Use ``manager_attributes`` instead." -- "iso_extract - the alias ``thirsty`` has been removed. Use ``force`` instead." -- "redfish_config - the parameters ``bios_attribute_name`` and ``bios_attribute_value`` have been removed. Use ``bios_attributes`` instead." -- "syspatch - the ``apply`` parameter has been removed. This is the default mode, so simply removing it will not change the behavior." -- "xbps - the ``force`` parameter has been removed. It did not have any effect." -- "redfish modules - issuing a data modification command without specifying the ID of the target System, Chassis or Manager resource when there is more than one is no longer allowed. Use the ``resource_id`` option to specify the target ID." -- "pulp_repo - the alias ``ca_cert`` has been removed. Use ``feed_ca_cert`` instead." -- "pulp_repo - the ``feed_client_cert`` parameter no longer defaults to the value of the ``client_cert`` parameter." -- "pulp_repo - the ``feed_client_key`` parameter no longer defaults to the value of the ``client_key`` parameter." +- "airbrake_deployment - removed deprecated ``token`` parameter. Use ``project_id`` and ``project_key`` instead (https://github.com/ansible-collections/community.general/pull/1926)." +- "bigpanda - the alias ``message`` has been removed. Use ``deployment_message`` instead (https://github.com/ansible-collections/community.general/pull/1926)." 
+- "cisco_spark, cisco_webex - the alias ``message`` has been removed. Use ``msg`` instead (https://github.com/ansible-collections/community.general/pull/1926)." +- "clc_aa_policy - the ``wait`` parameter has been removed. It did not have any effect (https://github.com/ansible-collections/community.general/pull/1926)." +- "datadog_monitor - the alias ``message`` has been removed. Use ``notification_message`` instead (https://github.com/ansible-collections/community.general/pull/1926)." +- "django_manage - the parameter ``liveserver`` has been removed (https://github.com/ansible-collections/community.general/pull/1926)." +- "idrac_redfish_config - the parameters ``manager_attribute_name`` and ``manager_attribute_value`` have been removed. Use ``manager_attributes`` instead (https://github.com/ansible-collections/community.general/pull/1926)." +- "iso_extract - the alias ``thirsty`` has been removed. Use ``force`` instead (https://github.com/ansible-collections/community.general/pull/1926)." +- "redfish_config - the parameters ``bios_attribute_name`` and ``bios_attribute_value`` have been removed. Use ``bios_attributes`` instead (https://github.com/ansible-collections/community.general/pull/1926)." +- "syspatch - the ``apply`` parameter has been removed. This is the default mode, so simply removing it will not change the behavior (https://github.com/ansible-collections/community.general/pull/1926)." +- "xbps - the ``force`` parameter has been removed. It did not have any effect (https://github.com/ansible-collections/community.general/pull/1926)." +- "redfish modules - issuing a data modification command without specifying the ID of the target System, Chassis or Manager resource when there is more than one is no longer allowed. Use the ``resource_id`` option to specify the target ID (https://github.com/ansible-collections/community.general/pull/1926)." +- "pulp_repo - the alias ``ca_cert`` has been removed. 
Use ``feed_ca_cert`` instead (https://github.com/ansible-collections/community.general/pull/1926)." +- "pulp_repo - the ``feed_client_cert`` parameter no longer defaults to the value of the ``client_cert`` parameter (https://github.com/ansible-collections/community.general/pull/1926)." +- "pulp_repo - the ``feed_client_key`` parameter no longer defaults to the value of the ``client_key`` parameter (https://github.com/ansible-collections/community.general/pull/1926)." From 31c9ed0fe61b001bcb22677921a7c6255fcab0ca Mon Sep 17 00:00:00 2001 From: Dennis Israelsson Date: Thu, 22 Apr 2021 01:00:03 +0200 Subject: [PATCH 0228/3093] nmap: fix cache support (#2282) * add cache support * pep8 e501 fix * revert verify_file function * revert description update * add changelog fragment Co-authored-by: Dennis Israelsson --- .../fragments/2282-nmap-fix-cache-support.yml | 2 + plugins/inventory/nmap.py | 167 +++++++++++------- 2 files changed, 108 insertions(+), 61 deletions(-) create mode 100644 changelogs/fragments/2282-nmap-fix-cache-support.yml diff --git a/changelogs/fragments/2282-nmap-fix-cache-support.yml b/changelogs/fragments/2282-nmap-fix-cache-support.yml new file mode 100644 index 0000000000..62b026eb25 --- /dev/null +++ b/changelogs/fragments/2282-nmap-fix-cache-support.yml @@ -0,0 +1,2 @@ +bugfixes: + - nmap inventory plugin - fix cache and constructed group support (https://github.com/ansible-collections/community.general/issues/2242). 
diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index ea6175e86d..687317abfa 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -71,6 +71,25 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._nmap = None super(InventoryModule, self).__init__() + def _populate(self, hosts): + # Use constructed if applicable + strict = self.get_option('strict') + + for host in hosts: + hostname = host['name'] + self.inventory.add_host(hostname) + for var, value in host.items(): + self.inventory.set_variable(hostname, var, value) + + # Composed variables + self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict) + + # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group + self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict) + + # Create groups based on variable values and add the corresponding hosts to it + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict) + def verify_file(self, path): valid = False @@ -82,7 +101,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return valid - def parse(self, inventory, loader, path, cache=False): + def parse(self, inventory, loader, path, cache=True): try: self._nmap = get_bin_path('nmap') @@ -93,75 +112,101 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._read_config_data(path) - # setup command - cmd = [self._nmap] - if not self._options['ports']: - cmd.append('-sP') + cache_key = self.get_cache_key(path) - if self._options['ipv4'] and not self._options['ipv6']: - cmd.append('-4') - elif self._options['ipv6'] and not self._options['ipv4']: - cmd.append('-6') - elif not self._options['ipv6'] and not self._options['ipv4']: - raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin') + # cache may be True or False at this point to indicate if the inventory 
is being refreshed + # get the user's cache option too to see if we should save the cache if it is changing + user_cache_setting = self.get_option('cache') - if self._options['exclude']: - cmd.append('--exclude') - cmd.append(','.join(self._options['exclude'])) - - cmd.append(self._options['address']) - try: - # execute - p = Popen(cmd, stdout=PIPE, stderr=PIPE) - stdout, stderr = p.communicate() - if p.returncode != 0: - raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr))) - - # parse results - host = None - ip = None - ports = [] + # read if the user has caching enabled and the cache isn't being refreshed + attempt_to_read_cache = user_cache_setting and cache + # update if the user has caching enabled and the cache is being refreshed; update this value to True if the cache has expired below + cache_needs_update = user_cache_setting and not cache + if attempt_to_read_cache: try: - t_stdout = to_text(stdout, errors='surrogate_or_strict') - except UnicodeError as e: - raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e)) + results = self._cache[cache_key] + except KeyError: + # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated + cache_needs_update = True - for line in t_stdout.splitlines(): - hits = self.find_host.match(line) - if hits: - if host is not None: - self.inventory.set_variable(host, 'ports', ports) + if cache_needs_update: + # setup command + cmd = [self._nmap] + if not self._options['ports']: + cmd.append('-sP') - # if dns only shows arpa, just use ip instead as hostname - if hits.group(1).endswith('.in-addr.arpa'): - host = hits.group(2) - else: - host = hits.group(1) + if self._options['ipv4'] and not self._options['ipv6']: + cmd.append('-4') + elif self._options['ipv6'] and not self._options['ipv4']: + cmd.append('-6') + elif not self._options['ipv6'] and not self._options['ipv4']: + raise AnsibleParserError('One of ipv4 
or ipv6 must be enabled for this plugin') - # if no reverse dns exists, just use ip instead as hostname - if hits.group(2) is not None: - ip = hits.group(2) - else: - ip = hits.group(1) + if self._options['exclude']: + cmd.append('--exclude') + cmd.append(','.join(self._options['exclude'])) - if host is not None: - # update inventory - self.inventory.add_host(host) - self.inventory.set_variable(host, 'ip', ip) - ports = [] - continue + cmd.append(self._options['address']) + try: + # execute + p = Popen(cmd, stdout=PIPE, stderr=PIPE) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr))) - host_ports = self.find_port.match(line) - if host is not None and host_ports: - ports.append({'port': host_ports.group(1), 'protocol': host_ports.group(2), 'state': host_ports.group(3), 'service': host_ports.group(4)}) - continue + # parse results + host = None + ip = None + ports = [] + results = [] - # TODO: parse more data, OS? 
+ try: + t_stdout = to_text(stdout, errors='surrogate_or_strict') + except UnicodeError as e: + raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e)) - # if any leftovers - if host and ports: - self.inventory.set_variable(host, 'ports', ports) + for line in t_stdout.splitlines(): + hits = self.find_host.match(line) + if hits: + if host is not None and ports: + results[-1]['ports'] = ports - except Exception as e: - raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e))) + # if dns only shows arpa, just use ip instead as hostname + if hits.group(1).endswith('.in-addr.arpa'): + host = hits.group(2) + else: + host = hits.group(1) + + # if no reverse dns exists, just use ip instead as hostname + if hits.group(2) is not None: + ip = hits.group(2) + else: + ip = hits.group(1) + + if host is not None: + # update inventory + results.append(dict()) + results[-1]['name'] = host + results[-1]['ip'] = ip + ports = [] + continue + + host_ports = self.find_port.match(line) + if host is not None and host_ports: + ports.append({'port': host_ports.group(1), + 'protocol': host_ports.group(2), + 'state': host_ports.group(3), + 'service': host_ports.group(4)}) + continue + + # if any leftovers + if host and ports: + results[-1]['ports'] = ports + + except Exception as e: + raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e))) + + self._cache[cache_key] = results + + self._populate(results) From fb1b756d48cfb784e56902ab51e4a5a503558094 Mon Sep 17 00:00:00 2001 From: TeamOSEOLA Date: Wed, 21 Apr 2021 18:05:37 -0500 Subject: [PATCH 0229/3093] Add plugin_paths parameter to terraform module. (#2308) * Add plugin_paths parameter to terraform module. The list `plugin_paths` is used in the init phase of terraform by setting the `-plugin-dir` command line argument to a path that contains terraform plugins. 
When the `-plugin-dir` argument is set, the plugin discovery and auto-download of terraform is disabled. This is useful when running terraform in automation environments for testing where the production endpoints are strictly controlled or on air-gapped networks and you need to control the versions of plugins available. * Added `elements: path` to `plugin_paths` parameter documentation in terraform.py * added changelog fragment * Update changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml use double back-ticks vice single Co-authored-by: Felix Fontein * Update plugins/modules/cloud/misc/terraform.py Add version added to documentation Co-authored-by: Felix Fontein * Update plugins/modules/cloud/misc/terraform.py Add markup around URL in documentation Co-authored-by: Felix Fontein * Update plugins/modules/cloud/misc/terraform.py remove `required=False` and `default=None` from module argument spec, as that is the default behavior and redundant Co-authored-by: Amin Vakil * Added directory structure info to documentation and examples sections. * Update plugins/modules/cloud/misc/terraform.py grammatical fix Co-authored-by: Felix Fontein * grammar fix to description. 
Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- ...-terraform-add-plugin_paths-parameter.yaml | 3 ++ plugins/modules/cloud/misc/terraform.py | 44 ++++++++++++++++++- 2 files changed, 45 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml diff --git a/changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml b/changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml new file mode 100644 index 0000000000..ec389b270c --- /dev/null +++ b/changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - terraform - add ``plugin_paths`` parameter which allows disabling Terraform from performing plugin discovery and auto-download (https://github.com/ansible-collections/community.general/pull/2308). diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py index f395c8e278..0a4e41b5f0 100644 --- a/plugins/modules/cloud/misc/terraform.py +++ b/plugins/modules/cloud/misc/terraform.py @@ -33,6 +33,19 @@ options: vars.tf/main.tf/etc to use. type: path required: true + plugin_paths: + description: + - List of paths containing Terraform plugin executable files. + - Plugin executables can be downloaded from U(https://releases.hashicorp.com/). + - When set, the plugin discovery and auto-download behavior of Terraform is disabled. + - The directory structure in the plugin path can be tricky. The Terraform docs + U(https://learn.hashicorp.com/tutorials/terraform/automate-terraform#pre-installed-plugins) + show a simple directory of files, but actually, the directory structure + has to follow the same structure you would see if Terraform auto-downloaded the plugins. + See the examples below for a tree output of an example plugin directory. + type: list + elements: path + version_added: 3.0.0 workspace: description: - The terraform workspace to work with. 
@@ -141,6 +154,28 @@ EXAMPLES = """ backend_config_files: - /path/to/backend_config_file_1 - /path/to/backend_config_file_2 + +- name: Disable plugin discovery and auto-download by setting plugin_paths + community.general.terraform: + project_path: 'project/' + state: "{{ state }}" + force_init: true + plugin_paths: + - /path/to/plugins_dir_1 + - /path/to/plugins_dir_2 + +### Example directory structure for plugin_paths example +# $ tree /path/to/plugins_dir_1 +# /path/to/plugins_dir_1/ +# └── registry.terraform.io +# └── hashicorp +# └── vsphere +# ├── 1.24.0 +# │ └── linux_amd64 +# │ └── terraform-provider-vsphere_v1.24.0_x4 +# └── 1.26.0 +# └── linux_amd64 +# └── terraform-provider-vsphere_v1.26.0_x4 """ RETURN = """ @@ -212,7 +247,7 @@ def _state_args(state_file): return [] -def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure): +def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths): command = [bin_path, 'init', '-input=false'] if backend_config: for key, val in backend_config.items(): @@ -225,6 +260,9 @@ def init_plugins(bin_path, project_path, backend_config, backend_config_files, i command.extend(['-backend-config', f]) if init_reconfigure: command.extend(['-reconfigure']) + if plugin_paths: + for plugin_path in plugin_paths: + command.extend(['-plugin-dir', plugin_path]) rc, out, err = module.run_command(command, check_rc=True, cwd=project_path) @@ -295,6 +333,7 @@ def main(): argument_spec=dict( project_path=dict(required=True, type='path'), binary_path=dict(type='path'), + plugin_paths=dict(type='list', elements='path'), workspace=dict(required=False, type='str', default='default'), purge_workspace=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent', 'planned']), @@ -316,6 +355,7 @@ def main(): project_path = module.params.get('project_path') bin_path = module.params.get('binary_path') + plugin_paths = 
module.params.get('plugin_paths') workspace = module.params.get('workspace') purge_workspace = module.params.get('purge_workspace') state = module.params.get('state') @@ -343,7 +383,7 @@ def main(): APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve') if force_init: - init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure) + init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths) workspace_ctx = get_workspace_context(command[0], project_path) if workspace_ctx["current"] != workspace: From e0b731e76ff4f474f04034acf4888599fd974501 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 22 Apr 2021 07:08:25 +0200 Subject: [PATCH 0230/3093] Replace FreeBSD 11.4 with 13.0 for devel testing. (#2324) --- .azure-pipelines/azure-pipelines.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 2aa559a03f..8c0804ab31 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -175,10 +175,10 @@ stages: test: rhel/7.9 - name: RHEL 8.3 test: rhel/8.3 - - name: FreeBSD 11.4 - test: freebsd/11.4 - name: FreeBSD 12.2 test: freebsd/12.2 + - name: FreeBSD 13.0 + test: freebsd/13.0 groups: - 1 - 2 From 2799cd4ac7d069acbd24bfe5574756af93caee78 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Thu, 22 Apr 2021 21:52:06 +0300 Subject: [PATCH 0231/3093] BOTMETA.yml: haproxy - add a maintainer (#2331) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 424bad19fd..88c789d5af 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -451,7 +451,7 @@ files: $modules/net_tools/dnsmadeeasy.py: maintainers: briceburg $modules/net_tools/haproxy.py: - maintainers: ravibhure + maintainers: ravibhure Normo $modules/net_tools/: maintainers: nerzhul 
$modules/net_tools/infinity/infinity.py: From f11f6595cc7a9f90aa8b1812d32da3fab5bd3d12 Mon Sep 17 00:00:00 2001 From: Anubhav Chakraborty <47817745+coderfool@users.noreply.github.com> Date: Sat, 24 Apr 2021 15:50:11 +0530 Subject: [PATCH 0232/3093] convert string returned by plugin to unicode (#2329) * convert string returned by plugin to unicode * add changelog fragment * fix changelog format * fix changelog format yet again Co-authored-by: Anubhav Chakraborty --- changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml | 2 ++ plugins/lookup/hiera.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml diff --git a/changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml b/changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml new file mode 100644 index 0000000000..4cced727a2 --- /dev/null +++ b/changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml @@ -0,0 +1,2 @@ +bugfixes: + - hiera lookup plugin - converts the return type of plugin to unicode string (https://github.com/ansible-collections/community.general/pull/2329). 
diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index 1ce82d7bd6..899820191a 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -63,6 +63,7 @@ import os from ansible.plugins.lookup import LookupBase from ansible.utils.cmd_functions import run_cmd +from ansible.module_utils._text import to_text ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml') ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera') @@ -78,7 +79,7 @@ class Hiera(object): rc, output, err = run_cmd("{0} -c {1} {2}".format( ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0])) - return output.strip() + return to_text(output.strip()) class LookupModule(LookupBase): From 399c0ef849164849376d283d0f54ae69cd2c2e69 Mon Sep 17 00:00:00 2001 From: Colin Nolan Date: Sun, 25 Apr 2021 21:48:25 +0100 Subject: [PATCH 0233/3093] Removes colin-nolan as Consul maintainer (#2342) --- .github/BOTMETA.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 88c789d5af..06501fc2aa 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -309,6 +309,7 @@ files: maintainers: bvitnik $modules/clustering/consul/: maintainers: $team_consul + ignore: colin-nolan $modules/clustering/etcd3.py: maintainers: evrardjp ignore: vfauth @@ -1002,7 +1003,7 @@ macros: terminals: plugins/terminal team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo - team_consul: colin-nolan sgargan + team_consul: sgargan team_cyberark_conjur: jvanderhoof ryanprior team_e_spirit: MatrixCrawler getjack team_flatpak: JayKayy oolongbrothers From 8ddb81a36f755849e8c90a82612271719f19d0a8 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 26 Apr 2021 16:54:57 +1200 Subject: [PATCH 0234/3093] jira - fixed base64 decode bug (#2349) * 
fixed base64 decode bug * added changelog fragment --- changelogs/fragments/2349-jira-bugfix-b64decode.yml | 2 ++ plugins/modules/web_infrastructure/jira.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2349-jira-bugfix-b64decode.yml diff --git a/changelogs/fragments/2349-jira-bugfix-b64decode.yml b/changelogs/fragments/2349-jira-bugfix-b64decode.yml new file mode 100644 index 0000000000..41a1dabb94 --- /dev/null +++ b/changelogs/fragments/2349-jira-bugfix-b64decode.yml @@ -0,0 +1,2 @@ +bugfixes: + - jira - fixed error when loading base64-encoded content as attachment (https://github.com/ansible-collections/community.general/pull/2349). diff --git a/plugins/modules/web_infrastructure/jira.py b/plugins/modules/web_infrastructure/jira.py index d7c88c01b8..3c1a8a27c9 100644 --- a/plugins/modules/web_infrastructure/jira.py +++ b/plugins/modules/web_infrastructure/jira.py @@ -596,7 +596,7 @@ def _prepare_attachment(filename, content=None, mime_type=None): content = f.read() else: try: - content = base64.decode(content) + content = base64.b64decode(content) except binascii.Error as e: raise Exception("Unable to base64 decode file content: %s" % e) From 5195536bd8ffb0cfa89d87ff7ca7a2c9eb646d7f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 26 Apr 2021 06:56:21 +0200 Subject: [PATCH 0235/3093] Fix Python 2 compatibility issue. 
(#2340) --- changelogs/fragments/2340-jenkins_plugin-py2.yml | 2 ++ plugins/modules/web_infrastructure/jenkins_plugin.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2340-jenkins_plugin-py2.yml diff --git a/changelogs/fragments/2340-jenkins_plugin-py2.yml b/changelogs/fragments/2340-jenkins_plugin-py2.yml new file mode 100644 index 0000000000..f3bcdbd361 --- /dev/null +++ b/changelogs/fragments/2340-jenkins_plugin-py2.yml @@ -0,0 +1,2 @@ +bugfixes: +- "jenkins_plugin - fixes Python 2 compatibility issue (https://github.com/ansible-collections/community.general/pull/2340)." \ No newline at end of file diff --git a/plugins/modules/web_infrastructure/jenkins_plugin.py b/plugins/modules/web_infrastructure/jenkins_plugin.py index e2adf7a69d..c9946023ac 100644 --- a/plugins/modules/web_infrastructure/jenkins_plugin.py +++ b/plugins/modules/web_infrastructure/jenkins_plugin.py @@ -276,6 +276,7 @@ from ansible.module_utils.six import text_type, binary_type from ansible.module_utils._text import to_native import base64 import hashlib +import io import json import os import tempfile @@ -560,7 +561,7 @@ class JenkinsPlugin(object): # Open the updates file try: - f = open(updates_file, encoding='utf-8') + f = io.open(updates_file, encoding='utf-8') except IOError as e: self.module.fail_json( msg="Cannot open temporal updates file.", From 1b7944089617c01df753c31eadefb5014e83ddc1 Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Mon, 26 Apr 2021 07:01:43 +0200 Subject: [PATCH 0236/3093] Fix incorrect URL_REALMS in keycloak.py (#2335) * Allow keycloak_group.py to take token as parameter for the authentification * Refactor get_token to pass module.params + Documentation * Fix unit test and add new one for token as param * Fix identation * Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Felix Fontein * Update 
plugins/modules/identity/keycloak/keycloak_clienttemplate.py Co-authored-by: Felix Fontein * Allow keycloak_group.py to take token as parameter for the authentification * Refactor get_token to pass module.params + Documentation * Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein * Check if base_url is None before to check format * Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Amin Vakil * Update plugins/modules/identity/keycloak/keycloak_clienttemplate.py Co-authored-by: Amin Vakil * Switch to modern syntax for the documentation (e.g. community.general.keycloak_client) * Fix URL_REALMS in keycloak.py * Update keycloak_client.py * Update keycloak_clienttemplate.py * Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- plugins/module_utils/identity/keycloak/keycloak.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 082e0af391..c0a1c2a158 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -37,8 +37,8 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.six.moves.urllib.error import HTTPError from ansible.module_utils._text import to_native -URL_REALMS = "{url}/realms" -URL_REALM = "{url}/realms/{realm}" +URL_REALMS = "{url}/admin/realms" +URL_REALM = "{url}/admin/realms/{realm}" URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token" URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}" From aea12899cc644b3f8037c63cae84a982e9d9e578 Mon Sep 17 00:00:00 2001 From: Xabier Napal Date: Mon, 26 Apr 2021 07:03:36 +0200 Subject: [PATCH 0237/3093] influxdb_retention_policy: 
ensure duration parameters are idempotent (#2281) (#2284) * influxdb_retention_policy: ensure duration parameters are idempotent (#2281) * add changelog for pr #2284 --- ...-influxdb_retention_policy-idempotence.yml | 4 + .../influxdb/influxdb_retention_policy.py | 131 ++++++++++++++---- 2 files changed, 105 insertions(+), 30 deletions(-) create mode 100644 changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml diff --git a/changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml b/changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml new file mode 100644 index 0000000000..0df25ca462 --- /dev/null +++ b/changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml @@ -0,0 +1,4 @@ +bugfixes: + - influxdb_retention_policy - ensure idempotent module execution with different + duration and shard duration parameter values + (https://github.com/ansible-collections/community.general/issues/2281). diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py b/plugins/modules/database/influxdb/influxdb_retention_policy.py index 2d27e6163e..2c2f9674b7 100644 --- a/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ b/plugins/modules/database/influxdb/influxdb_retention_policy.py @@ -31,7 +31,9 @@ options: type: str duration: description: - - Determines how long InfluxDB should keep the data. + - Determines how long InfluxDB should keep the data. If specified, it + should be C(INF) or at least one hour. If not specified, C(INF) is + assumed. Supports complex duration expressions with multiple units. required: true type: str replication: @@ -46,9 +48,10 @@ options: default: false shard_group_duration: description: - - Determines the size of a shard group. - - Value needs to be integer literal followed immediately (with no spaces) by a duration unit. - Supported duration units are C(h) for hours, C(d) for days, and C(w) for weeks. For example C(10d), C(1h), C(2w). 
+ - Determines the time range covered by a shard group. If specified it + must be at least one hour. If none, it's determined by InfluxDB by + the rentention policy's duration. Supports complex duration expressions + with multiple units. type: str version_added: '2.0.0' extends_documentation_fragment: @@ -96,6 +99,17 @@ EXAMPLES = r''' ssl: no validate_certs: no shard_group_duration: 1w + +- name: Create retention policy with complex durations + community.general.influxdb_retention_policy: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + policy_name: test + duration: 5d1h30m + replication: 1 + ssl: no + validate_certs: no + shard_group_duration: 1d10h30m ''' RETURN = r''' @@ -115,6 +129,51 @@ from ansible_collections.community.general.plugins.module_utils.influxdb import from ansible.module_utils._text import to_native +VALID_DURATION_REGEX = re.compile(r'^(\d+(ns|u|µ|ms|s|m|h|d|w))+$') + +DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)') +EXTENDED_DURATION_REGEX = re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))') + + +def check_duration_literal(value): + return VALID_DURATION_REGEX.search(value) is not None + + +def parse_duration_literal(value, extended=False): + duration = 0.0 + + if value == "INF": + return duration + + lookup = (EXTENDED_DURATION_REGEX if extended else DURATION_REGEX).findall(value) + + for duration_literal in lookup: + if extended and duration_literal[3] == 's': + duration_val = float(duration_literal[2]) + duration += duration_val * 1000 * 1000 * 1000 + else: + duration_val = int(duration_literal[0]) + + if duration_literal[1] == 'ns': + duration += duration_val + elif duration_literal[1] == 'u' or duration_literal[1] == 'µ': + duration += duration_val * 1000 + elif duration_literal[1] == 'ms': + duration += duration_val * 1000 * 1000 + elif duration_literal[1] == 's': + duration += duration_val * 1000 * 1000 * 1000 + elif duration_literal[1] == 'm': + duration += duration_val * 1000 
* 1000 * 1000 * 60 + elif duration_literal[1] == 'h': + duration += duration_val * 1000 * 1000 * 1000 * 60 * 60 + elif duration_literal[1] == 'd': + duration += duration_val * 1000 * 1000 * 1000 * 60 * 60 * 24 + elif duration_literal[1] == 'w': + duration += duration_val * 1000 * 1000 * 1000 * 60 * 60 * 24 * 7 + + return duration + + def find_retention_policy(module, client): database_name = module.params['database_name'] policy_name = module.params['policy_name'] @@ -129,6 +188,11 @@ def find_retention_policy(module, client): break except requests.exceptions.ConnectionError as e: module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e))) + + if retention_policy is not None: + retention_policy["duration"] = parse_duration_literal(retention_policy["duration"], extended=True) + retention_policy["shardGroupDuration"] = parse_duration_literal(retention_policy["shardGroupDuration"], extended=True) + return retention_policy @@ -140,6 +204,21 @@ def create_retention_policy(module, client): default = module.params['default'] shard_group_duration = module.params['shard_group_duration'] + if not check_duration_literal(duration): + module.fail_json(msg="Failed to parse value of duration") + + influxdb_duration_format = parse_duration_literal(duration) + if influxdb_duration_format != 0 and influxdb_duration_format < 3600000000000: + module.fail_json(msg="duration value must be at least 1h") + + if shard_group_duration is not None: + if not check_duration_literal(shard_group_duration): + module.fail_json(msg="Failed to parse value of shard_group_duration") + + influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) + if influxdb_shard_group_duration_format < 3600000000000: + module.fail_json(msg="shard_group_duration value must be at least 1h") + if not module.check_mode: try: if shard_group_duration: @@ -159,38 +238,30 @@ def alter_retention_policy(module, client, retention_policy): replication = 
module.params['replication'] default = module.params['default'] shard_group_duration = module.params['shard_group_duration'] - duration_regexp = re.compile(r'(\d+)([hdw]{1})|(^INF$){1}') + changed = False - duration_lookup = duration_regexp.search(duration) + if not check_duration_literal(duration): + module.fail_json(msg="Failed to parse value of duration") - if duration_lookup.group(2) == 'h': - influxdb_duration_format = '%s0m0s' % duration - elif duration_lookup.group(2) == 'd': - influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24) - elif duration_lookup.group(2) == 'w': - influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24 * 7) - elif duration == 'INF': - influxdb_duration_format = '0' + influxdb_duration_format = parse_duration_literal(duration) + if influxdb_duration_format != 0 and influxdb_duration_format < 3600000000000: + module.fail_json(msg="duration value must be at least 1h") - if shard_group_duration: - shard_group_duration_lookup = duration_regexp.search(shard_group_duration) - if not shard_group_duration_lookup: - module.fail_json( - msg="Failed to parse value of shard_group_duration. 
Please see the documentation for valid values") - if shard_group_duration_lookup.group(2) == 'h': - influxdb_shard_group_duration_format = '%s0m0s' % duration - elif shard_group_duration_lookup.group(2) == 'd': - influxdb_shard_group_duration_format = '%sh0m0s' % (int(shard_group_duration_lookup.group(1)) * 24) - elif shard_group_duration_lookup.group(2) == 'w': - influxdb_shard_group_duration_format = '%sh0m0s' % (int(shard_group_duration_lookup.group(1)) * 24 * 7) + if shard_group_duration is None: + influxdb_shard_group_duration_format = retention_policy["shardGroupDuration"] else: - influxdb_shard_group_duration_format = retention_policy['shardGroupDuration'] + if not check_duration_literal(shard_group_duration): + module.fail_json(msg="Failed to parse value of shard_group_duration") - if (not retention_policy['duration'] == influxdb_duration_format or - not retention_policy['replicaN'] == int(replication) or - not retention_policy['shardGroupDuration'] == influxdb_shard_group_duration_format or - not retention_policy['default'] == default): + influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) + if influxdb_shard_group_duration_format < 3600000000000: + module.fail_json(msg="shard_group_duration value must be at least 1h") + + if (retention_policy['duration'] != influxdb_duration_format or + retention_policy['shardGroupDuration'] != influxdb_shard_group_duration_format or + retention_policy['replicaN'] != int(replication) or + retention_policy['default'] != default): if not module.check_mode: try: client.alter_retention_policy(policy_name, database_name, duration, replication, default, From ffd73296dedb83d897e5ec3fb4de5959f0704c87 Mon Sep 17 00:00:00 2001 From: zigaSRC <65527456+zigaSRC@users.noreply.github.com> Date: Mon, 26 Apr 2021 07:07:14 +0200 Subject: [PATCH 0238/3093] lvol - extending volumes with '+' only work for percentages (#2267) * Merged changes from old PR * Added suppurt for -, other adaptations regarding size. 
* Implementing +- support for -l * Added changelog * Renamed changelog * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...vol_size_addition-subtraction_support.yaml | 5 ++ plugins/modules/system/lvol.py | 73 +++++++++++++++---- 2 files changed, 62 insertions(+), 16 deletions(-) create mode 100644 changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml diff --git a/changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml b/changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml new file mode 100644 index 0000000000..25b79f4528 --- /dev/null +++ b/changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml @@ -0,0 +1,5 @@ +--- +minor_changes: + - lvol - added proper support for ``+-`` options when extending or reducing the logical volume (https://github.com/ansible-collections/community.general/issues/1988). +bugfixes: + - lvol - fixed sizing calculation rounding to match the underlying tools (https://github.com/ansible-collections/community.general/issues/1988). diff --git a/plugins/modules/system/lvol.py b/plugins/modules/system/lvol.py index bef515b8ec..8dc3fac7f5 100644 --- a/plugins/modules/system/lvol.py +++ b/plugins/modules/system/lvol.py @@ -12,6 +12,8 @@ DOCUMENTATION = ''' author: - Jeroen Hoekx (@jhoekx) - Alexander Bulimov (@abulimov) + - Raoul Baudach (@unkaputtbar112) + - Ziga Kern (@zigaSRC) module: lvol short_description: Configure LVM logical volumes description: @@ -33,7 +35,11 @@ options: default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE]; Float values must begin with a digit. - Resizing using percentage values was not supported prior to 2.1. 
+ - When resizing, apart from specifying an absolute size you may, according to + lvextend(8)|lvreduce(8) C(--size), specify the amount to extend the logical volume with + the prefix C(+) or the amount to reduce the logical volume by with prefix C(-). + - Resizing using C(+) or C(-) was not supported prior to community.general 3.0.0. + - Please note that when using C(+) or C(-), the module is B(not idempotent). state: type: str description: @@ -136,6 +142,12 @@ EXAMPLES = ''' lv: test size: +100%FREE +- name: Extend the logical volume by given space + community.general.lvol: + vg: firefly + lv: test + size: +512M + - name: Extend the logical volume to take all remaining space of the PVs and resize the underlying filesystem community.general.lvol: vg: firefly @@ -157,6 +169,13 @@ EXAMPLES = ''' size: 512 force: yes +- name: Reduce the logical volume by given space + community.general.lvol: + vg: firefly + lv: test + size: -512M + force: yes + - name: Set the logical volume to 512m and do not try to shrink if size is lower than current one community.general.lvol: vg: firefly @@ -209,7 +228,6 @@ import re from ansible.module_utils.basic import AnsibleModule - LVOL_ENV_VARS = dict( # make sure we use the C locale when running lvol-related commands LANG='C', @@ -307,6 +325,7 @@ def main(): thinpool = module.params['thinpool'] size_opt = 'L' size_unit = 'm' + size_operator = None snapshot = module.params['snapshot'] pvs = module.params['pvs'] @@ -325,7 +344,16 @@ def main(): test_opt = '' if size: - # LVCREATE(8) -l --extents option with percentage + # LVEXTEND(8)/LVREDUCE(8) -l, -L options: Check for relative value for resizing + if size.startswith('+'): + size_operator = '+' + size = size[1:] + elif size.startswith('-'): + size_operator = '-' + size = size[1:] + # LVCREATE(8) does not support [+-] + + # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -l --extents option with percentage if '%' in size: size_parts = size.split('%', 1) size_percent = int(size_parts[0]) @@ -339,10 
+367,10 @@ def main(): size_opt = 'l' size_unit = '' + # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -L --size option unit if '%' not in size: - # LVCREATE(8) -L --size option unit if size[-1].lower() in 'bskmgtpe': - size_unit = size[-1].lower() + size_unit = size[-1] size = size[0:-1] try: @@ -398,7 +426,6 @@ def main(): else: module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg)) check_lv = snapshot - elif thinpool: if lv: # Check thin volume pre-conditions @@ -423,6 +450,8 @@ def main(): msg = '' if this_lv is None: if state == 'present': + if size_operator is not None: + module.fail_json(msg="Bad size specification of '%s%s' for creating LV" % (size_operator, size)) # Require size argument except for snapshot of thin volumes if (lv or thinpool) and not size: for test_lv in lvs: @@ -476,13 +505,19 @@ def main(): else: # size_whole == 'FREE': size_requested = size_percent * this_vg['free'] / 100 - # Round down to the next lowest whole physical extent - size_requested -= (size_requested % this_vg['ext_size']) - - if '+' in size: + # from LVEXTEND(8) - The resulting value is rounded upward. + # from LVREDUCE(8) - The resulting value for the substraction is rounded downward, for the absolute size it is rounded upward. 
+ if size_operator == '+': size_requested += this_lv['size'] + size_requested += this_vg['ext_size'] - (size_requested % this_vg['ext_size']) + elif size_operator == '-': + size_requested = this_lv['size'] - size_requested + size_requested -= (size_requested % this_vg['ext_size']) + else: + size_requested += this_vg['ext_size'] - (size_requested % this_vg['ext_size']) + if this_lv['size'] < size_requested: - if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))): + if (size_free > 0) and (size_free >= (size_requested - this_lv['size'])): tool = module.get_bin_path("lvextend", required=True) else: module.fail_json( @@ -490,7 +525,7 @@ def main(): (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit) ) elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large - if size_requested == 0: + if size_requested < 1: module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) elif not force: module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name'])) @@ -501,7 +536,10 @@ def main(): if tool: if resizefs: tool = '%s %s' % (tool, '--resizefs') - cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + if size_operator: + cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs) + else: + cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) rc, out, err = module.run_command(cmd) if "Reached maximum COW size" in out: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) @@ -518,9 +556,9 @@ def main(): else: # resize LV based on absolute values tool = None - if float(size) > this_lv['size']: + if float(size) > this_lv['size'] or size_operator == '+': tool = module.get_bin_path("lvextend", required=True) - elif 
shrink and float(size) < this_lv['size']: + elif shrink and float(size) < this_lv['size'] or size_operator == '-': if float(size) == 0: module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) if not force: @@ -532,7 +570,10 @@ def main(): if tool: if resizefs: tool = '%s %s' % (tool, '--resizefs') - cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + if size_operator: + cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs) + else: + cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) rc, out, err = module.run_command(cmd) if "Reached maximum COW size" in out: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) From 99c564398aa452ec99666f6bbb90527a6ed9a83c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 26 Apr 2021 23:09:19 +1200 Subject: [PATCH 0239/3093] jira - module revamp -> moved code to class (#2208) * rebased after jira bugfix * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/2208-jira-revamp.yml | 2 + plugins/modules/web_infrastructure/jira.py | 535 +++++++++--------- tests/integration/targets/jira/tasks/main.yml | 47 +- 3 files changed, 294 insertions(+), 290 deletions(-) create mode 100644 changelogs/fragments/2208-jira-revamp.yml diff --git a/changelogs/fragments/2208-jira-revamp.yml b/changelogs/fragments/2208-jira-revamp.yml new file mode 100644 index 0000000000..32f1650aa0 --- /dev/null +++ b/changelogs/fragments/2208-jira-revamp.yml @@ -0,0 +1,2 @@ +minor_changes: + - jira - revamped the module as a class using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/2208). 
diff --git a/plugins/modules/web_infrastructure/jira.py b/plugins/modules/web_infrastructure/jira.py index 3c1a8a27c9..6acf0c7f51 100644 --- a/plugins/modules/web_infrastructure/jira.py +++ b/plugins/modules/web_infrastructure/jira.py @@ -355,271 +355,16 @@ import mimetypes import os import random import string -import sys import traceback +from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper, cause_changes from ansible.module_utils.six.moves.urllib.request import pathname2url - from ansible.module_utils._text import to_text, to_bytes, to_native - -from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url -def request( - url, - user, - passwd, - timeout, - data=None, - method=None, - content_type='application/json', - additional_headers=None -): - if data and content_type == 'application/json': - data = json.dumps(data) - - # NOTE: fetch_url uses a password manager, which follows the - # standard request-then-challenge basic-auth semantics. However as - # JIRA allows some unauthorised operations it doesn't necessarily - # send the challenge, so the request occurs as the anonymous user, - # resulting in unexpected results. To work around this we manually - # inject the basic-auth header up-front to ensure that JIRA treats - # the requests as authorized for this user. 
- auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(user, passwd), errors='surrogate_or_strict'))) - - headers = {} - if isinstance(additional_headers, dict): - headers = additional_headers.copy() - headers.update({ - "Content-Type": content_type, - "Authorization": "Basic %s" % auth, - }) - - response, info = fetch_url( - module, url, data=data, method=method, timeout=timeout, headers=headers - ) - - if info['status'] not in (200, 201, 204): - error = None - try: - error = json.loads(info['body']) - except Exception: - module.fail_json(msg=to_native(info['body']), exception=traceback.format_exc()) - if error: - msg = [] - for key in ('errorMessages', 'errors'): - if error.get(key): - msg.append(to_native(error[key])) - if msg: - module.fail_json(msg=', '.join(msg)) - module.fail_json(msg=to_native(error)) - # Fallback print body, if it cant be decoded - module.fail_json(msg=to_native(info['body'])) - - body = response.read() - - if body: - return json.loads(to_text(body, errors='surrogate_or_strict')) - return {} - - -def post(url, user, passwd, timeout, data, content_type='application/json', additional_headers=None): - return request(url, user, passwd, timeout, data=data, method='POST', content_type=content_type, additional_headers=additional_headers) - - -def put(url, user, passwd, timeout, data): - return request(url, user, passwd, timeout, data=data, method='PUT') - - -def get(url, user, passwd, timeout): - return request(url, user, passwd, timeout) - - -def create(restbase, user, passwd, params): - createfields = { - 'project': {'key': params['project']}, - 'summary': params['summary'], - 'issuetype': {'name': params['issuetype']}} - - if params['description']: - createfields['description'] = params['description'] - - # Merge in any additional or overridden fields - if params['fields']: - createfields.update(params['fields']) - - data = {'fields': createfields} - - url = restbase + '/issue/' - - return True, post(url, user, passwd, params['timeout'], 
data) - - -def comment(restbase, user, passwd, params): - data = { - 'body': params['comment'] - } - url = restbase + '/issue/' + params['issue'] + '/comment' - - return True, post(url, user, passwd, params['timeout'], data) - - -def edit(restbase, user, passwd, params): - data = { - 'fields': params['fields'] - } - url = restbase + '/issue/' + params['issue'] - - return True, put(url, user, passwd, params['timeout'], data) - - -def update(restbase, user, passwd, params): - data = { - "update": params['fields'], - } - url = restbase + '/issue/' + params['issue'] - - return True, put(url, user, passwd, params['timeout'], data) - - -def fetch(restbase, user, passwd, params): - url = restbase + '/issue/' + params['issue'] - return False, get(url, user, passwd, params['timeout']) - - -def search(restbase, user, passwd, params): - url = restbase + '/search?jql=' + pathname2url(params['jql']) - if params['fields']: - fields = params['fields'].keys() - url = url + '&fields=' + '&fields='.join([pathname2url(f) for f in fields]) - if params['maxresults']: - url = url + '&maxResults=' + str(params['maxresults']) - return False, get(url, user, passwd, params['timeout']) - - -def transition(restbase, user, passwd, params): - # Find the transition id - turl = restbase + '/issue/' + params['issue'] + "/transitions" - tmeta = get(turl, user, passwd, params['timeout']) - - target = params['status'] - tid = None - for t in tmeta['transitions']: - if t['name'] == target: - tid = t['id'] - break - - if not tid: - raise ValueError("Failed find valid transition for '%s'" % target) - - fields = dict(params['fields']) - if params['summary'] is not None: - fields.update({'summary': params['summary']}) - if params['description'] is not None: - fields.update({'description': params['description']}) - - # Perform it - url = restbase + '/issue/' + params['issue'] + "/transitions" - data = {'transition': {"id": tid}, - 'fields': fields} - if params['comment'] is not None: - 
data.update({"update": { - "comment": [{ - "add": {"body": params['comment']} - }], - }}) - - return True, post(url, user, passwd, params['timeout'], data) - - -def link(restbase, user, passwd, params): - data = { - 'type': {'name': params['linktype']}, - 'inwardIssue': {'key': params['inwardissue']}, - 'outwardIssue': {'key': params['outwardissue']}, - } - - url = restbase + '/issueLink/' - - return True, post(url, user, passwd, params['timeout'], data) - - -def attach(restbase, user, passwd, params): - filename = params['attachment'].get('filename') - content = params['attachment'].get('content') - - if not any((filename, content)): - raise ValueError('at least one of filename or content must be provided') - mime = params['attachment'].get('mimetype') - - if not os.path.isfile(filename): - raise ValueError('The provided filename does not exist: %s' % filename) - - content_type, data = _prepare_attachment(filename, content, mime) - - url = restbase + '/issue/' + params['issue'] + '/attachments' - return True, post( - url, user, passwd, params['timeout'], data, content_type=content_type, - additional_headers={"X-Atlassian-Token": "no-check"} - ) - - -# Ideally we'd just use prepare_multipart from ansible.module_utils.urls, but -# unfortunately it does not support specifying the encoding and also defaults to -# base64. Jira doesn't support base64 encoded attachments (and is therefore not -# spec compliant. Go figure). I originally wrote this function as an almost -# exact copypasta of prepare_multipart, but ran into some encoding issues when -# using the noop encoder. Hand rolling the entire message body seemed to work -# out much better. -# -# https://community.atlassian.com/t5/Jira-questions/Jira-dosen-t-decode-base64-attachment-request-REST-API/qaq-p/916427 -# -# content is expected to be a base64 encoded string since Ansible doesn't -# support passing raw bytes objects. 
-def _prepare_attachment(filename, content=None, mime_type=None): - def escape_quotes(s): - return s.replace('"', '\\"') - - boundary = "".join(random.choice(string.digits + string.ascii_letters) for i in range(30)) - name = to_native(os.path.basename(filename)) - - if not mime_type: - try: - mime_type = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream' - except Exception: - mime_type = 'application/octet-stream' - main_type, sep, sub_type = mime_type.partition('/') - - if not content and filename: - with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f: - content = f.read() - else: - try: - content = base64.b64decode(content) - except binascii.Error as e: - raise Exception("Unable to base64 decode file content: %s" % e) - - lines = [ - "--{0}".format(boundary), - 'Content-Disposition: form-data; name="file"; filename={0}'.format(escape_quotes(name)), - "Content-Type: {0}".format("{0}/{1}".format(main_type, sub_type)), - '', - to_text(content), - "--{0}--".format(boundary), - "" - ] - - return ( - "multipart/form-data; boundary={0}".format(boundary), - "\r\n".join(lines) - ) - - -def main(): - - global module - module = AnsibleModule( +class JIRA(StateModuleHelper): + module = dict( argument_spec=dict( attachment=dict(type='dict', options=dict( content=dict(type='str'), @@ -627,8 +372,11 @@ def main(): mimetype=dict(type='str') )), uri=dict(type='str', required=True), - operation=dict(type='str', choices=['attach', 'create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search'], - aliases=['command'], required=True), + operation=dict( + type='str', + choices=['attach', 'create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search'], + aliases=['command'], required=True + ), username=dict(type='str', required=True), password=dict(type='str', required=True, no_log=True), project=dict(type='str', ), @@ -662,35 +410,258 @@ def main(): supports_check_mode=False ) - op = 
module.params['operation'] + state_param = 'operation' - # Handle rest of parameters - uri = module.params['uri'] - user = module.params['username'] - passwd = module.params['password'] - if module.params['assignee']: - module.params['fields']['assignee'] = {'name': module.params['assignee']} - if module.params['account_id']: - module.params['fields']['assignee'] = {'accountId': module.params['account_id']} + def __init_module__(self): + if self.vars.fields is None: + self.vars.fields = {} + if self.vars.assignee: + self.vars.fields['assignee'] = {'name': self.vars.assignee} + if self.vars.account_id: + self.vars.fields['assignee'] = {'accountId': self.vars.account_id} + self.vars.uri = self.vars.uri.strip('/') + self.vars.set('restbase', self.vars.uri + '/rest/api/2') - if not uri.endswith('/'): - uri = uri + '/' - restbase = uri + 'rest/api/2' + @cause_changes(on_success=True) + def operation_create(self): + createfields = { + 'project': {'key': self.vars.project}, + 'summary': self.vars.summary, + 'issuetype': {'name': self.vars.issuetype}} - # Dispatch - try: + if self.vars.description: + createfields['description'] = self.vars.description - # Lookup the corresponding method for this operation. This is - # safe as the AnsibleModule should remove any unknown operations. 
- thismod = sys.modules[__name__] - method = getattr(thismod, op) + # Merge in any additional or overridden fields + if self.vars.fields: + createfields.update(self.vars.fields) - changed, ret = method(restbase, user, passwd, module.params) + data = {'fields': createfields} + url = self.vars.restbase + '/issue/' + self.vars.meta = self.post(url, data) - except Exception as e: - return module.fail_json(msg=to_native(e), exception=traceback.format_exc()) + @cause_changes(on_success=True) + def operation_comment(self): + data = { + 'body': self.vars.comment + } + url = self.vars.restbase + '/issue/' + self.vars.issue + '/comment' + self.vars.meta = self.post(url, data) - module.exit_json(changed=changed, meta=ret) + @cause_changes(on_success=True) + def operation_edit(self): + data = { + 'fields': self.vars.fields + } + url = self.vars.restbase + '/issue/' + self.vars.issue + self.vars.meta = self.put(url, data) + + @cause_changes(on_success=True) + def operation_update(self): + data = { + "update": self.vars.fields, + } + url = self.vars.restbase + '/issue/' + self.vars.issue + self.vars.meta = self.put(url, data) + + def operation_fetch(self): + url = self.vars.restbase + '/issue/' + self.vars.issue + self.vars.meta = self.get(url) + + def operation_search(self): + url = self.vars.restbase + '/search?jql=' + pathname2url(self.vars.jql) + if self.vars.fields: + fields = self.vars.fields.keys() + url = url + '&fields=' + '&fields='.join([pathname2url(f) for f in fields]) + if self.vars.maxresults: + url = url + '&maxResults=' + str(self.vars.maxresults) + + self.vars.meta = self.get(url) + + @cause_changes(on_success=True) + def operation_transition(self): + # Find the transition id + turl = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions" + tmeta = self.get(turl) + + target = self.vars.status + tid = None + for t in tmeta['transitions']: + if t['name'] == target: + tid = t['id'] + break + else: + raise ValueError("Failed find valid transition for 
'%s'" % target) + + fields = dict(self.vars.fields) + if self.vars.summary is not None: + fields.update({'summary': self.vars.summary}) + if self.vars.description is not None: + fields.update({'description': self.vars.description}) + + # Perform it + data = {'transition': {"id": tid}, + 'fields': fields} + if self.vars.comment is not None: + data.update({"update": { + "comment": [{ + "add": {"body": self.vars.comment} + }], + }}) + url = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions" + self.vars.meta = self.post(url, data) + + @cause_changes(on_success=True) + def operation_link(self): + data = { + 'type': {'name': self.vars.linktype}, + 'inwardIssue': {'key': self.vars.inwardissue}, + 'outwardIssue': {'key': self.vars.outwardissue}, + } + url = self.vars.restbase + '/issueLink/' + self.vars.meta = self.post(url, data) + + @cause_changes(on_success=True) + def operation_attach(self): + v = self.vars + filename = v.attachment.get('filename') + content = v.attachment.get('content') + + if not any((filename, content)): + raise ValueError('at least one of filename or content must be provided') + mime = v.attachment.get('mimetype') + + if not os.path.isfile(filename): + raise ValueError('The provided filename does not exist: %s' % filename) + + content_type, data = self._prepare_attachment(filename, content, mime) + + url = v.restbase + '/issue/' + v.issue + '/attachments' + return True, self.post( + url, data, content_type=content_type, additional_headers={"X-Atlassian-Token": "no-check"} + ) + + # Ideally we'd just use prepare_multipart from ansible.module_utils.urls, but + # unfortunately it does not support specifying the encoding and also defaults to + # base64. Jira doesn't support base64 encoded attachments (and is therefore not + # spec compliant. Go figure). I originally wrote this function as an almost + # exact copypasta of prepare_multipart, but ran into some encoding issues when + # using the noop encoder. 
Hand rolling the entire message body seemed to work + # out much better. + # + # https://community.atlassian.com/t5/Jira-questions/Jira-dosen-t-decode-base64-attachment-request-REST-API/qaq-p/916427 + # + # content is expected to be a base64 encoded string since Ansible doesn't + # support passing raw bytes objects. + @staticmethod + def _prepare_attachment(filename, content=None, mime_type=None): + def escape_quotes(s): + return s.replace('"', '\\"') + + boundary = "".join(random.choice(string.digits + string.ascii_letters) for dummy in range(30)) + name = to_native(os.path.basename(filename)) + + if not mime_type: + try: + mime_type = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream' + except Exception: + mime_type = 'application/octet-stream' + main_type, sep, sub_type = mime_type.partition('/') + + if not content and filename: + with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f: + content = f.read() + else: + try: + content = base64.b64decode(content) + except binascii.Error as e: + raise Exception("Unable to base64 decode file content: %s" % e) + + lines = [ + "--{0}".format(boundary), + 'Content-Disposition: form-data; name="file"; filename={0}'.format(escape_quotes(name)), + "Content-Type: {0}".format("{0}/{1}".format(main_type, sub_type)), + '', + to_text(content), + "--{0}--".format(boundary), + "" + ] + + return ( + "multipart/form-data; boundary={0}".format(boundary), + "\r\n".join(lines) + ) + + def request( + self, + url, + data=None, + method=None, + content_type='application/json', + additional_headers=None + ): + if data and content_type == 'application/json': + data = json.dumps(data) + + # NOTE: fetch_url uses a password manager, which follows the + # standard request-then-challenge basic-auth semantics. However as + # JIRA allows some unauthorised operations it doesn't necessarily + # send the challenge, so the request occurs as the anonymous user, + # resulting in unexpected results. 
To work around this we manually + # inject the basic-auth header up-front to ensure that JIRA treats + # the requests as authorized for this user. + auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(self.vars.username, self.vars.password), + errors='surrogate_or_strict'))) + + headers = {} + if isinstance(additional_headers, dict): + headers = additional_headers.copy() + headers.update({ + "Content-Type": content_type, + "Authorization": "Basic %s" % auth, + }) + + response, info = fetch_url( + self.module, url, data=data, method=method, timeout=self.vars.timeout, headers=headers + ) + + if info['status'] not in (200, 201, 204): + error = None + try: + error = json.loads(info['body']) + except Exception: + self.module.fail_json(msg=to_native(info['body']), exception=traceback.format_exc()) + if error: + msg = [] + for key in ('errorMessages', 'errors'): + if error.get(key): + msg.append(to_native(error[key])) + if msg: + self.module.fail_json(msg=', '.join(msg)) + self.module.fail_json(msg=to_native(error)) + # Fallback print body, if it cant be decoded + self.module.fail_json(msg=to_native(info['body'])) + + body = response.read() + + if body: + return json.loads(to_text(body, errors='surrogate_or_strict')) + return {} + + def post(self, url, data, content_type='application/json', additional_headers=None): + return self.request(url, data=data, method='POST', content_type=content_type, + additional_headers=additional_headers) + + def put(self, url, data): + return self.request(url, data=data, method='PUT') + + def get(self, url): + return self.request(url) + + +def main(): + jira = JIRA() + jira.run() if __name__ == '__main__': diff --git a/tests/integration/targets/jira/tasks/main.yml b/tests/integration/targets/jira/tasks/main.yml index 824de09a89..c1d24a275a 100644 --- a/tests/integration/targets/jira/tasks/main.yml +++ b/tests/integration/targets/jira/tasks/main.yml @@ -1,5 +1,6 @@ --- -- community.general.jira: +- name: create test ticket + 
community.general.jira: uri: "{{ uri }}" username: "{{ user }}" password: "{{ pasw }}" @@ -9,10 +10,15 @@ description: bla bla bla issuetype: Task register: issue - - debug: msg: Issue={{ issue }} -- name: Add comment bleep bleep +- name: assert test ticket + assert: + that: + - issue is changed + - issue.meta.key.startswith(proj) + +- name: add comment bleep bleep community.general.jira: uri: "{{ uri }}" username: "{{ user }}" @@ -20,7 +26,15 @@ issue: "{{ issue.meta.key }}" operation: comment comment: bleep bleep! -- name: Transition -> In Progress with comment + register: comment_bleep_bleep +- name: assert comment bleep bleep + assert: + that: + - comment_bleep_bleep is changed + - comment_bleep_bleep.meta.body == "bleep bleep!" + - comment_bleep_bleep.meta.body != None + +- name: transition -> In Progress with comment community.general.jira: uri: "{{ uri }}" username: "{{ user }}" @@ -29,15 +43,27 @@ operation: transition status: Start Progress comment: -> in progress -- name: Change assignee + register: transition_inprog +- name: assert transition -> In Progress with comment + assert: + that: + - transition_inprog is changed + +- name: change assignee community.general.jira: uri: "{{ uri }}" username: "{{ user }}" password: "{{ pasw }}" issue: "{{ issue.meta.key }}" operation: edit - accountId: "{{ user2 }}" -- name: Transition -> Resolved with comment + account_id: "{{ user2 }}" + register: assign +- name: assert change assignee + assert: + that: + - assign is changed + +- name: transition -> Resolved with comment community.general.jira: uri: "{{ uri }}" username: "{{ user }}" @@ -46,11 +72,16 @@ operation: transition status: Resolve Issue comment: -> resolved - accountId: "{{ user1 }}" + account_id: "{{ user1 }}" fields: resolution: name: Done description: wakawakawakawaka + register: transition_resolved +- name: assert transition -> Resolved with comment + assert: + that: + - transition_resolved is changed - debug: msg: From 
2cc3ce02304d20d9f7092ad50159b521fa151cac Mon Sep 17 00:00:00 2001 From: Frank Dornheim <524257+conloos@users.noreply.github.com> Date: Mon, 26 Apr 2021 18:25:45 +0200 Subject: [PATCH 0240/3093] lxd: dynamic inventory (#2026) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * lxd dynamic inventory and test data * added ``merge_profile`` parameter to merge configurations from the play to an existing profile * cosmetic changes * added ``merge_profile`` parameter to merge configurations from the play to an existing profile * cosmetic changes * fix pylint errors * fix flake8 warnings * fix pep8 errors without "line to long" * fix ansible tests * fix typo * fix version added * fix lost of suggestions from felixfontein * fix filter fix ansible test errors * delete test config * delete 'notes:' and copy content to description * move testdata load testdata by path from config * updated documentation * fix test data and remove inventory branch * fix spellings and rename lxd to community.general.lxd * fix documentation * remove selftest * strip example data * add unit test * switch to ansible.module_utils.common.dict_transformations * documentation cleanup * move lxd_inventory.atd from files to fixtures * update documentation move lxd_inventory.atd * rename self.groups to self dispose remove dumpdata * cleanup * fix unittests comment out dump_data, it breaks the unit tests * fix pep8 * Apply suggestions from code review * Update plugins/inventory/lxd.py * add test if no groupby is selected * rename disposed to groupby remove unused constant other suggested cleanups * Use bundled ipaddress instead of own code. * Update plugins/inventory/lxd.py * Exceptions should not be eaten. * Improve error handling for network range/address parsing. * Fix typo. * Make network range valid. * Do not error when groupby is not a dict. 
Co-authored-by: Frank Dornheim <“dornheim@posteo.de@users.noreply.github.com”> Co-authored-by: Felix Fontein --- plugins/inventory/lxd.py | 950 ++++++++++++++++++ .../inventory/fixtures/lxd_inventory.atd | 174 ++++ tests/unit/plugins/inventory/test_lxd.py | 100 ++ 3 files changed, 1224 insertions(+) create mode 100644 plugins/inventory/lxd.py create mode 100644 tests/unit/plugins/inventory/fixtures/lxd_inventory.atd create mode 100644 tests/unit/plugins/inventory/test_lxd.py diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py new file mode 100644 index 0000000000..c48818d595 --- /dev/null +++ b/plugins/inventory/lxd.py @@ -0,0 +1,950 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Frank Dornheim +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' + name: community.general.lxd + short_description: Returns Ansible inventory from lxd host + description: + - Get inventory from the lxd. + - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. + version_added: "3.0.0" + author: "Frank Dornheim (@conloos)" + options: + plugin: + description: Token that ensures this is a source file for the 'lxd' plugin. + required: true + choices: [ 'community.general.lxd' ] + url: + description: + - The unix domain socket path or the https URL for the lxd server. + - Sockets in filesystem have to start with C(unix:). + - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). + default: unix:/var/snap/lxd/common/lxd/unix.socket + type: str + client_key: + description: + - The client certificate key file path. + aliases: [ key_file ] + default: $HOME/.config/lxc/client.key + type: path + client_cert: + description: + - The client certificate file path. 
+ aliases: [ cert_file ] + default: $HOME/.config/lxc/client.crt + type: path + trust_password: + description: + - The client trusted password. + - You need to set this password on the lxd server before + running this module using the following command + C(lxc config set core.trust_password ) + See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/). + - If I(trust_password) is set, this module send a request for authentication before sending any requests. + type: str + state: + description: Filter the container according to the current status. + type: str + default: none + choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ] + prefered_container_network_interface: + description: + - If a container has multiple network interfaces, select which one is the prefered as pattern. + - Combined with the first number that can be found e.g. 'eth' + 0. + type: str + default: eth + prefered_container_network_family: + description: + - If a container has multiple network interfaces, which one is the prefered by family. + - Specify C(inet) for IPv4 and C(inet6) for IPv6. + type: str + default: inet + choices: [ 'inet', 'inet6' ] + groupby: + description: + - Create groups by the following keywords C(location), C(pattern), C(network_range), C(os), C(release), C(profile), C(vlanid). + - See example for syntax. 
+ type: json +''' + +EXAMPLES = ''' +# simple lxd.yml +plugin: community.general.lxd +url: unix:/var/snap/lxd/common/lxd/unix.socket + +# simple lxd.yml including filter +plugin: community.general.lxd +url: unix:/var/snap/lxd/common/lxd/unix.socket +state: RUNNING + +# grouping lxd.yml +groupby: + testpattern: + type: pattern + attribute: test + vlan666: + type: vlanid + attribute: 666 + locationBerlin: + type: location + attribute: Berlin + osUbuntu: + type: os + attribute: ubuntu + releaseFocal: + type: release + attribute: focal + releaseBionic: + type: release + attribute: bionic + profileDefault: + type: profile + attribute: default + profileX11: + type: profile + attribute: x11 + netRangeIPv4: + type: network_range + attribute: 10.98.143.0/24 + netRangeIPv6: + type: network_range + attribute: fd42:bd00:7b11:2167:216:3eff::/24 +''' + +import binascii +import json +import re +import time +import os +import socket +from ansible.plugins.inventory import BaseInventoryPlugin +from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible.errors import AnsibleError, AnsibleParserError +from ansible_collections.community.general.plugins.module_utils.compat import ipaddress +from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException + + +class InventoryModule(BaseInventoryPlugin): + DEBUG = 4 + NAME = 'community.general.lxd' + SNAP_SOCKET_URL = 'unix:/var/snap/lxd/common/lxd/unix.socket' + SOCKET_URL = 'unix:/var/lib/lxd/unix.socket' + + @staticmethod + def load_json_data(path): + """Load json data + + Load json data from file + + Args: + list(path): Path elements + str(file_name): Filename of data + Kwargs: + None + Raises: + None + Returns: + dict(json_data): json data""" + try: + with open(path, 'r') as json_file: + return json.load(json_file) + except (IOError, json.decoder.JSONDecodeError) as err: + raise AnsibleParserError('Could not 
load the test data from {0}: {1}'.format(to_native(path), to_native(err))) + + def save_json_data(self, path, file_name=None): + """save data as json + + Save data as json file + + Args: + list(path): Path elements + str(file_name): Filename of data + Kwargs: + None + Raises: + None + Returns: + None""" + + if file_name: + path.append(file_name) + else: + prefix = 'lxd_data-' + time_stamp = time.strftime('%Y%m%d-%H%M%S') + suffix = '.atd' + path.append(prefix + time_stamp + suffix) + + try: + cwd = os.path.abspath(os.path.dirname(__file__)) + with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file: + json.dump(self.data, json_file) + except IOError as err: + raise AnsibleParserError('Could not save data: {0}'.format(to_native(err))) + + def verify_file(self, path): + """Check the config + + Return true/false if the config-file is valid for this plugin + + Args: + str(path): path to the config + Kwargs: + None + Raises: + None + Returns: + bool(valid): is valid""" + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith(('lxd.yaml', 'lxd.yml')): + valid = True + else: + self.display.vvv('Inventory source not ending in "lxd.yaml" or "lxd.yml"') + return valid + + @staticmethod + def validate_url(url): + """validate url + + check whether the url is correctly formatted + + Args: + url + Kwargs: + None + Raises: + AnsibleError + Returns: + bool""" + if not isinstance(url, str): + return False + if not url.startswith(('unix:', 'https:')): + raise AnsibleError('URL is malformed: {0}'.format(to_native(url))) + return True + + def _connect_to_socket(self): + """connect to lxd socket + + Connect to lxd socket by provided url or defaults + + Args: + None + Kwargs: + None + Raises: + AnsibleError + Returns: + None""" + error_storage = {} + url_list = [self.get_option('url'), self.SNAP_SOCKET_URL, self.SOCKET_URL] + urls = (url for url in url_list if self.validate_url(url)) + for url in urls: + try: + socket_connection = 
LXDClient(url, self.client_key, self.client_cert, self.debug) + return socket_connection + except LXDClientException as err: + error_storage[url] = err + raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage))) + + def _get_networks(self): + """Get Networknames + + Returns all network config names + + Args: + None + Kwargs: + None + Raises: + None + Returns: + list(names): names of all network_configs""" + # e.g. {'type': 'sync', + # 'status': 'Success', + # 'status_code': 200, + # 'operation': '', + # 'error_code': 0, + # 'error': '', + # 'metadata': ['/1.0/networks/lxdbr0']} + network_configs = self.socket.do('GET', '/1.0/networks') + return [m.split('/')[3] for m in network_configs['metadata']] + + def _get_containers(self): + """Get Containernames + + Returns all containernames + + Args: + None + Kwargs: + None + Raises: + None + Returns: + list(names): names of all containers""" + # e.g. {'type': 'sync', + # 'status': 'Success', + # 'status_code': 200, + # 'operation': '', + # 'error_code': 0, + # 'error': '', + # 'metadata': ['/1.0/containers/udemy-ansible-ubuntu-2004']} + containers = self.socket.do('GET', '/1.0/containers') + return [m.split('/')[3] for m in containers['metadata']] + + def _get_config(self, branch, name): + """Get inventory of container + + Get config of container + + Args: + str(branch): Name oft the API-Branch + str(name): Name of Container + Kwargs: + None + Source: + https://github.com/lxc/lxd/blob/master/doc/rest-api.md + Raises: + None + Returns: + dict(config): Config of the container""" + config = {} + if isinstance(branch, (tuple, list)): + config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))} + else: + config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))} + return config + + def get_container_data(self, names): + """Create Inventory of the container + + Iterate 
through the different branches of the containers and collect Informations. + + Args: + list(names): List of container names + Kwargs: + None + Raises: + None + Returns: + None""" + # tuple(('instances','metadata/templates')) to get section in branch + # e.g. /1.0/instances//metadata/templates + branches = ['containers', ('instances', 'state')] + container_config = {} + for branch in branches: + for name in names: + container_config['containers'] = self._get_config(branch, name) + self.data = dict_merge(container_config, self.data) + + def get_network_data(self, names): + """Create Inventory of the container + + Iterate through the different branches of the containers and collect Informations. + + Args: + list(names): List of container names + Kwargs: + None + Raises: + None + Returns: + None""" + # tuple(('instances','metadata/templates')) to get section in branch + # e.g. /1.0/instances//metadata/templates + branches = [('networks', 'state')] + network_config = {} + for branch in branches: + for name in names: + try: + network_config['networks'] = self._get_config(branch, name) + except LXDClientException: + network_config['networks'] = {name: None} + self.data = dict_merge(network_config, self.data) + + def extract_network_information_from_container_config(self, container_name): + """Returns the network interface configuration + + Returns the network ipv4 and ipv6 config of the container without local-link + + Args: + str(container_name): Name oft he container + Kwargs: + None + Raises: + None + Returns: + dict(network_configuration): network config""" + container_network_interfaces = self._get_data_entry('containers/{0}/state/metadata/network'.format(container_name)) + network_configuration = None + if container_network_interfaces: + network_configuration = {} + gen_interface_names = [interface_name for interface_name in container_network_interfaces if interface_name != 'lo'] + for interface_name in gen_interface_names: + gen_address = [address for address in 
container_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link'] + network_configuration[interface_name] = [] + for address in gen_address: + address_set = {} + address_set['family'] = address.get('family') + address_set['address'] = address.get('address') + address_set['netmask'] = address.get('netmask') + address_set['combined'] = address.get('address') + '/' + address.get('netmask') + network_configuration[interface_name].append(address_set) + return network_configuration + + def get_prefered_container_network_interface(self, container_name): + """Helper to get the prefered interface of thr container + + Helper to get the prefered interface provide by neme pattern from 'prefered_container_network_interface'. + + Args: + str(containe_name): name of container + Kwargs: + None + Raises: + None + Returns: + str(prefered_interface): None or interface name""" + container_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)) + prefered_interface = None # init + if container_network_interfaces: # container have network interfaces + # generator if interfaces which start with the desired pattern + net_generator = [interface for interface in container_network_interfaces if interface.startswith(self.prefered_container_network_interface)] + selected_interfaces = [] # init + for interface in net_generator: + selected_interfaces.append(interface) + if len(selected_interfaces) > 0: + prefered_interface = sorted(selected_interfaces)[0] + return prefered_interface + + def get_container_vlans(self, container_name): + """Get VLAN(s) from container + + Helper to get the VLAN_ID from the container + + Args: + str(containe_name): name of container + Kwargs: + None + Raises: + None + Returns: + None""" + # get network device configuration and store {network: vlan_id} + network_vlans = {} + for network in self._get_data_entry('networks'): + if self._get_data_entry('state/metadata/vlan/vid', 
data=self.data['networks'].get(network)): + network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)) + + # get networkdevices of container and return + # e.g. + # "eth0":{ "name":"eth0", + # "network":"lxdbr0", + # "type":"nic"}, + vlan_ids = {} + devices = self._get_data_entry('containers/{0}/containers/metadata/expanded_devices'.format(to_native(container_name))) + for device in devices: + if 'network' in devices[device]: + if devices[device]['network'] in network_vlans: + vlan_ids[devices[device].get('network')] = network_vlans[devices[device].get('network')] + return vlan_ids if vlan_ids else None + + def _get_data_entry(self, path, data=None, delimiter='/'): + """Helper to get data + + Helper to get data from self.data by a path like 'path/to/target' + Attention: Escaping of the delimiter is not (yet) provided. + + Args: + str(path): path to nested dict + Kwargs: + dict(data): datastore + str(delimiter): delimiter in Path. + Raises: + None + Returns: + *(value)""" + try: + if not data: + data = self.data + if delimiter in path: + path = path.split(delimiter) + + if isinstance(path, list) and len(path) > 1: + data = data[path.pop(0)] + path = delimiter.join(path) + return self._get_data_entry(path, data, delimiter) # recursion + return data[path] + except KeyError: + return None + + def _set_data_entry(self, container_name, key, value, path=None): + """Helper to save data + + Helper to save the data in self.data + Detect if data is allready in branch and use dict_merge() to prevent that branch is overwritten. 
+ + Args: + str(container_name): name of container + str(key): same as dict + *(value): same as dict + Kwargs: + str(path): path to branch-part + Raises: + AnsibleParserError + Returns: + None""" + if not path: + path = self.data['inventory'] + if container_name not in path: + path[container_name] = {} + + try: + if isinstance(value, dict) and key in path[container_name]: + path[container_name] = dict_merge(value, path[container_name][key]) + else: + path[container_name][key] = value + except KeyError as err: + raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err))) + + def extract_information_from_container_configs(self): + """Process configuration information + + Preparation of the data + + Args: + dict(configs): Container configurations + Kwargs: + None + Raises: + None + Returns: + None""" + # create branch "inventory" + if 'inventory' not in self.data: + self.data['inventory'] = {} + + for container_name in self.data['containers']: + self._set_data_entry(container_name, 'os', self._get_data_entry( + 'containers/{0}/containers/metadata/config/image.os'.format(container_name))) + self._set_data_entry(container_name, 'release', self._get_data_entry( + 'containers/{0}/containers/metadata/config/image.release'.format(container_name))) + self._set_data_entry(container_name, 'version', self._get_data_entry( + 'containers/{0}/containers/metadata/config/image.version'.format(container_name))) + self._set_data_entry(container_name, 'profile', self._get_data_entry( + 'containers/{0}/containers/metadata/profiles'.format(container_name))) + self._set_data_entry(container_name, 'location', self._get_data_entry( + 'containers/{0}/containers/metadata/location'.format(container_name))) + self._set_data_entry(container_name, 'state', self._get_data_entry( + 'containers/{0}/containers/metadata/config/volatile.last_state.power'.format(container_name))) + self._set_data_entry(container_name, 'network_interfaces', 
self.extract_network_information_from_container_config(container_name)) + self._set_data_entry(container_name, 'preferred_interface', self.get_prefered_container_network_interface(container_name)) + self._set_data_entry(container_name, 'vlan_ids', self.get_container_vlans(container_name)) + + def build_inventory_network(self, container_name): + """Add the network interfaces of the container to the inventory + + Logic: + - if the container have no interface -> 'ansible_connection: local' + - get preferred_interface & prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: ' + - first Interface from: network_interfaces prefered_container_network_family -> 'ansible_connection: ssh' & 'ansible_host: ' + + Args: + str(container_name): name of container + Kwargs: + None + Raises: + None + Returns: + None""" + + def interface_selection(container_name): + """Select container Interface for inventory + + Logic: + - get preferred_interface & prefered_container_network_family -> str(IP) + - first Interface from: network_interfaces prefered_container_network_family -> str(IP) + + Args: + str(container_name): name of container + Kwargs: + None + Raises: + None + Returns: + dict(interface_name: ip)""" + prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)) # name or None + prefered_container_network_family = self.prefered_container_network_family + + ip_address = '' + if prefered_interface: + interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(container_name, prefered_interface)) + for config in interface: + if config['family'] == prefered_container_network_family: + ip_address = config['address'] + break + else: + interface = self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)) + for config in interface: + if config['family'] == prefered_container_network_family: + ip_address = config['address'] + break + return ip_address + + if 
self._get_data_entry('inventory/{0}/network_interfaces'.format(container_name)): # container have network interfaces + if self._get_data_entry('inventory/{0}/preferred_interface'.format(container_name)): # container have a preferred interface + self.inventory.set_variable(container_name, 'ansible_connection', 'ssh') + self.inventory.set_variable(container_name, 'ansible_host', interface_selection(container_name)) + else: + self.inventory.set_variable(container_name, 'ansible_connection', 'local') + + def build_inventory_hosts(self): + """Build host-part dynamic inventory + + Build the host-part of the dynamic inventory. + Add Hosts and host_vars to the inventory. + + Args: + None + Kwargs: + None + Raises: + None + Returns: + None""" + for container_name in self.data['inventory']: + # Only consider containers that match the "state" filter, if self.state is not None + if self.filter: + if self.filter.lower() != self._get_data_entry('inventory/{0}/state'.format(container_name)).lower(): + continue + # add container + self.inventory.add_host(container_name) + # add network informations + self.build_inventory_network(container_name) + # add os + self.inventory.set_variable(container_name, 'ansible_lxd_os', self._get_data_entry('inventory/{0}/os'.format(container_name)).lower()) + # add release + self.inventory.set_variable(container_name, 'ansible_lxd_release', self._get_data_entry('inventory/{0}/release'.format(container_name)).lower()) + # add profile + self.inventory.set_variable(container_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(container_name))) + # add state + self.inventory.set_variable(container_name, 'ansible_lxd_state', self._get_data_entry('inventory/{0}/state'.format(container_name)).lower()) + # add location information + if self._get_data_entry('inventory/{0}/location'.format(container_name)) != "none": # wrong type by lxd 'none' != 'None' + self.inventory.set_variable(container_name, 'ansible_lxd_location', 
self._get_data_entry('inventory/{0}/location'.format(container_name))) + # add VLAN_ID information + if self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name)): + self.inventory.set_variable(container_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(container_name))) + + def build_inventory_groups_location(self, group_name): + """create group by attribute: location + + Args: + str(group_name): Group name + Kwargs: + None + Raises: + None + Returns: + None""" + # maybe we just want to expand one group + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + + for container_name in self.inventory.hosts: + if 'ansible_lxd_location' in self.inventory.get_host(container_name).get_vars(): + self.inventory.add_child(group_name, container_name) + + def build_inventory_groups_pattern(self, group_name): + """create group by name pattern + + Args: + str(group_name): Group name + Kwargs: + None + Raises: + None + Returns: + None""" + # maybe we just want to expand one group + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + + regex_pattern = self.groupby[group_name].get('attribute') + + for container_name in self.inventory.hosts: + result = re.search(regex_pattern, container_name) + if result: + self.inventory.add_child(group_name, container_name) + + def build_inventory_groups_network_range(self, group_name): + """check if IP is in network-class + + Args: + str(group_name): Group name + Kwargs: + None + Raises: + None + Returns: + None""" + # maybe we just want to expand one group + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + + try: + network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute'))) + except ValueError as err: + raise AnsibleParserError( + 'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err))) + + for container_name in 
self.inventory.hosts: + if self.data['inventory'][container_name].get('network_interfaces') is not None: + for interface in self.data['inventory'][container_name].get('network_interfaces'): + for interface_family in self.data['inventory'][container_name].get('network_interfaces')[interface]: + try: + address = ipaddress.ip_address(to_text(interface_family['address'])) + if address.version == network.version and address in network: + self.inventory.add_child(group_name, container_name) + except ValueError: + # Ignore invalid IP addresses returned by lxd + pass + + def build_inventory_groups_os(self, group_name): + """create group by attribute: os + + Args: + str(group_name): Group name + Kwargs: + Noneself.data['inventory'][container_name][interface] + Raises: + None + Returns: + None""" + # maybe we just want to expand one group + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + + gen_containers = [ + container_name for container_name in self.inventory.hosts + if 'ansible_lxd_os' in self.inventory.get_host(container_name).get_vars()] + for container_name in gen_containers: + if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_os'): + self.inventory.add_child(group_name, container_name) + + def build_inventory_groups_release(self, group_name): + """create group by attribute: release + + Args: + str(group_name): Group name + Kwargs: + None + Raises: + None + Returns: + None""" + # maybe we just want to expand one group + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + + gen_containers = [ + container_name for container_name in self.inventory.hosts + if 'ansible_lxd_release' in self.inventory.get_host(container_name).get_vars()] + for container_name in gen_containers: + if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(container_name).get_vars().get('ansible_lxd_release'): + 
self.inventory.add_child(group_name, container_name) + + def build_inventory_groups_profile(self, group_name): + """create group by attribute: profile + + Args: + str(group_name): Group name + Kwargs: + None + Raises: + None + Returns: + None""" + # maybe we just want to expand one group + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + + gen_containers = [ + container_name for container_name in self.inventory.hosts.keys() + if 'ansible_lxd_profile' in self.inventory.get_host(container_name).get_vars().keys()] + for container_name in gen_containers: + if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_profile'): + self.inventory.add_child(group_name, container_name) + + def build_inventory_groups_vlanid(self, group_name): + """create group by attribute: vlanid + + Args: + str(group_name): Group name + Kwargs: + None + Raises: + None + Returns: + None""" + # maybe we just want to expand one group + if group_name not in self.inventory.groups: + self.inventory.add_group(group_name) + + gen_containers = [ + container_name for container_name in self.inventory.hosts.keys() + if 'ansible_lxd_vlan_ids' in self.inventory.get_host(container_name).get_vars().keys()] + for container_name in gen_containers: + if self.groupby[group_name].get('attribute') in self.inventory.get_host(container_name).get_vars().get('ansible_lxd_vlan_ids').values(): + self.inventory.add_child(group_name, container_name) + + def build_inventory_groups(self): + """Build group-part dynamic inventory + + Build the group-part of the dynamic inventory. + Add groups to the inventory. 
+ + Args: + None + Kwargs: + None + Raises: + None + Returns: + None""" + + def group_type(group_name): + """create groups defined by lxd.yml or defaultvalues + + create groups defined by lxd.yml or defaultvalues + supportetd: + * 'location' + * 'pattern' + * 'network_range' + * 'os' + * 'release' + * 'profile' + * 'vlanid' + + Args: + str(group_name): Group name + Kwargs: + None + Raises: + None + Returns: + None""" + + # Due to the compatibility with python 2 no use of map + if self.groupby[group_name].get('type') == 'location': + self.build_inventory_groups_location(group_name) + elif self.groupby[group_name].get('type') == 'pattern': + self.build_inventory_groups_pattern(group_name) + elif self.groupby[group_name].get('type') == 'network_range': + self.build_inventory_groups_network_range(group_name) + elif self.groupby[group_name].get('type') == 'os': + self.build_inventory_groups_os(group_name) + elif self.groupby[group_name].get('type') == 'release': + self.build_inventory_groups_release(group_name) + elif self.groupby[group_name].get('type') == 'profile': + self.build_inventory_groups_profile(group_name) + elif self.groupby[group_name].get('type') == 'vlanid': + self.build_inventory_groups_vlanid(group_name) + else: + raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name))) + + if self.groupby: + for group_name in self.groupby: + if not group_name.isalnum(): + raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name))) + group_type(group_name) + + def build_inventory(self): + """Build dynamic inventory + + Build the dynamic inventory. 
+ + Args: + None + Kwargs: + None + Raises: + None + Returns: + None""" + + self.build_inventory_hosts() + self.build_inventory_groups() + + def _populate(self): + """Return the hosts and groups + + Returns the processed container configurations from the lxd import + + Args: + None + Kwargs: + None + Raises: + None + Returns: + None""" + + if len(self.data) == 0: # If no data is injected by unittests open socket + self.socket = self._connect_to_socket() + self.get_container_data(self._get_containers()) + self.get_network_data(self._get_networks()) + + self.extract_information_from_container_configs() + + # self.display.vvv(self.save_json_data([os.path.abspath(__file__)])) + + self.build_inventory() + + def parse(self, inventory, loader, path, cache): + """Return dynamic inventory from source + + Returns the processed inventory from the lxd import + + Args: + str(inventory): inventory object with existing data and + the methods to add hosts/groups/variables + to inventory + str(loader): Ansible's DataLoader + str(path): path to the config + bool(cache): use or avoid caches + Kwargs: + None + Raises: + AnsibleParserError + Returns: + None""" + + super(InventoryModule, self).parse(inventory, loader, path, cache=False) + # Read the inventory YAML file + self._read_config_data(path) + try: + self.client_key = self.get_option('client_key') + self.client_cert = self.get_option('client_cert') + self.debug = self.DEBUG + self.data = {} # store for inventory-data + self.groupby = self.get_option('groupby') + self.plugin = self.get_option('plugin') + self.prefered_container_network_family = self.get_option('prefered_container_network_family') + self.prefered_container_network_interface = self.get_option('prefered_container_network_interface') + if self.get_option('state').lower() == 'none': # none in config is str() + self.filter = None + else: + self.filter = self.get_option('state').lower() + self.trust_password = self.get_option('trust_password') + self.url = 
self.get_option('url') + except Exception as err: + raise AnsibleParserError( + 'All correct options required: {0}'.format(to_native(err))) + # Call our internal helper to populate the dynamic inventory + self._populate() diff --git a/tests/unit/plugins/inventory/fixtures/lxd_inventory.atd b/tests/unit/plugins/inventory/fixtures/lxd_inventory.atd new file mode 100644 index 0000000000..b308243228 --- /dev/null +++ b/tests/unit/plugins/inventory/fixtures/lxd_inventory.atd @@ -0,0 +1,174 @@ +{ + "containers":{ + "vlantest":{ + "containers":{ + "metadata":{ + "config":{ + "image.os":"ubuntu", + "image.release":"focal", + "image.version":"20.04", + "volatile.last_state.power":"RUNNING" + }, + "devices":{ + "eth0":{ + "name":"eth0", + "network":"my-macvlan", + "type":"nic" + } + }, + "profiles":[ + "default" + ], + "expanded_devices":{ + "eth0":{ + "name":"eth0", + "network":"my-macvlan", + "type":"nic" + } + }, + "name":"vlantest", + "status":"Running", + "location":"Berlin" + } + }, + "state":{ + "metadata":{ + "status":"Running", + "network":{ + "eth0":{ + "addresses":[ + { + "family":"inet", + "address":"10.98.143.199", + "netmask":"24", + "scope":"global" + }, + { + "family":"inet6", + "address":"fd42:bd00:7b11:2167:216:3eff:fe78:2ef3", + "netmask":"64", + "scope":"global" + }, + { + "family":"inet6", + "address":"fe80::216:3eff:fed3:7af3", + "netmask":"64", + "scope":"link" + } + ] + }, + "lo":{ + "addresses":[ + { + "family":"inet", + "address":"127.0.0.1", + "netmask":"8", + "scope":"local" + }, + { + "family":"inet6", + "address":"::1", + "netmask":"128", + "scope":"local" + } + ] + } + } + } + } + } + }, + + "networks":{ + "my-macvlan":{ + "state":{ + "metadata":{ + "addresses":[ + { + "family":"inet", + "address":"192.168.178.199", + "netmask":"24", + "scope":"global" + }, + { + "family":"inet6", + "address":"fd42:bd00:7b11:2167:216:3eff:fe78:2ef3", + "netmask":"64", + "scope":"global" + }, + { + "family":"inet6", + "address":"fe80::216:3eff:fed3:7af3", + 
"netmask":"64", + "scope":"link" + } + ], + "vlan":{ + "lower_device":"eno1", + "vid":666 + } + } + } + }, + "lo":{ + "state":{ + "metadata":{ + "addresses":[ + { + "family":"inet", + "address":"127.0.0.1", + "netmask":"8", + "scope":"local" + }, + { + "family":"inet6", + "address":"::1", + "netmask":"128", + "scope":"local" + } + ], + "vlan":null + } + } + }, + "eno1":{ + "state":{ + "metadata":{ + "addresses":[ + { + "family":"inet", + "address":"192.168.178.126", + "netmask":"24", + "scope":"global" + }, + { + "family":"inet6", + "address":"fe80::3c0b:7da9:3cc7:9e40", + "netmask":"64", + "scope":"link" + } + ], + "vlan":null + } + } + }, + "eno1.666":{ + "state":{ + "metadata":{ + "addresses":[ + { + "family":"inet6", + "address":"fe80::de4a:3eff:fe8d:f356", + "netmask":"64", + "scope":"link" + } + ], + "vlan":{ + "lower_device":"eno1", + "vid":666 + } + } + } + } + } +} diff --git a/tests/unit/plugins/inventory/test_lxd.py b/tests/unit/plugins/inventory/test_lxd.py new file mode 100644 index 0000000000..8a98af6e71 --- /dev/null +++ b/tests/unit/plugins/inventory/test_lxd.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Frank Dornheim +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +import os + +from ansible.errors import AnsibleError +from ansible.inventory.data import InventoryData +from ansible_collections.community.general.plugins.inventory.lxd import InventoryModule + + +HOST_COMPARATIVE_DATA = { + 'ansible_connection': 'ssh', 'ansible_host': '10.98.143.199', 'ansible_lxd_os': 'ubuntu', 'ansible_lxd_release': 'focal', + 'ansible_lxd_profile': ['default'], 'ansible_lxd_state': 'running', 'ansible_lxd_location': 'Berlin', + 'ansible_lxd_vlan_ids': {'my-macvlan': 666}, 'inventory_hostname': 'vlantest', 'inventory_hostname_short': 'vlantest'} +GROUP_COMPARATIVE_DATA = { + 'all': [], 
'ungrouped': [], 'testpattern': ['vlantest'], 'vlan666': ['vlantest'], 'locationBerlin': ['vlantest'], + 'osUbuntu': ['vlantest'], 'releaseFocal': ['vlantest'], 'releaseBionic': [], 'profileDefault': ['vlantest'], + 'profileX11': [], 'netRangeIPv4': ['vlantest'], 'netRangeIPv6': ['vlantest']} +GROUP_Config = { + 'testpattern': {'type': 'pattern', 'attribute': 'test'}, + 'vlan666': {'type': 'vlanid', 'attribute': 666}, + 'locationBerlin': {'type': 'location', 'attribute': 'Berlin'}, + 'osUbuntu': {'type': 'os', 'attribute': 'ubuntu'}, + 'releaseFocal': {'type': 'release', 'attribute': 'focal'}, + 'releaseBionic': {'type': 'release', 'attribute': 'bionic'}, + 'profileDefault': {'type': 'profile', 'attribute': 'default'}, + 'profileX11': {'type': 'profile', 'attribute': 'x11'}, + 'netRangeIPv4': {'type': 'network_range', 'attribute': '10.98.143.0/24'}, + 'netRangeIPv6': {'type': 'network_range', 'attribute': 'fd42:bd00:7b11:2167:216:3eff::/96'}} + + +@pytest.fixture +def inventory(): + inv = InventoryModule() + inv.inventory = InventoryData() + + # Test Values + inv.data = inv.load_json_data('tests/unit/plugins/inventory/fixtures/lxd_inventory.atd') # Load Test Data + inv.groupby = GROUP_Config + inv.prefered_container_network_interface = 'eth' + inv.prefered_container_network_family = 'inet' + inv.filter = 'running' + inv.dump_data = False + + return inv + + +def test_verify_file_bad_config(inventory): + assert inventory.verify_file('foobar.lxd.yml') is False + + +def test_build_inventory_hosts(inventory): + """Load example data and start the inventoryto test the host generation. 
+ + After the inventory plugin has run with the test data, the result of the host is checked.""" + inventory._populate() + generated_data = inventory.inventory.get_host('vlantest').get_vars() + + eq = True + for key, value in HOST_COMPARATIVE_DATA.items(): + if generated_data[key] != value: + eq = False + assert eq + + +def test_build_inventory_groups(inventory): + """Load example data and start the inventory to test the group generation. + + After the inventory plugin has run with the test data, the result of the host is checked.""" + inventory._populate() + generated_data = inventory.inventory.get_groups_dict() + + eq = True + for key, value in GROUP_COMPARATIVE_DATA.items(): + if generated_data[key] != value: + eq = False + assert eq + + +def test_build_inventory_groups_with_no_groupselection(inventory): + """Load example data and start the inventory to test the group generation with groupby is none. + + After the inventory plugin has run with the test data, the result of the host is checked.""" + inventory.groupby = None + inventory._populate() + generated_data = inventory.inventory.get_groups_dict() + group_comparative_data = {'all': [], 'ungrouped': []} + + eq = True + print("data: {0}".format(generated_data)) + for key, value in group_comparative_data.items(): + if generated_data[key] != value: + eq = False + assert eq From 9931cdc1e7db4fbb4b79ac6f893872ddd3ad8553 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 26 Apr 2021 18:33:30 +0200 Subject: [PATCH 0241/3093] stable-3 has been created, prepare for next 3.x.y release. 
--- changelogs/changelog.yaml | 2 +- changelogs/fragments/1475-xfconf-facts.yml | 4 -- ...lesystem-fix-1457-resizefs-idempotency.yml | 5 -- ...n_projects_not_initialized_has_changed.yml | 3 - .../1661-gitlab-deploy-key-update-pubkey.yml | 5 -- ...d-id-props-to-redfish-inventory-output.yml | 2 - .../fragments/1695-parted-updatedregex.yaml | 4 -- changelogs/fragments/1702_homebrew_tap.yml | 2 - .../1703-sensu_silence-fix_json_parsing.yml | 2 - .../1714-gitlab_runner-required-reg-token.yml | 2 - .../1715-proxmox_kvm-add-vmid-to-returns.yml | 2 - ...721-fix-nomad_job_info-no-jobs-failure.yml | 2 - changelogs/fragments/1722_timezone.yml | 2 - ...adog_monitor-add-missing-monitor-types.yml | 2 - ...ixes-for-updating-existing-gitlab-user.yml | 2 - changelogs/fragments/1735-imc-sessions.yml | 2 - .../fragments/1740-aerospike_migration.yml | 2 - .../fragments/1741-use-path-argspec.yml | 4 -- ...ase-insensitive-hostname-fqdn-matching.yml | 2 - ...document-fstypes-supported-by-resizefs.yml | 3 - .../1761-redfish-tidy-up-validation.yml | 2 - changelogs/fragments/1765-proxmox-params.yml | 2 - .../fragments/1766-zfs-fixed-sanity.yml | 2 - .../1771-centurylink-validation-elements.yml | 2 - .../fragments/1776-git_config-tilde_value.yml | 2 - .../1783-proxmox-kvm-fix-args-500-error.yaml | 3 - ...ease-nios_host_record-dns-bypass-check.yml | 3 - .../fragments/1795-list-elements-batch1.yml | 27 -------- .../1813-lxd_profile-merge-profiles.yml | 2 - ...4-dnsimple-add-support-for-caa-records.yml | 2 - .../1819-tidyup-pylint-blacklistnames.yml | 17 ----- .../1830-valmod_docmissingtype_batch1.yml | 7 -- .../1833-zfs-creation-only-properties.yaml | 2 - .../1838-runit-deprecate-param-dist.yml | 2 - .../fragments/1847-proxmox-kvm-fix-status.yml | 2 - ...er-fix-state-is-clean-without-release.yaml | 2 - changelogs/fragments/1861-python3-keys.yml | 22 ------- .../1867-modhelper-cmdmixin-dict-params.yml | 2 - .../fragments/1871-infoblox-inventory.yml | 2 - 
.../fragments/1880-fix_cobbler_system_ssl.yml | 2 - ...nmcli-ensure-slave-type-for-bond-slave.yml | 2 - .../1885-sanity-check-fixes-batch3.yml | 18 ----- ...894-feat-nmcli-add-method4-and-method6.yml | 2 - .../1895-proxmox-kvm-fix-issue-1875.yml | 3 - ...m_versionlock-lock_unlock_concurrently.yml | 3 - .../1914-add-sanitization-to-url.yml | 3 - .../1916-add-version-sort-filter.yml | 3 - .../1927-removed-parameter-invalid.yml | 12 ---- .../fragments/1928-bigpanda-message.yml | 2 - changelogs/fragments/1929-grove-message.yml | 4 -- .../fragments/1949-proxmox-inventory-tags.yml | 5 -- changelogs/fragments/1970-valmod-batch7.yml | 18 ----- .../1972-ini_file-empty-str-value.yml | 2 - .../1977-jenkinsjob-validate-certs.yml | 2 - .../fragments/1978-jira-transition-logic.yml | 4 -- ...proxmox-inventory-fix-template-in-pool.yml | 3 - .../fragments/1993-haproxy-fix-draining.yml | 3 - .../fragments/1999-proxmox-fix-issue-1955.yml | 3 - .../2000-proxmox_kvm-tag-support.yml | 3 - changelogs/fragments/2001-no_log-false.yml | 2 - changelogs/fragments/2006-valmod-batch8.yml | 4 -- ...te-java-cert-replace-cert-when-changed.yml | 7 -- .../2013-proxmox-purge-parameter.yml | 3 - .../2014-allow-root-for-kibana-plugin.yaml | 2 - .../2020-remove-unused-param-in-rax.yml | 2 - .../fragments/2024-module-helper-fixes.yml | 4 -- ...ish-session-create-delete-authenticate.yml | 2 - .../2031-ipa_sudorule_add_runasextusers.yml | 3 - changelogs/fragments/2032-one_image-pyone.yml | 2 - .../fragments/2036-scaleway-inventory.yml | 3 - .../fragments/2037-add-from-csv-filter.yml | 7 -- ...index-error-in-redfish-set-manager-nic.yml | 2 - changelogs/fragments/2057-nios-devel.yml | 2 - .../fragments/2061-archive-refactor1.yml | 2 - .../fragments/2065-snmp-facts-timeout.yml | 2 - .../2072-stacki-host-params-fallback.yml | 2 - ...t-PATH-env-variable-in-zypper-modules.yaml | 2 - .../fragments/2110-vdo-add_force_option.yaml | 3 - .../2116-add-fields-to-ipa-config-module.yml | 2 - 
.../fragments/2125-git-config-scope-file.yml | 2 - .../2135-vmadm-resolvers-type-fix.yml | 2 - .../2139-dimensiondata_network-str-format.yml | 2 - .../2142-apache2_mod_proxy-cleanup.yml | 2 - ...143-kibana_plugin-fixed-function-calls.yml | 2 - .../fragments/2144-atomic_get_bin_path.yml | 4 -- .../2146-npm-add_no_bin_links_option.yaml | 3 - ...148-proxmox-inventory-agent-interfaces.yml | 3 - .../fragments/2157-unreachable-code.yml | 4 -- ...pa-user-sshpubkey-multi-word-comments.yaml | 2 - changelogs/fragments/2160-list-literals.yml | 11 ---- .../fragments/2161-pkgutil-list-extend.yml | 2 - .../fragments/2162-modhelper-variables.yml | 2 - .../fragments/2162-proxmox-constructable.yml | 3 - ...ystore_1667_improve_temp_files_storage.yml | 5 -- ...2174-ipa-user-userauthtype-multiselect.yml | 2 - ...re_1668_dont_expose_secrets_on_cmdline.yml | 4 -- ...3-java_keystore_improve_error_handling.yml | 6 -- .../2185-xfconf-absent-check-mode.yml | 2 - .../2188-xfconf-modhelper-variables.yml | 3 - changelogs/fragments/2192-add-jira-attach.yml | 2 - .../2203-modhelper-cause-changes-deco.yml | 2 - .../2204-github_repo-fix-baseurl_port.yml | 2 - changelogs/fragments/2208-jira-revamp.yml | 2 - changelogs/fragments/2218-cpanm-revamp.yml | 5 -- .../fragments/2220_nmcli_wifi_support.yaml | 3 - .../2223_nmcli_no_IP_config_on_slave.yaml | 3 - .../2224_nmcli_allow_MAC_overwrite.yaml | 3 - ..._keystore-1669-ssl-input-files-by-path.yml | 6 -- changelogs/fragments/2236-jira-isinstance.yml | 2 - changelogs/fragments/2244-hashids-filters.yml | 6 -- ...2245-proxmox_fix_agent_string_handling.yml | 3 - changelogs/fragments/2246-terraform.yaml | 4 -- ...9-linode_v4-support-private_ip-option.yaml | 2 - ...eycloak-modules-to-take-token-as-param.yml | 5 -- .../fragments/2257-ldap_entry-params.yml | 2 - ...2259-proxmox-multi-nic-and-unsupported.yml | 5 -- .../2262-java_keystore-passphrase.yml | 8 --- ...vol_size_addition-subtraction_support.yaml | 5 -- .../fragments/2268-validation-univetion.yml | 4 -- 
.../2280-pids-new-pattern-option.yml | 3 - .../fragments/2282-nmap-fix-cache-support.yml | 2 - ...-influxdb_retention_policy-idempotence.yml | 4 -- ...-terraform-add-plugin_paths-parameter.yaml | 3 - .../2329-hiera-lookup-plugin-return-type.yaml | 2 - .../fragments/2340-jenkins_plugin-py2.yml | 2 - .../fragments/2349-jira-bugfix-b64decode.yml | 2 - ...620-consul_io-env-variables-conf-based.yml | 5 -- .../fragments/719-manageiq-resource_id.yml | 2 - .../fragments/720-cloudforms_inventory.yml | 2 - ...-invocate-feature-when-variable-is-set.yml | 2 - .../948-dellemc-migration-removal.yml | 13 ---- .../fragments/CVE-2021-20191_no_log.yml | 4 -- changelogs/fragments/allow_funcd_to_load.yml | 2 - changelogs/fragments/dict-filter.yml | 3 - .../fragments/meta-runtime-deprecations.yml | 2 - changelogs/fragments/no_log-fixes.yml | 25 ------- .../fragments/path_join-shim-filter.yml | 3 - .../fragments/remove-deprecated-features.yml | 16 ----- .../fragments/remove-deprecated-modules.yml | 66 ------------------- changelogs/fragments/selective-core-2.11.yml | 2 - galaxy.yml | 2 +- 141 files changed, 2 insertions(+), 619 deletions(-) delete mode 100644 changelogs/fragments/1475-xfconf-facts.yml delete mode 100644 changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml delete mode 100644 changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml delete mode 100644 changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml delete mode 100644 changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml delete mode 100644 changelogs/fragments/1695-parted-updatedregex.yaml delete mode 100644 changelogs/fragments/1702_homebrew_tap.yml delete mode 100644 changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml delete mode 100644 changelogs/fragments/1714-gitlab_runner-required-reg-token.yml delete mode 100644 changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml delete mode 100644 
changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml delete mode 100644 changelogs/fragments/1722_timezone.yml delete mode 100644 changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml delete mode 100644 changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml delete mode 100644 changelogs/fragments/1735-imc-sessions.yml delete mode 100644 changelogs/fragments/1740-aerospike_migration.yml delete mode 100644 changelogs/fragments/1741-use-path-argspec.yml delete mode 100644 changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml delete mode 100644 changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml delete mode 100644 changelogs/fragments/1761-redfish-tidy-up-validation.yml delete mode 100644 changelogs/fragments/1765-proxmox-params.yml delete mode 100644 changelogs/fragments/1766-zfs-fixed-sanity.yml delete mode 100644 changelogs/fragments/1771-centurylink-validation-elements.yml delete mode 100644 changelogs/fragments/1776-git_config-tilde_value.yml delete mode 100644 changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml delete mode 100644 changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml delete mode 100644 changelogs/fragments/1795-list-elements-batch1.yml delete mode 100644 changelogs/fragments/1813-lxd_profile-merge-profiles.yml delete mode 100644 changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml delete mode 100644 changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml delete mode 100644 changelogs/fragments/1830-valmod_docmissingtype_batch1.yml delete mode 100644 changelogs/fragments/1833-zfs-creation-only-properties.yaml delete mode 100644 changelogs/fragments/1838-runit-deprecate-param-dist.yml delete mode 100644 changelogs/fragments/1847-proxmox-kvm-fix-status.yml delete mode 100644 changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml delete mode 100644 changelogs/fragments/1861-python3-keys.yml delete 
mode 100644 changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml delete mode 100644 changelogs/fragments/1871-infoblox-inventory.yml delete mode 100644 changelogs/fragments/1880-fix_cobbler_system_ssl.yml delete mode 100644 changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml delete mode 100644 changelogs/fragments/1885-sanity-check-fixes-batch3.yml delete mode 100644 changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml delete mode 100644 changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml delete mode 100644 changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml delete mode 100644 changelogs/fragments/1914-add-sanitization-to-url.yml delete mode 100644 changelogs/fragments/1916-add-version-sort-filter.yml delete mode 100644 changelogs/fragments/1927-removed-parameter-invalid.yml delete mode 100644 changelogs/fragments/1928-bigpanda-message.yml delete mode 100644 changelogs/fragments/1929-grove-message.yml delete mode 100644 changelogs/fragments/1949-proxmox-inventory-tags.yml delete mode 100644 changelogs/fragments/1970-valmod-batch7.yml delete mode 100644 changelogs/fragments/1972-ini_file-empty-str-value.yml delete mode 100644 changelogs/fragments/1977-jenkinsjob-validate-certs.yml delete mode 100644 changelogs/fragments/1978-jira-transition-logic.yml delete mode 100644 changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml delete mode 100644 changelogs/fragments/1993-haproxy-fix-draining.yml delete mode 100644 changelogs/fragments/1999-proxmox-fix-issue-1955.yml delete mode 100644 changelogs/fragments/2000-proxmox_kvm-tag-support.yml delete mode 100644 changelogs/fragments/2001-no_log-false.yml delete mode 100644 changelogs/fragments/2006-valmod-batch8.yml delete mode 100644 changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml delete mode 100644 changelogs/fragments/2013-proxmox-purge-parameter.yml delete mode 100644 
changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml delete mode 100644 changelogs/fragments/2020-remove-unused-param-in-rax.yml delete mode 100644 changelogs/fragments/2024-module-helper-fixes.yml delete mode 100644 changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml delete mode 100644 changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml delete mode 100644 changelogs/fragments/2032-one_image-pyone.yml delete mode 100644 changelogs/fragments/2036-scaleway-inventory.yml delete mode 100644 changelogs/fragments/2037-add-from-csv-filter.yml delete mode 100644 changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml delete mode 100644 changelogs/fragments/2057-nios-devel.yml delete mode 100644 changelogs/fragments/2061-archive-refactor1.yml delete mode 100644 changelogs/fragments/2065-snmp-facts-timeout.yml delete mode 100644 changelogs/fragments/2072-stacki-host-params-fallback.yml delete mode 100644 changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml delete mode 100644 changelogs/fragments/2110-vdo-add_force_option.yaml delete mode 100644 changelogs/fragments/2116-add-fields-to-ipa-config-module.yml delete mode 100644 changelogs/fragments/2125-git-config-scope-file.yml delete mode 100644 changelogs/fragments/2135-vmadm-resolvers-type-fix.yml delete mode 100644 changelogs/fragments/2139-dimensiondata_network-str-format.yml delete mode 100644 changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml delete mode 100644 changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml delete mode 100644 changelogs/fragments/2144-atomic_get_bin_path.yml delete mode 100644 changelogs/fragments/2146-npm-add_no_bin_links_option.yaml delete mode 100644 changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml delete mode 100644 changelogs/fragments/2157-unreachable-code.yml delete mode 100644 changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml delete mode 100644 
changelogs/fragments/2160-list-literals.yml delete mode 100644 changelogs/fragments/2161-pkgutil-list-extend.yml delete mode 100644 changelogs/fragments/2162-modhelper-variables.yml delete mode 100644 changelogs/fragments/2162-proxmox-constructable.yml delete mode 100644 changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml delete mode 100644 changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml delete mode 100644 changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml delete mode 100644 changelogs/fragments/2183-java_keystore_improve_error_handling.yml delete mode 100644 changelogs/fragments/2185-xfconf-absent-check-mode.yml delete mode 100644 changelogs/fragments/2188-xfconf-modhelper-variables.yml delete mode 100644 changelogs/fragments/2192-add-jira-attach.yml delete mode 100644 changelogs/fragments/2203-modhelper-cause-changes-deco.yml delete mode 100644 changelogs/fragments/2204-github_repo-fix-baseurl_port.yml delete mode 100644 changelogs/fragments/2208-jira-revamp.yml delete mode 100644 changelogs/fragments/2218-cpanm-revamp.yml delete mode 100644 changelogs/fragments/2220_nmcli_wifi_support.yaml delete mode 100644 changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml delete mode 100644 changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml delete mode 100644 changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml delete mode 100644 changelogs/fragments/2236-jira-isinstance.yml delete mode 100644 changelogs/fragments/2244-hashids-filters.yml delete mode 100644 changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml delete mode 100644 changelogs/fragments/2246-terraform.yaml delete mode 100644 changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml delete mode 100644 changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml delete mode 100644 changelogs/fragments/2257-ldap_entry-params.yml delete mode 100644 
changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml delete mode 100644 changelogs/fragments/2262-java_keystore-passphrase.yml delete mode 100644 changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml delete mode 100644 changelogs/fragments/2268-validation-univetion.yml delete mode 100644 changelogs/fragments/2280-pids-new-pattern-option.yml delete mode 100644 changelogs/fragments/2282-nmap-fix-cache-support.yml delete mode 100644 changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml delete mode 100644 changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml delete mode 100644 changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml delete mode 100644 changelogs/fragments/2340-jenkins_plugin-py2.yml delete mode 100644 changelogs/fragments/2349-jira-bugfix-b64decode.yml delete mode 100644 changelogs/fragments/620-consul_io-env-variables-conf-based.yml delete mode 100644 changelogs/fragments/719-manageiq-resource_id.yml delete mode 100644 changelogs/fragments/720-cloudforms_inventory.yml delete mode 100644 changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml delete mode 100644 changelogs/fragments/948-dellemc-migration-removal.yml delete mode 100644 changelogs/fragments/CVE-2021-20191_no_log.yml delete mode 100644 changelogs/fragments/allow_funcd_to_load.yml delete mode 100644 changelogs/fragments/dict-filter.yml delete mode 100644 changelogs/fragments/meta-runtime-deprecations.yml delete mode 100644 changelogs/fragments/no_log-fixes.yml delete mode 100644 changelogs/fragments/path_join-shim-filter.yml delete mode 100644 changelogs/fragments/remove-deprecated-features.yml delete mode 100644 changelogs/fragments/remove-deprecated-modules.yml delete mode 100644 changelogs/fragments/selective-core-2.11.yml diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index e78468a3ca..114b6d6b29 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,2 +1,2 @@ 
-ancestor: 2.0.0 +ancestor: 3.0.0 releases: {} diff --git a/changelogs/fragments/1475-xfconf-facts.yml b/changelogs/fragments/1475-xfconf-facts.yml deleted file mode 100644 index cffc6f023e..0000000000 --- a/changelogs/fragments/1475-xfconf-facts.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - xfconf - added option ``disable_facts`` to disable facts and its associated deprecation warning (https://github.com/ansible-collections/community.general/issues/1475). -deprecated_features: - - xfconf - returning output as facts is deprecated, this will be removed in community.general 4.0.0. Please register the task output in a variable and use it instead. You can already switch to the new behavior now by using the new ``disable_facts`` option (https://github.com/ansible-collections/community.general/pull/1747). diff --git a/changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml b/changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml deleted file mode 100644 index a90444308e..0000000000 --- a/changelogs/fragments/1478-filesystem-fix-1457-resizefs-idempotency.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: - - filesystem - do not fail when ``resizefs=yes`` and ``fstype=xfs`` if there is nothing to do, even if - the filesystem is not mounted. This only covers systems supporting access to unmounted XFS filesystems. - Others will still fail (https://github.com/ansible-collections/community.general/issues/1457, https://github.com/ansible-collections/community.general/pull/1478). 
diff --git a/changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml b/changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml deleted file mode 100644 index ba75a86a62..0000000000 --- a/changelogs/fragments/1596-xfs_quota-feedback_on_projects_not_initialized_has_changed.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - xfs_quota - the feedback for initializing project quota using xfs_quota binary from ``xfsprogs`` has changed since the version it was written for (https://github.com/ansible-collections/community.general/pull/1596). diff --git a/changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml b/changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml deleted file mode 100644 index f6edfc6f53..0000000000 --- a/changelogs/fragments/1661-gitlab-deploy-key-update-pubkey.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -minor_changes: - - gitlab_deploy_key - when the given key title already exists but has a different public key, the public key will now be updated to given value (https://github.com/ansible-collections/community.general/pull/1661). -breaking_changes: - - gitlab_deploy_key - if for an already existing key title a different public key was given as parameter nothing happened, now this changed so that the public key is updated to the new value (https://github.com/ansible-collections/community.general/pull/1661). diff --git a/changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml b/changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml deleted file mode 100644 index 1cf8897018..0000000000 --- a/changelogs/fragments/1691-add-name-and-id-props-to-redfish-inventory-output.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_info module, redfish_utils module utils - add ``Name`` and ``Id`` properties to output of Redfish inventory commands (https://github.com/ansible-collections/community.general/issues/1650). 
diff --git a/changelogs/fragments/1695-parted-updatedregex.yaml b/changelogs/fragments/1695-parted-updatedregex.yaml deleted file mode 100644 index fb3a5a5eaa..0000000000 --- a/changelogs/fragments/1695-parted-updatedregex.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - parted - change the regex that decodes the partition size to better support different formats that parted uses. - Change the regex that validates parted's version string - (https://github.com/ansible-collections/community.general/pull/1695). diff --git a/changelogs/fragments/1702_homebrew_tap.yml b/changelogs/fragments/1702_homebrew_tap.yml deleted file mode 100644 index 7eabc45a9b..0000000000 --- a/changelogs/fragments/1702_homebrew_tap.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- homebrew_tap - add support to specify search path for ``brew`` executable (https://github.com/ansible-collections/community.general/issues/1702). diff --git a/changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml b/changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml deleted file mode 100644 index 18d39b5674..0000000000 --- a/changelogs/fragments/1703-sensu_silence-fix_json_parsing.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - sensu-silence module - fix json parsing of sensu API responses on Python 3.5 (https://github.com/ansible-collections/community.general/pull/1703). diff --git a/changelogs/fragments/1714-gitlab_runner-required-reg-token.yml b/changelogs/fragments/1714-gitlab_runner-required-reg-token.yml deleted file mode 100644 index ec73bf422c..0000000000 --- a/changelogs/fragments/1714-gitlab_runner-required-reg-token.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - gitlab_runner - parameter ``registration_token`` was required but is used only when ``state`` is ``present`` (https://github.com/ansible-collections/community.general/issues/1714). 
diff --git a/changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml b/changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml deleted file mode 100644 index b4561f5145..0000000000 --- a/changelogs/fragments/1715-proxmox_kvm-add-vmid-to-returns.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - proxmox_kvm module - actually implemented ``vmid`` and ``status`` return values. Updated documentation to reflect current situation (https://github.com/ansible-collections/community.general/issues/1410, https://github.com/ansible-collections/community.general/pull/1715). diff --git a/changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml b/changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml deleted file mode 100644 index c3c3d804e3..0000000000 --- a/changelogs/fragments/1721-fix-nomad_job_info-no-jobs-failure.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nomad_job_info - fix module failure when nomad client returns no jobs (https://github.com/ansible-collections/community.general/pull/1721). diff --git a/changelogs/fragments/1722_timezone.yml b/changelogs/fragments/1722_timezone.yml deleted file mode 100644 index cae337effd..0000000000 --- a/changelogs/fragments/1722_timezone.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- timezone - add Gentoo and Alpine Linux support (https://github.com/ansible-collections/community.general/issues/781). diff --git a/changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml b/changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml deleted file mode 100644 index 8b01717897..0000000000 --- a/changelogs/fragments/1723-datadog_monitor-add-missing-monitor-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - datadog_monitor - add missing monitor types ``query alert``, ``trace-analytics alert``, ``rum alert`` (https://github.com/ansible-collections/community.general/pull/1723). 
diff --git a/changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml b/changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml deleted file mode 100644 index eab67e0f47..0000000000 --- a/changelogs/fragments/1724-various-fixes-for-updating-existing-gitlab-user.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - gitlab_user - make updates to the ``isadmin``, ``password`` and ``confirm`` options of an already existing GitLab user work (https://github.com/ansible-collections/community.general/pull/1724). diff --git a/changelogs/fragments/1735-imc-sessions.yml b/changelogs/fragments/1735-imc-sessions.yml deleted file mode 100644 index 057393d06c..0000000000 --- a/changelogs/fragments/1735-imc-sessions.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - imc_rest - explicitly logging out instead of registering the call in ```atexit``` (https://github.com/ansible-collections/community.general/issues/1735). diff --git a/changelogs/fragments/1740-aerospike_migration.yml b/changelogs/fragments/1740-aerospike_migration.yml deleted file mode 100644 index e66963aae7..0000000000 --- a/changelogs/fragments/1740-aerospike_migration.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "aerospike_migration - fix typo that caused ``migrate_tx_key`` instead of ``migrate_rx_key`` being used (https://github.com/ansible-collections/community.general/pull/1739)." diff --git a/changelogs/fragments/1741-use-path-argspec.yml b/changelogs/fragments/1741-use-path-argspec.yml deleted file mode 100644 index ed05fee16a..0000000000 --- a/changelogs/fragments/1741-use-path-argspec.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: -- "oci_vcn - ``api_user_key_file`` is now of type ``path`` and no longer ``str``. A side effect is that certain expansions are made, like ``~`` is replaced by the user's home directory, and environment variables like ``$HOME`` or ``$TEMP`` are evaluated (https://github.com/ansible-collections/community.general/pull/1741)." 
-- "lxd_container - ``client_key`` and ``client_cert`` are now of type ``path`` and no longer ``str``. A side effect is that certain expansions are made, like ``~`` is replaced by the user's home directory, and environment variables like ``$HOME`` or ``$TEMP`` are evaluated (https://github.com/ansible-collections/community.general/pull/1741)." -- "lxd_profile - ``client_key`` and ``client_cert`` are now of type ``path`` and no longer ``str``. A side effect is that certain expansions are made, like ``~`` is replaced by the user's home directory, and environment variables like ``$HOME`` or ``$TEMP`` are evaluated (https://github.com/ansible-collections/community.general/pull/1741)." diff --git a/changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml b/changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml deleted file mode 100644 index 0e9c086b96..0000000000 --- a/changelogs/fragments/1744-case-insensitive-hostname-fqdn-matching.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_config - case insensitive search for situations where the hostname/FQDN case on iLO doesn't match variable's case (https://github.com/ansible-collections/community.general/pull/1744). diff --git a/changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml b/changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml deleted file mode 100644 index 9b1329412c..0000000000 --- a/changelogs/fragments/1753-document-fstypes-supported-by-resizefs.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - filesystem - remove ``swap`` from list of FS supported by ``resizefs=yes`` (https://github.com/ansible-collections/community.general/issues/790). 
diff --git a/changelogs/fragments/1761-redfish-tidy-up-validation.yml b/changelogs/fragments/1761-redfish-tidy-up-validation.yml deleted file mode 100644 index 751c7ca30d..0000000000 --- a/changelogs/fragments/1761-redfish-tidy-up-validation.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish modules - explicitly setting lists' elements to ``str`` (https://github.com/ansible-collections/community.general/pull/1761). diff --git a/changelogs/fragments/1765-proxmox-params.yml b/changelogs/fragments/1765-proxmox-params.yml deleted file mode 100644 index fd6d63c788..0000000000 --- a/changelogs/fragments/1765-proxmox-params.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox* modules - refactored some parameter validation code into use of ``env_fallback``, ``required_if``, ``required_together``, ``required_one_of`` (https://github.com/ansible-collections/community.general/pull/1765). diff --git a/changelogs/fragments/1766-zfs-fixed-sanity.yml b/changelogs/fragments/1766-zfs-fixed-sanity.yml deleted file mode 100644 index ac31084e2c..0000000000 --- a/changelogs/fragments/1766-zfs-fixed-sanity.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - zfs_delegate_admin - the elements of ``users``, ``groups`` and ``permissions`` are now enforced to be strings (https://github.com/ansible-collections/community.general/pull/1766). diff --git a/changelogs/fragments/1771-centurylink-validation-elements.yml b/changelogs/fragments/1771-centurylink-validation-elements.yml deleted file mode 100644 index 4c7a9bbbe4..0000000000 --- a/changelogs/fragments/1771-centurylink-validation-elements.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - clc_* modules - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1771). 
diff --git a/changelogs/fragments/1776-git_config-tilde_value.yml b/changelogs/fragments/1776-git_config-tilde_value.yml deleted file mode 100644 index c98912a24d..0000000000 --- a/changelogs/fragments/1776-git_config-tilde_value.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - git_config - prevent ``run_command`` from expanding values (https://github.com/ansible-collections/community.general/issues/1776). diff --git a/changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml b/changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml deleted file mode 100644 index 5e46b066a8..0000000000 --- a/changelogs/fragments/1783-proxmox-kvm-fix-args-500-error.yaml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - proxmox_kvm - do not add ``args`` if ``proxmox_default_behavior`` is set to no_defaults (https://github.com/ansible-collections/community.general/issues/1641). - - proxmox_kvm - stop implicitly adding ``force`` equal to ``false``. Proxmox API requires not implemented parameters otherwise, and assumes ``force`` to be ``false`` by default anyways (https://github.com/ansible-collections/community.general/pull/1783). diff --git a/changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml b/changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml deleted file mode 100644 index 6b1a43cc25..0000000000 --- a/changelogs/fragments/1788-ease-nios_host_record-dns-bypass-check.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - nios_host_record - allow DNS Bypass for views other than default (https://github.com/ansible-collections/community.general/issues/1786). 
diff --git a/changelogs/fragments/1795-list-elements-batch1.yml b/changelogs/fragments/1795-list-elements-batch1.yml deleted file mode 100644 index 9b057c7712..0000000000 --- a/changelogs/fragments/1795-list-elements-batch1.yml +++ /dev/null @@ -1,27 +0,0 @@ -minor_changes: - - plugins/module_utils/oracle/oci_utils.py - elements of list parameter ``key_by`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - lxd_container - elements of list parameter ``profiles`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - packet_device - elements of list parameters ``device_ids``, ``hostnames`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - pubnub_blocks - elements of list parameters ``event_handlers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - vmadm - elements of list parameters ``disks``, ``nics``, ``resolvers``, ``filesystems`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - sl_vm - elements of list parameters ``disks``, ``ssh_keys`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - xml - elements of list parameters ``add_children``, ``set_children`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - keycloak_client - elements of list parameters ``default_roles``, ``redirect_uris``, ``web_origins`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - onepassword_info - elements of list parameters ``search_terms`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - librato_annotation - elements of list parameters ``links`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). 
- - pagerduty - elements of list parameters ``service`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - statusio_maintenance - elements of list parameters ``components``, ``containers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - dnsimple - elements of list parameters ``record_ids`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - nsupdate - elements of list parameters ``value`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - omapi_host - elements of list parameters ``statements`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - mail - elements of list parameters ``to``, ``cc``, ``bcc``, ``attach``, ``headers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - nexmo - elements of list parameters ``dest`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - rocketchat - elements of list parameters ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - sendgrid - elements of list parameters ``to_addresses``, ``cc``, ``bcc``, ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - slack - elements of list parameters ``attachments`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - twilio - elements of list parameters ``to_numbers`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - redhat_subscription - elements of list parameters ``pool_ids``, ``addons`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). 
- - gitlab_runner - elements of list parameters ``tag_list`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). - - na_ontap_gather_facts - elements of list parameters ``gather_subset`` are now validated (https://github.com/ansible-collections/community.general/pull/1795). -bugfixes: - - redhat_subscription - ``mutually_exclusive`` was referring to parameter alias instead of name (https://github.com/ansible-collections/community.general/pull/1795). diff --git a/changelogs/fragments/1813-lxd_profile-merge-profiles.yml b/changelogs/fragments/1813-lxd_profile-merge-profiles.yml deleted file mode 100644 index d374347a5e..0000000000 --- a/changelogs/fragments/1813-lxd_profile-merge-profiles.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- lxd_profile - added ``merge_profile`` parameter to merge configurations from the play to an existing profile (https://github.com/ansible-collections/community.general/pull/1813). diff --git a/changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml b/changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml deleted file mode 100644 index bc4915b7b9..0000000000 --- a/changelogs/fragments/1814-dnsimple-add-support-for-caa-records.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - dnsimple - add CAA records to the whitelist of valid record types (https://github.com/ansible-collections/community.general/pull/1814). diff --git a/changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml b/changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml deleted file mode 100644 index fdbc850528..0000000000 --- a/changelogs/fragments/1819-tidyup-pylint-blacklistnames.yml +++ /dev/null @@ -1,17 +0,0 @@ -bugfixes: - - "alternatives - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." 
- - "beadm - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "cronvar - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "dconf - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "filesystem - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "hipchat - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "interfaces_file - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "java_cert - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "lvg - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "lvol - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "lxc - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "lxc_container - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "parted - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "rundeck_acl_policy - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." 
- - "statusio_maintenance - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." - - "timezone - internal refactoring: replaced uses of ``_`` with ``dummy`` (https://github.com/ansible-collections/community.general/pull/1819)." diff --git a/changelogs/fragments/1830-valmod_docmissingtype_batch1.yml b/changelogs/fragments/1830-valmod_docmissingtype_batch1.yml deleted file mode 100644 index 83a27f7e77..0000000000 --- a/changelogs/fragments/1830-valmod_docmissingtype_batch1.yml +++ /dev/null @@ -1,7 +0,0 @@ -bugfixes: - - kibana_plugin - ``state`` parameter choices must use ``list()`` in python3 (https://github.com/ansible-collections/community.general/pull/1830). - - elasticsearch_plugin - ``state`` parameter choices must use ``list()`` in python3 (https://github.com/ansible-collections/community.general/pull/1830). - - riak - parameters ``wait_for_handoffs`` and ``wait_for_ring`` are ``int`` but the default value was ``false`` (https://github.com/ansible-collections/community.general/pull/1830). - - logstash_plugin - wrapped ``dict.keys()`` with ``list`` for use in ``choices`` setting (https://github.com/ansible-collections/community.general/pull/1830). - - iso_extract - use proper alias deprecation mechanism for ``thirsty`` alias of ``force`` (https://github.com/ansible-collections/community.general/pull/1830). - - runit - removed unused code, and passing command as ``list`` instead of ``str`` to ``run_command()`` (https://github.com/ansible-collections/community.general/pull/1830). 
diff --git a/changelogs/fragments/1833-zfs-creation-only-properties.yaml b/changelogs/fragments/1833-zfs-creation-only-properties.yaml deleted file mode 100644 index deb972a6d2..0000000000 --- a/changelogs/fragments/1833-zfs-creation-only-properties.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - zfs - some ZFS properties could be passed when the dataset/volume did not exist, but would fail if the dataset already existed, even if the property matched what was specified in the ansible task (https://github.com/ansible-collections/community.general/issues/868, https://github.com/ansible-collections/community.general/pull/1833). diff --git a/changelogs/fragments/1838-runit-deprecate-param-dist.yml b/changelogs/fragments/1838-runit-deprecate-param-dist.yml deleted file mode 100644 index 5d133c074e..0000000000 --- a/changelogs/fragments/1838-runit-deprecate-param-dist.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: - - runit - unused parameter ``dist`` marked for deprecation (https://github.com/ansible-collections/community.general/pull/1830). diff --git a/changelogs/fragments/1847-proxmox-kvm-fix-status.yml b/changelogs/fragments/1847-proxmox-kvm-fix-status.yml deleted file mode 100644 index 0863f1bed2..0000000000 --- a/changelogs/fragments/1847-proxmox-kvm-fix-status.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox_kvm - fix undefined local variable ``status`` when the parameter ``state`` is either ``stopped``, ``started``, ``restarted`` or ``absent`` (https://github.com/ansible-collections/community.general/pull/1847). 
diff --git a/changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml b/changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml deleted file mode 100644 index 0946a4f38f..0000000000 --- a/changelogs/fragments/1852-deploy-helper-fix-state-is-clean-without-release.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - deploy_helper - allow ``state=clean`` to be used without defining a ``release`` (https://github.com/ansible-collections/community.general/issues/1852). \ No newline at end of file diff --git a/changelogs/fragments/1861-python3-keys.yml b/changelogs/fragments/1861-python3-keys.yml deleted file mode 100644 index 029ed93575..0000000000 --- a/changelogs/fragments/1861-python3-keys.yml +++ /dev/null @@ -1,22 +0,0 @@ -bugfixes: - - redis cache plugin - wrapped usages of ``keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - memcached cache plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - diy callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - selective callback plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - chef_databag lookup plugin - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - net_tools.nios.api module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). 
- - utm_utils module_utils - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - lxc_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - lxd_container - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - oneandone_monitoring_policy - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - oci_vcn - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - spotinst_aws_elastigroup - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - sensu_check - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - redhat_subscription - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - idrac_redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - idrac_redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - idrac_redfish_info - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). 
- - redfish_command - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - redfish_config - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - vdo - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). - - nsot inventory script - wrapped usages of ``dict.keys()`` in ``list()`` for Python 3 compatibility (https://github.com/ansible-collections/community.general/pull/1861). diff --git a/changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml b/changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml deleted file mode 100644 index 3f757b233a..0000000000 --- a/changelogs/fragments/1867-modhelper-cmdmixin-dict-params.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - module_helper module utils - ``CmdMixin.run_command()`` now accepts ``dict`` command arguments, providing the parameter and its value (https://github.com/ansible-collections/community.general/pull/1867). diff --git a/changelogs/fragments/1871-infoblox-inventory.yml b/changelogs/fragments/1871-infoblox-inventory.yml deleted file mode 100644 index d49d176f1b..0000000000 --- a/changelogs/fragments/1871-infoblox-inventory.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "infoblox inventory script - make sure that the script also works with Ansible 2.9, and returns a more helpful error when community.general is not installed as part of Ansible 2.10/3 (https://github.com/ansible-collections/community.general/pull/1871)." 
diff --git a/changelogs/fragments/1880-fix_cobbler_system_ssl.yml b/changelogs/fragments/1880-fix_cobbler_system_ssl.yml deleted file mode 100644 index 849f703130..0000000000 --- a/changelogs/fragments/1880-fix_cobbler_system_ssl.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - cobbler_sync, cobbler_system - fix SSL/TLS certificate check when ``validate_certs`` set to ``false`` (https://github.com/ansible-collections/community.general/pull/1880). diff --git a/changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml b/changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml deleted file mode 100644 index 47569b6a24..0000000000 --- a/changelogs/fragments/1882-fix-nmcli-ensure-slave-type-for-bond-slave.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmcli - ensure the ``slave-type`` option is passed to ``nmcli`` for type ``bond-slave`` (https://github.com/ansible-collections/community.general/pull/1882). diff --git a/changelogs/fragments/1885-sanity-check-fixes-batch3.yml b/changelogs/fragments/1885-sanity-check-fixes-batch3.yml deleted file mode 100644 index bf819a6e21..0000000000 --- a/changelogs/fragments/1885-sanity-check-fixes-batch3.yml +++ /dev/null @@ -1,18 +0,0 @@ -minor_changes: - - oneandone_firewall_policy - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - oneandone_load_balancer - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - oneandone_monitoring_policy - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - oneandone_private_network - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - oneandone_server - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). 
- - profitbricks - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - profitbricks_volume - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - webfaction_domain - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - webfaction_site - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - consul - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - consul_acl - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - consul_session - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - datadog_monitor - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - sensu_check - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - sensu_client - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - sensu_handler - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). - - bundler - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1885). 
diff --git a/changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml b/changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml deleted file mode 100644 index 05daac483c..0000000000 --- a/changelogs/fragments/1894-feat-nmcli-add-method4-and-method6.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmcli - add ``method4`` and ``method6`` options (https://github.com/ansible-collections/community.general/pull/1894). diff --git a/changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml b/changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml deleted file mode 100644 index 73d908cfa8..0000000000 --- a/changelogs/fragments/1895-proxmox-kvm-fix-issue-1875.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox_kvm - fix parameter ``vmid`` passed twice to ``exit_json`` while creating a virtual machine without cloning (https://github.com/ansible-collections/community.general/issues/1875, https://github.com/ansible-collections/community.general/pull/1895). diff --git a/changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml b/changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml deleted file mode 100644 index 36f40da0fe..0000000000 --- a/changelogs/fragments/1912-yum_versionlock-lock_unlock_concurrently.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - yum_versionlock - Do the lock/unlock concurrently to speed up (https://github.com/ansible-collections/community.general/pull/1912). diff --git a/changelogs/fragments/1914-add-sanitization-to-url.yml b/changelogs/fragments/1914-add-sanitization-to-url.yml deleted file mode 100644 index 3b41bcb7af..0000000000 --- a/changelogs/fragments/1914-add-sanitization-to-url.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox inventory - added handling of extra trailing slashes in the URL (https://github.com/ansible-collections/community.general/pull/1914). 
diff --git a/changelogs/fragments/1916-add-version-sort-filter.yml b/changelogs/fragments/1916-add-version-sort-filter.yml deleted file mode 100644 index a06b464e55..0000000000 --- a/changelogs/fragments/1916-add-version-sort-filter.yml +++ /dev/null @@ -1,3 +0,0 @@ -add plugin.filter: - - name: version_sort - description: Sort a list according to version order instead of pure alphabetical one diff --git a/changelogs/fragments/1927-removed-parameter-invalid.yml b/changelogs/fragments/1927-removed-parameter-invalid.yml deleted file mode 100644 index 6dbc2e187b..0000000000 --- a/changelogs/fragments/1927-removed-parameter-invalid.yml +++ /dev/null @@ -1,12 +0,0 @@ -deprecated_features: - - composer - deprecated invalid parameter aliases ``working-dir``, ``global-command``, ``prefer-source``, ``prefer-dist``, ``no-dev``, ``no-scripts``, ``no-plugins``, ``optimize-autoloader``, ``classmap-authoritative``, ``apcu-autoloader``, ``ignore-platform-reqs``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - apt_rpm - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - homebrew - deprecated invalid parameter alias ``update-brew``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - homebrew_cask - deprecated invalid parameter alias ``update-brew``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - opkg - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - pacman - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). 
- - slackpkg - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - urmpi - deprecated invalid parameter aliases ``update-cache`` and ``no-recommends``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - xbps - deprecated invalid parameter alias ``update-cache``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - github_deploy_key - deprecated invalid parameter alias ``2fa_token``, will be removed in 5.0.0 (https://github.com/ansible-collections/community.general/pull/1927). - - puppet - deprecated undocumented parameter ``show_diff``, will be removed in 7.0.0. (https://github.com/ansible-collections/community.general/pull/1927). diff --git a/changelogs/fragments/1928-bigpanda-message.yml b/changelogs/fragments/1928-bigpanda-message.yml deleted file mode 100644 index 081b51cc0f..0000000000 --- a/changelogs/fragments/1928-bigpanda-message.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "bigpanda - actually use the ``deployment_message`` option (https://github.com/ansible-collections/community.general/pull/1928)." diff --git a/changelogs/fragments/1929-grove-message.yml b/changelogs/fragments/1929-grove-message.yml deleted file mode 100644 index 402aa24639..0000000000 --- a/changelogs/fragments/1929-grove-message.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: -- "grove - the option ``message`` has been renamed to ``message_content``. The old name ``message`` is kept as an alias and will be removed for community.general 4.0.0. This was done because ``message`` is used internally by Ansible (https://github.com/ansible-collections/community.general/pull/1929)." -deprecated_features: -- "grove - the option ``message`` will be removed in community.general 4.0.0. Use the new option ``message_content`` instead (https://github.com/ansible-collections/community.general/pull/1929)." 
diff --git a/changelogs/fragments/1949-proxmox-inventory-tags.yml b/changelogs/fragments/1949-proxmox-inventory-tags.yml deleted file mode 100644 index 073428c2e6..0000000000 --- a/changelogs/fragments/1949-proxmox-inventory-tags.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: -- proxmox inventory plugin - allowed proxomox tag string to contain commas when returned as fact (https://github.com/ansible-collections/community.general/pull/1949). -minor_changes: -- proxmox inventory plugin - added ``tags_parsed`` fact containing tags parsed as a list (https://github.com/ansible-collections/community.general/pull/1949). diff --git a/changelogs/fragments/1970-valmod-batch7.yml b/changelogs/fragments/1970-valmod-batch7.yml deleted file mode 100644 index cd577d4578..0000000000 --- a/changelogs/fragments/1970-valmod-batch7.yml +++ /dev/null @@ -1,18 +0,0 @@ -minor_changes: - - heroku_collaborator - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - linode_v4 - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - one_host - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - one_image_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - one_vm - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - scaleway_compute - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - scaleway_lb - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - manageiq_alert_profiles - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). 
- - manageiq_policies - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - manageiq_tags - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - oneview_datacenter_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - oneview_enclosure_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - oneview_ethernet_network_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). - - oneview_network_set_info - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/1970). -bugfixes: - - manageiq_provider - wrapped ``dict.keys()`` with ``list`` for use in ``choices`` setting (https://github.com/ansible-collections/community.general/pull/1970). - - packet_volume_attachment - removed extraneous ``print`` call - old debug? (https://github.com/ansible-collections/community.general/pull/1970). diff --git a/changelogs/fragments/1972-ini_file-empty-str-value.yml b/changelogs/fragments/1972-ini_file-empty-str-value.yml deleted file mode 100644 index 7beba5ac4c..0000000000 --- a/changelogs/fragments/1972-ini_file-empty-str-value.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ini_file - allows an empty string as a value for an option (https://github.com/ansible-collections/community.general/pull/1972). 
diff --git a/changelogs/fragments/1977-jenkinsjob-validate-certs.yml b/changelogs/fragments/1977-jenkinsjob-validate-certs.yml deleted file mode 100644 index b4f7b2f938..0000000000 --- a/changelogs/fragments/1977-jenkinsjob-validate-certs.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - jenkins_job - add a ``validate_certs`` parameter that allows disabling TLS/SSL certificate validation (https://github.com/ansible-collections/community.general/issues/255). diff --git a/changelogs/fragments/1978-jira-transition-logic.yml b/changelogs/fragments/1978-jira-transition-logic.yml deleted file mode 100644 index 12b4adc56d..0000000000 --- a/changelogs/fragments/1978-jira-transition-logic.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - jira - fixed fields' update in ticket transitions (https://github.com/ansible-collections/community.general/issues/818). -minor_changes: - - jira - added parameter ``account_id`` for compatibility with recent versions of JIRA (https://github.com/ansible-collections/community.general/issues/818, https://github.com/ansible-collections/community.general/pull/1978). diff --git a/changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml b/changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml deleted file mode 100644 index 90a438dddf..0000000000 --- a/changelogs/fragments/1991-proxmox-inventory-fix-template-in-pool.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox inventory - exclude qemu templates from inclusion to the inventory via pools (https://github.com/ansible-collections/community.general/issues/1986, https://github.com/ansible-collections/community.general/pull/1991). 
diff --git a/changelogs/fragments/1993-haproxy-fix-draining.yml b/changelogs/fragments/1993-haproxy-fix-draining.yml deleted file mode 100644 index fd5c77f573..0000000000 --- a/changelogs/fragments/1993-haproxy-fix-draining.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - haproxy - fix a bug preventing haproxy from properly entering ``DRAIN`` mode (https://github.com/ansible-collections/community.general/issues/1913). diff --git a/changelogs/fragments/1999-proxmox-fix-issue-1955.yml b/changelogs/fragments/1999-proxmox-fix-issue-1955.yml deleted file mode 100644 index 274e70fb0f..0000000000 --- a/changelogs/fragments/1999-proxmox-fix-issue-1955.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: -- proxmox - removed requirement that root password is provided when containter state is ``present`` (https://github.com/ansible-collections/community.general/pull/1999). diff --git a/changelogs/fragments/2000-proxmox_kvm-tag-support.yml b/changelogs/fragments/2000-proxmox_kvm-tag-support.yml deleted file mode 100644 index d4084ecd67..0000000000 --- a/changelogs/fragments/2000-proxmox_kvm-tag-support.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- proxmox_kvm - added new module parameter ``tags`` for use with PVE 6+ (https://github.com/ansible-collections/community.general/pull/2000). diff --git a/changelogs/fragments/2001-no_log-false.yml b/changelogs/fragments/2001-no_log-false.yml deleted file mode 100644 index 82d9ba0bb0..0000000000 --- a/changelogs/fragments/2001-no_log-false.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "Mark various module options with ``no_log=False`` which have a name that potentially could leak secrets, but which do not (https://github.com/ansible-collections/community.general/pull/2001)." 
diff --git a/changelogs/fragments/2006-valmod-batch8.yml b/changelogs/fragments/2006-valmod-batch8.yml deleted file mode 100644 index 30be5e16b2..0000000000 --- a/changelogs/fragments/2006-valmod-batch8.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - rax - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006). - - rax_cdb_user - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006). - - rax_scaling_group - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2006). diff --git a/changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml b/changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml deleted file mode 100644 index 8cfda91016..0000000000 --- a/changelogs/fragments/2008-update-java-cert-replace-cert-when-changed.yml +++ /dev/null @@ -1,7 +0,0 @@ -minor_changes: - - "java_cert - change ``state: present`` to check certificates by hash, not just alias name (https://github.com/ansible/ansible/issues/43249)." -bugfixes: - - "java_cert - allow setting ``state: absent`` by providing just the ``cert_alias`` (https://github.com/ansible/ansible/issues/27982)." - - "java_cert - properly handle proxy arguments when the scheme is provided (https://github.com/ansible/ansible/issues/54481)." -security_fixes: - - "java_cert - remove password from ``run_command`` arguments (https://github.com/ansible-collections/community.general/pull/2008)." 
diff --git a/changelogs/fragments/2013-proxmox-purge-parameter.yml b/changelogs/fragments/2013-proxmox-purge-parameter.yml deleted file mode 100644 index 6c681e5a19..0000000000 --- a/changelogs/fragments/2013-proxmox-purge-parameter.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- proxmox - added ``purge`` module parameter for use when deleting lxc's with HA options (https://github.com/ansible-collections/community.general/pull/2013). diff --git a/changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml b/changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml deleted file mode 100644 index 6420203888..0000000000 --- a/changelogs/fragments/2014-allow-root-for-kibana-plugin.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - kibana_plugin - add parameter for passing ``--allow-root`` flag to kibana and kibana-plugin commands (https://github.com/ansible-collections/community.general/pull/2014). diff --git a/changelogs/fragments/2020-remove-unused-param-in-rax.yml b/changelogs/fragments/2020-remove-unused-param-in-rax.yml deleted file mode 100644 index 333548f0b9..0000000000 --- a/changelogs/fragments/2020-remove-unused-param-in-rax.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - rax - unused parameter ``service`` removed (https://github.com/ansible-collections/community.general/pull/2020). diff --git a/changelogs/fragments/2024-module-helper-fixes.yml b/changelogs/fragments/2024-module-helper-fixes.yml deleted file mode 100644 index 3ce3cc71dc..0000000000 --- a/changelogs/fragments/2024-module-helper-fixes.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - module_helper module utils - actually ignoring formatting of parameters with value ``None`` (https://github.com/ansible-collections/community.general/pull/2024). - - module_helper module utils - handling ``ModuleHelperException`` now properly calls ``fail_json()`` (https://github.com/ansible-collections/community.general/pull/2024). 
- - module_helper module utils - use the command name as-is in ``CmdMixin`` if it fails ``get_bin_path()`` - allowing full path names to be passed (https://github.com/ansible-collections/community.general/pull/2024). diff --git a/changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml b/changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml deleted file mode 100644 index b5c22b9502..0000000000 --- a/changelogs/fragments/2027-add-redfish-session-create-delete-authenticate.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - redfish_* modules, redfish_utils module utils - add support for Redfish session create, delete, and authenticate (https://github.com/ansible-collections/community.general/issues/1975). diff --git a/changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml b/changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml deleted file mode 100644 index 9e70a16d80..0000000000 --- a/changelogs/fragments/2031-ipa_sudorule_add_runasextusers.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- ipa_sudorule - add support for setting sudo runasuser (https://github.com/ansible-collections/community.general/pull/2031). diff --git a/changelogs/fragments/2032-one_image-pyone.yml b/changelogs/fragments/2032-one_image-pyone.yml deleted file mode 100644 index 4975cb73ad..0000000000 --- a/changelogs/fragments/2032-one_image-pyone.yml +++ /dev/null @@ -1,2 +0,0 @@ -breaking_changes: - - one_image - use pyone instead of python-oca (https://github.com/ansible-collections/community.general/pull/2032). diff --git a/changelogs/fragments/2036-scaleway-inventory.yml b/changelogs/fragments/2036-scaleway-inventory.yml deleted file mode 100644 index 44161306ac..0000000000 --- a/changelogs/fragments/2036-scaleway-inventory.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - scaleway inventory plugin - fix pagination on scaleway inventory plugin (https://github.com/ansible-collections/community.general/pull/2036). 
diff --git a/changelogs/fragments/2037-add-from-csv-filter.yml b/changelogs/fragments/2037-add-from-csv-filter.yml deleted file mode 100644 index d99c4cd0a8..0000000000 --- a/changelogs/fragments/2037-add-from-csv-filter.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -add plugin.filter: - - name: from_csv - description: Converts CSV text input into list of dicts -minor_changes: - - csv module utils - new module_utils for shared functions between ``from_csv`` filter and ``read_csv`` module (https://github.com/ansible-collections/community.general/pull/2037). - - read_csv - refactored read_csv module to use shared csv functions from csv module_utils (https://github.com/ansible-collections/community.general/pull/2037). diff --git a/changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml b/changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml deleted file mode 100644 index 04d9a11101..0000000000 --- a/changelogs/fragments/2040-fix-index-error-in-redfish-set-manager-nic.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - redfish_config module, redfish_utils module utils - fix IndexError in ``SetManagerNic`` command (https://github.com/ansible-collections/community.general/issues/1692). diff --git a/changelogs/fragments/2057-nios-devel.yml b/changelogs/fragments/2057-nios-devel.yml deleted file mode 100644 index be9f8a970f..0000000000 --- a/changelogs/fragments/2057-nios-devel.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "nios* modules - fix modules to work with ansible-core 2.11 (https://github.com/ansible-collections/community.general/pull/2057)." 
diff --git a/changelogs/fragments/2061-archive-refactor1.yml b/changelogs/fragments/2061-archive-refactor1.yml deleted file mode 100644 index a7189a2f59..0000000000 --- a/changelogs/fragments/2061-archive-refactor1.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - archive - refactored some reused code out into a couple of functions (https://github.com/ansible-collections/community.general/pull/2061). diff --git a/changelogs/fragments/2065-snmp-facts-timeout.yml b/changelogs/fragments/2065-snmp-facts-timeout.yml deleted file mode 100644 index 0e6a4e54fa..0000000000 --- a/changelogs/fragments/2065-snmp-facts-timeout.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - snmp_facts - added parameters ``timeout`` and ``retries`` to module (https://github.com/ansible-collections/community.general/issues/980). diff --git a/changelogs/fragments/2072-stacki-host-params-fallback.yml b/changelogs/fragments/2072-stacki-host-params-fallback.yml deleted file mode 100644 index f586a6eb0c..0000000000 --- a/changelogs/fragments/2072-stacki-host-params-fallback.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - stacki_host - replaced ``default`` to environment variables with ``fallback`` to them (https://github.com/ansible-collections/community.general/pull/2072). diff --git a/changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml b/changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml deleted file mode 100644 index e0addce2fc..0000000000 --- a/changelogs/fragments/2094-bugfix-respect-PATH-env-variable-in-zypper-modules.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - zypper, zypper_repository - respect ``PATH`` environment variable when resolving zypper executable path (https://github.com/ansible-collections/community.general/pull/2094). 
diff --git a/changelogs/fragments/2110-vdo-add_force_option.yaml b/changelogs/fragments/2110-vdo-add_force_option.yaml deleted file mode 100644 index 9e93a919a2..0000000000 --- a/changelogs/fragments/2110-vdo-add_force_option.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - vdo - add ``force`` option (https://github.com/ansible-collections/community.general/issues/2101). diff --git a/changelogs/fragments/2116-add-fields-to-ipa-config-module.yml b/changelogs/fragments/2116-add-fields-to-ipa-config-module.yml deleted file mode 100644 index d1e1dc3180..0000000000 --- a/changelogs/fragments/2116-add-fields-to-ipa-config-module.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_config - add new options ``ipaconfigstring``, ``ipadefaultprimarygroup``, ``ipagroupsearchfields``, ``ipahomesrootdir``, ``ipabrkauthzdata``, ``ipamaxusernamelength``, ``ipapwdexpadvnotify``, ``ipasearchrecordslimit``, ``ipasearchtimelimit``, ``ipauserauthtype``, and ``ipausersearchfields`` (https://github.com/ansible-collections/community.general/pull/2116). diff --git a/changelogs/fragments/2125-git-config-scope-file.yml b/changelogs/fragments/2125-git-config-scope-file.yml deleted file mode 100644 index 75862e0333..0000000000 --- a/changelogs/fragments/2125-git-config-scope-file.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - git_config - fixed scope ``file`` behaviour and added integraton test for it (https://github.com/ansible-collections/community.general/issues/2117). diff --git a/changelogs/fragments/2135-vmadm-resolvers-type-fix.yml b/changelogs/fragments/2135-vmadm-resolvers-type-fix.yml deleted file mode 100644 index fcce6e12e1..0000000000 --- a/changelogs/fragments/2135-vmadm-resolvers-type-fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - vmadm - correct type of list elements in ``resolvers`` parameter (https://github.com/ansible-collections/community.general/issues/2135). 
diff --git a/changelogs/fragments/2139-dimensiondata_network-str-format.yml b/changelogs/fragments/2139-dimensiondata_network-str-format.yml deleted file mode 100644 index 115b04f045..0000000000 --- a/changelogs/fragments/2139-dimensiondata_network-str-format.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - dimensiondata_network - bug when formatting message, instead of % a simple comma was used (https://github.com/ansible-collections/community.general/pull/2139). diff --git a/changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml b/changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml deleted file mode 100644 index 6a24f1afc3..0000000000 --- a/changelogs/fragments/2142-apache2_mod_proxy-cleanup.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - apache2_mod_proxy - refactored/cleaned-up part of the code (https://github.com/ansible-collections/community.general/pull/2142). diff --git a/changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml b/changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml deleted file mode 100644 index 54a41cd237..0000000000 --- a/changelogs/fragments/2143-kibana_plugin-fixed-function-calls.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - kibana_plugin - added missing parameters to ``remove_plugin`` when using ``state=present force=true``, and fix potential quoting errors when invoking ``kibana`` (https://github.com/ansible-collections/community.general/pull/2143). diff --git a/changelogs/fragments/2144-atomic_get_bin_path.yml b/changelogs/fragments/2144-atomic_get_bin_path.yml deleted file mode 100644 index eeb55114d2..0000000000 --- a/changelogs/fragments/2144-atomic_get_bin_path.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - atomic_container - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144). 
- - atomic_host - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144). - - atomic_image - using ``get_bin_path()`` before calling ``run_command()`` (https://github.com/ansible-collections/community.general/pull/2144). diff --git a/changelogs/fragments/2146-npm-add_no_bin_links_option.yaml b/changelogs/fragments/2146-npm-add_no_bin_links_option.yaml deleted file mode 100644 index 651af80186..0000000000 --- a/changelogs/fragments/2146-npm-add_no_bin_links_option.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - npm - add ``no_bin_links`` option (https://github.com/ansible-collections/community.general/issues/2128). diff --git a/changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml b/changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml deleted file mode 100644 index 0ef97f20ed..0000000000 --- a/changelogs/fragments/2148-proxmox-inventory-agent-interfaces.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- proxmox inventory plugin - added ``proxmox_agent_interfaces`` fact describing network interfaces returned from a QEMU guest agent (https://github.com/ansible-collections/community.general/pull/2148). diff --git a/changelogs/fragments/2157-unreachable-code.yml b/changelogs/fragments/2157-unreachable-code.yml deleted file mode 100644 index 7cb84b4db9..0000000000 --- a/changelogs/fragments/2157-unreachable-code.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - rhevm - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157). - - ovh_ip_failover - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157). - - bitbucket_pipeline_variable - removed unreachable code (https://github.com/ansible-collections/community.general/pull/2157). 
diff --git a/changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml b/changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml deleted file mode 100644 index 10547bb71b..0000000000 --- a/changelogs/fragments/2159-ipa-user-sshpubkey-multi-word-comments.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ipa_user - allow ``sshpubkey`` to permit multiple word comments (https://github.com/ansible-collections/community.general/pull/2159). diff --git a/changelogs/fragments/2160-list-literals.yml b/changelogs/fragments/2160-list-literals.yml deleted file mode 100644 index 661b1e322e..0000000000 --- a/changelogs/fragments/2160-list-literals.yml +++ /dev/null @@ -1,11 +0,0 @@ -minor_changes: - - hiera lookup - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - known_hosts module utils - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - nictagadm - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - smartos_image_info - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - xattr - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - ipwcli_dns - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - svr4pkg - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - zfs_facts - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). 
- - zpool_facts - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). - - beadm - minor refactor converting multiple statements to a single list literal (https://github.com/ansible-collections/community.general/pull/2160). diff --git a/changelogs/fragments/2161-pkgutil-list-extend.yml b/changelogs/fragments/2161-pkgutil-list-extend.yml deleted file mode 100644 index 9af970afd8..0000000000 --- a/changelogs/fragments/2161-pkgutil-list-extend.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - pkgutil - fixed calls to ``list.extend()`` (https://github.com/ansible-collections/community.general/pull/2161). diff --git a/changelogs/fragments/2162-modhelper-variables.yml b/changelogs/fragments/2162-modhelper-variables.yml deleted file mode 100644 index 68b0edc37e..0000000000 --- a/changelogs/fragments/2162-modhelper-variables.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - module_helper module utils - added mechanism to manage variables, providing automatic output of variables, change status and diff information (https://github.com/ansible-collections/community.general/pull/2162). diff --git a/changelogs/fragments/2162-proxmox-constructable.yml b/changelogs/fragments/2162-proxmox-constructable.yml deleted file mode 100644 index dfcb1e3495..0000000000 --- a/changelogs/fragments/2162-proxmox-constructable.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- proxmox inventory plugin - added ``Constructable`` class to the inventory to provide options ``strict``, ``keyed_groups``, ``groups``, and ``compose`` (https://github.com/ansible-collections/community.general/pull/2180). 
diff --git a/changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml b/changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml deleted file mode 100644 index 43d183707c..0000000000 --- a/changelogs/fragments/2163-java_keystore_1667_improve_temp_files_storage.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: - - "java_keystore - use tempfile lib to create temporary files with randomized - names, and remove the temporary PKCS#12 keystore as well as other materials - (https://github.com/ansible-collections/community.general/issues/1667)." diff --git a/changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml b/changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml deleted file mode 100644 index d162f19b7a..0000000000 --- a/changelogs/fragments/2174-ipa-user-userauthtype-multiselect.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ipa_user - fix ``userauthtype`` option to take in list of strings for the multi-select field instead of single string (https://github.com/ansible-collections/community.general/pull/2174). diff --git a/changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml b/changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml deleted file mode 100644 index 0d961a53ac..0000000000 --- a/changelogs/fragments/2177-java_keystore_1668_dont_expose_secrets_on_cmdline.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -security_fixes: - - "java_keystore - pass secret to keytool through an environment variable to not expose it as a - commandline argument (https://github.com/ansible-collections/community.general/issues/1668)." 
diff --git a/changelogs/fragments/2183-java_keystore_improve_error_handling.yml b/changelogs/fragments/2183-java_keystore_improve_error_handling.yml deleted file mode 100644 index 5d6ceef511..0000000000 --- a/changelogs/fragments/2183-java_keystore_improve_error_handling.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -bugfixes: - - "java_keystore - improve error handling and return ``cmd`` as documented. - Force ``LANG``, ``LC_ALL`` and ``LC_MESSAGES`` environment variables to ``C`` to rely - on ``keytool`` output parsing. Fix pylint's ``unused-variable`` and ``no-else-return`` - hints (https://github.com/ansible-collections/community.general/pull/2183)." diff --git a/changelogs/fragments/2185-xfconf-absent-check-mode.yml b/changelogs/fragments/2185-xfconf-absent-check-mode.yml deleted file mode 100644 index 059f4acd9a..0000000000 --- a/changelogs/fragments/2185-xfconf-absent-check-mode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - xfconf - module was not honoring check mode when ``state`` was ``absent`` (https://github.com/ansible-collections/community.general/pull/2185). diff --git a/changelogs/fragments/2188-xfconf-modhelper-variables.yml b/changelogs/fragments/2188-xfconf-modhelper-variables.yml deleted file mode 100644 index 19e94254bd..0000000000 --- a/changelogs/fragments/2188-xfconf-modhelper-variables.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - module_helper module utils - added management of facts and adhoc setting of the initial value for variables (https://github.com/ansible-collections/community.general/pull/2188). - - xfconf - changed implementation to use ``ModuleHelper`` new features (https://github.com/ansible-collections/community.general/pull/2188). 
diff --git a/changelogs/fragments/2192-add-jira-attach.yml b/changelogs/fragments/2192-add-jira-attach.yml deleted file mode 100644 index 5877250541..0000000000 --- a/changelogs/fragments/2192-add-jira-attach.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - jira - added ``attach`` operation, which allows a user to attach a file to an issue (https://github.com/ansible-collections/community.general/pull/2192). diff --git a/changelogs/fragments/2203-modhelper-cause-changes-deco.yml b/changelogs/fragments/2203-modhelper-cause-changes-deco.yml deleted file mode 100644 index b61f97d6b8..0000000000 --- a/changelogs/fragments/2203-modhelper-cause-changes-deco.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - module_helper module utils - fixed decorator ``cause_changes`` (https://github.com/ansible-collections/community.general/pull/2203). diff --git a/changelogs/fragments/2204-github_repo-fix-baseurl_port.yml b/changelogs/fragments/2204-github_repo-fix-baseurl_port.yml deleted file mode 100644 index 0df3bd8ece..0000000000 --- a/changelogs/fragments/2204-github_repo-fix-baseurl_port.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - github_repo - PyGithub bug does not allow explicit port in ``base_url``. Specifying port is not required (https://github.com/PyGithub/PyGithub/issues/1913). diff --git a/changelogs/fragments/2208-jira-revamp.yml b/changelogs/fragments/2208-jira-revamp.yml deleted file mode 100644 index 32f1650aa0..0000000000 --- a/changelogs/fragments/2208-jira-revamp.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - jira - revamped the module as a class using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/2208). 
diff --git a/changelogs/fragments/2218-cpanm-revamp.yml b/changelogs/fragments/2218-cpanm-revamp.yml deleted file mode 100644 index 668a84f06b..0000000000 --- a/changelogs/fragments/2218-cpanm-revamp.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - cpanm - rewritten using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/2218). - - cpanm - honor and install specified version when running in ``new`` mode; that feature is not available in ``compatibility`` mode (https://github.com/ansible-collections/community.general/issues/208). -deprecated_features: - - cpanm - parameter ``system_lib`` deprecated in favor of using ``become`` (https://github.com/ansible-collections/community.general/pull/2218). diff --git a/changelogs/fragments/2220_nmcli_wifi_support.yaml b/changelogs/fragments/2220_nmcli_wifi_support.yaml deleted file mode 100644 index 224c4dc526..0000000000 --- a/changelogs/fragments/2220_nmcli_wifi_support.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "nmcli - add ability to connect to a Wifi network and also to attach it to a master (bond) (https://github.com/ansible-collections/community.general/pull/2220)." diff --git a/changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml b/changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml deleted file mode 100644 index 4d98b62922..0000000000 --- a/changelogs/fragments/2223_nmcli_no_IP_config_on_slave.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "nmcli - do not set IP configuration on slave connection (https://github.com/ansible-collections/community.general/pull/2223)." 
diff --git a/changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml b/changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml deleted file mode 100644 index 98852463d8..0000000000 --- a/changelogs/fragments/2224_nmcli_allow_MAC_overwrite.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- "nmcli - don't restrict the ability to manually set the MAC address to the bridge (https://github.com/ansible-collections/community.general/pull/2224)." diff --git a/changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml b/changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml deleted file mode 100644 index 0622e93c31..0000000000 --- a/changelogs/fragments/2230-java_keystore-1669-ssl-input-files-by-path.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -minor_changes: - - "java_keystore - add options ``certificate_path`` and ``private_key_path``, - mutually exclusive with ``certificate`` and ``private_key`` respectively, and - targetting files on remote hosts rather than their contents on the controller. - (https://github.com/ansible-collections/community.general/issues/1669)." diff --git a/changelogs/fragments/2236-jira-isinstance.yml b/changelogs/fragments/2236-jira-isinstance.yml deleted file mode 100644 index e80cbacdf9..0000000000 --- a/changelogs/fragments/2236-jira-isinstance.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - jira - fixed calling of ``isinstance`` (https://github.com/ansible-collections/community.general/issues/2234). 
diff --git a/changelogs/fragments/2244-hashids-filters.yml b/changelogs/fragments/2244-hashids-filters.yml deleted file mode 100644 index 568119e890..0000000000 --- a/changelogs/fragments/2244-hashids-filters.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -add plugin.filter: - - name: hashids_encode - description: Encodes YouTube-like hashes from a sequence of integers - - name: hashids_decode - description: Decodes a sequence of numbers from a YouTube-like hash diff --git a/changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml b/changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml deleted file mode 100644 index 3eae94f4ea..0000000000 --- a/changelogs/fragments/2245-proxmox_fix_agent_string_handling.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - proxmox inventory - added handling of commas in KVM agent configuration string (https://github.com/ansible-collections/community.general/pull/2245). diff --git a/changelogs/fragments/2246-terraform.yaml b/changelogs/fragments/2246-terraform.yaml deleted file mode 100644 index d2dd93e22e..0000000000 --- a/changelogs/fragments/2246-terraform.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - terraform - fix issue that cause the execution fail because from Terraform 0.15 on, the ``-var`` and ``-var-file`` options are no longer available on ``terraform validate`` (https://github.com/ansible-collections/community.general/pull/2246). - - terraform - fix issue that cause the destroy to fail because from Terraform 0.15 on, the ``terraform destroy -force`` option is replaced with ``terraform destroy -auto-approve`` (https://github.com/ansible-collections/community.general/issues/2247). - - terraform - remove uses of ``use_unsafe_shell=True`` (https://github.com/ansible-collections/community.general/pull/2246). 
diff --git a/changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml b/changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml deleted file mode 100644 index e5d6ca02d7..0000000000 --- a/changelogs/fragments/2249-linode_v4-support-private_ip-option.yaml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - linode_v4 - add support for ``private_ip`` option (https://github.com/ansible-collections/community.general/pull/2249). diff --git a/changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml b/changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml deleted file mode 100644 index 5b8deb2a03..0000000000 --- a/changelogs/fragments/2250-allow-keycloak-modules-to-take-token-as-param.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -minor_changes: - - keycloak_* modules - allow the keycloak modules to use a token for the - authentication, the modules can take either a token or the credentials - (https://github.com/ansible-collections/community.general/pull/2250). diff --git a/changelogs/fragments/2257-ldap_entry-params.yml b/changelogs/fragments/2257-ldap_entry-params.yml deleted file mode 100644 index f5c92d0b9c..0000000000 --- a/changelogs/fragments/2257-ldap_entry-params.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: -- "ldap_entry - the ``params`` parameter is now completely removed. Using it already triggered an error since community.general 0.1.2 (https://github.com/ansible-collections/community.general/pull/2257)." 
diff --git a/changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml b/changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml deleted file mode 100644 index d8f6f80385..0000000000 --- a/changelogs/fragments/2259-proxmox-multi-nic-and-unsupported.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: - - proxmox inventory plugin - support network interfaces without IP addresses, multiple network interfaces and unsupported/commanddisabled guest error (https://github.com/ansible-collections/community.general/pull/2263). -minor_changes: - - proxmox inventory plugin - allow to select whether ``ansible_host`` should be set for the proxmox nodes (https://github.com/ansible-collections/community.general/pull/2263). diff --git a/changelogs/fragments/2262-java_keystore-passphrase.yml b/changelogs/fragments/2262-java_keystore-passphrase.yml deleted file mode 100644 index 882ada97c3..0000000000 --- a/changelogs/fragments/2262-java_keystore-passphrase.yml +++ /dev/null @@ -1,8 +0,0 @@ -breaking_changes: -- "java_keystore - instead of failing, now overwrites keystore if the alias (name) is changed. - This was originally the intended behavior, but did not work due to a logic error. Make sure - that your playbooks and roles do not depend on the old behavior of failing instead of - overwriting (https://github.com/ansible-collections/community.general/issues/1671)." -- "java_keystore - instead of failing, now overwrites keystore if the passphrase is changed. - Make sure that your playbooks and roles do not depend on the old behavior of failing instead - of overwriting (https://github.com/ansible-collections/community.general/issues/1671)." 
diff --git a/changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml b/changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml deleted file mode 100644 index 25b79f4528..0000000000 --- a/changelogs/fragments/2267-lvol_size_addition-subtraction_support.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -minor_changes: - - lvol - added proper support for ``+-`` options when extending or reducing the logical volume (https://github.com/ansible-collections/community.general/issues/1988). -bugfixes: - - lvol - fixed sizing calculation rounding to match the underlying tools (https://github.com/ansible-collections/community.general/issues/1988). diff --git a/changelogs/fragments/2268-validation-univetion.yml b/changelogs/fragments/2268-validation-univetion.yml deleted file mode 100644 index f245380441..0000000000 --- a/changelogs/fragments/2268-validation-univetion.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - udm_dns_record - fixed default value of parameter ``data`` to match its type (https://github.com/ansible-collections/community.general/pull/2268). -minor_changes: - - udm_dns_zone - elements of list parameters ``nameserver``, ``interfaces``, and ``mx`` are now validated (https://github.com/ansible-collections/community.general/pull/2268). diff --git a/changelogs/fragments/2280-pids-new-pattern-option.yml b/changelogs/fragments/2280-pids-new-pattern-option.yml deleted file mode 100644 index fb9f07e744..0000000000 --- a/changelogs/fragments/2280-pids-new-pattern-option.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: -- pids - new options ``pattern`` and `ignore_case`` for retrieving PIDs of processes matching a supplied pattern (https://github.com/ansible-collections/community.general/pull/2280). 
diff --git a/changelogs/fragments/2282-nmap-fix-cache-support.yml b/changelogs/fragments/2282-nmap-fix-cache-support.yml deleted file mode 100644 index 62b026eb25..0000000000 --- a/changelogs/fragments/2282-nmap-fix-cache-support.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - nmap inventory plugin - fix cache and constructed group support (https://github.com/ansible-collections/community.general/issues/2242). diff --git a/changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml b/changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml deleted file mode 100644 index 0df25ca462..0000000000 --- a/changelogs/fragments/2284-influxdb_retention_policy-idempotence.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - influxdb_retention_policy - ensure idempotent module execution with different - duration and shard duration parameter values - (https://github.com/ansible-collections/community.general/issues/2281). diff --git a/changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml b/changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml deleted file mode 100644 index ec389b270c..0000000000 --- a/changelogs/fragments/2308-terraform-add-plugin_paths-parameter.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - terraform - add ``plugin_paths`` parameter which allows disabling Terraform from performing plugin discovery and auto-download (https://github.com/ansible-collections/community.general/pull/2308). diff --git a/changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml b/changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml deleted file mode 100644 index 4cced727a2..0000000000 --- a/changelogs/fragments/2329-hiera-lookup-plugin-return-type.yaml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - hiera lookup plugin - converts the return type of plugin to unicode string (https://github.com/ansible-collections/community.general/pull/2329). 
diff --git a/changelogs/fragments/2340-jenkins_plugin-py2.yml b/changelogs/fragments/2340-jenkins_plugin-py2.yml deleted file mode 100644 index f3bcdbd361..0000000000 --- a/changelogs/fragments/2340-jenkins_plugin-py2.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "jenkins_plugin - fixes Python 2 compatibility issue (https://github.com/ansible-collections/community.general/pull/2340)." \ No newline at end of file diff --git a/changelogs/fragments/2349-jira-bugfix-b64decode.yml b/changelogs/fragments/2349-jira-bugfix-b64decode.yml deleted file mode 100644 index 41a1dabb94..0000000000 --- a/changelogs/fragments/2349-jira-bugfix-b64decode.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - jira - fixed error when loading base64-encoded content as attachment (https://github.com/ansible-collections/community.general/pull/2349). diff --git a/changelogs/fragments/620-consul_io-env-variables-conf-based.yml b/changelogs/fragments/620-consul_io-env-variables-conf-based.yml deleted file mode 100644 index e3378428c5..0000000000 --- a/changelogs/fragments/620-consul_io-env-variables-conf-based.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -bugfixes: - - consul_io inventory script - kv_groups - fix byte chain decoding for Python 3 (https://github.com/ansible-collections/community.general/pull/620). -minor_changes: - - consul_io inventory script - conf options - allow custom configuration options via env variables (https://github.com/ansible-collections/community.general/pull/620). diff --git a/changelogs/fragments/719-manageiq-resource_id.yml b/changelogs/fragments/719-manageiq-resource_id.yml deleted file mode 100644 index bbeef5ff82..0000000000 --- a/changelogs/fragments/719-manageiq-resource_id.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - manageiq_tags and manageiq_policies - added new parameter ``resource_id``. This parameter can be used instead of parameter ``resource_name`` (https://github.com/ansible-collections/community.general/pull/719). 
\ No newline at end of file diff --git a/changelogs/fragments/720-cloudforms_inventory.yml b/changelogs/fragments/720-cloudforms_inventory.yml deleted file mode 100644 index f5675205d1..0000000000 --- a/changelogs/fragments/720-cloudforms_inventory.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - cloudforms inventory - fixed issue that non-existing (archived) VMs were synced (https://github.com/ansible-collections/community.general/pull/720). diff --git a/changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml b/changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml deleted file mode 100644 index 7d48c77298..0000000000 --- a/changelogs/fragments/816-only-invocate-feature-when-variable-is-set.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - proxmox lxc - only add the features flag when module parameter ``features`` is set. Before an empty string was send to proxmox in case the parameter was not used, which required to use ``root@pam`` for module execution (https://github.com/ansible-collections/community.general/pull/1763). diff --git a/changelogs/fragments/948-dellemc-migration-removal.yml b/changelogs/fragments/948-dellemc-migration-removal.yml deleted file mode 100644 index c4f64a815f..0000000000 --- a/changelogs/fragments/948-dellemc-migration-removal.yml +++ /dev/null @@ -1,13 +0,0 @@ -removed_features: - - | - The ``ome_device_info``, ``idrac_firmware`` and ``idrac_server_config_profile`` modules have now been migrated from community.general to the `dellemc.openmanage `_ Ansible collection. - If you use ansible-base 2.10 or newer, redirections have been provided. - - If you use Ansible 2.9 and installed this collection, you need to adjust the FQCNs (``community.general.idrac_firmware`` → ``dellemc.openmanage.idrac_firmware``) and make sure to install the dellemc.openmanage collection. 
-breaking_changes: - - | - If you use Ansible 2.9 and these plugins or modules from this collection, community.general 3.0.0 results in errors when trying to use the DellEMC content by FQCN, like ``community.general.idrac_firmware``. - Since Ansible 2.9 is not able to use redirections, you will have to adjust your playbooks and roles manually to use the new FQCNs (``dellemc.openmanage.idrac_firmware`` for the previous example) and to make sure that you have ``dellemc.openmanage`` installed. - - If you use ansible-base 2.10 or newer and did not install Ansible 4.0.0, but installed (and/or upgraded) community.general manually, you need to make sure to also install the ``dellemc.openmanage`` collection if you are using any of these plugins or modules. - While ansible-base 2.10 or newer can use the redirects that community.general 3.0.0 adds, the collection they point to (such as dellemc.openmanage) must be installed for them to work. diff --git a/changelogs/fragments/CVE-2021-20191_no_log.yml b/changelogs/fragments/CVE-2021-20191_no_log.yml deleted file mode 100644 index a2c8740598..0000000000 --- a/changelogs/fragments/CVE-2021-20191_no_log.yml +++ /dev/null @@ -1,4 +0,0 @@ -security_fixes: - - module_utils/_netapp, na_ontap_gather_facts - enabled ``no_log`` for the options ``api_key`` and ``secret_key`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725). - - module_utils/identity/keycloak, keycloak_client, keycloak_clienttemplate, keycloak_group - enabled ``no_log`` for the option ``auth_client_secret`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725). - - utm_proxy_auth_profile - enabled ``no_log`` for the option ``frontend_cookie_secret`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725). 
diff --git a/changelogs/fragments/allow_funcd_to_load.yml b/changelogs/fragments/allow_funcd_to_load.yml deleted file mode 100644 index 3336b0aaf4..0000000000 --- a/changelogs/fragments/allow_funcd_to_load.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - funcd connection plugin - can now load (https://github.com/ansible-collections/community.general/pull/2235). diff --git a/changelogs/fragments/dict-filter.yml b/changelogs/fragments/dict-filter.yml deleted file mode 100644 index 1e9923e796..0000000000 --- a/changelogs/fragments/dict-filter.yml +++ /dev/null @@ -1,3 +0,0 @@ -add plugin.filter: - - name: dict - description: "The ``dict`` function as a filter: converts a list of tuples to a dictionary" diff --git a/changelogs/fragments/meta-runtime-deprecations.yml b/changelogs/fragments/meta-runtime-deprecations.yml deleted file mode 100644 index 8863f346af..0000000000 --- a/changelogs/fragments/meta-runtime-deprecations.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "meta/runtime.yml - improve deprecation messages (https://github.com/ansible-collections/community.general/pull/1918)." diff --git a/changelogs/fragments/no_log-fixes.yml b/changelogs/fragments/no_log-fixes.yml deleted file mode 100644 index 70afd3229d..0000000000 --- a/changelogs/fragments/no_log-fixes.yml +++ /dev/null @@ -1,25 +0,0 @@ -security_fixes: - - "ovirt - mark the ``instance_rootpw`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "oneandone_firewall_policy, oneandone_load_balancer, oneandone_monitoring_policy, oneandone_private_network, oneandone_public_ip - mark the ``auth_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "rax_clb_ssl - mark the ``private_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." 
- - "spotinst_aws_elastigroup - mark the ``multai_token`` and ``token`` parameters as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "keycloak_client - mark the ``registration_access_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "librato_annotation - mark the ``api_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "pagerduty_alert - mark the ``api_key``, ``service_key`` and ``integration_key`` parameters as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "nios_nsgroup - mark the ``tsig_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "pulp_repo - mark the ``feed_client_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "gitlab_runner - mark the ``registration_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "ibm_sa_host - mark the ``iscsi_chap_secret`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "keycloak_* modules - mark the ``auth_client_secret`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "hwc_ecs_instance - mark the ``admin_pass`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "ovirt - mark the ``instance_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." 
- - "pagerduty_change - mark the ``integration_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "pingdom - mark the ``key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "rollbar_deployment - mark the ``token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "stackdriver - mark the ``key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "dnsmadeeasy - mark the ``account_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "logentries_msg - mark the ``token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "redfish_command - mark the ``update_creds.password`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736)." - - "utm_proxy_auth_profile - mark the ``frontend_cookie_secret`` parameter as ``no_log`` to avoid leakage of secrets. This causes the ``utm_proxy_auth_profile`` return value to no longer containing the correct value, but a placeholder (https://github.com/ansible-collections/community.general/pull/1736)." -breaking_changes: - - "utm_proxy_auth_profile - the ``frontend_cookie_secret`` return value now contains a placeholder string instead of the module's ``frontend_cookie_secret`` parameter (https://github.com/ansible-collections/community.general/pull/1736)." 
diff --git a/changelogs/fragments/path_join-shim-filter.yml b/changelogs/fragments/path_join-shim-filter.yml deleted file mode 100644 index f96922203f..0000000000 --- a/changelogs/fragments/path_join-shim-filter.yml +++ /dev/null @@ -1,3 +0,0 @@ -add plugin.filter: - - name: path_join - description: Redirects to ansible.builtin.path_join for ansible-base 2.10 or newer, and provides a compatible implementation for Ansible 2.9 diff --git a/changelogs/fragments/remove-deprecated-features.yml b/changelogs/fragments/remove-deprecated-features.yml deleted file mode 100644 index e728ce62d3..0000000000 --- a/changelogs/fragments/remove-deprecated-features.yml +++ /dev/null @@ -1,16 +0,0 @@ -removed_features: -- "airbrake_deployment - removed deprecated ``token`` parameter. Use ``project_id`` and ``project_key`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "bigpanda - the alias ``message`` has been removed. Use ``deployment_message`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "cisco_spark, cisco_webex - the alias ``message`` has been removed. Use ``msg`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "clc_aa_policy - the ``wait`` parameter has been removed. It did not have any effect (https://github.com/ansible-collections/community.general/pull/1926)." -- "datadog_monitor - the alias ``message`` has been removed. Use ``notification_message`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "django_manage - the parameter ``liveserver`` has been removed (https://github.com/ansible-collections/community.general/pull/1926)." -- "idrac_redfish_config - the parameters ``manager_attribute_name`` and ``manager_attribute_value`` have been removed. Use ``manager_attributes`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "iso_extract - the alias ``thirsty`` has been removed. 
Use ``force`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "redfish_config - the parameters ``bios_attribute_name`` and ``bios_attribute_value`` have been removed. Use ``bios_attributes`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "syspatch - the ``apply`` parameter has been removed. This is the default mode, so simply removing it will not change the behavior (https://github.com/ansible-collections/community.general/pull/1926)." -- "xbps - the ``force`` parameter has been removed. It did not have any effect (https://github.com/ansible-collections/community.general/pull/1926)." -- "redfish modules - issuing a data modification command without specifying the ID of the target System, Chassis or Manager resource when there is more than one is no longer allowed. Use the ``resource_id`` option to specify the target ID (https://github.com/ansible-collections/community.general/pull/1926)." -- "pulp_repo - the alias ``ca_cert`` has been removed. Use ``feed_ca_cert`` instead (https://github.com/ansible-collections/community.general/pull/1926)." -- "pulp_repo - the ``feed_client_cert`` parameter no longer defaults to the value of the ``client_cert`` parameter (https://github.com/ansible-collections/community.general/pull/1926)." -- "pulp_repo - the ``feed_client_key`` parameter no longer defaults to the value of the ``client_key`` parameter (https://github.com/ansible-collections/community.general/pull/1926)." diff --git a/changelogs/fragments/remove-deprecated-modules.yml b/changelogs/fragments/remove-deprecated-modules.yml deleted file mode 100644 index fa9d9c9eb7..0000000000 --- a/changelogs/fragments/remove-deprecated-modules.yml +++ /dev/null @@ -1,66 +0,0 @@ -removed_features: -- "The deprecated ali_instance_facts module has been removed. Use ali_instance_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated hpilo_facts module has been removed. 
Use hpilo_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated idrac_redfish_facts module has been removed. Use idrac_redfish_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated jenkins_job_facts module has been removed. Use jenkins_job_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated memset_memstore_facts module has been removed. Use memset_memstore_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated memset_server_facts module has been removed. Use memset_server_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated na_ontap_gather_facts module has been removed. Use netapp.ontap.na_ontap_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated nginx_status_facts module has been removed. Use nginx_status_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated one_image_facts module has been removed. Use one_image_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated onepassword_facts module has been removed. Use onepassword_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_datacenter_facts module has been removed. Use oneview_datacenter_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_enclosure_facts module has been removed. Use oneview_enclosure_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_ethernet_network_facts module has been removed. Use oneview_ethernet_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." 
-- "The deprecated oneview_fc_network_facts module has been removed. Use oneview_fc_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_fcoe_network_facts module has been removed. Use oneview_fcoe_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_logical_interconnect_group_facts module has been removed. Use oneview_logical_interconnect_group_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_network_set_facts module has been removed. Use oneview_network_set_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated oneview_san_manager_facts module has been removed. Use oneview_san_manager_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated online_server_facts module has been removed. Use online_server_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated online_user_facts module has been removed. Use online_user_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated purefa_facts module has been removed. Use purestorage.flasharray.purefa_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated purefb_facts module has been removed. Use purestorage.flasharray.purefb_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated python_requirements_facts module has been removed. Use python_requirements_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated redfish_facts module has been removed. Use redfish_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_image_facts module has been removed. 
Use scaleway_image_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_ip_facts module has been removed. Use scaleway_ip_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_organization_facts module has been removed. Use scaleway_organization_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_security_group_facts module has been removed. Use scaleway_security_group_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_server_facts module has been removed. Use scaleway_server_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_snapshot_facts module has been removed. Use scaleway_snapshot_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated scaleway_volume_facts module has been removed. Use scaleway_volume_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated smartos_image_facts module has been removed. Use smartos_image_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated vertica_facts module has been removed. Use vertica_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated xenserver_guest_facts module has been removed. Use xenserver_guest_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt module has been removed. Use ovirt.ovirt.ovirt_vm instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_affinity_label_facts module has been removed. Use ovirt.ovirt.ovirt_affinity_label_info instead (https://github.com/ansible-collections/community.general/pull/1924)." 
-- "The deprecated ovirt_api_facts module has been removed. Use ovirt.ovirt.ovirt_api_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_cluster_facts module has been removed. Use ovirt.ovirt.ovirt_cluster_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_datacenter_facts module has been removed. Use ovirt.ovirt.ovirt_datacenter_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_disk_facts module has been removed. Use ovirt.ovirt.ovirt_disk_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_event_facts module has been removed. Use ovirt.ovirt.ovirt_event_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_external_provider_facts module has been removed. Use ovirt.ovirt.ovirt_external_provider_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_group_facts module has been removed. Use ovirt.ovirt.ovirt_group_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_host_facts module has been removed. Use ovirt.ovirt.ovirt_host_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_host_storage_facts module has been removed. Use ovirt.ovirt.ovirt_host_storage_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_network_facts module has been removed. Use ovirt.ovirt.ovirt_network_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_nic_facts module has been removed. Use ovirt.ovirt.ovirt_nic_info instead (https://github.com/ansible-collections/community.general/pull/1924)." 
-- "The deprecated ovirt_permission_facts module has been removed. Use ovirt.ovirt.ovirt_permission_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_quota_facts module has been removed. Use ovirt.ovirt.ovirt_quota_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_scheduling_policy_facts module has been removed. Use ovirt.ovirt.ovirt_scheduling_policy_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_snapshot_facts module has been removed. Use ovirt.ovirt.ovirt_snapshot_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_storage_domain_facts module has been removed. Use ovirt.ovirt.ovirt_storage_domain_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_storage_template_facts module has been removed. Use ovirt.ovirt.ovirt_storage_template_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_storage_vm_facts module has been removed. Use ovirt.ovirt.ovirt_storage_vm_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_tag_facts module has been removed. Use ovirt.ovirt.ovirt_tag_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_template_facts module has been removed. Use ovirt.ovirt.ovirt_template_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_user_facts module has been removed. Use ovirt.ovirt.ovirt_user_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ovirt_vm_facts module has been removed. Use ovirt.ovirt.ovirt_vm_info instead (https://github.com/ansible-collections/community.general/pull/1924)." 
-- "The deprecated ovirt_vmpool_facts module has been removed. Use ovirt.ovirt.ovirt_vmpool_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The ovirt_facts docs fragment has been removed (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated gluster_heal_info module has been removed. Use gluster.gluster.gluster_heal_info instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated gluster_peer module has been removed. Use gluster.gluster.gluster_peer instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated gluster_volume module has been removed. Use gluster.gluster.gluster_volume instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated helm module has been removed. Use community.kubernetes.helm instead (https://github.com/ansible-collections/community.general/pull/1924)." -- "The deprecated ldap_attr module has been removed. Use ldap_attrs instead (https://github.com/ansible-collections/community.general/pull/1924)." diff --git a/changelogs/fragments/selective-core-2.11.yml b/changelogs/fragments/selective-core-2.11.yml deleted file mode 100644 index 994e555c7c..0000000000 --- a/changelogs/fragments/selective-core-2.11.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- "selective callback plugin - adjust import so that the plugin also works with ansible-core 2.11 (https://github.com/ansible-collections/community.general/pull/1807)." 
diff --git a/galaxy.yml b/galaxy.yml index 3676516625..a4b4cad7e0 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.0.0 +version: 3.1.0 readme: README.md authors: - Ansible (https://github.com/ansible) From cd116120ad5c735a79f8609778b8a6c2e84d69cb Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 26 Apr 2021 18:43:14 +0200 Subject: [PATCH 0242/3093] Run CI for old branches only once per week. --- .azure-pipelines/azure-pipelines.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 8c0804ab31..a479a33ba8 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -19,7 +19,14 @@ schedules: branches: include: - main - - stable-* + - stable-2 + - stable-3 + - cron: 0 8 * * 0 + displayName: Weekly (old branches) + always: true + branches: + include: + - stable-1 variables: - name: checkoutPath From 2ad004b97b750d31dc2900868598fafd31dc8d90 Mon Sep 17 00:00:00 2001 From: Alan Rominger Date: Mon, 26 Apr 2021 15:24:26 -0400 Subject: [PATCH 0243/3093] Make inventory scripts executable (#2337) * Make inventory scripts executable * Mark inventory scripts in vault folder as executable * Add changelog entry for making inventory scripts exectuable * Update changelogs/fragments/2337-mark-inventory-scripts-executable.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2337-mark-inventory-scripts-executable.yml | 3 +++ scripts/inventory/abiquo.py | 0 scripts/inventory/apache-libcloud.py | 0 scripts/inventory/apstra_aos.py | 0 scripts/inventory/azure_rm.py | 0 scripts/inventory/brook.py | 0 scripts/inventory/cloudforms.py | 0 scripts/inventory/cobbler.py | 0 scripts/inventory/collins.py | 0 scripts/inventory/consul_io.py | 0 scripts/inventory/docker.py | 0 scripts/inventory/fleet.py | 0 scripts/inventory/foreman.py | 0 scripts/inventory/freeipa.py | 0 
scripts/inventory/infoblox.py | 0 scripts/inventory/jail.py | 0 scripts/inventory/landscape.py | 0 scripts/inventory/linode.py | 0 scripts/inventory/lxc_inventory.py | 0 scripts/inventory/lxd.py | 0 scripts/inventory/mdt_dynamic_inventory.py | 0 scripts/inventory/nagios_livestatus.py | 0 scripts/inventory/nagios_ndo.py | 0 scripts/inventory/nsot.py | 0 scripts/inventory/openshift.py | 0 scripts/inventory/openvz.py | 0 scripts/inventory/ovirt.py | 0 scripts/inventory/ovirt4.py | 0 scripts/inventory/packet_net.py | 0 scripts/inventory/proxmox.py | 0 scripts/inventory/rackhd.py | 0 scripts/inventory/rax.py | 0 scripts/inventory/rudder.py | 0 scripts/inventory/scaleway.py | 0 scripts/inventory/serf.py | 0 scripts/inventory/softlayer.py | 0 scripts/inventory/spacewalk.py | 0 scripts/inventory/ssh_config.py | 0 scripts/inventory/stacki.py | 0 scripts/inventory/vagrant.py | 0 scripts/inventory/vbox.py | 0 scripts/inventory/zone.py | 0 scripts/vault/azure_vault.py | 0 scripts/vault/vault-keyring-client.py | 0 scripts/vault/vault-keyring.py | 0 45 files changed, 3 insertions(+) create mode 100644 changelogs/fragments/2337-mark-inventory-scripts-executable.yml mode change 100644 => 100755 scripts/inventory/abiquo.py mode change 100644 => 100755 scripts/inventory/apache-libcloud.py mode change 100644 => 100755 scripts/inventory/apstra_aos.py mode change 100644 => 100755 scripts/inventory/azure_rm.py mode change 100644 => 100755 scripts/inventory/brook.py mode change 100644 => 100755 scripts/inventory/cloudforms.py mode change 100644 => 100755 scripts/inventory/cobbler.py mode change 100644 => 100755 scripts/inventory/collins.py mode change 100644 => 100755 scripts/inventory/consul_io.py mode change 100644 => 100755 scripts/inventory/docker.py mode change 100644 => 100755 scripts/inventory/fleet.py mode change 100644 => 100755 scripts/inventory/foreman.py mode change 100644 => 100755 scripts/inventory/freeipa.py mode change 100644 => 100755 scripts/inventory/infoblox.py mode 
change 100644 => 100755 scripts/inventory/jail.py mode change 100644 => 100755 scripts/inventory/landscape.py mode change 100644 => 100755 scripts/inventory/linode.py mode change 100644 => 100755 scripts/inventory/lxc_inventory.py mode change 100644 => 100755 scripts/inventory/lxd.py mode change 100644 => 100755 scripts/inventory/mdt_dynamic_inventory.py mode change 100644 => 100755 scripts/inventory/nagios_livestatus.py mode change 100644 => 100755 scripts/inventory/nagios_ndo.py mode change 100644 => 100755 scripts/inventory/nsot.py mode change 100644 => 100755 scripts/inventory/openshift.py mode change 100644 => 100755 scripts/inventory/openvz.py mode change 100644 => 100755 scripts/inventory/ovirt.py mode change 100644 => 100755 scripts/inventory/ovirt4.py mode change 100644 => 100755 scripts/inventory/packet_net.py mode change 100644 => 100755 scripts/inventory/proxmox.py mode change 100644 => 100755 scripts/inventory/rackhd.py mode change 100644 => 100755 scripts/inventory/rax.py mode change 100644 => 100755 scripts/inventory/rudder.py mode change 100644 => 100755 scripts/inventory/scaleway.py mode change 100644 => 100755 scripts/inventory/serf.py mode change 100644 => 100755 scripts/inventory/softlayer.py mode change 100644 => 100755 scripts/inventory/spacewalk.py mode change 100644 => 100755 scripts/inventory/ssh_config.py mode change 100644 => 100755 scripts/inventory/stacki.py mode change 100644 => 100755 scripts/inventory/vagrant.py mode change 100644 => 100755 scripts/inventory/vbox.py mode change 100644 => 100755 scripts/inventory/zone.py mode change 100644 => 100755 scripts/vault/azure_vault.py mode change 100644 => 100755 scripts/vault/vault-keyring-client.py mode change 100644 => 100755 scripts/vault/vault-keyring.py diff --git a/changelogs/fragments/2337-mark-inventory-scripts-executable.yml b/changelogs/fragments/2337-mark-inventory-scripts-executable.yml new file mode 100644 index 0000000000..69aa3fff62 --- /dev/null +++ 
b/changelogs/fragments/2337-mark-inventory-scripts-executable.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - inventory and vault scripts - change file permissions to make vendored inventory and vault scripts executable (https://github.com/ansible-collections/community.general/pull/2337). diff --git a/scripts/inventory/abiquo.py b/scripts/inventory/abiquo.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/apache-libcloud.py b/scripts/inventory/apache-libcloud.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/apstra_aos.py b/scripts/inventory/apstra_aos.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/azure_rm.py b/scripts/inventory/azure_rm.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/brook.py b/scripts/inventory/brook.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/cloudforms.py b/scripts/inventory/cloudforms.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/cobbler.py b/scripts/inventory/cobbler.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/collins.py b/scripts/inventory/collins.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/consul_io.py b/scripts/inventory/consul_io.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/docker.py b/scripts/inventory/docker.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/fleet.py b/scripts/inventory/fleet.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/foreman.py b/scripts/inventory/foreman.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/freeipa.py b/scripts/inventory/freeipa.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/infoblox.py b/scripts/inventory/infoblox.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/jail.py b/scripts/inventory/jail.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/landscape.py b/scripts/inventory/landscape.py old mode 100644 new mode 100755 diff --git 
a/scripts/inventory/linode.py b/scripts/inventory/linode.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/lxc_inventory.py b/scripts/inventory/lxc_inventory.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/lxd.py b/scripts/inventory/lxd.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/mdt_dynamic_inventory.py b/scripts/inventory/mdt_dynamic_inventory.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/nagios_livestatus.py b/scripts/inventory/nagios_livestatus.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/nagios_ndo.py b/scripts/inventory/nagios_ndo.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/nsot.py b/scripts/inventory/nsot.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/openshift.py b/scripts/inventory/openshift.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/openvz.py b/scripts/inventory/openvz.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/ovirt.py b/scripts/inventory/ovirt.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/ovirt4.py b/scripts/inventory/ovirt4.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/packet_net.py b/scripts/inventory/packet_net.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/proxmox.py b/scripts/inventory/proxmox.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/rackhd.py b/scripts/inventory/rackhd.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/rax.py b/scripts/inventory/rax.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/rudder.py b/scripts/inventory/rudder.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/scaleway.py b/scripts/inventory/scaleway.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/serf.py b/scripts/inventory/serf.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/softlayer.py b/scripts/inventory/softlayer.py old 
mode 100644 new mode 100755 diff --git a/scripts/inventory/spacewalk.py b/scripts/inventory/spacewalk.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/ssh_config.py b/scripts/inventory/ssh_config.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/stacki.py b/scripts/inventory/stacki.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/vagrant.py b/scripts/inventory/vagrant.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/vbox.py b/scripts/inventory/vbox.py old mode 100644 new mode 100755 diff --git a/scripts/inventory/zone.py b/scripts/inventory/zone.py old mode 100644 new mode 100755 diff --git a/scripts/vault/azure_vault.py b/scripts/vault/azure_vault.py old mode 100644 new mode 100755 diff --git a/scripts/vault/vault-keyring-client.py b/scripts/vault/vault-keyring-client.py old mode 100644 new mode 100755 diff --git a/scripts/vault/vault-keyring.py b/scripts/vault/vault-keyring.py old mode 100644 new mode 100755 From 9d13acd68e3b9104e48ddddedc811603330d6b7b Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 27 Apr 2021 14:16:24 +0300 Subject: [PATCH 0244/3093] BOTMETA.yml: team_suse - add a maintainer (#2354) --- .github/BOTMETA.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 06501fc2aa..c14fb0d0e1 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -707,7 +707,8 @@ files: labels: zypper ignore: dirtyharrycallahan robinro $modules/packaging/os/zypper_repository.py: - maintainers: matze + maintainers: $team_suse matze + labels: zypper $modules/remote_management/cobbler/: maintainers: dagwieers $modules/remote_management/hpilo/: @@ -1025,5 +1026,5 @@ macros: team_rhn: FlossWare alikins barnabycourt vritant team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l - team_suse: commel dcermak evrardjp lrupp 
toabctl AnderEnder alxgu andytom + team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso From 48ef05def340588393075341c3b0b4e44f5fdab8 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 27 Apr 2021 23:18:29 +1200 Subject: [PATCH 0245/3093] spotinst_aws_elastigroup - fixed elements for many lists (#2355) * fixed elements for many lists * added changelog fragment * Removed verbose types in description - still missing formatting and properly documenting dicts --- ...spotinst_aws_elastigroup-list-elements.yml | 2 + .../spotinst/spotinst_aws_elastigroup.py | 165 ++++++++++-------- 2 files changed, 92 insertions(+), 75 deletions(-) create mode 100644 changelogs/fragments/2355-spotinst_aws_elastigroup-list-elements.yml diff --git a/changelogs/fragments/2355-spotinst_aws_elastigroup-list-elements.yml b/changelogs/fragments/2355-spotinst_aws_elastigroup-list-elements.yml new file mode 100644 index 0000000000..876b212690 --- /dev/null +++ b/changelogs/fragments/2355-spotinst_aws_elastigroup-list-elements.yml @@ -0,0 +1,2 @@ +minor_changes: + - spotinst_aws_elastigroup - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2355). diff --git a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py index 1a0ddb9fef..5ed8028e37 100644 --- a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py +++ b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py @@ -23,26 +23,26 @@ options: credentials_path: description: - - (Path) Optional parameter that allows to set a non-default credentials path. + - Optional parameter that allows to set a non-default credentials path. 
default: ~/.spotinst/credentials type: path account_id: description: - - (String) Optional parameter that allows to set an account-id inside the module configuration - By default this is retrieved from the credentials path + - Optional parameter that allows to set an account-id inside the module configuration. + By default this is retrieved from the credentials path. type: str availability_vs_cost: description: - - (String) The strategy orientation. + - The strategy orientation. - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)." required: true type: str availability_zones: description: - - (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup; + - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are name (String), @@ -50,10 +50,11 @@ options: placement_group_name (String), required: true type: list + elements: dict block_device_mappings: description: - - (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances; + - A list of hash/dictionaries of Block Device Mappings for elastigroup instances; You can specify virtual devices and EBS volumes.; '[{"key":"value", "key":"value"}]'; keys allowed are @@ -68,10 +69,11 @@ options: volume_type(String), volume_size(Integer)) type: list + elements: dict chef: description: - - (Object) The Chef integration configuration.; + - The Chef integration configuration.; Expects the following keys - chef_server (String), organization (String), user (String), @@ -81,92 +83,94 @@ options: draining_timeout: description: - - (Integer) Time for instance to be drained from incoming requests and deregistered from ELB before termination. + - Time for instance to be drained from incoming requests and deregistered from ELB before termination. 
type: int ebs_optimized: description: - - (Boolean) Enable EBS optimization for supported instances which are not enabled by default.; + - Enable EBS optimization for supported instances which are not enabled by default.; Note - additional charges will be applied. type: bool ebs_volume_pool: description: - - (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; + - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available; '[{"key":"value", "key":"value"}]'; keys allowed are - volume_ids (List of Strings), device_name (String) type: list + elements: dict ecs: description: - - (Object) The ECS integration configuration.; + - The ECS integration configuration.; Expects the following key - cluster_name (String) type: dict elastic_ips: description: - - (List of Strings) List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances + - List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances type: list + elements: str fallback_to_od: description: - - (Boolean) In case of no spots available, Elastigroup will launch an On-demand instance instead + - In case of no spots available, Elastigroup will launch an On-demand instance instead type: bool health_check_grace_period: description: - - (Integer) The amount of time, in seconds, after the instance has launched to start and check its health. + - The amount of time, in seconds, after the instance has launched to start and check its health. - If not specified, it defaults to C(300). type: int health_check_unhealthy_duration_before_replacement: description: - - (Integer) Minimal mount of time instance should be unhealthy for us to consider it unhealthy. + - Minimal mount of time instance should be unhealthy for us to consider it unhealthy. type: int health_check_type: description: - - (String) The service to use for the health check. 
+ - The service to use for the health check. - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)." type: str iam_role_name: description: - - (String) The instance profile iamRole name + - The instance profile iamRole name - Only use iam_role_arn, or iam_role_name type: str iam_role_arn: description: - - (String) The instance profile iamRole arn + - The instance profile iamRole arn - Only use iam_role_arn, or iam_role_name type: str id: description: - - (String) The group id if it already exists and you want to update, or delete it. + - The group id if it already exists and you want to update, or delete it. This will not work unless the uniqueness_by field is set to id. When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created. type: str image_id: description: - - (String) The image Id used to launch the instance.; + - The image Id used to launch the instance.; In case of conflict between Instance type and image type, an error will be returned required: true type: str key_pair: description: - - (String) Specify a Key Pair to attach to the instances + - Specify a Key Pair to attach to the instances type: str kubernetes: description: - - (Object) The Kubernetes integration configuration. + - The Kubernetes integration configuration. Expects the following keys - api_server (String), token (String) @@ -174,47 +178,48 @@ options: lifetime_period: description: - - (Integer) lifetime period + - Lifetime period type: int load_balancers: description: - - (List of Strings) List of classic ELB names + - List of classic ELB names type: list + elements: str max_size: description: - - (Integer) The upper limit number of instances that you can scale up to + - The upper limit number of instances that you can scale up to required: true type: int mesosphere: description: - - (Object) The Mesosphere integration configuration. + - The Mesosphere integration configuration. 
Expects the following key - api_server (String) type: dict min_size: description: - - (Integer) The lower limit number of instances that you can scale down to + - The lower limit number of instances that you can scale down to required: true type: int monitoring: description: - - (String) Describes whether instance Enhanced Monitoring is enabled + - Describes whether instance Enhanced Monitoring is enabled type: str name: description: - - (String) Unique name for elastigroup to be created, updated or deleted + - Unique name for elastigroup to be created, updated or deleted required: true type: str network_interfaces: description: - - (List of Objects) a list of hash/dictionaries of network interfaces to add to the elastigroup; + - A list of hash/dictionaries of network interfaces to add to the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - description (String), @@ -229,29 +234,30 @@ options: associate_ipv6_address (Boolean), private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean)) type: list + elements: dict on_demand_count: description: - - (Integer) Required if risk is not set + - Required if risk is not set - Number of on demand instances to launch. 
All other instances will be spot instances.; Either set this parameter or the risk parameter type: int on_demand_instance_type: description: - - (String) On-demand instance type that will be provisioned + - On-demand instance type that will be provisioned type: str opsworks: description: - - (Object) The elastigroup OpsWorks integration configration.; + - The elastigroup OpsWorks integration configration.; Expects the following key - layer_id (String) type: dict persistence: description: - - (Object) The Stateful elastigroup configration.; + - The Stateful elastigroup configration.; Accepts the following keys - should_persist_root_device (Boolean), should_persist_block_devices (Boolean), @@ -260,14 +266,14 @@ options: product: description: - - (String) Operation system type. + - Operation system type. - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))." required: true type: str rancher: description: - - (Object) The Rancher integration configuration.; + - The Rancher integration configuration.; Expects the following keys - version (String), access_key (String), @@ -277,7 +283,7 @@ options: right_scale: description: - - (Object) The Rightscale integration configuration.; + - The Rightscale integration configuration.; Expects the following keys - account_id (String), refresh_token (String) @@ -285,12 +291,12 @@ options: risk: description: - - (Integer) required if on demand is not set. The percentage of Spot instances to launch (0 - 100). + - Required if on demand is not set. The percentage of Spot instances to launch (0 - 100). type: int roll_config: description: - - (Object) Roll configuration.; + - Roll configuration.; If you would like the group to roll after updating, please use this feature. 
Accepts the following keys - batch_size_percentage(Integer, Required), @@ -300,7 +306,7 @@ options: scheduled_tasks: description: - - (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup; + - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - adjustment (Integer), @@ -315,84 +321,90 @@ options: task_type (String, required), is_enabled (Boolean) type: list + elements: dict security_group_ids: description: - - (List of Strings) One or more security group IDs. ; + - One or more security group IDs. ; In case of update it will override the existing Security Group with the new given array required: true type: list + elements: str shutdown_script: description: - - (String) The Base64-encoded shutdown script that executes prior to instance termination. + - The Base64-encoded shutdown script that executes prior to instance termination. Encode before setting. type: str signals: description: - - (List of Objects) a list of hash/dictionaries of signals to configure in the elastigroup; + - A list of hash/dictionaries of signals to configure in the elastigroup; keys allowed are - name (String, required), timeout (Integer) type: list + elements: dict spin_up_time: description: - - (Integer) spin up time, in seconds, for the instance + - Spin up time, in seconds, for the instance type: int spot_instance_types: description: - - (List of Strings) Spot instance type that will be provisioned. + - Spot instance type that will be provisioned. required: true type: list + elements: str state: choices: - present - absent description: - - (String) create or delete the elastigroup + - Create or delete the elastigroup default: present type: str tags: description: - - (List of tagKey:tagValue pairs) a list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value); + - A list of tags to configure in the elastigroup. 
Please specify list of keys and values (key colon value); type: list + elements: dict target: description: - - (Integer) The number of instances to launch + - The number of instances to launch required: true type: int target_group_arns: description: - - (List of Strings) List of target group arns instances should be registered to + - List of target group arns instances should be registered to type: list + elements: str tenancy: description: - - (String) dedicated vs shared tenancy. + - Dedicated vs shared tenancy. - "The available choices are: C(default), C(dedicated)." type: str terminate_at_end_of_billing_hour: description: - - (Boolean) terminate at the end of billing hour + - Terminate at the end of billing hour type: bool unit: description: - - (String) The capacity unit to launch instances by. + - The capacity unit to launch instances by. - "The available choices are: C(instance), C(weight)." type: str up_scaling_policies: description: - - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup; + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - policy_name (String, required), @@ -413,10 +425,11 @@ options: maximum (String), minimum (String) type: list + elements: dict down_scaling_policies: description: - - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup; + - A list of hash/dictionaries of scaling policies to configure in the elastigroup; '[{"key":"value", "key":"value"}]'; keys allowed are - policy_name (String, required), @@ -437,10 +450,11 @@ options: maximum (String), minimum (String) type: list + elements: dict target_tracking_policies: description: - - (List of Objects) a list of hash/dictionaries of target tracking policies to configure in the elastigroup; + - A list of hash/dictionaries of target tracking policies to configure in the elastigroup; '[{"key":"value", 
"key":"value"}]'; keys allowed are - policy_name (String, required), @@ -452,37 +466,38 @@ options: cooldown (String, required), target (String, required) type: list + elements: dict uniqueness_by: choices: - id - name description: - - (String) If your group names are not unique, you may use this feature to update or delete a specific group. + - If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created. default: name type: str user_data: description: - - (String) Base64-encoded MIME user data. Encode before setting the value. + - Base64-encoded MIME user data. Encode before setting the value. type: str utilize_reserved_instances: description: - - (Boolean) In case of any available Reserved Instances, + - In case of any available Reserved Instances, Elastigroup will utilize your reservations before purchasing Spot instances. type: bool wait_for_instances: description: - - (Boolean) Whether or not the elastigroup creation / update actions should wait for the instances to spin + - Whether or not the elastigroup creation / update actions should wait for the instances to spin type: bool default: false wait_timeout: description: - - (Integer) How long the module should wait for instances before failing the action.; + - How long the module should wait for instances before failing the action.; Only works if wait_for_instances is True. 
type: int @@ -1428,18 +1443,18 @@ def main(): fields = dict( account_id=dict(type='str'), availability_vs_cost=dict(type='str', required=True), - availability_zones=dict(type='list', required=True), - block_device_mappings=dict(type='list'), + availability_zones=dict(type='list', elements='dict', required=True), + block_device_mappings=dict(type='list', elements='dict'), chef=dict(type='dict'), credentials_path=dict(type='path', default="~/.spotinst/credentials"), do_not_update=dict(default=[], type='list'), - down_scaling_policies=dict(type='list'), + down_scaling_policies=dict(type='list', elements='dict'), draining_timeout=dict(type='int'), ebs_optimized=dict(type='bool'), - ebs_volume_pool=dict(type='list'), + ebs_volume_pool=dict(type='list', elements='dict'), ecs=dict(type='dict'), elastic_beanstalk=dict(type='dict'), - elastic_ips=dict(type='list'), + elastic_ips=dict(type='list', elements='str'), fallback_to_od=dict(type='bool'), id=dict(type='str'), health_check_grace_period=dict(type='int'), @@ -1451,7 +1466,7 @@ def main(): key_pair=dict(type='str', no_log=False), kubernetes=dict(type='dict'), lifetime_period=dict(type='int'), - load_balancers=dict(type='list'), + load_balancers=dict(type='list', elements='str'), max_size=dict(type='int', required=True), mesosphere=dict(type='dict'), min_size=dict(type='int', required=True), @@ -1459,7 +1474,7 @@ def main(): multai_load_balancers=dict(type='list'), multai_token=dict(type='str', no_log=True), name=dict(type='str', required=True), - network_interfaces=dict(type='list'), + network_interfaces=dict(type='list', elements='dict'), on_demand_count=dict(type='int'), on_demand_instance_type=dict(type='str'), opsworks=dict(type='dict'), @@ -1469,16 +1484,16 @@ def main(): right_scale=dict(type='dict'), risk=dict(type='int'), roll_config=dict(type='dict'), - scheduled_tasks=dict(type='list'), - security_group_ids=dict(type='list', required=True), + scheduled_tasks=dict(type='list', elements='dict'), + 
security_group_ids=dict(type='list', elements='str', required=True), shutdown_script=dict(type='str'), - signals=dict(type='list'), + signals=dict(type='list', elements='dict'), spin_up_time=dict(type='int'), - spot_instance_types=dict(type='list', required=True), + spot_instance_types=dict(type='list', elements='str', required=True), state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='list'), + tags=dict(type='list', elements='dict'), target=dict(type='int', required=True), - target_group_arns=dict(type='list'), + target_group_arns=dict(type='list', elements='str'), tenancy=dict(type='str'), terminate_at_end_of_billing_hour=dict(type='bool'), token=dict(type='str', no_log=True), @@ -1486,8 +1501,8 @@ def main(): user_data=dict(type='str'), utilize_reserved_instances=dict(type='bool'), uniqueness_by=dict(default='name', choices=['name', 'id']), - up_scaling_policies=dict(type='list'), - target_tracking_policies=dict(type='list'), + up_scaling_policies=dict(type='list', elements='dict'), + target_tracking_policies=dict(type='list', elements='dict'), wait_for_instances=dict(type='bool', default=False), wait_timeout=dict(type='int') ) From 77d4bc29421a78b479c6e826ca7d68f00fa82f57 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 27 Apr 2021 22:13:40 +0200 Subject: [PATCH 0246/3093] No longer required for devel's ansible-test. 
(#2365) ci_complete --- tests/sanity/ignore-2.12.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 80975cf389..68684f000d 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -69,7 +69,5 @@ plugins/modules/system/ssh_config.py use-argspec-type-path # Required since modu plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path -tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code -tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code tests/utils/shippable/check_matrix.py replace-urlopen tests/utils/shippable/timing.py shebang From b3f436aa6325c34e1824ccc39e8446969b70ab95 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 30 Apr 2021 04:28:43 +0200 Subject: [PATCH 0247/3093] Use Ansible's codecov uploader. 
(#2377) --- .azure-pipelines/scripts/publish-codecov.sh | 2 +- tests/utils/shippable/shippable.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/scripts/publish-codecov.sh b/.azure-pipelines/scripts/publish-codecov.sh index 7aeabda0c0..6d184f0b8d 100755 --- a/.azure-pipelines/scripts/publish-codecov.sh +++ b/.azure-pipelines/scripts/publish-codecov.sh @@ -7,7 +7,7 @@ set -o pipefail -eu output_path="$1" -curl --silent --show-error https://codecov.io/bash > codecov.sh +curl --silent --show-error https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh > codecov.sh for file in "${output_path}"/reports/coverage*.xml; do name="${file}" diff --git a/tests/utils/shippable/shippable.sh b/tests/utils/shippable/shippable.sh index f239e86975..f70aa11380 100755 --- a/tests/utils/shippable/shippable.sh +++ b/tests/utils/shippable/shippable.sh @@ -181,7 +181,7 @@ function cleanup flags="${flags//=/,}" flags="${flags//[^a-zA-Z0-9_,]/_}" - bash <(curl -s https://codecov.io/bash) \ + bash <(curl -s https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh) \ -f "${file}" \ -F "${flags}" \ -n "${test}" \ From 5fbe946c3a68557e15ac4f0acf4508ced250abde Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 30 Apr 2021 22:13:46 +0200 Subject: [PATCH 0248/3093] Spread nightly runs out. 
(#2387) --- .azure-pipelines/azure-pipelines.yml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index a479a33ba8..d4153d9796 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -14,15 +14,20 @@ pr: schedules: - cron: 0 8 * * * - displayName: Nightly + displayName: Nightly (main) always: true branches: include: - main + - cron: 0 10 * * * + displayName: Nightly (active stable branches) + always: true + branches: + include: - stable-2 - stable-3 - - cron: 0 8 * * 0 - displayName: Weekly (old branches) + - cron: 0 11 * * 0 + displayName: Weekly (old stable branches) always: true branches: include: From ae21af882075948f9e427919776e5c155b263ea2 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Sat, 1 May 2021 02:19:33 +0430 Subject: [PATCH 0249/3093] Add Fedora 34 to CI (#2384) * Add fedora 34 and fix typo * Remove Fedora 32 from devel testing * Use one newer version of Fedora for fixed ansible versions * Revert "Use one newer version of Fedora for fixed ansible versions" This reverts commit cbd006bd385865905c18b87655bd98b0610d4abc. * Try to skip task. * Revert "Try to skip task." This reverts commit ff0c899a8650e78967a1933b93fd8015695a6a61. 
* Temporary disable Fedora 34 on setup_postgresql_db Co-authored-by: Felix Fontein --- .azure-pipelines/azure-pipelines.yml | 6 +++--- .../integration/targets/setup_postgresql_db/tasks/main.yml | 4 ++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index d4153d9796..8d1b81865e 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -268,10 +268,10 @@ stages: test: centos7 - name: CentOS 8 test: centos8 - - name: Fedora 32 - test: fedora32 - name: Fedora 33 test: fedora33 + - name: Fedora 34 + test: fedora34 - name: openSUSE 15 py2 test: opensuse15py2 - name: openSUSE 15 py3 @@ -294,7 +294,7 @@ stages: targets: - name: CentOS 8 test: centos8 - - name: Fedora 32 + - name: Fedora 33 test: fedora33 - name: openSUSE 15 py3 test: opensuse15 diff --git a/tests/integration/targets/setup_postgresql_db/tasks/main.yml b/tests/integration/targets/setup_postgresql_db/tasks/main.yml index 2322ee2cbf..f535ecdcf9 100644 --- a/tests/integration/targets/setup_postgresql_db/tasks/main.yml +++ b/tests/integration/targets/setup_postgresql_db/tasks/main.yml @@ -11,6 +11,10 @@ - meta: end_play when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8' +# Temporary disable Fedora 34 +- meta: end_play + when: ansible_facts.distribution == 'Fedora' and ansible_facts.distribution_major_version == '34' + - name: python 2 set_fact: python_suffix: '' From 276880aac1a4df7cd9cfbea983c3e743de0a1bbf Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 1 May 2021 09:51:35 +0200 Subject: [PATCH 0250/3093] Remove resmo as composer maintainer. 
(#2392) --- .github/BOTMETA.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index c14fb0d0e1..6fcfdff4c1 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -560,7 +560,8 @@ files: $modules/packaging/language/bundler.py: maintainers: thoiberg $modules/packaging/language/composer.py: - maintainers: dmtrs resmo + maintainers: dmtrs + ignore: resmo $modules/packaging/language/cpanm.py: maintainers: fcuny russoz $modules/packaging/language/easy_install.py: From 26c3bd25f676b67a89a625e13991ab41394f5098 Mon Sep 17 00:00:00 2001 From: Xabier Napal Date: Sat, 1 May 2021 14:19:05 +0200 Subject: [PATCH 0251/3093] influxdb_retention_policy: fix duration parsing to support INF values (#2396) * influxdb_retention_policy: fix duration parsing to support INF values * add changelog --- .../2284-influxdb_retention_policy-fix_duration_parsing.yml | 3 +++ .../modules/database/influxdb/influxdb_retention_policy.py | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2284-influxdb_retention_policy-fix_duration_parsing.yml diff --git a/changelogs/fragments/2284-influxdb_retention_policy-fix_duration_parsing.yml b/changelogs/fragments/2284-influxdb_retention_policy-fix_duration_parsing.yml new file mode 100644 index 0000000000..04c82480c1 --- /dev/null +++ b/changelogs/fragments/2284-influxdb_retention_policy-fix_duration_parsing.yml @@ -0,0 +1,3 @@ +bugfixes: + - influxdb_retention_policy - fix bug where ``INF`` duration values failed parsing + (https://github.com/ansible-collections/community.general/pull/2385). 
diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py b/plugins/modules/database/influxdb/influxdb_retention_policy.py index 2c2f9674b7..883adaffa6 100644 --- a/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ b/plugins/modules/database/influxdb/influxdb_retention_policy.py @@ -129,7 +129,7 @@ from ansible_collections.community.general.plugins.module_utils.influxdb import from ansible.module_utils._text import to_native -VALID_DURATION_REGEX = re.compile(r'^(\d+(ns|u|µ|ms|s|m|h|d|w))+$') +VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$') DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)') EXTENDED_DURATION_REGEX = re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))') @@ -217,7 +217,7 @@ def create_retention_policy(module, client): influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) if influxdb_shard_group_duration_format < 3600000000000: - module.fail_json(msg="shard_group_duration value must be at least 1h") + module.fail_json(msg="shard_group_duration value must be finite and at least 1h") if not module.check_mode: try: @@ -256,7 +256,7 @@ def alter_retention_policy(module, client, retention_policy): influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) if influxdb_shard_group_duration_format < 3600000000000: - module.fail_json(msg="shard_group_duration value must be at least 1h") + module.fail_json(msg="shard_group_duration value must be finite and at least 1h") if (retention_policy['duration'] != influxdb_duration_format or retention_policy['shardGroupDuration'] != influxdb_shard_group_duration_format or From eb455c69a2c7f7ec28f6162e0c5a34f0bc7932e3 Mon Sep 17 00:00:00 2001 From: George Angelopoulos Date: Sat, 1 May 2021 19:23:14 +0300 Subject: [PATCH 0252/3093] composer: --no-interaction when discovering available options (#2348) The composer module always uses the no-interaction option if it discovers it _after_ calling 
"composer help ..." but not on the help call itself. The lack of this option caused composer to not exit when called through the ansible module. The same example command when ran interactively does not prompt for user interaction and exits immediately. It is therefore currently unknown why the same command hangs when called through the ansible composer module or even directly with the command module. Example command which hangs: php /usr/local/bin/composer help install --format=json --- ...-composer-no-interaction-option-discovery-to-avoid-hang.yaml | 2 ++ plugins/modules/packaging/language/composer.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml diff --git a/changelogs/fragments/2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml b/changelogs/fragments/2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml new file mode 100644 index 0000000000..0728aeb28b --- /dev/null +++ b/changelogs/fragments/2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml @@ -0,0 +1,2 @@ +bugfixes: + - composer - use ``no-interaction`` option when discovering available options to avoid an issue where composer hangs (https://github.com/ansible-collections/community.general/pull/2348). 
diff --git a/plugins/modules/packaging/language/composer.py b/plugins/modules/packaging/language/composer.py index c792098b04..64157cb685 100644 --- a/plugins/modules/packaging/language/composer.py +++ b/plugins/modules/packaging/language/composer.py @@ -169,7 +169,7 @@ def has_changed(string): def get_available_options(module, command='install'): # get all available options from a composer command using composer help to json - rc, out, err = composer_command(module, "help %s --format=json" % command) + rc, out, err = composer_command(module, "help %s" % command, arguments="--no-interaction --format=json") if rc != 0: output = parse_out(err) module.fail_json(msg=output) From 4e90ee752ed8d9586bb0df69511b0d040a0946ff Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 1 May 2021 22:21:17 +0200 Subject: [PATCH 0253/3093] Add ansible-test config file. (#2404) --- tests/config.yml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 tests/config.yml diff --git a/tests/config.yml b/tests/config.yml new file mode 100644 index 0000000000..ba0238e305 --- /dev/null +++ b/tests/config.yml @@ -0,0 +1,5 @@ +--- +# See template for more information: +# https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/config/config.yml +modules: + python_requires: default From c0221b75afffda7b55852d6510e04bb7f13f292d Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Sun, 2 May 2021 12:28:27 +0300 Subject: [PATCH 0254/3093] BOTMETA.yml: terraform - add a new maintainer (#2290) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 6fcfdff4c1..fd23d0c9e4 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -225,7 +225,7 @@ files: $modules/cloud/misc/: ignore: ryansb $modules/cloud/misc/terraform.py: - maintainers: m-yosefpor + maintainers: m-yosefpor rainerleber $modules/cloud/misc/xenserver_facts.py: maintainers: caphrim007 cheese labels: xenserver_facts From 
cd957fae4cbee051c16e899c2d06a54e227c14fc Mon Sep 17 00:00:00 2001 From: Daniel Werner Date: Mon, 3 May 2021 07:25:08 +0200 Subject: [PATCH 0255/3093] Fix #2373 - TypeError: a bytes-like object is required, not 'str' (#2375) * Fix #2373 * Changelog fragment for #2373 * Update changelogs/fragments/2373-svr4pkg-fix-typeerror.yml Co-authored-by: Amin Vakil * Update changelogs/fragments/2373-svr4pkg-fix-typeerror.yml Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein --- changelogs/fragments/2373-svr4pkg-fix-typeerror.yml | 3 +++ plugins/modules/packaging/os/svr4pkg.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2373-svr4pkg-fix-typeerror.yml diff --git a/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml b/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml new file mode 100644 index 0000000000..d0b3580889 --- /dev/null +++ b/changelogs/fragments/2373-svr4pkg-fix-typeerror.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - svr4pkg - convert string to a bytes-like object to avoid ``TypeError`` with Python 3 (https://github.com/ansible-collections/community.general/issues/2373). 
diff --git a/plugins/modules/packaging/os/svr4pkg.py b/plugins/modules/packaging/os/svr4pkg.py index ea3cd7d468..aa7a5c2e52 100644 --- a/plugins/modules/packaging/os/svr4pkg.py +++ b/plugins/modules/packaging/os/svr4pkg.py @@ -121,7 +121,7 @@ def package_installed(module, name, category): def create_admin_file(): (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True) - fullauto = ''' + fullauto = b''' mail= instance=unique partial=nocheck From 26aba8e76687fb36688540194bec08a82c8b3070 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Mon, 3 May 2021 09:56:47 +0430 Subject: [PATCH 0256/3093] puppet - replace stdout with console in logdest option (#2407) * Change stdout to console * readd stdout, resulting in console * add changelog * readd stdout to docs and add a warning when it is used * version of what??? Co-authored-by: Felix Fontein * postpone deprecation in another PR * remove console option, so it can be backported * change changelog respectively * Fix changelog formatting Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2407-puppet-change_stdout_to_console.yaml | 3 +++ plugins/modules/system/puppet.py | 7 ++++--- 2 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2407-puppet-change_stdout_to_console.yaml diff --git a/changelogs/fragments/2407-puppet-change_stdout_to_console.yaml b/changelogs/fragments/2407-puppet-change_stdout_to_console.yaml new file mode 100644 index 0000000000..697b8e78d7 --- /dev/null +++ b/changelogs/fragments/2407-puppet-change_stdout_to_console.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - puppet - replace ``console` with ``stdout`` in ``logdest`` option when ``all`` has been chosen (https://github.com/ansible-collections/community.general/issues/1190). 
diff --git a/plugins/modules/system/puppet.py b/plugins/modules/system/puppet.py index 309da290d0..b83ef89aa5 100644 --- a/plugins/modules/system/puppet.py +++ b/plugins/modules/system/puppet.py @@ -54,7 +54,8 @@ options: logdest: description: - Where the puppet logs should go, if puppet apply is being used. - - C(all) will go to both C(stdout) and C(syslog). + - C(all) will go to both C(console) and C(syslog). + - C(stdout) will be deprecated and replaced by C(console). type: str choices: [ all, stdout, syslog ] default: stdout @@ -127,7 +128,7 @@ EXAMPLES = r''' community.general.puppet: noop: yes -- name: Run a manifest with debug, log to both syslog and stdout, specify module path +- name: Run a manifest with debug, log to both syslog and console, specify module path community.general.puppet: modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules logdest: all @@ -269,7 +270,7 @@ def main(): if p['logdest'] == 'syslog': cmd += "--logdest syslog " if p['logdest'] == 'all': - cmd += " --logdest syslog --logdest stdout" + cmd += " --logdest syslog --logdest console" if p['modulepath']: cmd += "--modulepath='%s'" % p['modulepath'] if p['environment']: From b5f8ae43204748c7f9e1719296ef81931c56c296 Mon Sep 17 00:00:00 2001 From: spike77453 Date: Mon, 3 May 2021 07:27:56 +0200 Subject: [PATCH 0257/3093] nmcli: Add 'slave-type bridge' to nmcli command if type is bridge-slave (#2409) --- ...ave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml | 2 ++ plugins/modules/net_tools/nmcli.py | 1 + tests/unit/plugins/modules/net_tools/test_nmcli.py | 1 + 3 files changed, 4 insertions(+) create mode 100644 changelogs/fragments/2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml diff --git a/changelogs/fragments/2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml b/changelogs/fragments/2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml new file mode 100644 
index 0000000000..8d0b4c1617 --- /dev/null +++ b/changelogs/fragments/2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml @@ -0,0 +1,2 @@ +bugfixes: + - nmcli - if type is ``bridge-slave`` add ``slave-type bridge`` to ``nmcli`` command (https://github.com/ansible-collections/community.general/issues/2408). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 4ae5a1dac9..02fbbd038b 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -780,6 +780,7 @@ class Nmcli(object): }) elif self.type == 'bridge-slave': options.update({ + 'connection.slave-type': 'bridge', 'bridge-port.path-cost': self.path_cost, 'bridge-port.hairpin-mode': self.hairpin, 'bridge-port.priority': self.slavepriority, diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 8d830bcf19..a05c8ccbf8 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -223,6 +223,7 @@ TESTCASE_BRIDGE_SLAVE_SHOW_OUTPUT = """\ connection.id: non_existent_nw_device connection.interface-name: br0_non_existant connection.autoconnect: yes +connection.slave-type: bridge ipv4.never-default: no bridge-port.path-cost: 100 bridge-port.hairpin-mode: yes From 7359b1fbe57f75619d09d759ceba7cf124c5f0b5 Mon Sep 17 00:00:00 2001 From: spike77453 Date: Mon, 3 May 2021 07:28:53 +0200 Subject: [PATCH 0258/3093] nmcli: Compare MAC addresses case insensitively (#2416) * nmcli: Compare MAC addresses case insensitively * Update changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Add mac to TESTCASE_BRIDGE so test_bridge_connection_unchanged covers case sensitive mac address comparison * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Convert 
current_value to uppercase as well in case nmcli changes behaviour Co-authored-by: Felix Fontein --- .../2416-nmcli_compare_mac_addresses_case_insensitively.yml | 2 ++ plugins/modules/net_tools/nmcli.py | 6 +++++- tests/unit/plugins/modules/net_tools/test_nmcli.py | 2 ++ 3 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml diff --git a/changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml b/changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml new file mode 100644 index 0000000000..6694638964 --- /dev/null +++ b/changelogs/fragments/2416-nmcli_compare_mac_addresses_case_insensitively.yml @@ -0,0 +1,2 @@ +bugfixes: + - nmcli - compare MAC addresses case insensitively to fix idempotency issue (https://github.com/ansible-collections/community.general/issues/2409). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 02fbbd038b..e2ed4ad572 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -1042,7 +1042,6 @@ class Nmcli(object): 'con-name': 'connection.id', 'autoconnect': 'connection.autoconnect', 'ifname': 'connection.interface-name', - 'mac': self.mac_setting, 'master': 'connection.master', 'slave-type': 'connection.slave-type', 'zone': 'connection.zone', @@ -1066,6 +1065,11 @@ class Nmcli(object): current_value = [re.sub(r'^{\s*ip\s*=\s*([^, ]+),\s*nh\s*=\s*([^} ]+),\s*mt\s*=\s*([^} ]+)\s*}', r'\1 \2 \3', route) for route in current_value] current_value = [re.sub(r'^{\s*ip\s*=\s*([^, ]+),\s*nh\s*=\s*([^} ]+)\s*}', r'\1 \2', route) for route in current_value] + if key == self.mac_setting: + # MAC addresses are case insensitive, nmcli always reports them in uppercase + value = value.upper() + # ensure current_value is also converted to uppercase in case nmcli changes behaviour + current_value = current_value.upper() elif key in param_alias: real_key = 
param_alias[key] if real_key in conn_info: diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index a05c8ccbf8..dceb5e5f3f 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -184,6 +184,7 @@ TESTCASE_BRIDGE = [ 'ifname': 'br0_non_existant', 'ip4': '10.10.10.10/24', 'gw4': '10.10.10.1', + 'mac': '52:54:00:ab:cd:ef', 'maxage': 100, 'stp': True, 'state': 'present', @@ -200,6 +201,7 @@ ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 ipv4.never-default: no ipv6.method: auto +bridge.mac-address: 52:54:00:AB:CD:EF bridge.stp: yes bridge.max-age: 100 bridge.ageing-time: 300 From 4b0d2dcfe04591d67c5cdf82599ac33156c221ce Mon Sep 17 00:00:00 2001 From: Daniel-Sanchez-Fabregas <33929811+Daniel-Sanchez-Fabregas@users.noreply.github.com> Date: Mon, 3 May 2021 07:42:58 +0200 Subject: [PATCH 0259/3093] =?UTF-8?q?=F0=9F=93=9D=20Document=20nested=20no?= =?UTF-8?q?de=20addition=20with=20"=5F"=20in=20xml=20module=20(#2371)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 📝 Document nested node addition with "_" in xml module Nested node addition using "_" to indicate sub nodes, and attributes are only documented in tests and issues, where is hard to find. * 🚨 Fix trailing space * Apply suggestions from code review Add missing collection prefix for modules. 
Co-authored-by: Felix Fontein * Add missing comments * Update xml.py * Fix linter warnings Co-authored-by: Felix Fontein --- plugins/modules/files/xml.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/modules/files/xml.py b/plugins/modules/files/xml.py index df3562df8c..f93c8e4dc4 100644 --- a/plugins/modules/files/xml.py +++ b/plugins/modules/files/xml.py @@ -285,6 +285,22 @@ EXAMPLES = r''' z: http://z.test attribute: z:my_namespaced_attribute value: 'false' + +- name: Adding building nodes with floor subnodes from a YAML variable + community.general.xml: + path: /foo/bar.xml + xpath: /business + add_children: + - building: + # Attributes + name: Scumm bar + location: Monkey island + # Subnodes + _: + - floor: Pirate hall + - floor: Grog storage + - construction_date: "1990" # Only strings are valid + - building: Grog factory ''' RETURN = r''' From 6a72c3b3385a739d049d23a24d5a5f186962d606 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 3 May 2021 13:22:11 +0200 Subject: [PATCH 0260/3093] Make plugins pass validation. (#2414) --- plugins/become/sudosu.py | 2 +- plugins/callback/loganalytics.py | 2 +- plugins/inventory/lxd.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py index e9668e6522..410b881b96 100644 --- a/plugins/become/sudosu.py +++ b/plugins/become/sudosu.py @@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ - become: sudosu + name: sudosu short_description: Run tasks using sudo su - description: - This become plugins allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined. 
diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index 507d6fccd9..ef1ea02f87 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' - callback: loganalytics + name: loganalytics type: aggregate short_description: Posts task results to Azure Log Analytics author: "Cyrus Li (@zhcli) " diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index c48818d595..d1e47b0505 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -6,7 +6,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = r''' - name: community.general.lxd + name: lxd short_description: Returns Ansible inventory from lxd host description: - Get inventory from the lxd. @@ -68,7 +68,7 @@ DOCUMENTATION = r''' description: - Create groups by the following keywords C(location), C(pattern), C(network_range), C(os), C(release), C(profile), C(vlanid). - See example for syntax. 
- type: json + type: dict ''' EXAMPLES = ''' From 5064aa8ec6d967e9c6867af9fdeb4496377a2e4d Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 3 May 2021 23:27:16 +1200 Subject: [PATCH 0261/3093] linode_v4 - fixed error message (#2430) * fixed error message * added changelog fragment --- changelogs/fragments/2430-linodev4-error-message.yml | 2 ++ plugins/modules/cloud/linode/linode_v4.py | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2430-linodev4-error-message.yml diff --git a/changelogs/fragments/2430-linodev4-error-message.yml b/changelogs/fragments/2430-linodev4-error-message.yml new file mode 100644 index 0000000000..3dbfda1b9c --- /dev/null +++ b/changelogs/fragments/2430-linodev4-error-message.yml @@ -0,0 +1,2 @@ +bugfixes: + - linode_v4 - changed the error message to point to the correct bugtracker URL (https://github.com/ansible-collections/community.general/pull/2430). diff --git a/plugins/modules/cloud/linode/linode_v4.py b/plugins/modules/cloud/linode/linode_v4.py index 0f1133bac0..fcf3725bfc 100644 --- a/plugins/modules/cloud/linode/linode_v4.py +++ b/plugins/modules/cloud/linode/linode_v4.py @@ -208,9 +208,8 @@ def create_linode(module, client, **kwargs): else: return response._raw_json except TypeError: - module.fail_json(msg='Unable to parse Linode instance creation' - ' response. Please raise a bug against this' - ' module on https://github.com/ansible/ansible/issues' + module.fail_json(msg='Unable to parse Linode instance creation response. 
Please raise a bug against this' + ' module on https://github.com/ansible-collections/community.general/issues' ) From 7007c68ab786e6d51ecab5d97d2e3b891bd476c9 Mon Sep 17 00:00:00 2001 From: David Lundgren Date: Mon, 3 May 2021 14:05:07 -0500 Subject: [PATCH 0262/3093] Clean up test entries from sysrc tests (#2330) * Clean up test entries from sysrc tests * sysrc: enable tests * sysrc: cache the files to be changed and restore them * Update the ezjail archive host and remove obsolete file * sysrc: set ezjail to use archives for 12.0 or less * sysrc: Detect the version to use ftp vs ftp-archive using http * sysrc: Skip ezjail test on FreeBSD 12.0 --- tests/integration/targets/sysrc/aliases | 1 - .../integration/targets/sysrc/tasks/main.yml | 21 ++++++++++++++++++- .../targets/sysrc/tasks/setup-testjail.yml | 9 +++++++- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/tests/integration/targets/sysrc/aliases b/tests/integration/targets/sysrc/aliases index c7d183fb65..360849e61b 100644 --- a/tests/integration/targets/sysrc/aliases +++ b/tests/integration/targets/sysrc/aliases @@ -3,4 +3,3 @@ needs/root skip/docker skip/osx skip/rhel -disabled # FIXME diff --git a/tests/integration/targets/sysrc/tasks/main.yml b/tests/integration/targets/sysrc/tasks/main.yml index c8b7de4160..b8292f785b 100644 --- a/tests/integration/targets/sysrc/tasks/main.yml +++ b/tests/integration/targets/sysrc/tasks/main.yml @@ -6,7 +6,11 @@ block: - name: Cache original contents of /etc/rc.conf shell: "cat /etc/rc.conf" - register: sysrc_original_content + register: cached_etc_rcconf_content + + - name: Cache original contents of /boot/loader.conf + shell: "cat /boot/loader.conf" + register: cached_boot_loaderconf_content ## ## sysrc - example - set mysqlpidfile @@ -130,6 +134,11 @@ ## sysrc - example - Enable nginx in testjail ## - name: Test within jail + # + # NOTE: FreeBSD 12.0 test runner receives a "connection reset by peer" after ~20% downloaded so we are + # only running 
this on 12.1 or higher + # + when: ansible_distribution_version is version('12.01', '>=') block: - name: Setup testjail include: setup-testjail.yml @@ -316,3 +325,13 @@ - not sysrc_value_absent_idempotent.changed - "'sysrc_delim=\"t1,t2\"' in sysrc_delim_content.stdout_lines" - "'sysrc_delim_delete' not in sysrc_delim_content.stdout_lines" + always: + - name: Restore /etc/rc.conf + copy: + content: "{{ cached_etc_rcconf_content }}" + dest: /etc/rc.conf + + - name: Restore /boot/loader.conf + copy: + content: "{{ cached_boot_loaderconf_content }}" + dest: /boot/loader.conf \ No newline at end of file diff --git a/tests/integration/targets/sysrc/tasks/setup-testjail.yml b/tests/integration/targets/sysrc/tasks/setup-testjail.yml index 9bd15320ae..e75957d19f 100644 --- a/tests/integration/targets/sysrc/tasks/setup-testjail.yml +++ b/tests/integration/targets/sysrc/tasks/setup-testjail.yml @@ -17,12 +17,19 @@ pkgng: name: ezjail +- name: Configure ezjail to use http + when: ansible_distribution_version is version('11.01', '>') + lineinfile: + dest: /usr/local/etc/ezjail.conf + regexp: ^ezjail_ftphost + line: ezjail_ftphost=http://ftp.freebsd.org + - name: Configure ezjail to use archive for old freebsd releases when: ansible_distribution_version is version('11.01', '<=') lineinfile: dest: /usr/local/etc/ezjail.conf regexp: ^ezjail_ftphost - line: ezjail_ftphost=ftp-archive.freebsd.org + line: ezjail_ftphost=http://ftp-archive.freebsd.org - name: Start ezjail ignore_errors: yes From 06bdabcad93846f37f70fd868fdce421ef366f8c Mon Sep 17 00:00:00 2001 From: zigaSRC <65527456+zigaSRC@users.noreply.github.com> Date: Mon, 3 May 2021 21:25:52 +0200 Subject: [PATCH 0263/3093] lvol - bug fix - Convert units to lowercase when using LVS or VGS command (#2369) * Added lower call for units when checking lvs/vgs size * Changelog * Size roudning correction * Rounding * Changelog * Remove whitespace --- changelogs/fragments/2369-lvol_size_bug_fixes.yml | 3 +++ 
plugins/modules/system/lvol.py | 13 +++++-------- 2 files changed, 8 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2369-lvol_size_bug_fixes.yml diff --git a/changelogs/fragments/2369-lvol_size_bug_fixes.yml b/changelogs/fragments/2369-lvol_size_bug_fixes.yml new file mode 100644 index 0000000000..fcd2f17b11 --- /dev/null +++ b/changelogs/fragments/2369-lvol_size_bug_fixes.yml @@ -0,0 +1,3 @@ +bugfixes: + - lvol - fixed size unit capitalization to match units used between different tools for comparison (https://github.com/ansible-collections/community.general/issues/2360). + - lvol - fixed rounding errors (https://github.com/ansible-collections/community.general/issues/2370). \ No newline at end of file diff --git a/plugins/modules/system/lvol.py b/plugins/modules/system/lvol.py index 8dc3fac7f5..fafa7db38a 100644 --- a/plugins/modules/system/lvol.py +++ b/plugins/modules/system/lvol.py @@ -389,7 +389,7 @@ def main(): # Get information on volume group requested vgs_cmd = module.get_bin_path("vgs", required=True) rc, current_vgs, err = module.run_command( - "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg)) + "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit.lower(), vg)) if rc != 0: if state == 'absent': @@ -403,7 +403,7 @@ def main(): # Get information on logical volume requested lvs_cmd = module.get_bin_path("lvs", required=True) rc, current_lvs, err = module.run_command( - "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg)) + "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit.lower(), vg)) if rc != 0: if state == 'absent': @@ -505,16 +505,13 @@ def main(): else: # size_whole == 'FREE': size_requested = size_percent * this_vg['free'] / 100 - # from LVEXTEND(8) - The resulting value is rounded upward. 
- # from LVREDUCE(8) - The resulting value for the substraction is rounded downward, for the absolute size it is rounded upward. if size_operator == '+': size_requested += this_lv['size'] - size_requested += this_vg['ext_size'] - (size_requested % this_vg['ext_size']) elif size_operator == '-': size_requested = this_lv['size'] - size_requested - size_requested -= (size_requested % this_vg['ext_size']) - else: - size_requested += this_vg['ext_size'] - (size_requested % this_vg['ext_size']) + + # According to latest documentation (LVM2-2.03.11) all tools round down + size_requested -= (size_requested % this_vg['ext_size']) if this_lv['size'] < size_requested: if (size_free > 0) and (size_free >= (size_requested - this_lv['size'])): From 1f41e66f098647af4c393a27ee5648e3371b2ed2 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 3 May 2021 22:24:33 +0200 Subject: [PATCH 0264/3093] Remove shippable config. (#2440) --- shippable.yml | 48 ------------------------------------------------ 1 file changed, 48 deletions(-) delete mode 100644 shippable.yml diff --git a/shippable.yml b/shippable.yml deleted file mode 100644 index 7cbbdc24e7..0000000000 --- a/shippable.yml +++ /dev/null @@ -1,48 +0,0 @@ -language: python - -env: - matrix: - - T=none - -matrix: - exclude: - - env: T=none - include: - - env: T=devel/sanity/1 - - env: T=devel/sanity/2 - - env: T=devel/sanity/3 - - env: T=devel/sanity/4 - - - env: T=2.11/sanity/1 - - env: T=2.11/sanity/2 - - env: T=2.11/sanity/3 - - env: T=2.11/sanity/4 - - - env: T=2.10/sanity/1 - - env: T=2.10/sanity/2 - - env: T=2.10/sanity/3 - - env: T=2.10/sanity/4 - - - env: T=2.9/sanity/1 - - env: T=2.9/sanity/2 - - env: T=2.9/sanity/3 - - env: T=2.9/sanity/4 - -branches: - except: - - "*-patch-*" - - "revert-*-*" - - "patchback/backports/*" - -build: - ci: - - tests/utils/shippable/timing.sh tests/utils/shippable/shippable.sh $T - -integrations: - notifications: - - integrationName: email - type: email - on_success: never - on_failure: 
never - on_start: never - on_pull_request: never From aaa561163b705dbf7e1f06b58382e3a072550e31 Mon Sep 17 00:00:00 2001 From: Jan Orel Date: Tue, 4 May 2021 12:21:55 +0200 Subject: [PATCH 0265/3093] OpenNebula one_vm.py: Fix missing keys (#2435) * OpenNebula one_vm.py: Fix missing keys * fixup OpenNebula one_vm.py: Fix missing keys --- .../fragments/2435-one_vm-fix_missing_keys.yml | 2 ++ plugins/modules/cloud/opennebula/one_vm.py | 17 +++++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2435-one_vm-fix_missing_keys.yml diff --git a/changelogs/fragments/2435-one_vm-fix_missing_keys.yml b/changelogs/fragments/2435-one_vm-fix_missing_keys.yml new file mode 100644 index 0000000000..395c024b26 --- /dev/null +++ b/changelogs/fragments/2435-one_vm-fix_missing_keys.yml @@ -0,0 +1,2 @@ +bugfixes: + - one_vm - Allow missing NIC keys (https://github.com/ansible-collections/community.general/pull/2435). diff --git a/plugins/modules/cloud/opennebula/one_vm.py b/plugins/modules/cloud/opennebula/one_vm.py index 425a1c464a..fa3d4abaab 100644 --- a/plugins/modules/cloud/opennebula/one_vm.py +++ b/plugins/modules/cloud/opennebula/one_vm.py @@ -752,11 +752,20 @@ def get_vm_info(client, vm): if 'NIC' in vm.TEMPLATE: if isinstance(vm.TEMPLATE['NIC'], list): for nic in vm.TEMPLATE['NIC']: - networks_info.append({'ip': nic['IP'], 'mac': nic['MAC'], 'name': nic['NETWORK'], 'security_groups': nic['SECURITY_GROUPS']}) + networks_info.append({ + 'ip': nic.get('IP', ''), + 'mac': nic.get('MAC', ''), + 'name': nic.get('NETWORK', ''), + 'security_groups': nic.get('SECURITY_GROUPS', '') + }) else: - networks_info.append( - {'ip': vm.TEMPLATE['NIC']['IP'], 'mac': vm.TEMPLATE['NIC']['MAC'], - 'name': vm.TEMPLATE['NIC']['NETWORK'], 'security_groups': vm.TEMPLATE['NIC']['SECURITY_GROUPS']}) + networks_info.append({ + 'ip': vm.TEMPLATE['NIC'].get('IP', ''), + 'mac': vm.TEMPLATE['NIC'].get('MAC', ''), + 'name': vm.TEMPLATE['NIC'].get('NETWORK', 
''), + 'security_groups': + vm.TEMPLATE['NIC'].get('SECURITY_GROUPS', '') + }) import time current_time = time.localtime() From 188a4eeb0c9c7b89de952d8bb76d962ee17dc490 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 5 May 2021 07:32:53 +0200 Subject: [PATCH 0266/3093] Add more plugin authors to BOTMETA. (#2451) --- .github/BOTMETA.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index fd23d0c9e4..f27c96e049 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -88,6 +88,8 @@ files: maintainers: $team_linode labels: cloud linode keywords: linode dynamic inventory script + $inventories/lxd.py: + maintainers: conloos $inventories/proxmox.py: maintainers: $team_virt ilijamt $inventories/scaleway.py: @@ -373,6 +375,8 @@ files: maintainers: $team_keycloak $modules/identity/keycloak/keycloak_group.py: maintainers: adamgoossens + $modules/identity/keycloak/keycloak_realm.py: + maintainers: kris2kris $modules/identity/onepassword_info.py: maintainers: Rylon $modules/identity/opendj/opendj_backendprop.py: From 9906b9dbc75c45288acdcc5eb597957a0a1df376 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 5 May 2021 12:31:01 +0200 Subject: [PATCH 0267/3093] Remove vendored ipaddress module. 
(#2441) --- changelogs/fragments/ipaddress.yml | 5 + plugins/inventory/lxd.py | 15 +- plugins/module_utils/compat/__init__.py | 0 plugins/module_utils/compat/ipaddress.py | 2580 ----------------- .../scaleway/scaleway_security_group_rule.py | 24 +- tests/sanity/ignore-2.10.txt | 2 - tests/sanity/ignore-2.11.txt | 2 - tests/sanity/ignore-2.12.txt | 2 - tests/sanity/ignore-2.9.txt | 2 - 9 files changed, 37 insertions(+), 2595 deletions(-) create mode 100644 changelogs/fragments/ipaddress.yml delete mode 100644 plugins/module_utils/compat/__init__.py delete mode 100644 plugins/module_utils/compat/ipaddress.py diff --git a/changelogs/fragments/ipaddress.yml b/changelogs/fragments/ipaddress.yml new file mode 100644 index 0000000000..7f6eeb70bb --- /dev/null +++ b/changelogs/fragments/ipaddress.yml @@ -0,0 +1,5 @@ +removed_features: +- "The vendored copy of ``ipaddress`` has been removed. Please use ``ipaddress`` from the Python 3 standard library, or `from pypi `_. (https://github.com/ansible-collections/community.general/pull/2441)." +breaking_changes: +- "scaleway_security_group_rule - when used with Python 2, the module now needs ``ipaddress`` installed `from pypi `_ (https://github.com/ansible-collections/community.general/pull/2441)." +- "lxd inventory plugin - when used with Python 2, the plugin now needs ``ipaddress`` installed `from pypi `_ (https://github.com/ansible-collections/community.general/pull/2441)." diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index d1e47b0505..06c620ac60 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -13,6 +13,8 @@ DOCUMENTATION = r''' - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. version_added: "3.0.0" author: "Frank Dornheim (@conloos)" + requirements: + - ipaddress options: plugin: description: Token that ensures this is a source file for the 'lxd' plugin. 
@@ -124,10 +126,17 @@ import socket from ansible.plugins.inventory import BaseInventoryPlugin from ansible.module_utils._text import to_native, to_text from ansible.module_utils.common.dict_transformations import dict_merge +from ansible.module_utils.six import raise_from from ansible.errors import AnsibleError, AnsibleParserError -from ansible_collections.community.general.plugins.module_utils.compat import ipaddress from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException +try: + import ipaddress +except ImportError as exc: + IPADDRESS_IMPORT_ERROR = exc +else: + IPADDRESS_IMPORT_ERROR = None + class InventoryModule(BaseInventoryPlugin): DEBUG = 4 @@ -924,6 +933,10 @@ class InventoryModule(BaseInventoryPlugin): AnsibleParserError Returns: None""" + if IPADDRESS_IMPORT_ERROR: + raise_from( + AnsibleError('another_library must be installed to use this plugin'), + IPADDRESS_IMPORT_ERROR) super(InventoryModule, self).parse(inventory, loader, path, cache=False) # Read the inventory YAML file diff --git a/plugins/module_utils/compat/__init__.py b/plugins/module_utils/compat/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/compat/ipaddress.py b/plugins/module_utils/compat/ipaddress.py deleted file mode 100644 index db4e91b784..0000000000 --- a/plugins/module_utils/compat/ipaddress.py +++ /dev/null @@ -1,2580 +0,0 @@ -# -*- coding: utf-8 -*- - -# This code is part of Ansible, but is an independent component. -# This particular file, and this file only, is based on -# Lib/ipaddress.py of cpython -# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 -# -# 1. This LICENSE AGREEMENT is between the Python Software Foundation -# ("PSF"), and the Individual or Organization ("Licensee") accessing and -# otherwise using this software ("Python") in source or binary form and -# its associated documentation. -# -# 2. 
Subject to the terms and conditions of this License Agreement, PSF hereby -# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, -# analyze, test, perform and/or display publicly, prepare derivative works, -# distribute, and otherwise use Python alone or in any derivative version, -# provided, however, that PSF's License Agreement and PSF's notice of copyright, -# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" -# are retained in Python alone or in any derivative version prepared by Licensee. -# -# 3. In the event Licensee prepares a derivative work that is based on -# or incorporates Python or any part thereof, and wants to make -# the derivative work available to others as provided herein, then -# Licensee hereby agrees to include in any such work a brief summary of -# the changes made to Python. -# -# 4. PSF is making Python available to Licensee on an "AS IS" -# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND -# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT -# INFRINGE ANY THIRD PARTY RIGHTS. -# -# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, -# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. -# -# 6. This License Agreement will automatically terminate upon a material -# breach of its terms and conditions. -# -# 7. Nothing in this License Agreement shall be deemed to create any -# relationship of agency, partnership, or joint venture between PSF and -# Licensee. 
This License Agreement does not grant permission to use PSF -# trademarks or trade name in a trademark sense to endorse or promote -# products or services of Licensee, or any third party. -# -# 8. By copying, installing or otherwise using Python, Licensee -# agrees to be bound by the terms and conditions of this License -# Agreement. - -# Copyright 2007 Google Inc. -# Licensed to PSF under a Contributor Agreement. - -"""A fast, lightweight IPv4/IPv6 manipulation library in Python. - -This library is used to create/poke/manipulate IPv4 and IPv6 addresses -and networks. - -""" - -from __future__ import (absolute_import, division, print_function) -from __future__ import unicode_literals -__metaclass__ = type - - -import itertools -import struct - - -# The following makes it easier for us to script updates of the bundled code and is not part of -# upstream -_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"} - -__version__ = "1.0.22" - -# Compatibility functions -_compat_int_types = (int,) -try: - _compat_int_types = (int, long) -except NameError: - pass -try: - _compat_str = unicode -except NameError: - _compat_str = str - assert bytes != str -if b"\0"[0] == 0: # Python 3 semantics - - def _compat_bytes_to_byte_vals(byt): - return byt - - -else: - - def _compat_bytes_to_byte_vals(byt): - return [struct.unpack(b"!B", b)[0] for b in byt] - - -try: - _compat_int_from_byte_vals = int.from_bytes -except AttributeError: - - def _compat_int_from_byte_vals(bytvals, endianess): - assert endianess == "big" - res = 0 - for bv in bytvals: - assert isinstance(bv, _compat_int_types) - res = (res << 8) + bv - return res - - -def _compat_to_bytes(intval, length, endianess): - assert isinstance(intval, _compat_int_types) - assert endianess == "big" - if length == 4: - if intval < 0 or intval >= 2 ** 32: - raise struct.error("integer out of range for 'I' format code") - return struct.pack(b"!I", intval) - elif length == 16: - if intval < 0 or intval >= 2 ** 128: - raise 
struct.error("integer out of range for 'QQ' format code") - return struct.pack(b"!QQ", intval >> 64, intval & 0xFFFFFFFFFFFFFFFF) - else: - raise NotImplementedError() - - -if hasattr(int, "bit_length"): - # Not int.bit_length , since that won't work in 2.7 where long exists - def _compat_bit_length(i): - return i.bit_length() - - -else: - - def _compat_bit_length(i): - for res in itertools.count(): - if i >> res == 0: - return res - - -def _compat_range(start, end, step=1): - assert step > 0 - i = start - while i < end: - yield i - i += step - - -class _TotalOrderingMixin(object): - __slots__ = () - - # Helper that derives the other comparison operations from - # __lt__ and __eq__ - # We avoid functools.total_ordering because it doesn't handle - # NotImplemented correctly yet (http://bugs.python.org/issue10042) - def __eq__(self, other): - raise NotImplementedError - - def __ne__(self, other): - equal = self.__eq__(other) - if equal is NotImplemented: - return NotImplemented - return not equal - - def __lt__(self, other): - raise NotImplementedError - - def __le__(self, other): - less = self.__lt__(other) - if less is NotImplemented or not less: - return self.__eq__(other) - return less - - def __gt__(self, other): - less = self.__lt__(other) - if less is NotImplemented: - return NotImplemented - equal = self.__eq__(other) - if equal is NotImplemented: - return NotImplemented - return not (less or equal) - - def __ge__(self, other): - less = self.__lt__(other) - if less is NotImplemented: - return NotImplemented - return not less - - -IPV4LENGTH = 32 -IPV6LENGTH = 128 - - -class AddressValueError(ValueError): - """A Value Error related to the address.""" - - -class NetmaskValueError(ValueError): - """A Value Error related to the netmask.""" - - -def ip_address(address): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. 
Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Address or IPv6Address object. - - Raises: - ValueError: if the *address* passed isn't either a v4 or a v6 - address - - """ - try: - return IPv4Address(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Address(address) - except (AddressValueError, NetmaskValueError): - pass - - if isinstance(address, bytes): - raise AddressValueError( - "%r does not appear to be an IPv4 or IPv6 address. " - "Did you pass in a bytes (str in Python 2) instead of" - " a unicode object?" % address - ) - - raise ValueError( - "%r does not appear to be an IPv4 or IPv6 address" % address - ) - - -def ip_network(address, strict=True): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP network. Either IPv4 or - IPv6 networks may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Network or IPv6Network object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. Or if the network has host bits set. - - """ - try: - return IPv4Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Network(address, strict) - except (AddressValueError, NetmaskValueError): - pass - - if isinstance(address, bytes): - raise AddressValueError( - "%r does not appear to be an IPv4 or IPv6 network. " - "Did you pass in a bytes (str in Python 2) instead of" - " a unicode object?" % address - ) - - raise ValueError( - "%r does not appear to be an IPv4 or IPv6 network" % address - ) - - -def ip_interface(address): - """Take an IP string/int and return an object of the correct type. - - Args: - address: A string or integer, the IP address. 
Either IPv4 or - IPv6 addresses may be supplied; integers less than 2**32 will - be considered to be IPv4 by default. - - Returns: - An IPv4Interface or IPv6Interface object. - - Raises: - ValueError: if the string passed isn't either a v4 or a v6 - address. - - Notes: - The IPv?Interface classes describe an Address on a particular - Network, so they're basically a combination of both the Address - and Network classes. - - """ - try: - return IPv4Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - try: - return IPv6Interface(address) - except (AddressValueError, NetmaskValueError): - pass - - raise ValueError( - "%r does not appear to be an IPv4 or IPv6 interface" % address - ) - - -def v4_int_to_packed(address): - """Represent an address as 4 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv4 IP address. - - Returns: - The integer address packed as 4 bytes in network (big-endian) order. - - Raises: - ValueError: If the integer is negative or too large to be an - IPv4 IP address. - - """ - try: - return _compat_to_bytes(address, 4, "big") - except (struct.error, OverflowError): - raise ValueError("Address negative or too large for IPv4") - - -def v6_int_to_packed(address): - """Represent an address as 16 packed bytes in network (big-endian) order. - - Args: - address: An integer representation of an IPv6 IP address. - - Returns: - The integer address packed as 16 bytes in network (big-endian) order. 
- - """ - try: - return _compat_to_bytes(address, 16, "big") - except (struct.error, OverflowError): - raise ValueError("Address negative or too large for IPv6") - - -def _split_optional_netmask(address): - """Helper to split the netmask and raise AddressValueError if needed""" - addr = _compat_str(address).split("/") - if len(addr) > 2: - raise AddressValueError("Only one '/' permitted in %r" % address) - return addr - - -def _find_address_range(addresses): - """Find a sequence of sorted deduplicated IPv#Address. - - Args: - addresses: a list of IPv#Address objects. - - Yields: - A tuple containing the first and last IP addresses in the sequence. - - """ - it = iter(addresses) - first = last = next(it) # pylint: disable=stop-iteration-return - for ip in it: - if ip._ip != last._ip + 1: - yield first, last - first = ip - last = ip - yield first, last - - -def _count_righthand_zero_bits(number, bits): - """Count the number of zero bits on the right hand side. - - Args: - number: an integer. - bits: maximum number of bits to count. - - Returns: - The number of zero bits on the right hand side of the number. - - """ - if number == 0: - return bits - return min(bits, _compat_bit_length(~number & (number - 1))) - - -def summarize_address_range(first, last): - """Summarize a network range given the first and last IP addresses. - - Example: - >>> list(summarize_address_range(IPv4Address('192.0.2.0'), - ... IPv4Address('192.0.2.130'))) - ... #doctest: +NORMALIZE_WHITESPACE - [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'), - IPv4Network('192.0.2.130/32')] - - Args: - first: the first IPv4Address or IPv6Address in the range. - last: the last IPv4Address or IPv6Address in the range. - - Returns: - An iterator of the summarized IPv(4|6) network objects. - - Raise: - TypeError: - If the first and last objects are not IP addresses. - If the first and last objects are not the same version. - ValueError: - If the last object is not greater than the first. 
- If the version of the first address is not 4 or 6. - - """ - if not ( - isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress) - ): - raise TypeError("first and last must be IP addresses, not networks") - if first.version != last.version: - raise TypeError( - "%s and %s are not of the same version" % (first, last) - ) - if first > last: - raise ValueError("last IP address must be greater than first") - - if first.version == 4: - ip = IPv4Network - elif first.version == 6: - ip = IPv6Network - else: - raise ValueError("unknown IP version") - - ip_bits = first._max_prefixlen - first_int = first._ip - last_int = last._ip - while first_int <= last_int: - nbits = min( - _count_righthand_zero_bits(first_int, ip_bits), - _compat_bit_length(last_int - first_int + 1) - 1, - ) - net = ip((first_int, ip_bits - nbits)) - yield net - first_int += 1 << nbits - if first_int - 1 == ip._ALL_ONES: - break - - -def _collapse_addresses_internal(addresses): - """Loops through the addresses, collapsing concurrent netblocks. - - Example: - - ip1 = IPv4Network('192.0.2.0/26') - ip2 = IPv4Network('192.0.2.64/26') - ip3 = IPv4Network('192.0.2.128/26') - ip4 = IPv4Network('192.0.2.192/26') - - _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> - [IPv4Network('192.0.2.0/24')] - - This shouldn't be called directly; it is called via - collapse_addresses([]). - - Args: - addresses: A list of IPv4Network's or IPv6Network's - - Returns: - A list of IPv4Network's or IPv6Network's depending on what we were - passed. 
- - """ - # First merge - to_merge = list(addresses) - subnets = {} - while to_merge: - net = to_merge.pop() - supernet = net.supernet() - existing = subnets.get(supernet) - if existing is None: - subnets[supernet] = net - elif existing != net: - # Merge consecutive subnets - del subnets[supernet] - to_merge.append(supernet) - # Then iterate over resulting networks, skipping subsumed subnets - last = None - for net in sorted(subnets.values()): - if last is not None: - # Since they are sorted, - # last.network_address <= net.network_address is a given. - if last.broadcast_address >= net.broadcast_address: - continue - yield net - last = net - - -def collapse_addresses(addresses): - """Collapse a list of IP objects. - - Example: - collapse_addresses([IPv4Network('192.0.2.0/25'), - IPv4Network('192.0.2.128/25')]) -> - [IPv4Network('192.0.2.0/24')] - - Args: - addresses: An iterator of IPv4Network or IPv6Network objects. - - Returns: - An iterator of the collapsed IPv(4|6)Network objects. - - Raises: - TypeError: If passed a list of mixed version objects. 
- - """ - addrs = [] - ips = [] - nets = [] - - # split IP addresses and networks - for ip in addresses: - if isinstance(ip, _BaseAddress): - if ips and ips[-1]._version != ip._version: - raise TypeError( - "%s and %s are not of the same version" % (ip, ips[-1]) - ) - ips.append(ip) - elif ip._prefixlen == ip._max_prefixlen: - if ips and ips[-1]._version != ip._version: - raise TypeError( - "%s and %s are not of the same version" % (ip, ips[-1]) - ) - try: - ips.append(ip.ip) - except AttributeError: - ips.append(ip.network_address) - else: - if nets and nets[-1]._version != ip._version: - raise TypeError( - "%s and %s are not of the same version" % (ip, nets[-1]) - ) - nets.append(ip) - - # sort and dedup - ips = sorted(set(ips)) - - # find consecutive address ranges in the sorted sequence and summarize them - if ips: - for first, last in _find_address_range(ips): - addrs.extend(summarize_address_range(first, last)) - - return _collapse_addresses_internal(addrs + nets) - - -def get_mixed_type_key(obj): - """Return a key suitable for sorting between networks and addresses. - - Address and Network objects are not sortable by default; they're - fundamentally different so the expression - - IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24') - - doesn't make any sense. There are some times however, where you may wish - to have ipaddress sort these for you anyway. If you need to do this, you - can use this function as the key= argument to sorted(). - - Args: - obj: either a Network or Address object. - Returns: - appropriate key. 
- - """ - if isinstance(obj, _BaseNetwork): - return obj._get_networks_key() - elif isinstance(obj, _BaseAddress): - return obj._get_address_key() - return NotImplemented - - -class _IPAddressBase(_TotalOrderingMixin): - - """The mother class.""" - - __slots__ = () - - @property - def exploded(self): - """Return the longhand version of the IP address as a string.""" - return self._explode_shorthand_ip_string() - - @property - def compressed(self): - """Return the shorthand version of the IP address as a string.""" - return _compat_str(self) - - @property - def reverse_pointer(self): - """The name of the reverse DNS pointer for the IP address, e.g.: - >>> ipaddress.ip_address("127.0.0.1").reverse_pointer - '1.0.0.127.in-addr.arpa' - >>> ipaddress.ip_address("2001:db8::1").reverse_pointer - '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa' - - """ - return self._reverse_pointer() - - @property - def version(self): - msg = "%200s has no version specified" % (type(self),) - raise NotImplementedError(msg) - - def _check_int_address(self, address): - if address < 0: - msg = "%d (< 0) is not permitted as an IPv%d address" - raise AddressValueError(msg % (address, self._version)) - if address > self._ALL_ONES: - msg = "%d (>= 2**%d) is not permitted as an IPv%d address" - raise AddressValueError( - msg % (address, self._max_prefixlen, self._version) - ) - - def _check_packed_address(self, address, expected_len): - address_len = len(address) - if address_len != expected_len: - msg = ( - "%r (len %d != %d) is not permitted as an IPv%d address. " - "Did you pass in a bytes (str in Python 2) instead of" - " a unicode object?" - ) - raise AddressValueError( - msg % (address, address_len, expected_len, self._version) - ) - - @classmethod - def _ip_int_from_prefix(cls, prefixlen): - """Turn the prefix length into a bitwise netmask - - Args: - prefixlen: An integer, the prefix length. - - Returns: - An integer. 
- - """ - return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen) - - @classmethod - def _prefix_from_ip_int(cls, ip_int): - """Return prefix length from the bitwise netmask. - - Args: - ip_int: An integer, the netmask in expanded bitwise format - - Returns: - An integer, the prefix length. - - Raises: - ValueError: If the input intermingles zeroes & ones - """ - trailing_zeroes = _count_righthand_zero_bits( - ip_int, cls._max_prefixlen - ) - prefixlen = cls._max_prefixlen - trailing_zeroes - leading_ones = ip_int >> trailing_zeroes - all_ones = (1 << prefixlen) - 1 - if leading_ones != all_ones: - byteslen = cls._max_prefixlen // 8 - details = _compat_to_bytes(ip_int, byteslen, "big") - msg = "Netmask pattern %r mixes zeroes & ones" - raise ValueError(msg % details) - return prefixlen - - @classmethod - def _report_invalid_netmask(cls, netmask_str): - msg = "%r is not a valid netmask" % netmask_str - raise NetmaskValueError(msg) - - @classmethod - def _prefix_from_prefix_string(cls, prefixlen_str): - """Return prefix length from a numeric string - - Args: - prefixlen_str: The string to be converted - - Returns: - An integer, the prefix length. - - Raises: - NetmaskValueError: If the input is not a valid netmask - """ - # int allows a leading +/- as well as surrounding whitespace, - # so we ensure that isn't the case - if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str): - cls._report_invalid_netmask(prefixlen_str) - try: - prefixlen = int(prefixlen_str) - except ValueError: - cls._report_invalid_netmask(prefixlen_str) - if not (0 <= prefixlen <= cls._max_prefixlen): - cls._report_invalid_netmask(prefixlen_str) - return prefixlen - - @classmethod - def _prefix_from_ip_string(cls, ip_str): - """Turn a netmask/hostmask string into a prefix length - - Args: - ip_str: The netmask/hostmask to be converted - - Returns: - An integer, the prefix length. 
- - Raises: - NetmaskValueError: If the input is not a valid netmask/hostmask - """ - # Parse the netmask/hostmask like an IP address. - try: - ip_int = cls._ip_int_from_string(ip_str) - except AddressValueError: - cls._report_invalid_netmask(ip_str) - - # Try matching a netmask (this would be /1*0*/ as a bitwise regexp). - # Note that the two ambiguous cases (all-ones and all-zeroes) are - # treated as netmasks. - try: - return cls._prefix_from_ip_int(ip_int) - except ValueError: - pass - - # Invert the bits, and try matching a /0+1+/ hostmask instead. - ip_int ^= cls._ALL_ONES - try: - return cls._prefix_from_ip_int(ip_int) - except ValueError: - cls._report_invalid_netmask(ip_str) - - def __reduce__(self): - return self.__class__, (_compat_str(self),) - - -class _BaseAddress(_IPAddressBase): - - """A generic IP object. - - This IP class contains the version independent methods which are - used by single IP addresses. - """ - - __slots__ = () - - def __int__(self): - return self._ip - - def __eq__(self, other): - try: - return self._ip == other._ip and self._version == other._version - except AttributeError: - return NotImplemented - - def __lt__(self, other): - if not isinstance(other, _IPAddressBase): - return NotImplemented - if not isinstance(other, _BaseAddress): - raise TypeError( - "%s and %s are not of the same type" % (self, other) - ) - if self._version != other._version: - raise TypeError( - "%s and %s are not of the same version" % (self, other) - ) - if self._ip != other._ip: - return self._ip < other._ip - return False - - # Shorthand for Integer addition and subtraction. This is not - # meant to ever support addition/subtraction of addresses. 
- def __add__(self, other): - if not isinstance(other, _compat_int_types): - return NotImplemented - return self.__class__(int(self) + other) - - def __sub__(self, other): - if not isinstance(other, _compat_int_types): - return NotImplemented - return self.__class__(int(self) - other) - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, _compat_str(self)) - - def __str__(self): - return _compat_str(self._string_from_ip_int(self._ip)) - - def __hash__(self): - return hash(hex(int(self._ip))) - - def _get_address_key(self): - return (self._version, self) - - def __reduce__(self): - return self.__class__, (self._ip,) - - -class _BaseNetwork(_IPAddressBase): - - """A generic IP network object. - - This IP class contains the version independent methods which are - used by networks. - - """ - - def __init__(self, address): - self._cache = {} - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, _compat_str(self)) - - def __str__(self): - return "%s/%d" % (self.network_address, self.prefixlen) - - def hosts(self): - """Generate Iterator over usable hosts in a network. - - This is like __iter__ except it doesn't return the network - or broadcast addresses. 
- - """ - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in _compat_range(network + 1, broadcast): - yield self._address_class(x) - - def __iter__(self): - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in _compat_range(network, broadcast + 1): - yield self._address_class(x) - - def __getitem__(self, n): - network = int(self.network_address) - broadcast = int(self.broadcast_address) - if n >= 0: - if network + n > broadcast: - raise IndexError("address out of range") - return self._address_class(network + n) - else: - n += 1 - if broadcast + n < network: - raise IndexError("address out of range") - return self._address_class(broadcast + n) - - def __lt__(self, other): - if not isinstance(other, _IPAddressBase): - return NotImplemented - if not isinstance(other, _BaseNetwork): - raise TypeError( - "%s and %s are not of the same type" % (self, other) - ) - if self._version != other._version: - raise TypeError( - "%s and %s are not of the same version" % (self, other) - ) - if self.network_address != other.network_address: - return self.network_address < other.network_address - if self.netmask != other.netmask: - return self.netmask < other.netmask - return False - - def __eq__(self, other): - try: - return ( - self._version == other._version - and self.network_address == other.network_address - and int(self.netmask) == int(other.netmask) - ) - except AttributeError: - return NotImplemented - - def __hash__(self): - return hash(int(self.network_address) ^ int(self.netmask)) - - def __contains__(self, other): - # always false if one is v4 and the other is v6. - if self._version != other._version: - return False - # dealing with another network. 
- if isinstance(other, _BaseNetwork): - return False - # dealing with another address - else: - # address - return ( - int(self.network_address) - <= int(other._ip) - <= int(self.broadcast_address) - ) - - def overlaps(self, other): - """Tell if self is partly contained in other.""" - return self.network_address in other or ( - self.broadcast_address in other - or ( - other.network_address in self - or (other.broadcast_address in self) - ) - ) - - @property - def broadcast_address(self): - x = self._cache.get("broadcast_address") - if x is None: - x = self._address_class( - int(self.network_address) | int(self.hostmask) - ) - self._cache["broadcast_address"] = x - return x - - @property - def hostmask(self): - x = self._cache.get("hostmask") - if x is None: - x = self._address_class(int(self.netmask) ^ self._ALL_ONES) - self._cache["hostmask"] = x - return x - - @property - def with_prefixlen(self): - return "%s/%d" % (self.network_address, self._prefixlen) - - @property - def with_netmask(self): - return "%s/%s" % (self.network_address, self.netmask) - - @property - def with_hostmask(self): - return "%s/%s" % (self.network_address, self.hostmask) - - @property - def num_addresses(self): - """Number of hosts in the current subnet.""" - return int(self.broadcast_address) - int(self.network_address) + 1 - - @property - def _address_class(self): - # Returning bare address objects (rather than interfaces) allows for - # more consistent behaviour across the network address, broadcast - # address and individual host addresses. - msg = "%200s has no associated address class" % (type(self),) - raise NotImplementedError(msg) - - @property - def prefixlen(self): - return self._prefixlen - - def address_exclude(self, other): - """Remove an address from a larger block. 
- - For example: - - addr1 = ip_network('192.0.2.0/28') - addr2 = ip_network('192.0.2.1/32') - list(addr1.address_exclude(addr2)) = - [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), - IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] - - or IPv6: - - addr1 = ip_network('2001:db8::1/32') - addr2 = ip_network('2001:db8::1/128') - list(addr1.address_exclude(addr2)) = - [ip_network('2001:db8::1/128'), - ip_network('2001:db8::2/127'), - ip_network('2001:db8::4/126'), - ip_network('2001:db8::8/125'), - ... - ip_network('2001:db8:8000::/33')] - - Args: - other: An IPv4Network or IPv6Network object of the same type. - - Returns: - An iterator of the IPv(4|6)Network objects which is self - minus other. - - Raises: - TypeError: If self and other are of differing address - versions, or if other is not a network object. - ValueError: If other is not completely contained by self. - - """ - if not self._version == other._version: - raise TypeError( - "%s and %s are not of the same version" % (self, other) - ) - - if not isinstance(other, _BaseNetwork): - raise TypeError("%s is not a network object" % other) - - if not other.subnet_of(self): - raise ValueError("%s not contained in %s" % (other, self)) - if other == self: - return - - # Make sure we're comparing the network of other. - other = other.__class__( - "%s/%s" % (other.network_address, other.prefixlen) - ) - - s1, s2 = self.subnets() - while s1 != other and s2 != other: - if other.subnet_of(s1): - yield s2 - s1, s2 = s1.subnets() - elif other.subnet_of(s2): - yield s1 - s1, s2 = s2.subnets() - else: - # If we got here, there's a bug somewhere. - raise AssertionError( - "Error performing exclusion: " - "s1: %s s2: %s other: %s" % (s1, s2, other) - ) - if s1 == other: - yield s2 - elif s2 == other: - yield s1 - else: - # If we got here, there's a bug somewhere. 
- raise AssertionError( - "Error performing exclusion: " - "s1: %s s2: %s other: %s" % (s1, s2, other) - ) - - def compare_networks(self, other): - """Compare two IP objects. - - This is only concerned about the comparison of the integer - representation of the network addresses. This means that the - host bits aren't considered at all in this method. If you want - to compare host bits, you can easily enough do a - 'HostA._ip < HostB._ip' - - Args: - other: An IP object. - - Returns: - If the IP versions of self and other are the same, returns: - - -1 if self < other: - eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') - IPv6Network('2001:db8::1000/124') < - IPv6Network('2001:db8::2000/124') - 0 if self == other - eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') - IPv6Network('2001:db8::1000/124') == - IPv6Network('2001:db8::1000/124') - 1 if self > other - eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') - IPv6Network('2001:db8::2000/124') > - IPv6Network('2001:db8::1000/124') - - Raises: - TypeError if the IP versions are different. - - """ - # does this need to raise a ValueError? - if self._version != other._version: - raise TypeError( - "%s and %s are not of the same type" % (self, other) - ) - # self._version == other._version below here: - if self.network_address < other.network_address: - return -1 - if self.network_address > other.network_address: - return 1 - # self.network_address == other.network_address below here: - if self.netmask < other.netmask: - return -1 - if self.netmask > other.netmask: - return 1 - return 0 - - def _get_networks_key(self): - """Network-only key function. - - Returns an object that identifies this address' network and - netmask. This function is a suitable "key" argument for sorted() - and list.sort(). - - """ - return (self._version, self.network_address, self.netmask) - - def subnets(self, prefixlen_diff=1, new_prefix=None): - """The subnets which join to make the current subnet. 
- - In the case that self contains only one IP - (self._prefixlen == 32 for IPv4 or self._prefixlen == 128 - for IPv6), yield an iterator with just ourself. - - Args: - prefixlen_diff: An integer, the amount the prefix length - should be increased by. This should not be set if - new_prefix is also set. - new_prefix: The desired new prefix length. This must be a - larger number (smaller prefix) than the existing prefix. - This should not be set if prefixlen_diff is also set. - - Returns: - An iterator of IPv(4|6) objects. - - Raises: - ValueError: The prefixlen_diff is too small or too large. - OR - prefixlen_diff and new_prefix are both set or new_prefix - is a smaller number than the current prefix (smaller - number means a larger network) - - """ - if self._prefixlen == self._max_prefixlen: - yield self - return - - if new_prefix is not None: - if new_prefix < self._prefixlen: - raise ValueError("new prefix must be longer") - if prefixlen_diff != 1: - raise ValueError("cannot set prefixlen_diff and new_prefix") - prefixlen_diff = new_prefix - self._prefixlen - - if prefixlen_diff < 0: - raise ValueError("prefix length diff must be > 0") - new_prefixlen = self._prefixlen + prefixlen_diff - - if new_prefixlen > self._max_prefixlen: - raise ValueError( - "prefix length diff %d is invalid for netblock %s" - % (new_prefixlen, self) - ) - - start = int(self.network_address) - end = int(self.broadcast_address) + 1 - step = (int(self.hostmask) + 1) >> prefixlen_diff - for new_addr in _compat_range(start, end, step): - current = self.__class__((new_addr, new_prefixlen)) - yield current - - def supernet(self, prefixlen_diff=1, new_prefix=None): - """The supernet containing the current network. - - Args: - prefixlen_diff: An integer, the amount the prefix length of - the network should be decreased by. For example, given a - /24 network and a prefixlen_diff of 3, a supernet with a - /21 netmask is returned. - - Returns: - An IPv4 network object. 
- - Raises: - ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have - a negative prefix length. - OR - If prefixlen_diff and new_prefix are both set or new_prefix is a - larger number than the current prefix (larger number means a - smaller network) - - """ - if self._prefixlen == 0: - return self - - if new_prefix is not None: - if new_prefix > self._prefixlen: - raise ValueError("new prefix must be shorter") - if prefixlen_diff != 1: - raise ValueError("cannot set prefixlen_diff and new_prefix") - prefixlen_diff = self._prefixlen - new_prefix - - new_prefixlen = self.prefixlen - prefixlen_diff - if new_prefixlen < 0: - raise ValueError( - "current prefixlen is %d, cannot have a prefixlen_diff of %d" - % (self.prefixlen, prefixlen_diff) - ) - return self.__class__( - ( - int(self.network_address) - & (int(self.netmask) << prefixlen_diff), - new_prefixlen, - ) - ) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is a multicast address. - See RFC 2373 2.7 for details. - - """ - return ( - self.network_address.is_multicast - and self.broadcast_address.is_multicast - ) - - @staticmethod - def _is_subnet_of(a, b): - try: - # Always false if one is v4 and the other is v6. - if a._version != b._version: - raise TypeError( - "%s and %s are not of the same version" % (a, b) - ) - return ( - b.network_address <= a.network_address - and b.broadcast_address >= a.broadcast_address - ) - except AttributeError: - raise TypeError( - "Unable to test subnet containment " - "between %s and %s" % (a, b) - ) - - def subnet_of(self, other): - """Return True if this network is a subnet of other.""" - return self._is_subnet_of(self, other) - - def supernet_of(self, other): - """Return True if this network is a supernet of other.""" - return self._is_subnet_of(other, self) - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. 
- - Returns: - A boolean, True if the address is within one of the - reserved IPv6 Network ranges. - - """ - return ( - self.network_address.is_reserved - and self.broadcast_address.is_reserved - ) - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is reserved per RFC 4291. - - """ - return ( - self.network_address.is_link_local - and self.broadcast_address.is_link_local - ) - - @property - def is_private(self): - """Test if this address is allocated for private networks. - - Returns: - A boolean, True if the address is reserved per - iana-ipv4-special-registry or iana-ipv6-special-registry. - - """ - return ( - self.network_address.is_private - and self.broadcast_address.is_private - ) - - @property - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, True if the address is not reserved per - iana-ipv4-special-registry or iana-ipv6-special-registry. - - """ - return not self.is_private - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 2373 2.5.2. - - """ - return ( - self.network_address.is_unspecified - and self.broadcast_address.is_unspecified - ) - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback address as defined in - RFC 2373 2.5.3. - - """ - return ( - self.network_address.is_loopback - and self.broadcast_address.is_loopback - ) - - -class _BaseV4(object): - - """Base IPv4 object. - - The following methods are used by IPv4 objects in both single IP - addresses and networks. - - """ - - __slots__ = () - _version = 4 - # Equivalent to 255.255.255.255 or 32 bits of 1's. - _ALL_ONES = (2 ** IPV4LENGTH) - 1 - _DECIMAL_DIGITS = frozenset("0123456789") - - # the valid octets for host and netmasks. 
only useful for IPv4. - _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0]) - - _max_prefixlen = IPV4LENGTH - # There are only a handful of valid v4 netmasks, so we cache them all - # when constructed (see _make_netmask()). - _netmask_cache = {} - - def _explode_shorthand_ip_string(self): - return _compat_str(self) - - @classmethod - def _make_netmask(cls, arg): - """Make a (netmask, prefix_len) tuple from the given argument. - - Argument can be: - - an integer (the prefix length) - - a string representing the prefix length (e.g. "24") - - a string representing the prefix netmask (e.g. "255.255.255.0") - """ - if arg not in cls._netmask_cache: - if isinstance(arg, _compat_int_types): - prefixlen = arg - else: - try: - # Check for a netmask in prefix length form - prefixlen = cls._prefix_from_prefix_string(arg) - except NetmaskValueError: - # Check for a netmask or hostmask in dotted-quad form. - # This may raise NetmaskValueError. - prefixlen = cls._prefix_from_ip_string(arg) - netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen)) - cls._netmask_cache[arg] = netmask, prefixlen - return cls._netmask_cache[arg] - - @classmethod - def _ip_int_from_string(cls, ip_str): - """Turn the given IP string into an integer for comparison. - - Args: - ip_str: A string, the IP ip_str. - - Returns: - The IP ip_str as an integer. - - Raises: - AddressValueError: if ip_str isn't a valid IPv4 Address. - - """ - if not ip_str: - raise AddressValueError("Address cannot be empty") - - octets = ip_str.split(".") - if len(octets) != 4: - raise AddressValueError("Expected 4 octets in %r" % ip_str) - - try: - return _compat_int_from_byte_vals( - map(cls._parse_octet, octets), "big" - ) - except ValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) - - @classmethod - def _parse_octet(cls, octet_str): - """Convert a decimal octet into an integer. - - Args: - octet_str: A string, the number to parse. - - Returns: - The octet as an integer. 
- - Raises: - ValueError: if the octet isn't strictly a decimal from [0..255]. - - """ - if not octet_str: - raise ValueError("Empty octet not permitted") - # Whitelist the characters, since int() allows a lot of bizarre stuff. - if not cls._DECIMAL_DIGITS.issuperset(octet_str): - msg = "Only decimal digits permitted in %r" - raise ValueError(msg % octet_str) - # We do the length check second, since the invalid character error - # is likely to be more informative for the user - if len(octet_str) > 3: - msg = "At most 3 characters permitted in %r" - raise ValueError(msg % octet_str) - # Convert to integer (we know digits are legal) - octet_int = int(octet_str, 10) - # Any octets that look like they *might* be written in octal, - # and which don't look exactly the same in both octal and - # decimal are rejected as ambiguous - if octet_int > 7 and octet_str[0] == "0": - msg = "Ambiguous (octal/decimal) value in %r not permitted" - raise ValueError(msg % octet_str) - if octet_int > 255: - raise ValueError("Octet %d (> 255) not permitted" % octet_int) - return octet_int - - @classmethod - def _string_from_ip_int(cls, ip_int): - """Turns a 32-bit integer into dotted decimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - The IP address as a string in dotted decimal notation. - - """ - return ".".join( - _compat_str( - struct.unpack(b"!B", b)[0] if isinstance(b, bytes) else b - ) - for b in _compat_to_bytes(ip_int, 4, "big") - ) - - def _is_hostmask(self, ip_str): - """Test if the IP string is a hostmask (rather than a netmask). - - Args: - ip_str: A string, the potential hostmask. - - Returns: - A boolean, True if the IP string is a hostmask. 
- - """ - bits = ip_str.split(".") - try: - parts = [x for x in map(int, bits) if x in self._valid_mask_octets] - except ValueError: - return False - if len(parts) != len(bits): - return False - if parts[0] < parts[-1]: - return True - return False - - def _reverse_pointer(self): - """Return the reverse DNS pointer name for the IPv4 address. - - This implements the method described in RFC1035 3.5. - - """ - reverse_octets = _compat_str(self).split(".")[::-1] - return ".".join(reverse_octets) + ".in-addr.arpa" - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def version(self): - return self._version - - -class IPv4Address(_BaseV4, _BaseAddress): - - """Represent and manipulate single IPv4 Addresses.""" - - __slots__ = ("_ip", "__weakref__") - - def __init__(self, address): - - """ - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv4Address('192.0.2.1') == IPv4Address(3221225985). - or, more generally - IPv4Address(int(IPv4Address('192.0.2.1'))) == - IPv4Address('192.0.2.1') - - Raises: - AddressValueError: If ipaddress isn't a valid IPv4 address. - - """ - # Efficient constructor from integer. - if isinstance(address, _compat_int_types): - self._check_int_address(address) - self._ip = address - return - - # Constructing from a packed address - if isinstance(address, bytes): - self._check_packed_address(address, 4) - bvs = _compat_bytes_to_byte_vals(address) - self._ip = _compat_int_from_byte_vals(bvs, "big") - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. 
- addr_str = _compat_str(address) - if "/" in addr_str: - raise AddressValueError("Unexpected '/' in %r" % address) - self._ip = self._ip_int_from_string(addr_str) - - @property - def packed(self): - """The binary representation of this address.""" - return v4_int_to_packed(self._ip) - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within the - reserved IPv4 Network range. - - """ - return self in self._constants._reserved_network - - @property - def is_private(self): - """Test if this address is allocated for private networks. - - Returns: - A boolean, True if the address is reserved per - iana-ipv4-special-registry. - - """ - return any(self in net for net in self._constants._private_networks) - - @property - def is_global(self): - return ( - self not in self._constants._public_network and not self.is_private - ) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is multicast. - See RFC 3171 for details. - - """ - return self in self._constants._multicast_network - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 5735 3. - - """ - return self == self._constants._unspecified_address - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback per RFC 3330. - - """ - return self in self._constants._loopback_network - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is link-local per RFC 3927. 
- - """ - return self in self._constants._linklocal_network - - -class IPv4Interface(IPv4Address): - def __init__(self, address): - if isinstance(address, (bytes, _compat_int_types)): - IPv4Address.__init__(self, address) - self.network = IPv4Network(self._ip) - self._prefixlen = self._max_prefixlen - return - - if isinstance(address, tuple): - IPv4Address.__init__(self, address[0]) - if len(address) > 1: - self._prefixlen = int(address[1]) - else: - self._prefixlen = self._max_prefixlen - - self.network = IPv4Network(address, strict=False) - self.netmask = self.network.netmask - self.hostmask = self.network.hostmask - return - - addr = _split_optional_netmask(address) - IPv4Address.__init__(self, addr[0]) - - self.network = IPv4Network(address, strict=False) - self._prefixlen = self.network._prefixlen - - self.netmask = self.network.netmask - self.hostmask = self.network.hostmask - - def __str__(self): - return "%s/%d" % ( - self._string_from_ip_int(self._ip), - self.network.prefixlen, - ) - - def __eq__(self, other): - address_equal = IPv4Address.__eq__(self, other) - if not address_equal or address_equal is NotImplemented: - return address_equal - try: - return self.network == other.network - except AttributeError: - # An interface with an associated network is NOT the - # same as an unassociated address. That's why the hash - # takes the extra info into account. - return False - - def __lt__(self, other): - address_less = IPv4Address.__lt__(self, other) - if address_less is NotImplemented: - return NotImplemented - try: - return ( - self.network < other.network - or self.network == other.network - and address_less - ) - except AttributeError: - # We *do* allow addresses and interfaces to be sorted. The - # unassociated address is considered less than all interfaces. 
- return False - - def __hash__(self): - return self._ip ^ self._prefixlen ^ int(self.network.network_address) - - __reduce__ = _IPAddressBase.__reduce__ - - @property - def ip(self): - return IPv4Address(self._ip) - - @property - def with_prefixlen(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen) - - @property - def with_netmask(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask) - - @property - def with_hostmask(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask) - - -class IPv4Network(_BaseV4, _BaseNetwork): - - """This class represents and manipulates 32-bit IPv4 network + addresses.. - - Attributes: [examples for IPv4Network('192.0.2.0/27')] - .network_address: IPv4Address('192.0.2.0') - .hostmask: IPv4Address('0.0.0.31') - .broadcast_address: IPv4Address('192.0.2.32') - .netmask: IPv4Address('255.255.255.224') - .prefixlen: 27 - - """ - - # Class to use when creating address objects - _address_class = IPv4Address - - def __init__(self, address, strict=True): - - """Instantiate a new IPv4 network object. - - Args: - address: A string or integer representing the IP [& network]. - '192.0.2.0/24' - '192.0.2.0/255.255.255.0' - '192.0.0.2/0.0.0.255' - are all functionally the same in IPv4. Similarly, - '192.0.2.1' - '192.0.2.1/255.255.255.255' - '192.0.2.1/32' - are also functionally equivalent. That is to say, failing to - provide a subnetmask will create an object with a mask of /32. - - If the mask (portion after the / in the argument) is given in - dotted quad form, it is treated as a netmask if it starts with a - non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it - starts with a zero field (e.g. 0.255.255.255 == /8), with the - single exception of an all-zero mask which is treated as a - netmask == /0. If no mask is given, a default of /32 is used. 
- - Additionally, an integer can be passed, so - IPv4Network('192.0.2.1') == IPv4Network(3221225985) - or, more generally - IPv4Interface(int(IPv4Interface('192.0.2.1'))) == - IPv4Interface('192.0.2.1') - - Raises: - AddressValueError: If ipaddress isn't a valid IPv4 address. - NetmaskValueError: If the netmask isn't valid for - an IPv4 address. - ValueError: If strict is True and a network address is not - supplied. - - """ - _BaseNetwork.__init__(self, address) - - # Constructing from a packed address or integer - if isinstance(address, (_compat_int_types, bytes)): - self.network_address = IPv4Address(address) - self.netmask, self._prefixlen = self._make_netmask( - self._max_prefixlen - ) - # fixme: address/network test here. - return - - if isinstance(address, tuple): - if len(address) > 1: - arg = address[1] - else: - # We weren't given an address[1] - arg = self._max_prefixlen - self.network_address = IPv4Address(address[0]) - self.netmask, self._prefixlen = self._make_netmask(arg) - packed = int(self.network_address) - if packed & int(self.netmask) != packed: - if strict: - raise ValueError("%s has host bits set" % self) - else: - self.network_address = IPv4Address( - packed & int(self.netmask) - ) - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP prefix string. 
- addr = _split_optional_netmask(address) - self.network_address = IPv4Address(self._ip_int_from_string(addr[0])) - - if len(addr) == 2: - arg = addr[1] - else: - arg = self._max_prefixlen - self.netmask, self._prefixlen = self._make_netmask(arg) - - if strict: - if ( - IPv4Address(int(self.network_address) & int(self.netmask)) - != self.network_address - ): - raise ValueError("%s has host bits set" % self) - self.network_address = IPv4Address( - int(self.network_address) & int(self.netmask) - ) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - - @property - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, True if the address is not reserved per - iana-ipv4-special-registry. - - """ - return ( - not ( - self.network_address in IPv4Network("100.64.0.0/10") - and self.broadcast_address in IPv4Network("100.64.0.0/10") - ) - and not self.is_private - ) - - -class _IPv4Constants(object): - - _linklocal_network = IPv4Network("169.254.0.0/16") - - _loopback_network = IPv4Network("127.0.0.0/8") - - _multicast_network = IPv4Network("224.0.0.0/4") - - _public_network = IPv4Network("100.64.0.0/10") - - _private_networks = [ - IPv4Network("0.0.0.0/8"), - IPv4Network("10.0.0.0/8"), - IPv4Network("127.0.0.0/8"), - IPv4Network("169.254.0.0/16"), - IPv4Network("172.16.0.0/12"), - IPv4Network("192.0.0.0/29"), - IPv4Network("192.0.0.170/31"), - IPv4Network("192.0.2.0/24"), - IPv4Network("192.168.0.0/16"), - IPv4Network("198.18.0.0/15"), - IPv4Network("198.51.100.0/24"), - IPv4Network("203.0.113.0/24"), - IPv4Network("240.0.0.0/4"), - IPv4Network("255.255.255.255/32"), - ] - - _reserved_network = IPv4Network("240.0.0.0/4") - - _unspecified_address = IPv4Address("0.0.0.0") - - -IPv4Address._constants = _IPv4Constants - - -class _BaseV6(object): - - """Base IPv6 object. - - The following methods are used by IPv6 objects in both single IP - addresses and networks. 
- - """ - - __slots__ = () - _version = 6 - _ALL_ONES = (2 ** IPV6LENGTH) - 1 - _HEXTET_COUNT = 8 - _HEX_DIGITS = frozenset("0123456789ABCDEFabcdef") - _max_prefixlen = IPV6LENGTH - - # There are only a bunch of valid v6 netmasks, so we cache them all - # when constructed (see _make_netmask()). - _netmask_cache = {} - - @classmethod - def _make_netmask(cls, arg): - """Make a (netmask, prefix_len) tuple from the given argument. - - Argument can be: - - an integer (the prefix length) - - a string representing the prefix length (e.g. "24") - - a string representing the prefix netmask (e.g. "255.255.255.0") - """ - if arg not in cls._netmask_cache: - if isinstance(arg, _compat_int_types): - prefixlen = arg - else: - prefixlen = cls._prefix_from_prefix_string(arg) - netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen)) - cls._netmask_cache[arg] = netmask, prefixlen - return cls._netmask_cache[arg] - - @classmethod - def _ip_int_from_string(cls, ip_str): - """Turn an IPv6 ip_str into an integer. - - Args: - ip_str: A string, the IPv6 ip_str. - - Returns: - An int, the IPv6 address - - Raises: - AddressValueError: if ip_str isn't a valid IPv6 Address. - - """ - if not ip_str: - raise AddressValueError("Address cannot be empty") - - parts = ip_str.split(":") - - # An IPv6 address needs at least 2 colons (3 parts). - _min_parts = 3 - if len(parts) < _min_parts: - msg = "At least %d parts expected in %r" % (_min_parts, ip_str) - raise AddressValueError(msg) - - # If the address has an IPv4-style suffix, convert it to hexadecimal. - if "." in parts[-1]: - try: - ipv4_int = IPv4Address(parts.pop())._ip - except AddressValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) - parts.append("%x" % ((ipv4_int >> 16) & 0xFFFF)) - parts.append("%x" % (ipv4_int & 0xFFFF)) - - # An IPv6 address can't have more than 8 colons (9 parts). - # The extra colon comes from using the "::" notation for a single - # leading or trailing zero part. 
- _max_parts = cls._HEXTET_COUNT + 1 - if len(parts) > _max_parts: - msg = "At most %d colons permitted in %r" % ( - _max_parts - 1, - ip_str, - ) - raise AddressValueError(msg) - - # Disregarding the endpoints, find '::' with nothing in between. - # This indicates that a run of zeroes has been skipped. - skip_index = None - for i in _compat_range(1, len(parts) - 1): - if not parts[i]: - if skip_index is not None: - # Can't have more than one '::' - msg = "At most one '::' permitted in %r" % ip_str - raise AddressValueError(msg) - skip_index = i - - # parts_hi is the number of parts to copy from above/before the '::' - # parts_lo is the number of parts to copy from below/after the '::' - if skip_index is not None: - # If we found a '::', then check if it also covers the endpoints. - parts_hi = skip_index - parts_lo = len(parts) - skip_index - 1 - if not parts[0]: - parts_hi -= 1 - if parts_hi: - msg = "Leading ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # ^: requires ^:: - if not parts[-1]: - parts_lo -= 1 - if parts_lo: - msg = "Trailing ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # :$ requires ::$ - parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo) - if parts_skipped < 1: - msg = "Expected at most %d other parts with '::' in %r" - raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str)) - else: - # Otherwise, allocate the entire address to parts_hi. The - # endpoints could still be empty, but _parse_hextet() will check - # for that. 
- if len(parts) != cls._HEXTET_COUNT: - msg = "Exactly %d parts expected without '::' in %r" - raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str)) - if not parts[0]: - msg = "Leading ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # ^: requires ^:: - if not parts[-1]: - msg = "Trailing ':' only permitted as part of '::' in %r" - raise AddressValueError(msg % ip_str) # :$ requires ::$ - parts_hi = len(parts) - parts_lo = 0 - parts_skipped = 0 - - try: - # Now, parse the hextets into a 128-bit integer. - ip_int = 0 - for i in range(parts_hi): - ip_int <<= 16 - ip_int |= cls._parse_hextet(parts[i]) - ip_int <<= 16 * parts_skipped - for i in range(-parts_lo, 0): - ip_int <<= 16 - ip_int |= cls._parse_hextet(parts[i]) - return ip_int - except ValueError as exc: - raise AddressValueError("%s in %r" % (exc, ip_str)) - - @classmethod - def _parse_hextet(cls, hextet_str): - """Convert an IPv6 hextet string into an integer. - - Args: - hextet_str: A string, the number to parse. - - Returns: - The hextet as an integer. - - Raises: - ValueError: if the input isn't strictly a hex number from - [0..FFFF]. - - """ - # Whitelist the characters, since int() allows a lot of bizarre stuff. - if not cls._HEX_DIGITS.issuperset(hextet_str): - raise ValueError("Only hex digits permitted in %r" % hextet_str) - # We do the length check second, since the invalid character error - # is likely to be more informative for the user - if len(hextet_str) > 4: - msg = "At most 4 characters permitted in %r" - raise ValueError(msg % hextet_str) - # Length check means we can skip checking the integer value - return int(hextet_str, 16) - - @classmethod - def _compress_hextets(cls, hextets): - """Compresses a list of hextets. 
- - Compresses a list of strings, replacing the longest continuous - sequence of "0" in the list with "" and adding empty strings at - the beginning or at the end of the string such that subsequently - calling ":".join(hextets) will produce the compressed version of - the IPv6 address. - - Args: - hextets: A list of strings, the hextets to compress. - - Returns: - A list of strings. - - """ - best_doublecolon_start = -1 - best_doublecolon_len = 0 - doublecolon_start = -1 - doublecolon_len = 0 - for index, hextet in enumerate(hextets): - if hextet == "0": - doublecolon_len += 1 - if doublecolon_start == -1: - # Start of a sequence of zeros. - doublecolon_start = index - if doublecolon_len > best_doublecolon_len: - # This is the longest sequence of zeros so far. - best_doublecolon_len = doublecolon_len - best_doublecolon_start = doublecolon_start - else: - doublecolon_len = 0 - doublecolon_start = -1 - - if best_doublecolon_len > 1: - best_doublecolon_end = ( - best_doublecolon_start + best_doublecolon_len - ) - # For zeros at the end of the address. - if best_doublecolon_end == len(hextets): - hextets += [""] - hextets[best_doublecolon_start:best_doublecolon_end] = [""] - # For zeros at the beginning of the address. - if best_doublecolon_start == 0: - hextets = [""] + hextets - - return hextets - - @classmethod - def _string_from_ip_int(cls, ip_int=None): - """Turns a 128-bit integer into hexadecimal notation. - - Args: - ip_int: An integer, the IP address. - - Returns: - A string, the hexadecimal representation of the address. - - Raises: - ValueError: The address is bigger than 128 bits of all ones. 
- - """ - if ip_int is None: - ip_int = int(cls._ip) - - if ip_int > cls._ALL_ONES: - raise ValueError("IPv6 address is too large") - - hex_str = "%032x" % ip_int - hextets = ["%x" % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)] - - hextets = cls._compress_hextets(hextets) - return ":".join(hextets) - - def _explode_shorthand_ip_string(self): - """Expand a shortened IPv6 address. - - Args: - ip_str: A string, the IPv6 address. - - Returns: - A string, the expanded IPv6 address. - - """ - if isinstance(self, IPv6Network): - ip_str = _compat_str(self.network_address) - elif isinstance(self, IPv6Interface): - ip_str = _compat_str(self.ip) - else: - ip_str = _compat_str(self) - - ip_int = self._ip_int_from_string(ip_str) - hex_str = "%032x" % ip_int - parts = [hex_str[x:x + 4] for x in range(0, 32, 4)] - if isinstance(self, (_BaseNetwork, IPv6Interface)): - return "%s/%d" % (":".join(parts), self._prefixlen) - return ":".join(parts) - - def _reverse_pointer(self): - """Return the reverse DNS pointer name for the IPv6 address. - - This implements the method described in RFC3596 2.5. - - """ - reverse_chars = self.exploded[::-1].replace(":", "") - return ".".join(reverse_chars) + ".ip6.arpa" - - @property - def max_prefixlen(self): - return self._max_prefixlen - - @property - def version(self): - return self._version - - -class IPv6Address(_BaseV6, _BaseAddress): - - """Represent and manipulate single IPv6 Addresses.""" - - __slots__ = ("_ip", "__weakref__") - - def __init__(self, address): - """Instantiate a new IPv6 address object. - - Args: - address: A string or integer representing the IP - - Additionally, an integer can be passed, so - IPv6Address('2001:db8::') == - IPv6Address(42540766411282592856903984951653826560) - or, more generally - IPv6Address(int(IPv6Address('2001:db8::'))) == - IPv6Address('2001:db8::') - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - - """ - # Efficient constructor from integer. 
- if isinstance(address, _compat_int_types): - self._check_int_address(address) - self._ip = address - return - - # Constructing from a packed address - if isinstance(address, bytes): - self._check_packed_address(address, 16) - bvs = _compat_bytes_to_byte_vals(address) - self._ip = _compat_int_from_byte_vals(bvs, "big") - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP string. - addr_str = _compat_str(address) - if "/" in addr_str: - raise AddressValueError("Unexpected '/' in %r" % address) - self._ip = self._ip_int_from_string(addr_str) - - @property - def packed(self): - """The binary representation of this address.""" - return v6_int_to_packed(self._ip) - - @property - def is_multicast(self): - """Test if the address is reserved for multicast use. - - Returns: - A boolean, True if the address is a multicast address. - See RFC 2373 2.7 for details. - - """ - return self in self._constants._multicast_network - - @property - def is_reserved(self): - """Test if the address is otherwise IETF reserved. - - Returns: - A boolean, True if the address is within one of the - reserved IPv6 Network ranges. - - """ - return any(self in x for x in self._constants._reserved_networks) - - @property - def is_link_local(self): - """Test if the address is reserved for link-local. - - Returns: - A boolean, True if the address is reserved per RFC 4291. - - """ - return self in self._constants._linklocal_network - - @property - def is_site_local(self): - """Test if the address is reserved for site-local. - - Note that the site-local address space has been deprecated by RFC 3879. - Use is_private to test if this address is in the space of unique local - addresses as defined by RFC 4193. - - Returns: - A boolean, True if the address is reserved per RFC 3513 2.5.6. - - """ - return self in self._constants._sitelocal_network - - @property - def is_private(self): - """Test if this address is allocated for private networks. 
- - Returns: - A boolean, True if the address is reserved per - iana-ipv6-special-registry. - - """ - return any(self in net for net in self._constants._private_networks) - - @property - def is_global(self): - """Test if this address is allocated for public networks. - - Returns: - A boolean, true if the address is not reserved per - iana-ipv6-special-registry. - - """ - return not self.is_private - - @property - def is_unspecified(self): - """Test if the address is unspecified. - - Returns: - A boolean, True if this is the unspecified address as defined in - RFC 2373 2.5.2. - - """ - return self._ip == 0 - - @property - def is_loopback(self): - """Test if the address is a loopback address. - - Returns: - A boolean, True if the address is a loopback address as defined in - RFC 2373 2.5.3. - - """ - return self._ip == 1 - - @property - def ipv4_mapped(self): - """Return the IPv4 mapped address. - - Returns: - If the IPv6 address is a v4 mapped address, return the - IPv4 mapped address. Return None otherwise. - - """ - if (self._ip >> 32) != 0xFFFF: - return None - return IPv4Address(self._ip & 0xFFFFFFFF) - - @property - def teredo(self): - """Tuple of embedded teredo IPs. - - Returns: - Tuple of the (server, client) IPs or None if the address - doesn't appear to be a teredo address (doesn't start with - 2001::/32) - - """ - if (self._ip >> 96) != 0x20010000: - return None - return ( - IPv4Address((self._ip >> 64) & 0xFFFFFFFF), - IPv4Address(~self._ip & 0xFFFFFFFF), - ) - - @property - def sixtofour(self): - """Return the IPv4 6to4 embedded address. - - Returns: - The IPv4 6to4-embedded address if present or None if the - address doesn't appear to contain a 6to4 embedded address. 
- - """ - if (self._ip >> 112) != 0x2002: - return None - return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) - - -class IPv6Interface(IPv6Address): - def __init__(self, address): - if isinstance(address, (bytes, _compat_int_types)): - IPv6Address.__init__(self, address) - self.network = IPv6Network(self._ip) - self._prefixlen = self._max_prefixlen - return - if isinstance(address, tuple): - IPv6Address.__init__(self, address[0]) - if len(address) > 1: - self._prefixlen = int(address[1]) - else: - self._prefixlen = self._max_prefixlen - self.network = IPv6Network(address, strict=False) - self.netmask = self.network.netmask - self.hostmask = self.network.hostmask - return - - addr = _split_optional_netmask(address) - IPv6Address.__init__(self, addr[0]) - self.network = IPv6Network(address, strict=False) - self.netmask = self.network.netmask - self._prefixlen = self.network._prefixlen - self.hostmask = self.network.hostmask - - def __str__(self): - return "%s/%d" % ( - self._string_from_ip_int(self._ip), - self.network.prefixlen, - ) - - def __eq__(self, other): - address_equal = IPv6Address.__eq__(self, other) - if not address_equal or address_equal is NotImplemented: - return address_equal - try: - return self.network == other.network - except AttributeError: - # An interface with an associated network is NOT the - # same as an unassociated address. That's why the hash - # takes the extra info into account. - return False - - def __lt__(self, other): - address_less = IPv6Address.__lt__(self, other) - if address_less is NotImplemented: - return NotImplemented - try: - return ( - self.network < other.network - or self.network == other.network - and address_less - ) - except AttributeError: - # We *do* allow addresses and interfaces to be sorted. The - # unassociated address is considered less than all interfaces. 
- return False - - def __hash__(self): - return self._ip ^ self._prefixlen ^ int(self.network.network_address) - - __reduce__ = _IPAddressBase.__reduce__ - - @property - def ip(self): - return IPv6Address(self._ip) - - @property - def with_prefixlen(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen) - - @property - def with_netmask(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask) - - @property - def with_hostmask(self): - return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask) - - @property - def is_unspecified(self): - return self._ip == 0 and self.network.is_unspecified - - @property - def is_loopback(self): - return self._ip == 1 and self.network.is_loopback - - -class IPv6Network(_BaseV6, _BaseNetwork): - - """This class represents and manipulates 128-bit IPv6 networks. - - Attributes: [examples for IPv6('2001:db8::1000/124')] - .network_address: IPv6Address('2001:db8::1000') - .hostmask: IPv6Address('::f') - .broadcast_address: IPv6Address('2001:db8::100f') - .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') - .prefixlen: 124 - - """ - - # Class to use when creating address objects - _address_class = IPv6Address - - def __init__(self, address, strict=True): - """Instantiate a new IPv6 Network object. - - Args: - address: A string or integer representing the IPv6 network or the - IP and prefix/netmask. - '2001:db8::/128' - '2001:db8:0000:0000:0000:0000:0000:0000/128' - '2001:db8::' - are all functionally the same in IPv6. That is to say, - failing to provide a subnetmask will create an object with - a mask of /128. - - Additionally, an integer can be passed, so - IPv6Network('2001:db8::') == - IPv6Network(42540766411282592856903984951653826560) - or, more generally - IPv6Network(int(IPv6Network('2001:db8::'))) == - IPv6Network('2001:db8::') - - strict: A boolean. 
If true, ensure that we have been passed - A true network address, eg, 2001:db8::1000/124 and not an - IP address on a network, eg, 2001:db8::1/124. - - Raises: - AddressValueError: If address isn't a valid IPv6 address. - NetmaskValueError: If the netmask isn't valid for - an IPv6 address. - ValueError: If strict was True and a network address was not - supplied. - - """ - _BaseNetwork.__init__(self, address) - - # Efficient constructor from integer or packed address - if isinstance(address, (bytes, _compat_int_types)): - self.network_address = IPv6Address(address) - self.netmask, self._prefixlen = self._make_netmask( - self._max_prefixlen - ) - return - - if isinstance(address, tuple): - if len(address) > 1: - arg = address[1] - else: - arg = self._max_prefixlen - self.netmask, self._prefixlen = self._make_netmask(arg) - self.network_address = IPv6Address(address[0]) - packed = int(self.network_address) - if packed & int(self.netmask) != packed: - if strict: - raise ValueError("%s has host bits set" % self) - else: - self.network_address = IPv6Address( - packed & int(self.netmask) - ) - return - - # Assume input argument to be string or any object representation - # which converts into a formatted IP prefix string. - addr = _split_optional_netmask(address) - - self.network_address = IPv6Address(self._ip_int_from_string(addr[0])) - - if len(addr) == 2: - arg = addr[1] - else: - arg = self._max_prefixlen - self.netmask, self._prefixlen = self._make_netmask(arg) - - if strict: - if ( - IPv6Address(int(self.network_address) & int(self.netmask)) - != self.network_address - ): - raise ValueError("%s has host bits set" % self) - self.network_address = IPv6Address( - int(self.network_address) & int(self.netmask) - ) - - if self._prefixlen == (self._max_prefixlen - 1): - self.hosts = self.__iter__ - - def hosts(self): - """Generate Iterator over usable hosts in a network. - - This is like __iter__ except it doesn't return the - Subnet-Router anycast address. 
- - """ - network = int(self.network_address) - broadcast = int(self.broadcast_address) - for x in _compat_range(network + 1, broadcast + 1): - yield self._address_class(x) - - @property - def is_site_local(self): - """Test if the address is reserved for site-local. - - Note that the site-local address space has been deprecated by RFC 3879. - Use is_private to test if this address is in the space of unique local - addresses as defined by RFC 4193. - - Returns: - A boolean, True if the address is reserved per RFC 3513 2.5.6. - - """ - return ( - self.network_address.is_site_local - and self.broadcast_address.is_site_local - ) - - -class _IPv6Constants(object): - - _linklocal_network = IPv6Network("fe80::/10") - - _multicast_network = IPv6Network("ff00::/8") - - _private_networks = [ - IPv6Network("::1/128"), - IPv6Network("::/128"), - IPv6Network("::ffff:0:0/96"), - IPv6Network("100::/64"), - IPv6Network("2001::/23"), - IPv6Network("2001:2::/48"), - IPv6Network("2001:db8::/32"), - IPv6Network("2001:10::/28"), - IPv6Network("fc00::/7"), - IPv6Network("fe80::/10"), - ] - - _reserved_networks = [ - IPv6Network("::/8"), - IPv6Network("100::/8"), - IPv6Network("200::/7"), - IPv6Network("400::/6"), - IPv6Network("800::/5"), - IPv6Network("1000::/4"), - IPv6Network("4000::/3"), - IPv6Network("6000::/3"), - IPv6Network("8000::/3"), - IPv6Network("A000::/3"), - IPv6Network("C000::/3"), - IPv6Network("E000::/4"), - IPv6Network("F000::/5"), - IPv6Network("F800::/6"), - IPv6Network("FE00::/9"), - ] - - _sitelocal_network = IPv6Network("fec0::/10") - - -IPv6Address._constants = _IPv6Constants diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py index 054a4d4790..48e2f10ef3 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py +++ b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py @@ -17,11 +17,12 @@ module: scaleway_security_group_rule short_description: 
Scaleway Security Group Rule management module author: Antoine Barbare (@abarbare) description: - - This module manages Security Group Rule on Scaleway account - U(https://developer.scaleway.com) + - This module manages Security Group Rule on Scaleway account + U(https://developer.scaleway.com) extends_documentation_fragment: -- community.general.scaleway - + - community.general.scaleway +requirements: + - ipaddress options: state: @@ -129,10 +130,19 @@ data: } ''' +import traceback + from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object -from ansible_collections.community.general.plugins.module_utils.compat.ipaddress import ip_network from ansible.module_utils._text import to_text -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +try: + from ipaddress import ip_network +except ImportError: + IPADDRESS_IMP_ERR = traceback.format_exc() + HAS_IPADDRESS = False +else: + HAS_IPADDRESS = True def get_sgr_from_api(security_group_rules, security_group_rule): @@ -255,6 +265,8 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, ) + if not HAS_IPADDRESS: + module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR) core(module) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 7beedfa206..a33e194233 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -1,6 +1,4 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time -plugins/module_utils/compat/ipaddress.py no-assert -plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements diff --git 
a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 80975cf389..4678f10294 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -1,5 +1,3 @@ -plugins/module_utils/compat/ipaddress.py no-assert -plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 68684f000d..ec34ff7833 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -1,5 +1,3 @@ -plugins/module_utils/compat/ipaddress.py no-assert -plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 36a0c3e08e..8f18be1c44 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -1,6 +1,4 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time -plugins/module_utils/compat/ipaddress.py no-assert -plugins/module_utils/compat/ipaddress.py no-unicode-literals plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc From 73863262584ba58043e5357d8c45b468dd7de5e2 Mon Sep 17 00:00:00 2001 From: Mike Russell Date: Sat, 8 May 2021 02:58:55 -0700 Subject: [PATCH 0268/3093] Small Documentation Example Of Cask Leveraging (#2462) * Small Documentation Example Of Cask Leveraging - Just a lil' demo showing that we can utilize homebrew/cask/foo syntax for given name of package to grab associated cask pacakge Resolves: 
patch/sml-doc-example-update * Slight Documentation Example Edit - adjusting documentation example to provide better info surrounding installing a given formula from brew via cask Resolves: patch/sml-doc-example-update * Small Edits To Make PEP8 Happy - format code with autopep8 in vs code Resolves: patch/sml-doc-example-update * Only Making Small PEP8 Change - reverting previous mass PEP8 format, focus on trimming whitespace on doc example entry Resolves: patch/sml-doc-example-update * Remove Trailing Whitespace PEP8 - removed trailing whitespace on doc example chunk Resolves: patch/sml-doc-example-update --- plugins/modules/packaging/os/homebrew.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/modules/packaging/os/homebrew.py b/plugins/modules/packaging/os/homebrew.py index 9a41370c3d..47ec930a2c 100644 --- a/plugins/modules/packaging/os/homebrew.py +++ b/plugins/modules/packaging/os/homebrew.py @@ -127,6 +127,11 @@ EXAMPLES = ''' state: present install_options: with-baz,enable-debug +- name: Install formula foo with 'brew' from cask + community.general.homebrew: + name: homebrew/cask/foo + state: present + - name: Use ignored-pinned option while upgrading all community.general.homebrew: upgrade_all: yes From 4cdff8654a8ef793736b95d84b872acf3779bdea Mon Sep 17 00:00:00 2001 From: vbarba Date: Sun, 9 May 2021 22:25:00 +0200 Subject: [PATCH 0269/3093] fix stackpath_compute validate_config (#2448) * fix stackpath_compute validate_config get the lenght for the client_id / client_secret to validate inventory configuration * Add changelog fragment. 
Co-authored-by: Felix Fontein --- changelogs/fragments/2448-stackpath_compute-fix.yml | 2 ++ plugins/inventory/stackpath_compute.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2448-stackpath_compute-fix.yml diff --git a/changelogs/fragments/2448-stackpath_compute-fix.yml b/changelogs/fragments/2448-stackpath_compute-fix.yml new file mode 100644 index 0000000000..196db780b1 --- /dev/null +++ b/changelogs/fragments/2448-stackpath_compute-fix.yml @@ -0,0 +1,2 @@ +bugfixes: +- "stackpath_compute inventory script - fix broken validation checks for client ID and client secret (https://github.com/ansible-collections/community.general/pull/2448)." diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index 393edac384..fb879e869e 100644 --- a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -102,13 +102,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): raise AnsibleError("plugin doesn't match this plugin") try: client_id = config['client_id'] - if client_id != 32: + if len(client_id) != 32: raise AnsibleError("client_id must be 32 characters long") except KeyError: raise AnsibleError("config missing client_id, a required option") try: client_secret = config['client_secret'] - if client_secret != 64: + if len(client_secret) != 64: raise AnsibleError("client_secret must be 64 characters long") except KeyError: raise AnsibleError("config missing client_id, a required option") From 2e58dfe52afd715a967c01b4994c1a3574e835dd Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 10 May 2021 14:45:10 +0200 Subject: [PATCH 0270/3093] Clarify Windows (non-)support. (#2476) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 935f0ecabd..306f307128 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,8 @@ This repo contains the `community.general` Ansible Collection. 
The collection in You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). +Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so. + ## Tested with Ansible Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported. From 8e7aff00b5f3af2ed7dbc377255832da68817144 Mon Sep 17 00:00:00 2001 From: sam-lunt Date: Mon, 10 May 2021 10:55:19 -0500 Subject: [PATCH 0271/3093] Avoid incorrectly marking zfs tasks as changed (#2454) * Avoid incorrectly marking zfs tasks as changed The zfs module will incorrectly mark certain tasks as having been changed. For example, if a dataset has a quota of "1G" and the user changes it to "1024M", the actual quota vale has not changed, but since the module is doing a simple string comparison between "1G" and "1024M", it marks the step as "changed". 
Instead of trying to handle all the corner cases of zfs (another example is when the zpool "altroot" property has been set), this change simply compares the output of "zfs-get" from before and after "zfs-set" is called * update changelog format * Update changelogs/fragments/2454-detect_zfs_changed.yml Co-authored-by: Felix Fontein * add note about check_mode * Update plugins/modules/storage/zfs/zfs.py Co-authored-by: Felix Fontein * Update plugins/modules/storage/zfs/zfs.py Co-authored-by: Felix Fontein * clarify check mode qualifications * rephrase to avoid hypothetical Co-authored-by: Felix Fontein --- .../fragments/2454-detect_zfs_changed.yml | 2 ++ plugins/modules/storage/zfs/zfs.py | 24 +++++++++++++++---- 2 files changed, 21 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2454-detect_zfs_changed.yml diff --git a/changelogs/fragments/2454-detect_zfs_changed.yml b/changelogs/fragments/2454-detect_zfs_changed.yml new file mode 100644 index 0000000000..0604278f6b --- /dev/null +++ b/changelogs/fragments/2454-detect_zfs_changed.yml @@ -0,0 +1,2 @@ +bugfixes: + - zfs - certain ZFS properties, especially sizes, would lead to a task being falsely marked as "changed" even when no actual change was made (https://github.com/ansible-collections/community.general/issues/975, https://github.com/ansible-collections/community.general/pull/2454). diff --git a/plugins/modules/storage/zfs/zfs.py b/plugins/modules/storage/zfs/zfs.py index fe693a5045..2d5d4487dd 100644 --- a/plugins/modules/storage/zfs/zfs.py +++ b/plugins/modules/storage/zfs/zfs.py @@ -37,6 +37,12 @@ options: - A dictionary of zfs properties to be set. - See the zfs(8) man page for more information. type: dict +notes: + - C(check_mode) is supported, but in certain situations it may report a task + as changed that will not be reported as changed when C(check_mode) is disabled. 
+ For example, this might occur when the zpool C(altroot) option is set or when + a size is written using human-readable notation, such as C(1M) or C(1024K), + instead of as an unqualified byte count, such as C(1048576). author: - Johan Wiren (@johanwiren) ''' @@ -184,9 +190,7 @@ class Zfs(object): return cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name] (rc, out, err) = self.module.run_command(cmd) - if rc == 0: - self.changed = True - else: + if rc != 0: self.module.fail_json(msg=err) def set_properties_if_changed(self): @@ -194,15 +198,25 @@ class Zfs(object): for prop, value in self.properties.items(): if current_properties.get(prop, None) != value: self.set_property(prop, value) + if self.module.check_mode: + return + updated_properties = self.get_current_properties() + for prop in self.properties: + value = updated_properties.get(prop, None) + if value is None: + self.module.fail_json(msg="zfsprop was not present after being successfully set: %s" % prop) + if current_properties.get(prop, None) != value: + self.changed = True def get_current_properties(self): - cmd = [self.zfs_cmd, 'get', '-H'] + cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,value,source"] if self.enhanced_sharing: cmd += ['-e'] cmd += ['all', self.name] rc, out, err = self.module.run_command(" ".join(cmd)) properties = dict() - for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]: + for line in out.splitlines(): + prop, value, source = line.split('\t') # include source '-' so that creation-only properties are not removed # to avoids errors when the dataset already exists and the property is not changed # this scenario is most likely when the same playbook is run more than once From 624eb7171e8afc72684695aaad6c12d1a27c3c26 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 11 May 2021 07:52:57 +0200 Subject: [PATCH 0272/3093] Run unit tests also with Python 3.10. 
(#2486) ci_complete --- .azure-pipelines/azure-pipelines.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 8d1b81865e..8dc49e5c03 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -124,6 +124,7 @@ stages: - test: 3.7 - test: 3.8 - test: 3.9 + - test: '3.10' - stage: Units_2_11 displayName: Units 2.11 dependsOn: [] From eea4f4596541fb0a3fc348bf36f6208c2a408b5f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 11 May 2021 19:27:05 +0200 Subject: [PATCH 0273/3093] Add dependent lookup plugin (#2164) * Add dependent lookup plugin. * Use correct YAML booleans. * Began complete rewrite. * Only match start of error msg. * Improve tests. * Work around old Jinja2 versions. * Fix metadata. * Fix filter name. --- plugins/lookup/dependent.py | 208 ++++++++++++++++++ .../targets/lookup_dependent/aliases | 2 + .../targets/lookup_dependent/tasks/main.yml | 179 +++++++++++++++ tests/unit/plugins/lookup/test_dependent.py | 44 ++++ 4 files changed, 433 insertions(+) create mode 100644 plugins/lookup/dependent.py create mode 100644 tests/integration/targets/lookup_dependent/aliases create mode 100644 tests/integration/targets/lookup_dependent/tasks/main.yml create mode 100644 tests/unit/plugins/lookup/test_dependent.py diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py new file mode 100644 index 0000000000..a22a98476c --- /dev/null +++ b/plugins/lookup/dependent.py @@ -0,0 +1,208 @@ +# (c) 2015-2021, Felix Fontein +# (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +name: dependent +short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables +version_added: 3.1.0 +description: + - 
"Takes the input lists and returns a list with elements that are lists, dictionaries, + or template expressions which evaluate to lists or dicts, composed of the elements of + the input evaluated lists and dictionaries." +options: + _raw: + description: + - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. + The name is the index that is used in the result object. The value is iterated over as described below. + - If the value is a list, it is simply iterated over. + - If the value is a dictionary, it is iterated over and returned as if they would be processed by the + R(ansible.builtin.dict2items filter,ansible_collections.ansible.builtin.dict2items_filter). + - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen + elements with C(item.). The result must be a list or a dictionary. + type: list + elements: dict + required: true +""" + +EXAMPLES = """ +- name: Install/remove public keys for active admin users + ansible.posix.authorized_key: + user: "{{ item.admin.key }}" + key: "{{ lookup('file', item.key.public_key) }}" + state: "{{ 'present' if item.key.active else 'absent' }}" + when: item.admin.value.active + with_community.general.dependent: + - admin: admin_user_data + - key: admin_ssh_keys[item.admin.key] + loop_control: + # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists + label: "{{ [item.admin.key, 'active' if item.key.active else 'inactive', item.key.public_key] }}" + vars: + admin_user_data: + admin1: + name: Alice + active: true + admin2: + name: Bob + active: true + admin_ssh_keys: + admin1: + - private_key: keys/private_key_admin1.pem + public_key: keys/private_key_admin1.pub + active: true + admin2: + - private_key: keys/private_key_admin2.pem + public_key: keys/private_key_admin2.pub + active: true + - private_key: keys/private_key_admin2-old.pem + public_key: keys/private_key_admin2-old.pub + active: 
false + +- name: Update DNS records + community.aws.route53: + zone: "{{ item.zone.key }}" + record: "{{ item.prefix.key ~ '.' if item.prefix.key else '' }}{{ item.zone.key }}" + type: "{{ item.entry.key }}" + ttl: "{{ item.entry.value.ttl | default(3600) }}" + value: "{{ item.entry.value.value }}" + state: "{{ 'absent' if (item.entry.value.absent | default(False)) else 'present' }}" + overwrite: true + loop_control: + # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists + label: |- + {{ [item.zone.key, item.prefix.key, item.entry.key, + item.entry.value.ttl | default(3600), + item.entry.value.absent | default(False), item.entry.value.value] }} + with_community.general.dependent: + - zone: dns_setup + - prefix: item.zone.value + - entry: item.prefix.value + vars: + dns_setup: + example.com: + '': + A: + value: + - 1.2.3.4 + AAAA: + value: + - "2a01:1:2:3::1" + 'test._domainkey': + TXT: + ttl: 300 + value: + - '"k=rsa; t=s; p=MIGfMA..."' + example.org: + 'www': + A: + value: + - 1.2.3.4 + - 5.6.7.8 +""" + +RETURN = """ + _list: + description: + - A list composed of dictionaries whose keys are the variable names from the input list. + type: list + elements: dict + sample: + - key1: a + key2: test + - key1: a + key2: foo + - key1: b + key2: bar +""" + +from ansible.errors import AnsibleLookupError +from ansible.module_utils.common._collections_compat import Mapping, Sequence +from ansible.module_utils.six import string_types +from ansible.plugins.lookup import LookupBase +from ansible.template import Templar + + +class LookupModule(LookupBase): + def __evaluate(self, expression, templar, variables): + """Evaluate expression with templar. + + ``expression`` is the expression to evaluate. + ``variables`` are the variables to use. 
+ """ + templar.available_variables = variables or {} + return templar.template("{0}{1}{2}".format("{{", expression, "}}"), cache=False) + + def __process(self, result, terms, index, current, templar, variables): + """Fills ``result`` list with evaluated items. + + ``result`` is a list where the resulting items are placed. + ``terms`` is the parsed list of terms + ``index`` is the current index to be processed in the list. + ``current`` is a dictionary where the first ``index`` values are filled in. + ``variables`` are the variables currently available. + """ + # If we are done, add to result list: + if index == len(terms): + result.append(current.copy()) + return + + key, expression, values = terms[index] + + if expression is not None: + # Evaluate expression in current context + vars = variables.copy() + vars['item'] = current.copy() + try: + values = self.__evaluate(expression, templar, variables=vars) + except Exception as e: + raise AnsibleLookupError( + 'Caught "{error}" while evaluating {key!r} with item == {item!r}'.format( + error=e, key=key, item=current)) + + if isinstance(values, Mapping): + for idx, val in sorted(values.items()): + current[key] = dict([('key', idx), ('value', val)]) + self.__process(result, terms, index + 1, current, templar, variables) + elif isinstance(values, Sequence): + for elt in values: + current[key] = elt + self.__process(result, terms, index + 1, current, templar, variables) + else: + raise AnsibleLookupError( + 'Did not obtain dictionary or list while evaluating {key!r} with item == {item!r}, but {type}'.format( + key=key, item=current, type=type(values))) + + def run(self, terms, variables=None, **kwargs): + """Generate list.""" + result = [] + if len(terms) > 0: + templar = Templar(loader=self._templar._loader) + data = [] + vars_so_far = set() + for index, term in enumerate(terms): + if not isinstance(term, Mapping): + raise AnsibleLookupError( + 'Parameter {index} must be a dictionary, got {type}'.format( + index=index, 
type=type(term))) + if len(term) != 1: + raise AnsibleLookupError( + 'Parameter {index} must be a one-element dictionary, got {count} elements'.format( + index=index, count=len(term))) + k, v = list(term.items())[0] + if k in vars_so_far: + raise AnsibleLookupError( + 'The variable {key!r} appears more than once'.format(key=k)) + vars_so_far.add(k) + if isinstance(v, string_types): + data.append((k, v, None)) + elif isinstance(v, (Sequence, Mapping)): + data.append((k, None, v)) + else: + raise AnsibleLookupError( + 'Parameter {key!r} (index {index}) must have a value of type string, dictionary or list, got type {type}'.format( + index=index, key=k, type=type(v))) + self.__process(result, data, 0, {}, templar, variables) + return result diff --git a/tests/integration/targets/lookup_dependent/aliases b/tests/integration/targets/lookup_dependent/aliases new file mode 100644 index 0000000000..45489be80c --- /dev/null +++ b/tests/integration/targets/lookup_dependent/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/lookup_dependent/tasks/main.yml b/tests/integration/targets/lookup_dependent/tasks/main.yml new file mode 100644 index 0000000000..0f1b8d34fb --- /dev/null +++ b/tests/integration/targets/lookup_dependent/tasks/main.yml @@ -0,0 +1,179 @@ +--- +- name: Test 1 + set_fact: + loop_result: >- + {{ + query('community.general.dependent', + dict(key1=[1, 2]), + dict(key2='[item.key1 + 3, item.key1 + 6]'), + dict(key3='[item.key1 + item.key2 * 10]')) + }} + +- name: Check result of Test 1 + assert: + that: + - loop_result == expected_result + vars: + expected_result: + - key1: 1 + key2: 4 + key3: 41 + - key1: 1 + key2: 7 + key3: 71 + - key1: 2 + key2: 5 + key3: 52 + - key1: 2 + key2: 8 + key3: 82 + +- name: Test 2 + set_fact: + loop_result: >- + {{ query('community.general.dependent', + dict([['a', [1, 2, 3]]]), + dict([['b', '[1, 
2, 3, 4] if item.a == 1 else [2, 3, 4] if item.a == 2 else [3, 4]']])) }} + # The last expression could have been `range(item.a, 5)`, but that's not supported by all Jinja2 versions used in CI + +- name: Check result of Test 2 + assert: + that: + - loop_result == expected_result + vars: + expected_result: + - a: 1 + b: 1 + - a: 1 + b: 2 + - a: 1 + b: 3 + - a: 1 + b: 4 + - a: 2 + b: 2 + - a: 2 + b: 3 + - a: 2 + b: 4 + - a: 3 + b: 3 + - a: 3 + b: 4 + +- name: Test 3 + debug: + var: item + with_community.general.dependent: + - var1: + a: + - 1 + - 2 + b: + - 3 + - 4 + - var2: 'item.var1.value' + - var3: 'dependent_lookup_test[item.var1.key ~ "_" ~ item.var2]' + loop_control: + label: "{{ [item.var1.key, item.var2, item.var3] }}" + register: dependent + vars: + dependent_lookup_test: + a_1: + - A + - B + a_2: + - C + b_3: + - D + b_4: + - E + - F + - G + +- name: Check result of Test 3 + assert: + that: + - (dependent.results | length) == 7 + - dependent.results[0].item.var1.key == "a" + - dependent.results[0].item.var2 == 1 + - dependent.results[0].item.var3 == "A" + - dependent.results[1].item.var1.key == "a" + - dependent.results[1].item.var2 == 1 + - dependent.results[1].item.var3 == "B" + - dependent.results[2].item.var1.key == "a" + - dependent.results[2].item.var2 == 2 + - dependent.results[2].item.var3 == "C" + - dependent.results[3].item.var1.key == "b" + - dependent.results[3].item.var2 == 3 + - dependent.results[3].item.var3 == "D" + - dependent.results[4].item.var1.key == "b" + - dependent.results[4].item.var2 == 4 + - dependent.results[4].item.var3 == "E" + - dependent.results[5].item.var1.key == "b" + - dependent.results[5].item.var2 == 4 + - dependent.results[5].item.var3 == "F" + - dependent.results[6].item.var1.key == "b" + - dependent.results[6].item.var2 == 4 + - dependent.results[6].item.var3 == "G" + +- name: "Test 4: template failure" + debug: + msg: "{{ item }}" + with_community.general.dependent: + - a: + - 1 + - 2 + - b: "[item.a + foo]" + 
ignore_errors: true + register: eval_error + +- name: Check result of Test 4 + assert: + that: + - eval_error is failed + - eval_error.msg.startswith("Caught \"'foo' is undefined\" while evaluating ") + +- name: "Test 5: same variable name reused" + debug: + msg: "{{ item }}" + with_community.general.dependent: + - a: x + - b: x + ignore_errors: true + register: eval_error + +- name: Check result of Test 5 + assert: + that: + - eval_error is failed + - eval_error.msg.startswith("Caught \"'x' is undefined\" while evaluating ") + +- name: "Test 6: multi-value dict" + debug: + msg: "{{ item }}" + with_community.general.dependent: + - a: x + b: x + ignore_errors: true + register: eval_error + +- name: Check result of Test 6 + assert: + that: + - eval_error is failed + - eval_error.msg == 'Parameter 0 must be a one-element dictionary, got 2 elements' + +- name: "Test 7: empty dict" + debug: + msg: "{{ item }}" + with_community.general.dependent: + - {} + ignore_errors: true + register: eval_error + +- name: Check result of Test 7 + assert: + that: + - eval_error is failed + - eval_error.msg == 'Parameter 0 must be a one-element dictionary, got 0 elements' diff --git a/tests/unit/plugins/lookup/test_dependent.py b/tests/unit/plugins/lookup/test_dependent.py new file mode 100644 index 0000000000..f2a31ff4b6 --- /dev/null +++ b/tests/unit/plugins/lookup/test_dependent.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# (c) 2020-2021, Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# Make coding more python3-ish +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +from ansible_collections.community.internal_test_tools.tests.unit.compat.unittest import TestCase +from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import ( + MagicMock, +) + +from ansible.plugins.loader import lookup_loader + + +class TestLookupModule(TestCase): + def setUp(self): + 
templar = MagicMock() + templar._loader = None + self.lookup = lookup_loader.get("community.general.dependent", templar=templar) + + def test_empty(self): + self.assertListEqual(self.lookup.run([], None), []) + + def test_simple(self): + self.assertListEqual( + self.lookup.run( + [ + {'a': '[1, 2]'}, + {'b': '[item.a + 3, item.a + 6]'}, + {'c': '[item.a + item.b * 10]'}, + ], + {}, + ), + [ + {'a': 1, 'b': 4, 'c': 41}, + {'a': 1, 'b': 7, 'c': 71}, + {'a': 2, 'b': 5, 'c': 52}, + {'a': 2, 'b': 8, 'c': 82}, + ], + ) From 9d46ccf1b2aacac8136432f0abb85ae65082d8a8 Mon Sep 17 00:00:00 2001 From: TrevorSquillario <72882537+TrevorSquillario@users.noreply.github.com> Date: Tue, 11 May 2021 11:30:09 -0600 Subject: [PATCH 0274/3093] modified redfish_config and idrac_redfish_config to skip incorrect attributes (#2334) * modified redfish_config and idrac_redfish_config to skip incorrect attributes Signed-off-by: Trevor Squillario Trevor_Squillario@Dell.com * modified redfish_utils.py and idrac_redfish_config.py to return empty warning message * modified redfish_config.py and idrac_redfish_config.py to use module.warn() * updated changelog fragment for pr 2334 --- ...dfish_config-skip-incorrect-attributes.yml | 4 +++ plugins/module_utils/redfish_utils.py | 28 ++++++++++++++----- .../redfish/idrac_redfish_config.py | 24 ++++++++++++---- .../redfish/redfish_config.py | 3 ++ 4 files changed, 47 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/2334-redfish_config-skip-incorrect-attributes.yml diff --git a/changelogs/fragments/2334-redfish_config-skip-incorrect-attributes.yml b/changelogs/fragments/2334-redfish_config-skip-incorrect-attributes.yml new file mode 100644 index 0000000000..2e609c43fc --- /dev/null +++ b/changelogs/fragments/2334-redfish_config-skip-incorrect-attributes.yml @@ -0,0 +1,4 @@ +minor_changes: + - redfish_utils module utils - modified set_bios_attributes function to skip invalid attribute instead of returning. 
Added skipped attributes to output (https://github.com/ansible-collections/community.general/issues/1995). + - idrac_redfish_config - modified set_manager_attributes function to skip invalid attribute instead of returning. Added skipped attributes to output. Modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995). + - redfish_config - modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995). diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index d8cc4061f8..df7011a0b4 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1671,19 +1671,31 @@ class RedfishUtils(object): # Make a copy of the attributes dict attrs_to_patch = dict(attributes) + # List to hold attributes not found + attrs_bad = {} # Check the attributes - for attr in attributes: - if attr not in data[u'Attributes']: - return {'ret': False, 'msg': "BIOS attribute %s not found" % attr} + for attr_name, attr_value in attributes.items(): + # Check if attribute exists + if attr_name not in data[u'Attributes']: + # Remove and proceed to next attribute if this isn't valid + attrs_bad.update({attr_name: attr_value}) + del attrs_to_patch[attr_name] + continue + # If already set to requested value, remove it from PATCH payload - if data[u'Attributes'][attr] == attributes[attr]: - del attrs_to_patch[attr] + if data[u'Attributes'][attr_name] == attributes[attr_name]: + del attrs_to_patch[attr_name] + + warning = "" + if attrs_bad: + warning = "Incorrect attributes %s" % (attrs_bad) # Return success w/ changed=False if no attrs need to be changed if not attrs_to_patch: return {'ret': True, 'changed': False, - 'msg': "BIOS attributes already set"} + 'msg': "BIOS attributes already set", + 'warning': warning} # Get the SettingsObject URI set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"] @@ 
-1693,7 +1705,9 @@ class RedfishUtils(object): response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) if response['ret'] is False: return response - return {'ret': True, 'changed': True, 'msg': "Modified BIOS attribute"} + return {'ret': True, 'changed': True, + 'msg': "Modified BIOS attributes %s" % (attrs_to_patch), + 'warning': warning} def set_boot_order(self, boot_list): if not boot_list: diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/plugins/modules/remote_management/redfish/idrac_redfish_config.py index e27ef6a2a6..b16401311b 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_config.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_config.py @@ -179,6 +179,7 @@ class IdracRedfishUtils(RedfishUtils): attrs_to_patch = {} attrs_skipped = {} + attrs_bad = {} # Store attrs which were not found in the system # Search for key entry and extract URI from it response = self.get_request(self.root_uri + manager_uri + "/" + key) @@ -189,13 +190,15 @@ class IdracRedfishUtils(RedfishUtils): if key not in data: return {'ret': False, - 'msg': "%s: Key %s not found" % (command, key)} + 'msg': "%s: Key %s not found" % (command, key), + 'warning': ""} for attr_name, attr_value in attributes.items(): # Check if attribute exists if attr_name not in data[u'Attributes']: - return {'ret': False, - 'msg': "%s: Manager attribute %s not found" % (command, attr_name)} + # Skip and proceed to next attribute if this isn't valid + attrs_bad.update({attr_name: attr_value}) + continue # Find out if value is already set to what we want. If yes, exclude # those attributes @@ -204,16 +207,23 @@ class IdracRedfishUtils(RedfishUtils): else: attrs_to_patch.update({attr_name: attr_value}) + warning = "" + if attrs_bad: + warning = "Incorrect attributes %s" % (attrs_bad) + if not attrs_to_patch: return {'ret': True, 'changed': False, - 'msg': "Manager attributes already set"} + 'msg': "No changes made. 
Manager attributes already set.", + 'warning': warning} payload = {"Attributes": attrs_to_patch} response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload) if response['ret'] is False: return response + return {'ret': True, 'changed': True, - 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch)} + 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch), + 'warning': warning} CATEGORY_COMMANDS_ALL = { @@ -221,6 +231,7 @@ CATEGORY_COMMANDS_ALL = { "SetSystemAttributes"] } + # list of mutually exclusive commands for a category CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = { "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes", @@ -308,6 +319,9 @@ def main(): # Return data back or fail with proper message if result['ret'] is True: + if result.get('warning'): + module.warn(to_native(result['warning'])) + module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) else: module.fail_json(msg=to_native(result['msg'])) diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py index 5c1df16c4e..e084c670f4 100644 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ b/plugins/modules/remote_management/redfish/redfish_config.py @@ -321,6 +321,9 @@ def main(): # Return data back or fail with proper message if result['ret'] is True: + if result.get('warning'): + module.warn(to_native(result['warning'])) + module.exit_json(changed=result['changed'], msg=to_native(result['msg'])) else: module.fail_json(msg=to_native(result['msg'])) From d22dd5056e62d6b2b8929f732b453214354253b9 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 12 May 2021 05:31:10 +1200 Subject: [PATCH 0275/3093] module_helper.py Breakdown (#2393) * break down of module_helper into smaller pieces, keeping compatibility * removed abc.ABC (py3 only) from code + fixed reference to vars.py * 
multiple changes: - mh.base - moved more functionalities to ModuleHelperBase - mh.mixins.(cmd, state) - CmdMixin no longer inherits from ModuleHelperBase - mh.mixins.deps - DependencyMixin now overrides run() method to test dependency - mh.mixins.vars - created class VarsMixin - mh.module_helper - moved functions to base class, added VarsMixin - module_helper - importing AnsibleModule as well, for backward compatibility in test * removed unnecessary __all__ * make pylint happy * PR adjustments + bot config + changelog frag * Update plugins/module_utils/mh/module_helper.py Co-authored-by: Felix Fontein * Update plugins/module_utils/mh/module_helper.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 3 + .../2393-module_helper-breakdown.yml | 2 + plugins/module_utils/mh/__init__.py | 0 plugins/module_utils/mh/base.py | 56 ++ plugins/module_utils/mh/deco.py | 54 ++ plugins/module_utils/mh/exceptions.py | 22 + plugins/module_utils/mh/mixins/__init__.py | 0 plugins/module_utils/mh/mixins/cmd.py | 167 ++++++ plugins/module_utils/mh/mixins/deps.py | 58 ++ plugins/module_utils/mh/mixins/state.py | 39 ++ plugins/module_utils/mh/mixins/vars.py | 132 +++++ plugins/module_utils/mh/module_helper.py | 79 +++ plugins/module_utils/module_helper.py | 511 +----------------- .../module_utils/test_module_helper.py | 6 +- .../plugins/modules/system/test_xfconf.py | 4 +- 15 files changed, 625 insertions(+), 508 deletions(-) create mode 100644 changelogs/fragments/2393-module_helper-breakdown.yml create mode 100644 plugins/module_utils/mh/__init__.py create mode 100644 plugins/module_utils/mh/base.py create mode 100644 plugins/module_utils/mh/deco.py create mode 100644 plugins/module_utils/mh/exceptions.py create mode 100644 plugins/module_utils/mh/mixins/__init__.py create mode 100644 plugins/module_utils/mh/mixins/cmd.py create mode 100644 plugins/module_utils/mh/mixins/deps.py create mode 100644 plugins/module_utils/mh/mixins/state.py create mode 
100644 plugins/module_utils/mh/mixins/vars.py create mode 100644 plugins/module_utils/mh/module_helper.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index f27c96e049..cdef437f90 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -142,6 +142,9 @@ files: $module_utils/memset.py: maintainers: glitchcrab labels: cloud memset + $module_utils/mh/: + maintainers: russoz + labels: module_helper $module_utils/module_helper.py: maintainers: russoz labels: module_helper diff --git a/changelogs/fragments/2393-module_helper-breakdown.yml b/changelogs/fragments/2393-module_helper-breakdown.yml new file mode 100644 index 0000000000..472a1c3569 --- /dev/null +++ b/changelogs/fragments/2393-module_helper-breakdown.yml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper module utils - break down of the long file into smaller pieces (https://github.com/ansible-collections/community.general/pull/2393). diff --git a/plugins/module_utils/mh/__init__.py b/plugins/module_utils/mh/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py new file mode 100644 index 0000000000..2a2dd88f7b --- /dev/null +++ b/plugins/module_utils/mh/base.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException as _MHE +from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception + + +class ModuleHelperBase(object): + module = None + ModuleHelperException = _MHE + + def __init__(self, module=None): + self._changed = False + + if module: + self.module = 
module + + if not isinstance(self.module, AnsibleModule): + self.module = AnsibleModule(**self.module) + + def __init_module__(self): + pass + + def __run__(self): + raise NotImplementedError() + + def __quit_module__(self): + pass + + @property + def changed(self): + return self._changed + + @changed.setter + def changed(self, value): + self._changed = value + + def has_changed(self): + raise NotImplementedError() + + @property + def output(self): + raise NotImplementedError() + + @module_fails_on_exception + def run(self): + self.__init_module__() + self.__run__() + self.__quit_module__() + self.module.exit_json(changed=self.has_changed(), **self.output) diff --git a/plugins/module_utils/mh/deco.py b/plugins/module_utils/mh/deco.py new file mode 100644 index 0000000000..91f0d97744 --- /dev/null +++ b/plugins/module_utils/mh/deco.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import traceback +from functools import wraps + +from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException + + +def cause_changes(on_success=None, on_failure=None): + + def deco(func): + if on_success is None and on_failure is None: + return func + + @wraps(func) + def wrapper(*args, **kwargs): + try: + self = args[0] + func(*args, **kwargs) + if on_success is not None: + self.changed = on_success + except Exception: + if on_failure is not None: + self.changed = on_failure + raise + + return wrapper + + return deco + + +def module_fails_on_exception(func): + @wraps(func) + def wrapper(self, *args, **kwargs): + try: + func(self, *args, **kwargs) + except SystemExit: + raise + except ModuleHelperException as e: + if e.update_output: + self.update_output(e.update_output) + 
self.module.fail_json(msg=e.msg, exception=traceback.format_exc(), + output=self.output, vars=self.vars.output(), **self.output) + except Exception as e: + msg = "Module failed with exception: {0}".format(str(e).strip()) + self.module.fail_json(msg=msg, exception=traceback.format_exc(), + output=self.output, vars=self.vars.output(), **self.output) + return wrapper diff --git a/plugins/module_utils/mh/exceptions.py b/plugins/module_utils/mh/exceptions.py new file mode 100644 index 0000000000..558dcca05f --- /dev/null +++ b/plugins/module_utils/mh/exceptions.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleHelperException(Exception): + @staticmethod + def _get_remove(key, kwargs): + if key in kwargs: + result = kwargs[key] + del kwargs[key] + return result + return None + + def __init__(self, *args, **kwargs): + self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self) + self.update_output = self._get_remove('update_output', kwargs) or {} + super(ModuleHelperException, self).__init__(*args) diff --git a/plugins/module_utils/mh/mixins/__init__.py b/plugins/module_utils/mh/mixins/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py new file mode 100644 index 0000000000..fc66638f69 --- /dev/null +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from 
functools import partial + + +class ArgFormat(object): + """ + Argument formatter for use as a command line parameter. Used in CmdMixin. + """ + BOOLEAN = 0 + PRINTF = 1 + FORMAT = 2 + + @staticmethod + def stars_deco(num): + if num == 1: + def deco(f): + return lambda v: f(*v) + return deco + elif num == 2: + def deco(f): + return lambda v: f(**v) + return deco + + return lambda f: f + + def __init__(self, name, fmt=None, style=FORMAT, stars=0): + """ + Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for + the CLI command execution. + :param name: Name of the argument to be formatted + :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that + :param style: Whether arg_format (as str) should use printf-style formatting. + Ignored if arg_format is None or not a str (should be callable). + :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value + """ + def printf_fmt(_fmt, v): + try: + return [_fmt % v] + except TypeError as e: + if e.args[0] != 'not all arguments converted during string formatting': + raise + return [_fmt] + + _fmts = { + ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []), + ArgFormat.PRINTF: printf_fmt, + ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)], + } + + self.name = name + self.stars = stars + + if fmt is None: + fmt = "{0}" + style = ArgFormat.FORMAT + + if isinstance(fmt, str): + func = _fmts[style] + self.arg_format = partial(func, fmt) + elif isinstance(fmt, list) or isinstance(fmt, tuple): + self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt] + elif hasattr(fmt, '__call__'): + self.arg_format = fmt + else: + raise TypeError('Parameter fmt must be either: a string, a list/tuple of ' + 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt)) + + if stars: + self.arg_format = (self.stars_deco(stars))(self.arg_format) + + def to_text(self, 
value): + if value is None: + return [] + func = self.arg_format + return [str(p) for p in func(value)] + + +class CmdMixin(object): + """ + Mixin for mapping module options to running a CLI command with its arguments. + """ + command = None + command_args_formats = {} + run_command_fixed_options = {} + check_rc = False + force_lang = "C" + + @property + def module_formats(self): + result = {} + for param in self.module.params.keys(): + result[param] = ArgFormat(param) + return result + + @property + def custom_formats(self): + result = {} + for param, fmt_spec in self.command_args_formats.items(): + result[param] = ArgFormat(param, **fmt_spec) + return result + + def _calculate_args(self, extra_params=None, params=None): + def add_arg_formatted_param(_cmd_args, arg_format, _value): + args = list(arg_format.to_text(_value)) + return _cmd_args + args + + def find_format(_param): + return self.custom_formats.get(_param, self.module_formats.get(_param)) + + extra_params = extra_params or dict() + cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command) + try: + cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True) + except ValueError: + pass + param_list = params if params else self.module.params.keys() + + for param in param_list: + if isinstance(param, dict): + if len(param) != 1: + raise self.ModuleHelperException("run_command parameter as a dict must " + "contain only one key: {0}".format(param)) + _param = list(param.keys())[0] + fmt = find_format(_param) + value = param[_param] + elif isinstance(param, str): + if param in self.module.argument_spec: + fmt = find_format(param) + value = self.module.params[param] + elif param in extra_params: + fmt = find_format(param) + value = extra_params[param] + else: + self.module.deprecate("Cannot determine value for parameter: {0}. 
" + "From version 4.0.0 onwards this will generate an exception".format(param), + version="4.0.0", collection_name="community.general") + continue + + else: + raise self.ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param)) + cmd_args = add_arg_formatted_param(cmd_args, fmt, value) + + return cmd_args + + def process_command_output(self, rc, out, err): + return rc, out, err + + def run_command(self, extra_params=None, params=None, *args, **kwargs): + self.vars.cmd_args = self._calculate_args(extra_params, params) + options = dict(self.run_command_fixed_options) + env_update = dict(options.get('environ_update', {})) + options['check_rc'] = options.get('check_rc', self.check_rc) + if self.force_lang: + env_update.update({'LANGUAGE': self.force_lang}) + self.update_output(force_lang=self.force_lang) + options['environ_update'] = env_update + options.update(kwargs) + rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options) + self.update_output(rc=rc, stdout=out, stderr=err) + return self.process_command_output(rc, out, err) diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py new file mode 100644 index 0000000000..1c6c9ae484 --- /dev/null +++ b/plugins/module_utils/mh/mixins/deps.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +import traceback + +from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase +from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception + + +class DependencyCtxMgr(object): + def __init__(self, name, msg=None): + self.name = name + self.msg = msg + self.has_it = False + self.exc_type = None + 
self.exc_val = None + self.exc_tb = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.has_it = exc_type is None + self.exc_type = exc_type + self.exc_val = exc_val + self.exc_tb = exc_tb + return not self.has_it + + @property + def text(self): + return self.msg or str(self.exc_val) + + +class DependencyMixin(ModuleHelperBase): + _dependencies = [] + + @classmethod + def dependency(cls, name, msg): + cls._dependencies.append(DependencyCtxMgr(name, msg)) + return cls._dependencies[-1] + + def fail_on_missing_deps(self): + for d in self._dependencies: + if not d.has_it: + self.module.fail_json(changed=False, + exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)), + msg=d.text, + **self.output) + + @module_fails_on_exception + def run(self): + self.fail_on_missing_deps() + super(DependencyMixin, self).run() diff --git a/plugins/module_utils/mh/mixins/state.py b/plugins/module_utils/mh/mixins/state.py new file mode 100644 index 0000000000..b946090ac9 --- /dev/null +++ b/plugins/module_utils/mh/mixins/state.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class StateMixin(object): + state_param = 'state' + default_state = None + + def _state(self): + state = self.module.params.get(self.state_param) + return self.default_state if state is None else state + + def _method(self, state): + return "{0}_{1}".format(self.state_param, state) + + def __run__(self): + state = self._state() + self.vars.state = state + + # resolve aliases + if state not in self.module.params: + aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])] + if aliased: + state = aliased[0] + 
self.vars.effective_state = state + + method = self._method(state) + if not hasattr(self, method): + return self.__state_fallback__() + func = getattr(self, method) + return func() + + def __state_fallback__(self): + raise ValueError("Cannot find method: {0}".format(self._method(self._state()))) diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py new file mode 100644 index 0000000000..7c936e04ac --- /dev/null +++ b/plugins/module_utils/mh/mixins/vars.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class VarMeta(object): + NOTHING = object() + + def __init__(self, diff=False, output=True, change=None, fact=False): + self.init = False + self.initial_value = None + self.value = None + + self.diff = diff + self.change = diff if change is None else change + self.output = output + self.fact = fact + + def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING): + if diff is not None: + self.diff = diff + if output is not None: + self.output = output + if change is not None: + self.change = change + if fact is not None: + self.fact = fact + if initial_value is not self.NOTHING: + self.initial_value = initial_value + + def set_value(self, value): + if not self.init: + self.initial_value = value + self.init = True + self.value = value + return self + + @property + def has_changed(self): + return self.change and (self.initial_value != self.value) + + @property + def diff_result(self): + return None if not (self.diff and self.has_changed) else { + 'before': self.initial_value, + 'after': self.value, + } + + def __str__(self): + return "".format( + self.value, self.initial_value, self.diff, self.output, self.change + ) + + +class VarDict(object): 
+ def __init__(self): + self._data = dict() + self._meta = dict() + + def __getitem__(self, item): + return self._data[item] + + def __setitem__(self, key, value): + self.set(key, value) + + def __getattr__(self, item): + try: + return self._data[item] + except KeyError: + return getattr(self._data, item) + + def __setattr__(self, key, value): + if key in ('_data', '_meta'): + super(VarDict, self).__setattr__(key, value) + else: + self.set(key, value) + + def meta(self, name): + return self._meta[name] + + def set_meta(self, name, **kwargs): + self.meta(name).set(**kwargs) + + def set(self, name, value, **kwargs): + if name in ('_data', '_meta'): + raise ValueError("Names _data and _meta are reserved for use by ModuleHelper") + self._data[name] = value + if name in self._meta: + meta = self.meta(name) + else: + meta = VarMeta(**kwargs) + meta.set_value(value) + self._meta[name] = meta + + def output(self): + return dict((k, v) for k, v in self._data.items() if self.meta(k).output) + + def diff(self): + diff_results = [(k, self.meta(k).diff_result) for k in self._data] + diff_results = [dr for dr in diff_results if dr[1] is not None] + if diff_results: + before = dict((dr[0], dr[1]['before']) for dr in diff_results) + after = dict((dr[0], dr[1]['after']) for dr in diff_results) + return {'before': before, 'after': after} + return None + + def facts(self): + facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact) + return facts_result if facts_result else None + + def change_vars(self): + return [v for v in self._data if self.meta(v).change] + + def has_changed(self, v): + return self._meta[v].has_changed + + +class VarsMixin(object): + + def __init__(self, module=None): + self.vars = VarDict() + super(VarsMixin, self).__init__(module) + + def update_vars(self, meta=None, **kwargs): + if meta is None: + meta = {} + for k, v in kwargs.items(): + self.vars.set(k, v, **meta) diff --git a/plugins/module_utils/mh/module_helper.py 
b/plugins/module_utils/mh/module_helper.py new file mode 100644 index 0000000000..b27b60df9a --- /dev/null +++ b/plugins/module_utils/mh/module_helper.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# (c) 2020, Alexei Znamensky +# Copyright: (c) 2020, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.common.dict_transformations import dict_merge + +from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule +from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin +from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin +from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin +from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD + + +class ModuleHelper(VarsMixin, DependencyMixin, ModuleHelperBase): + _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed') + facts_name = None + output_params = () + diff_params = () + change_params = () + facts_params = () + + VarDict = _VD # for backward compatibility, will be deprecated at some point + + def __init__(self, module=None): + super(ModuleHelper, self).__init__(module) + for name, value in self.module.params.items(): + self.vars.set( + name, value, + diff=name in self.diff_params, + output=name in self.output_params, + change=None if not self.change_params else name in self.change_params, + fact=name in self.facts_params, + ) + + def update_output(self, **kwargs): + self.update_vars(meta={"output": True}, **kwargs) + + def update_facts(self, **kwargs): + self.update_vars(meta={"fact": True}, **kwargs) + + def _vars_changed(self): + return any(self.vars.has_changed(v) for v in 
self.vars.change_vars()) + + def has_changed(self): + return self.changed or self._vars_changed() + + @property + def output(self): + result = dict(self.vars.output()) + if self.facts_name: + facts = self.vars.facts() + if facts is not None: + result['ansible_facts'] = {self.facts_name: facts} + if self.module._diff: + diff = result.get('diff', {}) + vars_diff = self.vars.diff() or {} + result['diff'] = dict_merge(dict(diff), vars_diff) + + for varname in result: + if varname in self._output_conflict_list: + result["_" + varname] = result[varname] + del result[varname] + return result + + +class StateModuleHelper(StateMixin, ModuleHelper): + pass + + +class CmdModuleHelper(CmdMixin, ModuleHelper): + pass + + +class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper): + pass diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index d241eba5af..a6b35bdd33 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -6,506 +6,13 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -from functools import partial, wraps -import traceback -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.dict_transformations import dict_merge - - -class ModuleHelperException(Exception): - @staticmethod - def _get_remove(key, kwargs): - if key in kwargs: - result = kwargs[key] - del kwargs[key] - return result - return None - - def __init__(self, *args, **kwargs): - self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self) - self.update_output = self._get_remove('update_output', kwargs) or {} - super(ModuleHelperException, self).__init__(*args) - - -class ArgFormat(object): - """ - Argument formatter for use as a command line parameter. Used in CmdMixin. 
- """ - BOOLEAN = 0 - PRINTF = 1 - FORMAT = 2 - - @staticmethod - def stars_deco(num): - if num == 1: - def deco(f): - return lambda v: f(*v) - return deco - elif num == 2: - def deco(f): - return lambda v: f(**v) - return deco - - return lambda f: f - - def __init__(self, name, fmt=None, style=FORMAT, stars=0): - """ - Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for - the CLI command execution. - :param name: Name of the argument to be formatted - :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that - :param style: Whether arg_format (as str) should use printf-style formatting. - Ignored if arg_format is None or not a str (should be callable). - :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value - """ - def printf_fmt(_fmt, v): - try: - return [_fmt % v] - except TypeError as e: - if e.args[0] != 'not all arguments converted during string formatting': - raise - return [_fmt] - - _fmts = { - ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []), - ArgFormat.PRINTF: printf_fmt, - ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)], - } - - self.name = name - self.stars = stars - - if fmt is None: - fmt = "{0}" - style = ArgFormat.FORMAT - - if isinstance(fmt, str): - func = _fmts[style] - self.arg_format = partial(func, fmt) - elif isinstance(fmt, list) or isinstance(fmt, tuple): - self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt] - elif hasattr(fmt, '__call__'): - self.arg_format = fmt - else: - raise TypeError('Parameter fmt must be either: a string, a list/tuple of ' - 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt)) - - if stars: - self.arg_format = (self.stars_deco(stars))(self.arg_format) - - def to_text(self, value): - if value is None: - return [] - func = self.arg_format - return [str(p) for p in func(value)] - - -def 
cause_changes(on_success=None, on_failure=None): - - def deco(func): - if on_success is None and on_failure is None: - return func - - @wraps(func) - def wrapper(*args, **kwargs): - try: - self = args[0] - func(*args, **kwargs) - if on_success is not None: - self.changed = on_success - except Exception: - if on_failure is not None: - self.changed = on_failure - raise - - return wrapper - - return deco - - -def module_fails_on_exception(func): - @wraps(func) - def wrapper(self, *args, **kwargs): - try: - func(self, *args, **kwargs) - except SystemExit: - raise - except ModuleHelperException as e: - if e.update_output: - self.update_output(e.update_output) - self.module.fail_json(msg=e.msg, exception=traceback.format_exc(), - output=self.output, vars=self.vars.output(), **self.output) - except Exception as e: - msg = "Module failed with exception: {0}".format(str(e).strip()) - self.module.fail_json(msg=msg, exception=traceback.format_exc(), - output=self.output, vars=self.vars.output(), **self.output) - return wrapper - - -class DependencyCtxMgr(object): - def __init__(self, name, msg=None): - self.name = name - self.msg = msg - self.has_it = False - self.exc_type = None - self.exc_val = None - self.exc_tb = None - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.has_it = exc_type is None - self.exc_type = exc_type - self.exc_val = exc_val - self.exc_tb = exc_tb - return not self.has_it - - @property - def text(self): - return self.msg or str(self.exc_val) - - -class VarMeta(object): - NOTHING = object() - - def __init__(self, diff=False, output=True, change=None, fact=False): - self.init = False - self.initial_value = None - self.value = None - - self.diff = diff - self.change = diff if change is None else change - self.output = output - self.fact = fact - - def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING): - if diff is not None: - self.diff = diff - if output is not None: - 
self.output = output - if change is not None: - self.change = change - if fact is not None: - self.fact = fact - if initial_value is not self.NOTHING: - self.initial_value = initial_value - - def set_value(self, value): - if not self.init: - self.initial_value = value - self.init = True - self.value = value - return self - - @property - def has_changed(self): - return self.change and (self.initial_value != self.value) - - @property - def diff_result(self): - return None if not (self.diff and self.has_changed) else { - 'before': self.initial_value, - 'after': self.value, - } - - def __str__(self): - return "".format( - self.value, self.initial_value, self.diff, self.output, self.change - ) - - -class ModuleHelper(object): - _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed') - _dependencies = [] - module = None - facts_name = None - output_params = () - diff_params = () - change_params = () - facts_params = () - - class VarDict(object): - def __init__(self): - self._data = dict() - self._meta = dict() - - def __getitem__(self, item): - return self._data[item] - - def __setitem__(self, key, value): - self.set(key, value) - - def __getattr__(self, item): - try: - return self._data[item] - except KeyError: - return getattr(self._data, item) - - def __setattr__(self, key, value): - if key in ('_data', '_meta'): - super(ModuleHelper.VarDict, self).__setattr__(key, value) - else: - self.set(key, value) - - def meta(self, name): - return self._meta[name] - - def set_meta(self, name, **kwargs): - self.meta(name).set(**kwargs) - - def set(self, name, value, **kwargs): - if name in ('_data', '_meta'): - raise ValueError("Names _data and _meta are reserved for use by ModuleHelper") - self._data[name] = value - if name in self._meta: - meta = self.meta(name) - else: - meta = VarMeta(**kwargs) - meta.set_value(value) - self._meta[name] = meta - - def output(self): - return dict((k, v) for k, v in self._data.items() if self.meta(k).output) - - def 
diff(self): - diff_results = [(k, self.meta(k).diff_result) for k in self._data] - diff_results = [dr for dr in diff_results if dr[1] is not None] - if diff_results: - before = dict((dr[0], dr[1]['before']) for dr in diff_results) - after = dict((dr[0], dr[1]['after']) for dr in diff_results) - return {'before': before, 'after': after} - return None - - def facts(self): - facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact) - return facts_result if facts_result else None - - def change_vars(self): - return [v for v in self._data if self.meta(v).change] - - def has_changed(self, v): - return self._meta[v].has_changed - - def __init__(self, module=None): - self.vars = ModuleHelper.VarDict() - self._changed = False - - if module: - self.module = module - - if not isinstance(self.module, AnsibleModule): - self.module = AnsibleModule(**self.module) - - for name, value in self.module.params.items(): - self.vars.set( - name, value, - diff=name in self.diff_params, - output=name in self.output_params, - change=None if not self.change_params else name in self.change_params, - fact=name in self.facts_params, - ) - - def update_vars(self, meta=None, **kwargs): - if meta is None: - meta = {} - for k, v in kwargs.items(): - self.vars.set(k, v, **meta) - - def update_output(self, **kwargs): - self.update_vars(meta={"output": True}, **kwargs) - - def update_facts(self, **kwargs): - self.update_vars(meta={"fact": True}, **kwargs) - - def __init_module__(self): - pass - - def __run__(self): - raise NotImplementedError() - - def __quit_module__(self): - pass - - def _vars_changed(self): - return any(self.vars.has_changed(v) for v in self.vars.change_vars()) - - @property - def changed(self): - return self._changed - - @changed.setter - def changed(self, value): - self._changed = value - - def has_changed(self): - return self.changed or self._vars_changed() - - @property - def output(self): - result = dict(self.vars.output()) - if self.facts_name: - facts 
= self.vars.facts() - if facts is not None: - result['ansible_facts'] = {self.facts_name: facts} - if self.module._diff: - diff = result.get('diff', {}) - vars_diff = self.vars.diff() or {} - result['diff'] = dict_merge(dict(diff), vars_diff) - - for varname in result: - if varname in self._output_conflict_list: - result["_" + varname] = result[varname] - del result[varname] - return result - - @module_fails_on_exception - def run(self): - self.fail_on_missing_deps() - self.__init_module__() - self.__run__() - self.__quit_module__() - self.module.exit_json(changed=self.has_changed(), **self.output) - - @classmethod - def dependency(cls, name, msg): - cls._dependencies.append(DependencyCtxMgr(name, msg)) - return cls._dependencies[-1] - - def fail_on_missing_deps(self): - for d in self._dependencies: - if not d.has_it: - self.module.fail_json(changed=False, - exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)), - msg=d.text, - **self.output) - - -class StateMixin(object): - state_param = 'state' - default_state = None - - def _state(self): - state = self.module.params.get(self.state_param) - return self.default_state if state is None else state - - def _method(self, state): - return "{0}_{1}".format(self.state_param, state) - - def __run__(self): - state = self._state() - self.vars.state = state - - # resolve aliases - if state not in self.module.params: - aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])] - if aliased: - state = aliased[0] - self.vars.effective_state = state - - method = self._method(state) - if not hasattr(self, method): - return self.__state_fallback__() - func = getattr(self, method) - return func() - - def __state_fallback__(self): - raise ValueError("Cannot find method: {0}".format(self._method(self._state()))) - - -class CmdMixin(object): - """ - Mixin for mapping module options to running a CLI command with its arguments. 
- """ - command = None - command_args_formats = {} - run_command_fixed_options = {} - check_rc = False - force_lang = "C" - - @property - def module_formats(self): - result = {} - for param in self.module.params.keys(): - result[param] = ArgFormat(param) - return result - - @property - def custom_formats(self): - result = {} - for param, fmt_spec in self.command_args_formats.items(): - result[param] = ArgFormat(param, **fmt_spec) - return result - - def _calculate_args(self, extra_params=None, params=None): - def add_arg_formatted_param(_cmd_args, arg_format, _value): - args = list(arg_format.to_text(_value)) - return _cmd_args + args - - def find_format(_param): - return self.custom_formats.get(_param, self.module_formats.get(_param)) - - extra_params = extra_params or dict() - cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command) - try: - cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True) - except ValueError: - pass - param_list = params if params else self.module.params.keys() - - for param in param_list: - if isinstance(param, dict): - if len(param) != 1: - raise ModuleHelperException("run_command parameter as a dict must " - "contain only one key: {0}".format(param)) - _param = list(param.keys())[0] - fmt = find_format(_param) - value = param[_param] - elif isinstance(param, str): - if param in self.module.argument_spec: - fmt = find_format(param) - value = self.module.params[param] - elif param in extra_params: - fmt = find_format(param) - value = extra_params[param] - else: - self.module.deprecate("Cannot determine value for parameter: {0}. 
" - "From version 4.0.0 onwards this will generate an exception".format(param), - version="4.0.0", collection_name="community.general") - continue - - else: - raise ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param)) - cmd_args = add_arg_formatted_param(cmd_args, fmt, value) - - return cmd_args - - def process_command_output(self, rc, out, err): - return rc, out, err - - def run_command(self, extra_params=None, params=None, *args, **kwargs): - self.vars.cmd_args = self._calculate_args(extra_params, params) - options = dict(self.run_command_fixed_options) - env_update = dict(options.get('environ_update', {})) - options['check_rc'] = options.get('check_rc', self.check_rc) - if self.force_lang: - env_update.update({'LANGUAGE': self.force_lang}) - self.update_output(force_lang=self.force_lang) - options['environ_update'] = env_update - options.update(kwargs) - rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options) - self.update_output(rc=rc, stdout=out, stderr=err) - return self.process_command_output(rc, out, err) - - -class StateModuleHelper(StateMixin, ModuleHelper): - pass - - -class CmdModuleHelper(CmdMixin, ModuleHelper): - pass - - -class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper): - pass +from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( + ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule +) +from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat +from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin +from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr +from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException +from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, 
module_fails_on_exception +from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index 6f77ca7662..6452784182 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -6,12 +6,10 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from collections import namedtuple - import pytest from ansible_collections.community.general.plugins.module_utils.module_helper import ( - ArgFormat, DependencyCtxMgr, ModuleHelper, VarMeta, cause_changes + ArgFormat, DependencyCtxMgr, VarMeta, VarDict, cause_changes ) @@ -144,7 +142,7 @@ def test_variable_meta_diff(): def test_vardict(): - vd = ModuleHelper.VarDict() + vd = VarDict() vd.set('a', 123) assert vd['a'] == 123 assert vd.a == 123 diff --git a/tests/unit/plugins/modules/system/test_xfconf.py b/tests/unit/plugins/modules/system/test_xfconf.py index 1002952ce3..dee387bd7d 100644 --- a/tests/unit/plugins/modules/system/test_xfconf.py +++ b/tests/unit/plugins/modules/system/test_xfconf.py @@ -21,7 +21,7 @@ def patch_xfconf(mocker): """ Function used for mocking some parts of redhat_subscribtion module """ - mocker.patch('ansible_collections.community.general.plugins.module_utils.module_helper.AnsibleModule.get_bin_path', + mocker.patch('ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.get_bin_path', return_value='/testbin/xfconf-query') @@ -332,7 +332,7 @@ def test_xfconf(mocker, capfd, patch_xfconf, testcase): # Mock function used for running commands first call_results = [item[2] for item in testcase['run_command.calls']] mock_run_command = mocker.patch( - 'ansible_collections.community.general.plugins.module_utils.module_helper.AnsibleModule.run_command', + 
'ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.run_command', side_effect=call_results) # Try to run test case From 0912e8cc7ab1bc2b70f6552e84b3430fb7cd1dfc Mon Sep 17 00:00:00 2001 From: CWollinger Date: Tue, 11 May 2021 19:31:46 +0200 Subject: [PATCH 0276/3093] discord.py: Add new module for discord notifications (#2398) * first push: add discord module and test for notifications * fix the yaml docs and edit the result output * add link * fix link * fix docs and remove required=False in argument spec * add elements specd and more info about embeds * called str... * elements for embeds oc. * fix typo's in description and set checkmode to false * edit docs and module return * support checkmode with get method * fix unit test * handle exception and add new example for embeds * quote line * fix typos * fix yaml --- plugins/modules/discord.py | 1 + plugins/modules/notification/discord.py | 215 ++++++++++++++++++ .../modules/notification/test_discord.py | 103 +++++++++ 3 files changed, 319 insertions(+) create mode 120000 plugins/modules/discord.py create mode 100644 plugins/modules/notification/discord.py create mode 100644 tests/unit/plugins/modules/notification/test_discord.py diff --git a/plugins/modules/discord.py b/plugins/modules/discord.py new file mode 120000 index 0000000000..1acf222f94 --- /dev/null +++ b/plugins/modules/discord.py @@ -0,0 +1 @@ +./notification/discord.py \ No newline at end of file diff --git a/plugins/modules/notification/discord.py b/plugins/modules/notification/discord.py new file mode 100644 index 0000000000..27dc6fc85c --- /dev/null +++ b/plugins/modules/notification/discord.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Christian Wollinger +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- 
+module: discord +short_description: Send Discord messages +version_added: 3.1.0 +description: + - Sends a message to a Discord channel using the Discord webhook API. +author: Christian Wollinger (@cwollinger) +seealso: + - name: API documentation + description: Documentation for Discord API + link: https://discord.com/developers/docs/resources/webhook#execute-webhook +options: + webhook_id: + description: + - The webhook ID. + - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." + required: yes + type: str + webhook_token: + description: + - The webhook token. + - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." + required: yes + type: str + content: + description: + - Content of the message to the Discord channel. + - At least one of I(content) and I(embeds) must be specified. + type: str + username: + description: + - Overrides the default username of the webhook. + type: str + avatar_url: + description: + - Overrides the default avatar of the webhook. + type: str + tts: + description: + - Set this to C(true) if this is a TTS (Text to Speech) message. + type: bool + default: false + embeds: + description: + - Send messages as Embeds to the Discord channel. + - Embeds can have a colored border, embedded images, text fields and more. + - "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)" + - At least one of I(content) and I(embeds) must be specified. 
+ type: list + elements: dict +''' + +EXAMPLES = """ +- name: Send a message to the Discord channel + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + content: "This is a message from ansible" + +- name: Send a message to the Discord channel with specific username and avatar + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + content: "This is a message from ansible" + username: Ansible + avatar_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + +- name: Send a embedded message to the Discord channel + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + embeds: + - title: "Embedded message" + description: "This is an embedded message" + footer: + text: "Author: Ansible" + image: + url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + +- name: Send two embedded messages + community.general.discord: + webhook_id: "00000" + webhook_token: "XXXYYY" + embeds: + - title: "First message" + description: "This is my first embedded message" + footer: + text: "Author: Ansible" + image: + url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + - title: "Second message" + description: "This is my first second message" + footer: + text: "Author: Ansible" + icon_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png" + fields: + - name: "Field 1" + value: "Value of my first field" + - name: "Field 2" + value: "Value of my second field" + timestamp: "{{ ansible_date_time.iso8601 }}" +""" + +RETURN = """ +http_code: + description: + - Response Code returned by Discord API. 
+ returned: always + type: int + sample: 204 +""" + +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.basic import AnsibleModule + + +def discord_check_mode(module): + + webhook_id = module.params['webhook_id'] + webhook_token = module.params['webhook_token'] + + headers = { + 'content-type': 'application/json' + } + + url = "https://discord.com/api/webhooks/%s/%s" % ( + webhook_id, webhook_token) + + response, info = fetch_url(module, url, method='GET', headers=headers) + return response, info + + +def discord_text_msg(module): + + webhook_id = module.params['webhook_id'] + webhook_token = module.params['webhook_token'] + content = module.params['content'] + user = module.params['username'] + avatar_url = module.params['avatar_url'] + tts = module.params['tts'] + embeds = module.params['embeds'] + + headers = { + 'content-type': 'application/json' + } + + url = "https://discord.com/api/webhooks/%s/%s" % ( + webhook_id, webhook_token) + + payload = { + 'content': content, + 'username': user, + 'avatar_url': avatar_url, + 'tts': tts, + 'embeds': embeds, + } + + payload = module.jsonify(payload) + + response, info = fetch_url(module, url, data=payload, headers=headers, method='POST') + return response, info + + +def main(): + module = AnsibleModule( + argument_spec=dict( + webhook_id=dict(type='str', required=True), + webhook_token=dict(type='str', required=True, no_log=True), + content=dict(type='str'), + username=dict(type='str'), + avatar_url=dict(type='str'), + tts=dict(type='bool', default=False), + embeds=dict(type='list', elements='dict'), + ), + required_one_of=[['content', 'embeds']], + supports_check_mode=True + ) + + result = dict( + changed=False, + http_code='', + ) + + if module.check_mode: + response, info = discord_check_mode(module) + if info['status'] != 200: + try: + module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info) + except Exception: + 
module.fail_json(http_code=info['status'], msg=info['msg'], info=info) + else: + module.exit_json(msg=info['msg'], changed=False, http_code=info['status'], response=module.from_json(response.read())) + else: + response, info = discord_text_msg(module) + if info['status'] != 204: + try: + module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info) + except Exception: + module.fail_json(http_code=info['status'], msg=info['msg'], info=info) + else: + module.exit_json(msg=info['msg'], changed=True, http_code=info['status']) + + +if __name__ == "__main__": + main() diff --git a/tests/unit/plugins/modules/notification/test_discord.py b/tests/unit/plugins/modules/notification/test_discord.py new file mode 100644 index 0000000000..257b0d4dab --- /dev/null +++ b/tests/unit/plugins/modules/notification/test_discord.py @@ -0,0 +1,103 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import pytest +from ansible_collections.community.general.tests.unit.compat.mock import Mock, patch +from ansible_collections.community.general.plugins.modules.notification import discord +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + + +class TestDiscordModule(ModuleTestCase): + + def setUp(self): + super(TestDiscordModule, self).setUp() + self.module = discord + + def tearDown(self): + super(TestDiscordModule, self).tearDown() + + @pytest.fixture + def fetch_url_mock(self, mocker): + return mocker.patch('ansible.module_utils.notification.discord.fetch_url') + + def test_without_parameters(self): + """Failure if no parameters set""" + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + self.module.main() + + def test_without_content(self): + """Failure if content and embeds 
both are missing""" + set_module_args({ + 'webhook_id': 'xxx', + 'webhook_token': 'xxx' + }) + with self.assertRaises(AnsibleFailJson): + self.module.main() + + def test_successful_message(self): + """Test a basic message successfully.""" + set_module_args({ + 'webhook_id': 'xxx', + 'webhook_token': 'xxx', + 'content': 'test' + }) + + with patch.object(discord, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'}) + with self.assertRaises(AnsibleExitJson): + self.module.main() + + self.assertTrue(fetch_url_mock.call_count, 1) + call_data = json.loads(fetch_url_mock.call_args[1]['data']) + assert call_data['content'] == "test" + + def test_message_with_username(self): + """Test a message with username set successfully.""" + set_module_args({ + 'webhook_id': 'xxx', + 'webhook_token': 'xxx', + 'content': 'test', + 'username': 'Ansible Bot' + }) + + with patch.object(discord, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'}) + with self.assertRaises(AnsibleExitJson): + self.module.main() + + self.assertTrue(fetch_url_mock.call_count, 1) + call_data = json.loads(fetch_url_mock.call_args[1]['data']) + assert call_data['username'] == "Ansible Bot" + assert call_data['content'] == "test" + + def test_failed_message(self): + """Test failure because webhook id is wrong.""" + + set_module_args({ + 'webhook_id': 'wrong', + 'webhook_token': 'xxx', + 'content': 'test' + }) + + with patch.object(discord, "fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'HTTP Error 404: Not Found', 'body': '{"message": "Unknown Webhook", "code": 10015}'}) + with self.assertRaises(AnsibleFailJson): + self.module.main() + + def test_failed_message_without_body(self): + """Test failure with empty response body.""" + + set_module_args({ + 'webhook_id': 'wrong', + 'webhook_token': 'xxx', + 'content': 'test' + }) + + with patch.object(discord, 
"fetch_url") as fetch_url_mock: + fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'HTTP Error 404: Not Found'}) + with self.assertRaises(AnsibleFailJson): + self.module.main() From b9fa9116c15f5d6e90eb8626ec67650854429b14 Mon Sep 17 00:00:00 2001 From: spike77453 Date: Tue, 11 May 2021 19:35:30 +0200 Subject: [PATCH 0277/3093] nmcli: Remove dead code, 'options' never contains keys from 'param_alias' (#2417) * nmcli: Remove dead code, 'options' never contains keys from 'param_alias' * Update changelogs/fragments/2417-nmcli_remove_dead_code.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2417-nmcli_remove_dead_code.yml | 2 ++ plugins/modules/net_tools/nmcli.py | 18 ------------------ 2 files changed, 2 insertions(+), 18 deletions(-) create mode 100644 changelogs/fragments/2417-nmcli_remove_dead_code.yml diff --git a/changelogs/fragments/2417-nmcli_remove_dead_code.yml b/changelogs/fragments/2417-nmcli_remove_dead_code.yml new file mode 100644 index 0000000000..9d94c393fa --- /dev/null +++ b/changelogs/fragments/2417-nmcli_remove_dead_code.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - remove dead code, ``options`` never contains keys from ``param_alias`` (https://github.com/ansible-collections/community.general/pull/2417). 
diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index e2ed4ad572..929d88c654 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -1036,17 +1036,6 @@ class Nmcli(object): return conn_info def _compare_conn_params(self, conn_info, options): - # See nmcli(1) for details - param_alias = { - 'type': 'connection.type', - 'con-name': 'connection.id', - 'autoconnect': 'connection.autoconnect', - 'ifname': 'connection.interface-name', - 'master': 'connection.master', - 'slave-type': 'connection.slave-type', - 'zone': 'connection.zone', - } - changed = False diff_before = dict() diff_after = dict() @@ -1070,13 +1059,6 @@ class Nmcli(object): value = value.upper() # ensure current_value is also converted to uppercase in case nmcli changes behaviour current_value = current_value.upper() - elif key in param_alias: - real_key = param_alias[key] - if real_key in conn_info: - current_value = conn_info[real_key] - else: - # alias parameter does not exist - current_value = None else: # parameter does not exist current_value = None From 83a0c32269210c33c978ecf1a11096608ff91a20 Mon Sep 17 00:00:00 2001 From: Xabier Napal Date: Wed, 12 May 2021 17:33:27 +0200 Subject: [PATCH 0278/3093] influxdb_retention_policy - add state argument to module spec (#2383) (#2385) * influxdb_retention_policy: add state option to module argument spec * influxdb_retention_policy: simplify duration parsing logic (suggested in #2284) * add changelog * fix documentation and changelog * add constants for duration and sgduration validations * restyle ansible module spec Co-authored-by: Felix Fontein * improve changelog Co-authored-by: Felix Fontein * set changed result in check mode for state absent * remove required flag in optional module arguments * influxdb_retention_policy: improve examples readability Co-authored-by: Felix Fontein --- ...uxdb_retention_policy-add-state-option.yml | 6 + .../influxdb/influxdb_retention_policy.py | 
132 ++++++++++++------ 2 files changed, 92 insertions(+), 46 deletions(-) create mode 100644 changelogs/fragments/2383-influxdb_retention_policy-add-state-option.yml diff --git a/changelogs/fragments/2383-influxdb_retention_policy-add-state-option.yml b/changelogs/fragments/2383-influxdb_retention_policy-add-state-option.yml new file mode 100644 index 0000000000..b8e358848e --- /dev/null +++ b/changelogs/fragments/2383-influxdb_retention_policy-add-state-option.yml @@ -0,0 +1,6 @@ +minor_changes: + - influxdb_retention_policy - add ``state`` parameter with allowed values + ``present`` and ``absent`` to support deletion of existing retention policies + (https://github.com/ansible-collections/community.general/issues/2383). + - influxdb_retention_policy - simplify duration logic parsing + (https://github.com/ansible-collections/community.general/pull/2385). diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py b/plugins/modules/database/influxdb/influxdb_retention_policy.py index 883adaffa6..3ff48cbad0 100644 --- a/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ b/plugins/modules/database/influxdb/influxdb_retention_policy.py @@ -29,17 +29,24 @@ options: - Name of the retention policy. required: true type: str + state: + description: + - State of the retention policy. + choices: [ absent, present ] + default: present + type: str + version_added: 3.1.0 duration: description: - Determines how long InfluxDB should keep the data. If specified, it should be C(INF) or at least one hour. If not specified, C(INF) is assumed. Supports complex duration expressions with multiple units. - required: true + - Required only if I(state) is set to C(present). type: str replication: description: - Determines how many independent copies of each point are stored in the cluster. - required: true + - Required only if I(state) is set to C(present). 
type: int default: description: @@ -63,53 +70,65 @@ EXAMPLES = r''' # Example influxdb_retention_policy command from Ansible Playbooks - name: Create 1 hour retention policy community.general.influxdb_retention_policy: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" policy_name: test duration: 1h replication: 1 ssl: yes validate_certs: yes + state: present - name: Create 1 day retention policy with 1 hour shard group duration community.general.influxdb_retention_policy: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" policy_name: test duration: 1d replication: 1 shard_group_duration: 1h + state: present - name: Create 1 week retention policy with 1 day shard group duration community.general.influxdb_retention_policy: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" policy_name: test duration: 1w replication: 1 shard_group_duration: 1d + state: present - name: Create infinite retention policy with 1 week of shard group duration community.general.influxdb_retention_policy: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" policy_name: test duration: INF replication: 1 ssl: no validate_certs: no shard_group_duration: 1w + state: present - name: Create retention policy with complex durations community.general.influxdb_retention_policy: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" policy_name: test duration: 5d1h30m replication: 1 ssl: no validate_certs: no 
shard_group_duration: 1d10h30m + state: present + +- name: Drop retention policy + community.general.influxdb_retention_policy: + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + state: absent ''' RETURN = r''' @@ -134,6 +153,21 @@ VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$') DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)') EXTENDED_DURATION_REGEX = re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))') +DURATION_UNIT_NANOSECS = { + 'ns': 1, + 'u': 1000, + 'µ': 1000, + 'ms': 1000 * 1000, + 's': 1000 * 1000 * 1000, + 'm': 1000 * 1000 * 1000 * 60, + 'h': 1000 * 1000 * 1000 * 60 * 60, + 'd': 1000 * 1000 * 1000 * 60 * 60 * 24, + 'w': 1000 * 1000 * 1000 * 60 * 60 * 24 * 7, +} + +MINIMUM_VALID_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] +MINIMUM_VALID_SHARD_GROUP_DURATION = 1 * DURATION_UNIT_NANOSECS['h'] + def check_duration_literal(value): return VALID_DURATION_REGEX.search(value) is not None @@ -148,28 +182,9 @@ def parse_duration_literal(value, extended=False): lookup = (EXTENDED_DURATION_REGEX if extended else DURATION_REGEX).findall(value) for duration_literal in lookup: - if extended and duration_literal[3] == 's': - duration_val = float(duration_literal[2]) - duration += duration_val * 1000 * 1000 * 1000 - else: - duration_val = int(duration_literal[0]) - - if duration_literal[1] == 'ns': - duration += duration_val - elif duration_literal[1] == 'u' or duration_literal[1] == 'µ': - duration += duration_val * 1000 - elif duration_literal[1] == 'ms': - duration += duration_val * 1000 * 1000 - elif duration_literal[1] == 's': - duration += duration_val * 1000 * 1000 * 1000 - elif duration_literal[1] == 'm': - duration += duration_val * 1000 * 1000 * 1000 * 60 - elif duration_literal[1] == 'h': - duration += duration_val * 1000 * 1000 * 1000 * 60 * 60 - elif duration_literal[1] == 'd': - duration += duration_val * 1000 * 1000 * 1000 * 60 * 60 * 24 - elif 
duration_literal[1] == 'w': - duration += duration_val * 1000 * 1000 * 1000 * 60 * 60 * 24 * 7 + filtered_literal = list(filter(None, duration_literal)) + duration_val = float(filtered_literal[0]) + duration += duration_val * DURATION_UNIT_NANOSECS[filtered_literal[1]] return duration @@ -208,7 +223,7 @@ def create_retention_policy(module, client): module.fail_json(msg="Failed to parse value of duration") influxdb_duration_format = parse_duration_literal(duration) - if influxdb_duration_format != 0 and influxdb_duration_format < 3600000000000: + if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION: module.fail_json(msg="duration value must be at least 1h") if shard_group_duration is not None: @@ -216,7 +231,7 @@ def create_retention_policy(module, client): module.fail_json(msg="Failed to parse value of shard_group_duration") influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) - if influxdb_shard_group_duration_format < 3600000000000: + if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION: module.fail_json(msg="shard_group_duration value must be finite and at least 1h") if not module.check_mode: @@ -245,7 +260,7 @@ def alter_retention_policy(module, client, retention_policy): module.fail_json(msg="Failed to parse value of duration") influxdb_duration_format = parse_duration_literal(duration) - if influxdb_duration_format != 0 and influxdb_duration_format < 3600000000000: + if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION: module.fail_json(msg="duration value must be at least 1h") if shard_group_duration is None: @@ -255,7 +270,7 @@ def alter_retention_policy(module, client, retention_policy): module.fail_json(msg="Failed to parse value of shard_group_duration") influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration) - if influxdb_shard_group_duration_format < 3600000000000: + if 
influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION: module.fail_json(msg="shard_group_duration value must be finite and at least 1h") if (retention_policy['duration'] != influxdb_duration_format or @@ -272,30 +287,55 @@ def alter_retention_policy(module, client, retention_policy): module.exit_json(changed=changed) +def drop_retention_policy(module, client): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + + if not module.check_mode: + try: + client.drop_retention_policy(policy_name, database_name) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + module.exit_json(changed=True) + + def main(): argument_spec = InfluxDb.influxdb_argument_spec() argument_spec.update( + state=dict(default='present', type='str', choices=['present', 'absent']), database_name=dict(required=True, type='str'), policy_name=dict(required=True, type='str'), - duration=dict(required=True, type='str'), - replication=dict(required=True, type='int'), + duration=dict(type='str'), + replication=dict(type='int'), default=dict(default=False, type='bool'), - shard_group_duration=dict(required=False, type='str'), + shard_group_duration=dict(type='str'), ) module = AnsibleModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, + required_if=( + ('state', 'present', ['duration', 'replication']), + ), ) + state = module.params['state'] + influxdb = InfluxDb(module) client = influxdb.connect_to_influxdb() retention_policy = find_retention_policy(module, client) - if retention_policy: - alter_retention_policy(module, client, retention_policy) - else: - create_retention_policy(module, client) + if state == 'present': + if retention_policy: + alter_retention_policy(module, client, retention_policy) + else: + create_retention_policy(module, client) + + if state == 'absent': + if retention_policy: + drop_retention_policy(module, client) + else: + module.exit_json(changed=False) 
if __name__ == '__main__': From 265d034e310fd3704d4779108be910f611bca91e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 13 May 2021 03:37:31 +1200 Subject: [PATCH 0279/3093] linode - docs/validation changes + minor refactorings (#2410) * multiple changes: - documentation fixes - minor refactorings * added param deprecation note to the documentation * added changelog fragment * Update changelogs/fragments/2410-linode-improvements.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/2410-linode-improvements.yml Co-authored-by: Felix Fontein * Update plugins/modules/cloud/linode/linode.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2410-linode-improvements.yml | 5 ++ plugins/modules/cloud/linode/linode.py | 71 +++++++++---------- tests/sanity/ignore-2.10.txt | 3 - tests/sanity/ignore-2.11.txt | 3 - tests/sanity/ignore-2.12.txt | 3 - tests/sanity/ignore-2.9.txt | 2 - 6 files changed, 39 insertions(+), 48 deletions(-) create mode 100644 changelogs/fragments/2410-linode-improvements.yml diff --git a/changelogs/fragments/2410-linode-improvements.yml b/changelogs/fragments/2410-linode-improvements.yml new file mode 100644 index 0000000000..cdf8551b08 --- /dev/null +++ b/changelogs/fragments/2410-linode-improvements.yml @@ -0,0 +1,5 @@ +deprecated_features: + - linode - parameter ``backupsenabled`` is deprecated and will be removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2410). +minor_changes: + - linode - added proper traceback when failing due to exceptions (https://github.com/ansible-collections/community.general/pull/2410). + - linode - parameter ``additional_disks`` is now validated as a list of dictionaries (https://github.com/ansible-collections/community.general/pull/2410). 
diff --git a/plugins/modules/cloud/linode/linode.py b/plugins/modules/cloud/linode/linode.py index a35b25b6c7..c9ee0e61ed 100644 --- a/plugins/modules/cloud/linode/linode.py +++ b/plugins/modules/cloud/linode/linode.py @@ -21,8 +21,10 @@ options: type: str api_key: description: - - Linode API key + - Linode API key. + - C(LINODE_API_KEY) env variable can be used instead. type: str + required: yes name: description: - Name to give the instance (alphanumeric, dashes, underscore). @@ -46,6 +48,7 @@ options: - List of dictionaries for creating additional disks that are added to the Linode configuration settings. - Dictionary takes Size, Label, Type. Size is in MB. type: list + elements: dict alert_bwin_enabled: description: - Set status of bandwidth in alerts. @@ -86,9 +89,18 @@ options: description: - Set threshold for average IO ops/sec over 2 hour period. type: int + backupsenabled: + description: + - Deprecated parameter, it will be removed in community.general C(5.0.0). + - To enable backups pass values to either I(backupweeklyday) or I(backupwindow). + type: int backupweeklyday: description: - - Integer value for what day of the week to store weekly backups. + - Day of the week to take backups. + type: int + backupwindow: + description: + - The time window in which backups will be taken. type: int plan: description: @@ -153,7 +165,6 @@ author: notes: - Please note, linode-python does not have python 3 support. - This module uses the now deprecated v3 of the Linode API. - - C(LINODE_API_KEY) env variable can be used instead. - Please review U(https://www.linode.com/api/linode) for determining the required parameters. 
''' @@ -262,7 +273,6 @@ EXAMPLES = ''' delegate_to: localhost ''' -import os import time import traceback @@ -274,7 +284,7 @@ except ImportError: LINODE_IMP_ERR = traceback.format_exc() HAS_LINODE = False -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback def randompass(): @@ -358,7 +368,7 @@ def linodeServers(module, api, state, name, if not servers: for arg in (name, plan, distribution, datacenter): if not arg: - module.fail_json(msg='%s is required for %s state' % (arg, state)) # @TODO use required_if instead + module.fail_json(msg='%s is required for %s state' % (arg, state)) # Create linode entity new_server = True @@ -383,7 +393,7 @@ def linodeServers(module, api, state, name, try: res = api.linode_ip_addprivate(LinodeID=linode_id) except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) if not disks: for arg in (name, linode_id, distribution): @@ -428,7 +438,7 @@ def linodeServers(module, api, state, name, jobs.append(res['JobID']) except Exception as e: # TODO: destroy linode ? 
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) if not configs: for arg in (name, linode_id, distribution): @@ -471,7 +481,7 @@ def linodeServers(module, api, state, name, Disklist=disks_list, Label='%s config' % name) configs = api.linode_config_list(LinodeId=linode_id) except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) # Start / Ensure servers are running for server in servers: @@ -517,10 +527,7 @@ def linodeServers(module, api, state, name, instance['password'] = password instances.append(instance) - elif state in ('stopped'): - if not linode_id: - module.fail_json(msg='linode_id is required for stopped state') - + elif state in ('stopped',): if not servers: module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) @@ -530,17 +537,14 @@ def linodeServers(module, api, state, name, try: res = api.linode_shutdown(LinodeId=linode_id) except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) instance['status'] = 'Stopping' changed = True else: instance['status'] = 'Stopped' instances.append(instance) - elif state in ('restarted'): - if not linode_id: - module.fail_json(msg='linode_id is required for restarted state') - + elif state in ('restarted',): if not servers: module.fail_json(msg='Server (lid: %s) not found' % (linode_id)) @@ -549,7 +553,7 @@ def linodeServers(module, api, state, name, try: res = api.linode_reboot(LinodeId=server['LINODEID']) except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) instance['status'] = 'Restarting' changed = True instances.append(instance) @@ -560,7 +564,7 @@ def 
linodeServers(module, api, state, name, try: api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) instance['status'] = 'Deleting' changed = True instances.append(instance) @@ -577,7 +581,7 @@ def main(): argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']), - api_key=dict(type='str', no_log=True), + api_key=dict(type='str', no_log=True, required=True, fallback=(env_fallback, ['LINODE_API_KEY'])), name=dict(type='str', required=True), alert_bwin_enabled=dict(type='bool'), alert_bwin_threshold=dict(type='int'), @@ -589,12 +593,12 @@ def main(): alert_cpu_threshold=dict(type='int'), alert_diskio_enabled=dict(type='bool'), alert_diskio_threshold=dict(type='int'), - backupsenabled=dict(type='int'), + backupsenabled=dict(type='int', removed_in_version='5.0.0', removed_from_collection='community.general'), backupweeklyday=dict(type='int'), backupwindow=dict(type='int'), displaygroup=dict(type='str', default=''), plan=dict(type='int'), - additional_disks=dict(type='list'), + additional_disks=dict(type='list', elements='dict'), distribution=dict(type='int'), datacenter=dict(type='int'), kernel_id=dict(type='int'), @@ -608,6 +612,10 @@ def main(): wait_timeout=dict(type='int', default=300), watchdog=dict(type='bool', default=True), ), + required_if=[ + ('state', 'restarted', ['linode_id']), + ('state', 'stopped', ['linode_id']), + ] ) if not HAS_LINODE: @@ -626,7 +634,6 @@ def main(): alert_cpu_threshold = module.params.get('alert_cpu_threshold') alert_diskio_enabled = module.params.get('alert_diskio_enabled') alert_diskio_threshold = module.params.get('alert_diskio_threshold') - backupsenabled = module.params.get('backupsenabled') backupweeklyday = module.params.get('backupweeklyday') backupwindow = 
module.params.get('backupwindow') displaygroup = module.params.get('displaygroup') @@ -642,10 +649,9 @@ def main(): ssh_pub_key = module.params.get('ssh_pub_key') swap = module.params.get('swap') wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) + wait_timeout = module.params.get('wait_timeout') watchdog = int(module.params.get('watchdog')) - kwargs = dict() check_items = dict( alert_bwin_enabled=alert_bwin_enabled, alert_bwin_threshold=alert_bwin_threshold, @@ -661,23 +667,14 @@ def main(): backupwindow=backupwindow, ) - for key, value in check_items.items(): - if value is not None: - kwargs[key] = value - - # Setup the api_key - if not api_key: - try: - api_key = os.environ['LINODE_API_KEY'] - except KeyError as e: - module.fail_json(msg='Unable to load %s' % e.message) + kwargs = dict((k, v) for k, v in check_items.items() if v is not None) # setup the auth try: api = linode_api.Api(api_key) api.test_echo() except Exception as e: - module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE']) + module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc()) linodeServers(module, api, state, name, displaygroup, plan, diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index a33e194233..da611904bb 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -1,9 +1,6 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py 
validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 4678f10294..a7d85904ae 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -1,8 +1,5 @@ plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index ec34ff7833..cf5d588e9a 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -1,8 +1,5 @@ plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements -plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 8f18be1c44..5c759d2095 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -1,8 +1,6 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over 
time plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate -plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc -plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error From 23dda56913c19fdd8f1156be9496b7762fc7c11a Mon Sep 17 00:00:00 2001 From: Kogelvis Date: Thu, 13 May 2021 21:48:49 +0200 Subject: [PATCH 0280/3093] Add proxmox_nic module (#2449) * Add proxmox_nic module Add proxmox_nic module to manage NIC's on Qemu(KVM) VM's in a Proxmox VE cluster. Update proxmox integration tests and add tests for proxmox_nic module. This partially solves https://github.com/ansible-collections/community.general/issues/1964#issuecomment-790499397 and allows for adding/updating/deleting network interface cards after creating/cloning a VM. The proxmox_nic module will keep MAC-addresses the same when updating a NIC. It only changes when explicitly setting a MAC-address. * Apply suggestions from code review Co-authored-by: Felix Fontein * Add check_mode and implement review comments - check_mode added - some documentation updates - when MTU is set, check if the model is virtio, else fail - trunks can now be provided as list of ints instead of vlanid[;vlanid...] 
* Make returns on update_nic and delete_nic more readable Co-authored-by: Felix Fontein * Increase readability on update_nic and delete_nic * Implement check in get_vmid - get_vmid will now fail when multiple vmid's are returned as proxmox doesn't guarantee uniqueness - remove an unused import - fix a typo in an error message * Add some error checking to get_vmid - get_vmid will now return the error message when proxmoxer fails - get_vmid will return the vmid directly instead of a list of one - Some minor documentation updates * Warn instead of fail when setting mtu on unsupported nic - When setting the MTU on an unsupported NIC model (virtio is the only supported model) this module will now print a warning instead of failing. - Some minor documentation updates. * Take advantage of proxmox_auth_argument_spec Make use of proxmox_auth_argument_spec from plugins/module_utils/proxmox.py This provides some extra environment fallbacks. * Add blank line to conform with pep8 Co-authored-by: Felix Fontein --- plugins/modules/cloud/misc/proxmox_nic.py | 349 ++++++++++++++++++ plugins/modules/proxmox_nic.py | 1 + .../targets/proxmox/tasks/main.yml | 88 ++++- 3 files changed, 437 insertions(+), 1 deletion(-) create mode 100644 plugins/modules/cloud/misc/proxmox_nic.py create mode 120000 plugins/modules/proxmox_nic.py diff --git a/plugins/modules/cloud/misc/proxmox_nic.py b/plugins/modules/cloud/misc/proxmox_nic.py new file mode 100644 index 0000000000..a9c9f14ddc --- /dev/null +++ b/plugins/modules/cloud/misc/proxmox_nic.py @@ -0,0 +1,349 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Lammert Hellinga (@Kogelvis) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: proxmox_nic +short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster. 
+version_added: 3.1.0 +description: + - Allows you to create/update/delete a NIC on Qemu(KVM) Virtual Machines in a Proxmox VE cluster. +author: "Lammert Hellinga (@Kogelvis) " +options: + bridge: + description: + - Add this interface to the specified bridge device. The Proxmox VE default bridge is called C(vmbr0). + type: str + firewall: + description: + - Whether this interface should be protected by the firewall. + type: bool + default: false + interface: + description: + - Name of the interface, should be C(net[n]) where C(1 ≤ n ≤ 31). + type: str + required: true + link_down: + description: + - Whether this interface should be disconnected (like pulling the plug). + type: bool + default: false + mac: + description: + - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified. + - When not specified this module will keep the MAC address the same when changing an existing interface. + type: str + model: + description: + - The NIC emulator model. + type: str + choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', + 'rtl8139', 'virtio', 'vmxnet3'] + default: virtio + mtu: + description: + - Force MTU, for C(virtio) model only, setting will be ignored otherwise. + - Set to C(1) to use the bridge MTU. + - Value should be C(1 ≤ n ≤ 65520). + type: int + name: + description: + - Specifies the VM name. Only used on the configuration web interface. + - Required only for I(state=present). + type: str + queues: + description: + - Number of packet queues to be used on the device. + - Value should be C(0 ≤ n ≤ 16). + type: int + rate: + description: + - Rate limit in MBps (MegaBytes per second) as floating point number. + type: float + state: + description: + - Indicates desired state of the NIC. + type: str + choices: ['present', 'absent'] + default: present + tag: + description: + - VLAN tag to apply to packets on this interface. 
+ - Value should be C(1 ≤ n ≤ 4094). + type: int + trunks: + description: + - List of VLAN trunks to pass through this interface. + type: list + elements: int + vmid: + description: + - Specifies the instance ID. + type: int +extends_documentation_fragment: + - community.general.proxmox.documentation +''' + +EXAMPLES = ''' +- name: Create NIC net0 targeting the vm by name + community.general.proxmox_nic: + api_user: root@pam + api_password: secret + api_host: proxmoxhost + name: my_vm + interface: net0 + bridge: vmbr0 + tag: 3 + +- name: Create NIC net0 targeting the vm by id + community.general.proxmox_nic: + api_user: root@pam + api_password: secret + api_host: proxmoxhost + vmid: 103 + interface: net0 + bridge: vmbr0 + mac: "12:34:56:C0:FF:EE" + firewall: true + +- name: Delete NIC net0 targeting the vm by name + community.general.proxmox_nic: + api_user: root@pam + api_password: secret + api_host: proxmoxhost + name: my_vm + interface: net0 + state: absent +''' + +RETURN = ''' +vmid: + description: The VM vmid. 
+ returned: success + type: int + sample: 115 +msg: + description: A short message + returned: always + type: str + sample: "Nic net0 unchanged on VM with vmid 103" +''' + +try: + from proxmoxer import ProxmoxAPI + HAS_PROXMOXER = True +except ImportError: + HAS_PROXMOXER = False + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible_collections.community.general.plugins.module_utils.proxmox import proxmox_auth_argument_spec + + +def get_vmid(module, proxmox, name): + try: + vms = [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm.get('name') == name] + except Exception as e: + module.fail_json(msg='Error: %s occurred while retrieving VM with name = %s' % (e, name)) + + if not vms: + module.fail_json(msg='No VM found with name: %s' % name) + elif len(vms) > 1: + module.fail_json(msg='Multiple VMs found with name: %s, provide vmid instead' % name) + + return vms[0] + + +def get_vm(proxmox, vmid): + return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] + + +def update_nic(module, proxmox, vmid, interface, model, **kwargs): + vm = get_vm(proxmox, vmid) + + try: + vminfo = proxmox.nodes(vm[0]['node']).qemu(vmid).config.get() + except Exception as e: + module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e)) + + if interface in vminfo: + # Convert the current config to a dictionary + config = vminfo[interface].split(',') + config.sort() + + config_current = {} + + for i in config: + kv = i.split('=') + try: + config_current[kv[0]] = kv[1] + except IndexError: + config_current[kv[0]] = '' + + # determine the current model nic and mac-address + models = ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', + 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', 'rtl8139', 'virtio', 'vmxnet3'] + current_model = set(models) & set(config_current.keys()) + current_model = current_model.pop() + current_mac = 
config_current[current_model] + + # build nic config string + config_provided = "{0}={1}".format(model, current_mac) + else: + config_provided = model + + if kwargs['mac']: + config_provided = "{0}={1}".format(model, kwargs['mac']) + + if kwargs['bridge']: + config_provided += ",bridge={0}".format(kwargs['bridge']) + + if kwargs['firewall']: + config_provided += ",firewall=1" + + if kwargs['link_down']: + config_provided += ',link_down=1' + + if kwargs['mtu']: + if model == 'virtio': + config_provided += ",mtu={0}".format(kwargs['mtu']) + else: + module.warn( + 'Ignoring MTU for nic {0} on VM with vmid {1}, ' + 'model should be set to \'virtio\': '.format(interface, vmid)) + + if kwargs['queues']: + config_provided += ",queues={0}".format(kwargs['queues']) + + if kwargs['rate']: + config_provided += ",rate={0}".format(kwargs['rate']) + + if kwargs['tag']: + config_provided += ",tag={0}".format(kwargs['tag']) + + if kwargs['trunks']: + config_provided += ",trunks={0}".format(';'.join(str(x) for x in kwargs['trunks'])) + + net = {interface: config_provided} + vm = get_vm(proxmox, vmid) + + if ((interface not in vminfo) or (vminfo[interface] != config_provided)): + if not module.check_mode: + proxmox.nodes(vm[0]['node']).qemu(vmid).config.set(**net) + return True + + return False + + +def delete_nic(module, proxmox, vmid, interface): + vm = get_vm(proxmox, vmid) + vminfo = proxmox.nodes(vm[0]['node']).qemu(vmid).config.get() + + if interface in vminfo: + if not module.check_mode: + proxmox.nodes(vm[0]['node']).qemu(vmid).config.set(vmid=vmid, delete=interface) + return True + + return False + + +def main(): + module_args = proxmox_auth_argument_spec() + nic_args = dict( + bridge=dict(type='str'), + firewall=dict(type='bool', default=False), + interface=dict(type='str', required=True), + link_down=dict(type='bool', default=False), + mac=dict(type='str'), + model=dict(choices=['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', + 'i82551', 'i82557b', 'i82559er', 
'ne2k_isa', 'ne2k_pci', 'pcnet', + 'rtl8139', 'virtio', 'vmxnet3'], default='virtio'), + mtu=dict(type='int'), + name=dict(type='str'), + queues=dict(type='int'), + rate=dict(type='float'), + state=dict(default='present', choices=['present', 'absent']), + tag=dict(type='int'), + trunks=dict(type='list', elements='int'), + vmid=dict(type='int'), + ) + module_args.update(nic_args) + + module = AnsibleModule( + argument_spec=module_args, + required_together=[('api_token_id', 'api_token_secret')], + required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')], + supports_check_mode=True, + ) + + if not HAS_PROXMOXER: + module.fail_json(msg='proxmoxer required for this module') + + api_host = module.params['api_host'] + api_password = module.params['api_password'] + api_token_id = module.params['api_token_id'] + api_token_secret = module.params['api_token_secret'] + api_user = module.params['api_user'] + interface = module.params['interface'] + model = module.params['model'] + name = module.params['name'] + state = module.params['state'] + validate_certs = module.params['validate_certs'] + vmid = module.params['vmid'] + + auth_args = {'user': api_user} + if not (api_token_id and api_token_secret): + auth_args['password'] = api_password + else: + auth_args['token_name'] = api_token_id + auth_args['token_value'] = api_token_secret + + try: + proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args) + except Exception as e: + module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) + + # If vmid is not defined then retrieve its value from the vm name, + if not vmid: + vmid = get_vmid(module, proxmox, name) + + # Ensure VM id exists + if not get_vm(proxmox, vmid): + module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid) + + if state == 'present': + try: + if update_nic(module, proxmox, vmid, interface, model, + bridge=module.params['bridge'], + firewall=module.params['firewall'], + 
link_down=module.params['link_down'], + mac=module.params['mac'], + mtu=module.params['mtu'], + queues=module.params['queues'], + rate=module.params['rate'], + tag=module.params['tag'], + trunks=module.params['trunks']): + module.exit_json(changed=True, vmid=vmid, msg="Nic {0} updated on VM with vmid {1}".format(interface, vmid)) + else: + module.exit_json(vmid=vmid, msg="Nic {0} unchanged on VM with vmid {1}".format(interface, vmid)) + except Exception as e: + module.fail_json(vmid=vmid, msg='Unable to change nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) + + elif state == 'absent': + try: + if delete_nic(module, proxmox, vmid, interface): + module.exit_json(changed=True, vmid=vmid, msg="Nic {0} deleted on VM with vmid {1}".format(interface, vmid)) + else: + module.exit_json(vmid=vmid, msg="Nic {0} does not exist on VM with vmid {1}".format(interface, vmid)) + except Exception as e: + module.fail_json(vmid=vmid, msg='Unable to delete nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/proxmox_nic.py b/plugins/modules/proxmox_nic.py new file mode 120000 index 0000000000..88756ab636 --- /dev/null +++ b/plugins/modules/proxmox_nic.py @@ -0,0 +1 @@ +cloud/misc/proxmox_nic.py \ No newline at end of file diff --git a/tests/integration/targets/proxmox/tasks/main.yml b/tests/integration/targets/proxmox/tasks/main.yml index 6301cb66ef..5954d3f11f 100644 --- a/tests/integration/targets/proxmox/tasks/main.yml +++ b/tests/integration/targets/proxmox/tasks/main.yml @@ -48,7 +48,7 @@ api_token_secret: "{{ api_token_secret | default(omit) }}" validate_certs: "{{ validate_certs }}" register: results - + - assert: that: - results is not changed @@ -226,6 +226,92 @@ - results_action_current.vmid == {{ vmid }} - results_action_current.msg == "VM test-instance with vmid = {{ vmid }} is running" +- name: VM add/change/delete NIC + tags: [ 'nic' ] + block: + - name: Add NIC to test VM + 
proxmox_nic: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + vmid: "{{ vmid }}" + state: present + interface: net5 + bridge: vmbr0 + tag: 42 + register: results + + - assert: + that: + - results is changed + - results.vmid == {{ vmid }} + - results.msg == "Nic net5 updated on VM with vmid {{ vmid }}" + + - name: Update NIC no changes + proxmox_nic: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + vmid: "{{ vmid }}" + state: present + interface: net5 + bridge: vmbr0 + tag: 42 + register: results + + - assert: + that: + - results is not changed + - results.vmid == {{ vmid }} + - results.msg == "Nic net5 unchanged on VM with vmid {{ vmid }}" + + - name: Update NIC with changes + proxmox_nic: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs }}" + vmid: "{{ vmid }}" + state: present + interface: net5 + bridge: vmbr0 + tag: 24 + firewall: True + register: results + + - assert: + that: + - results is changed + - results.vmid == {{ vmid }} + - results.msg == "Nic net5 updated on VM with vmid {{ vmid }}" + + - name: Delete NIC + proxmox_nic: + api_host: "{{ api_host }}" + api_user: "{{ user }}@{{ domain }}" + api_password: "{{ api_password | default(omit) }}" + api_token_id: "{{ api_token_id | default(omit) }}" + api_token_secret: "{{ api_token_secret | default(omit) }}" + validate_certs: "{{ validate_certs 
}}" + vmid: "{{ vmid }}" + state: absent + interface: net5 + register: results + + - assert: + that: + - results is changed + - results.vmid == {{ vmid }} + - results.msg == "Nic net5 deleted on VM with vmid {{ vmid }}" + - name: VM stop tags: [ 'stop' ] block: From 384655e15c7e36e5b4c56578c534053404f9f1d1 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 13 May 2021 21:49:57 +0200 Subject: [PATCH 0281/3093] Add groupby_as_dict filter (#2323) * Add groupby_as_dict filter. * Test all error cases. --- .../fragments/2323-groupby_as_dict-filter.yml | 3 ++ plugins/filter/groupby.py | 42 +++++++++++++++++ .../targets/filter_groupby/aliases | 2 + .../targets/filter_groupby/tasks/main.yml | 45 +++++++++++++++++++ .../targets/filter_groupby/vars/main.yml | 31 +++++++++++++ 5 files changed, 123 insertions(+) create mode 100644 changelogs/fragments/2323-groupby_as_dict-filter.yml create mode 100644 plugins/filter/groupby.py create mode 100644 tests/integration/targets/filter_groupby/aliases create mode 100644 tests/integration/targets/filter_groupby/tasks/main.yml create mode 100644 tests/integration/targets/filter_groupby/vars/main.yml diff --git a/changelogs/fragments/2323-groupby_as_dict-filter.yml b/changelogs/fragments/2323-groupby_as_dict-filter.yml new file mode 100644 index 0000000000..e72f323a60 --- /dev/null +++ b/changelogs/fragments/2323-groupby_as_dict-filter.yml @@ -0,0 +1,3 @@ +add plugin.filter: + - name: groupby_as_dict + description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute diff --git a/plugins/filter/groupby.py b/plugins/filter/groupby.py new file mode 100644 index 0000000000..a2a85aa905 --- /dev/null +++ b/plugins/filter/groupby.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Felix Fontein +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = 
type + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.common._collections_compat import Mapping, Sequence + + +def groupby_as_dict(sequence, attribute): + ''' + Given a sequence of dictionaries and an attribute name, returns a dictionary mapping + the value of this attribute to the dictionary. + + If multiple dictionaries in the sequence have the same value for this attribute, + the filter will fail. + ''' + if not isinstance(sequence, Sequence): + raise AnsibleFilterError('Input is not a sequence') + + result = dict() + for list_index, element in enumerate(sequence): + if not isinstance(element, Mapping): + raise AnsibleFilterError('Sequence element #{0} is not a mapping'.format(list_index)) + if attribute not in element: + raise AnsibleFilterError('Attribute not contained in element #{0} of sequence'.format(list_index)) + result_index = element[attribute] + if result_index in result: + raise AnsibleFilterError('Multiple sequence entries have attribute value {0!r}'.format(result_index)) + result[result_index] = element + return result + + +class FilterModule(object): + ''' Ansible list filters ''' + + def filters(self): + return { + 'groupby_as_dict': groupby_as_dict, + } diff --git a/tests/integration/targets/filter_groupby/aliases b/tests/integration/targets/filter_groupby/aliases new file mode 100644 index 0000000000..6e79abdd02 --- /dev/null +++ b/tests/integration/targets/filter_groupby/aliases @@ -0,0 +1,2 @@ +shippable/posix/group4 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/filter_groupby/tasks/main.yml b/tests/integration/targets/filter_groupby/tasks/main.yml new file mode 100644 index 0000000000..29036a3bc5 --- /dev/null +++ b/tests/integration/targets/filter_groupby/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: Test functionality + assert: + that: + - list1 | community.general.groupby_as_dict('name') == dict1 + +- name: 'Test error: 
not a list' + set_fact: + test: "{{ list_no_list | community.general.groupby_as_dict('name') }}" + ignore_errors: true + register: result + +- assert: + that: + - result.msg == 'Input is not a sequence' + +- name: 'Test error: list element not a mapping' + set_fact: + test: "{{ list_no_dict | community.general.groupby_as_dict('name') }}" + ignore_errors: true + register: result + +- assert: + that: + - "result.msg == 'Sequence element #0 is not a mapping'" + +- name: 'Test error: list element does not have attribute' + set_fact: + test: "{{ list_no_attribute | community.general.groupby_as_dict('name') }}" + ignore_errors: true + register: result + +- assert: + that: + - "result.msg == 'Attribute not contained in element #1 of sequence'" + +- name: 'Test error: attribute collision' + set_fact: + test: "{{ list_collision | community.general.groupby_as_dict('name') }}" + ignore_errors: true + register: result + +- assert: + that: + - result.msg == "Multiple sequence entries have attribute value 'a'" diff --git a/tests/integration/targets/filter_groupby/vars/main.yml b/tests/integration/targets/filter_groupby/vars/main.yml new file mode 100644 index 0000000000..15d38a351a --- /dev/null +++ b/tests/integration/targets/filter_groupby/vars/main.yml @@ -0,0 +1,31 @@ +--- +list1: + - name: a + x: y + - name: b + z: 1 + +dict1: + a: + name: a + x: y + b: + name: b + z: 1 + +list_no_list: + a: + name: a + +list_no_dict: + - [] + - 1 + +list_no_attribute: + - name: a + foo: baz + - foo: bar + +list_collision: + - name: a + - name: a From ee9770cff720259cb781f0b1d9705f33a5d83fb1 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 13 May 2021 21:50:40 +0200 Subject: [PATCH 0282/3093] Deprecate nios content (#2458) * Deprecate nios content. * Make 2.9's ansible-test happy. * Add module_utils deprecation. 
--- changelogs/fragments/nios-deprecation.yml | 2 + meta/runtime.yml | 88 ++++++++++++++++++- plugins/lookup/nios.py | 4 + plugins/lookup/nios_next_ip.py | 4 + plugins/lookup/nios_next_network.py | 4 + .../modules/net_tools/nios/nios_a_record.py | 4 + .../net_tools/nios/nios_aaaa_record.py | 4 + .../net_tools/nios/nios_cname_record.py | 4 + .../modules/net_tools/nios/nios_dns_view.py | 4 + .../net_tools/nios/nios_fixed_address.py | 4 + .../net_tools/nios/nios_host_record.py | 4 + plugins/modules/net_tools/nios/nios_member.py | 4 + .../modules/net_tools/nios/nios_mx_record.py | 4 + .../net_tools/nios/nios_naptr_record.py | 4 + .../modules/net_tools/nios/nios_network.py | 4 + .../net_tools/nios/nios_network_view.py | 4 + .../modules/net_tools/nios/nios_nsgroup.py | 4 + .../modules/net_tools/nios/nios_ptr_record.py | 4 + .../modules/net_tools/nios/nios_srv_record.py | 4 + .../modules/net_tools/nios/nios_txt_record.py | 4 + plugins/modules/net_tools/nios/nios_zone.py | 4 + tests/sanity/ignore-2.9.txt | 32 +++++++ 22 files changed, 196 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/nios-deprecation.yml diff --git a/changelogs/fragments/nios-deprecation.yml b/changelogs/fragments/nios-deprecation.yml new file mode 100644 index 0000000000..bcfc2b4128 --- /dev/null +++ b/changelogs/fragments/nios-deprecation.yml @@ -0,0 +1,2 @@ +deprecated_features: +- "The nios, nios_next_ip, nios_next_network lookup plugins, the nios documentation fragment, and the nios_host_record, nios_ptr_record, nios_mx_record, nios_fixed_address, nios_zone, nios_member, nios_a_record, nios_aaaa_record, nios_network, nios_dns_view, nios_txt_record, nios_naptr_record, nios_srv_record, nios_cname_record, nios_nsgroup, and nios_network_view module have been deprecated and will be removed from community.general 5.0.0. Please install the `infoblox.nios_modules `_ collection instead and use its plugins and modules (https://github.com/ansible-collections/community.general/pull/2458)." 
diff --git a/meta/runtime.yml b/meta/runtime.yml index c116029974..e5b59bc046 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -37,6 +37,18 @@ plugin_routing: redirect: community.google.gcp_storage_file hashi_vault: redirect: community.hashi_vault.hashi_vault + nios: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_lookup instead. + nios_next_ip: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_next_ip lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_next_ip instead. + nios_next_network: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_next_network lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_next_network instead. modules: ali_instance_facts: tombstone: @@ -283,6 +295,70 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use netapp.ontap.na_ontap_info instead. + nios_a_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_a_record module has been deprecated. Please use infoblox.nios_modules.nios_a_record instead. + nios_aaaa_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_aaaa_record module has been deprecated. Please use infoblox.nios_modules.nios_aaaa_record instead. + nios_cname_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_cname_record module has been deprecated. Please use infoblox.nios_modules.nios_cname_record instead. + nios_dns_view: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_dns_view module has been deprecated. Please use infoblox.nios_modules.nios_dns_view instead. + nios_fixed_address: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_fixed_address module has been deprecated. 
Please use infoblox.nios_modules.nios_fixed_address instead. + nios_host_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_host_record module has been deprecated. Please use infoblox.nios_modules.nios_host_record instead. + nios_member: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_member module has been deprecated. Please use infoblox.nios_modules.nios_member instead. + nios_mx_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_mx_record module has been deprecated. Please use infoblox.nios_modules.nios_mx_record instead. + nios_naptr_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_naptr_record module has been deprecated. Please use infoblox.nios_modules.nios_naptr_record instead. + nios_network: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_network module has been deprecated. Please use infoblox.nios_modules.nios_network instead. + nios_network_view: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_network_view module has been deprecated. Please use infoblox.nios_modules.nios_network_view instead. + nios_nsgroup: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_nsgroup module has been deprecated. Please use infoblox.nios_modules.nios_nsgroup instead. + nios_ptr_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_ptr_record module has been deprecated. Please use infoblox.nios_modules.nios_ptr_record instead. + nios_srv_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_srv_record module has been deprecated. Please use infoblox.nios_modules.nios_srv_record instead. + nios_txt_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_txt_record module has been deprecated. 
Please use infoblox.nios_modules.nios_txt_record instead. + nios_zone: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_zone module has been deprecated. Please use infoblox.nios_modules.nios_zone instead. nginx_status_facts: tombstone: removal_version: 3.0.0 @@ -568,11 +644,13 @@ plugin_routing: redirect: community.kubevirt.kubevirt_common_options kubevirt_vm_options: redirect: community.kubevirt.kubevirt_vm_options + nios: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios document fragment has been deprecated. Please use infoblox.nios_modules.nios instead. postgresql: redirect: community.postgresql.postgresql module_utils: - remote_management.dellemc.dellemc_idrac: - redirect: dellemc.openmanage.dellemc_idrac docker.common: redirect: community.docker.common docker.swarm: @@ -587,6 +665,12 @@ plugin_routing: redirect: community.hrobot.robot kubevirt: redirect: community.kubevirt.kubevirt + net_tools.nios.api: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.net_tools.nios.api module_utils has been deprecated. Please use infoblox.nios_modules.api instead. + remote_management.dellemc.dellemc_idrac: + redirect: dellemc.openmanage.dellemc_idrac remote_management.dellemc.ome: redirect: dellemc.openmanage.ome postgresql: diff --git a/plugins/lookup/nios.py b/plugins/lookup/nios.py index 4b606e78ba..819d8077e6 100644 --- a/plugins/lookup/nios.py +++ b/plugins/lookup/nios.py @@ -25,6 +25,10 @@ DOCUMENTATION = ''' author: Unknown (!UNKNOWN) name: nios short_description: Query Infoblox NIOS objects +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it. + alternative: infoblox.nios_modules.nios_lookup + removed_in: 5.0.0 description: - Uses the Infoblox WAPI API to fetch NIOS specified objects. 
This lookup supports adding additional keywords to filter the return data and specify diff --git a/plugins/lookup/nios_next_ip.py b/plugins/lookup/nios_next_ip.py index 5b979b8d07..21773cb53e 100644 --- a/plugins/lookup/nios_next_ip.py +++ b/plugins/lookup/nios_next_ip.py @@ -25,6 +25,10 @@ DOCUMENTATION = ''' author: Unknown (!UNKNOWN) name: nios_next_ip short_description: Return the next available IP address for a network +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it. + alternative: infoblox.nios_modules.nios_next_ip + removed_in: 5.0.0 description: - Uses the Infoblox WAPI API to return the next available IP addresses for a given network CIDR diff --git a/plugins/lookup/nios_next_network.py b/plugins/lookup/nios_next_network.py index 84b230d1fe..2aa22ab704 100644 --- a/plugins/lookup/nios_next_network.py +++ b/plugins/lookup/nios_next_network.py @@ -25,6 +25,10 @@ DOCUMENTATION = ''' author: Unknown (!UNKNOWN) name: nios_next_network short_description: Return the next available network range for a network-container +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding lookup from it. + alternative: infoblox.nios_modules.nios_next_network + removed_in: 5.0.0 description: - Uses the Infoblox WAPI API to return the next available network addresses for a given network CIDR diff --git a/plugins/modules/net_tools/nios/nios_a_record.py b/plugins/modules/net_tools/nios/nios_a_record.py index 7e8b273024..b4adfe0103 100644 --- a/plugins/modules/net_tools/nios/nios_a_record.py +++ b/plugins/modules/net_tools/nios/nios_a_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_a_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS A records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. 
+ alternative: infoblox.nios_modules.nios_a_record + removed_in: 5.0.0 description: - Adds and/or removes instances of A record objects from Infoblox NIOS servers. This module manages NIOS C(record:a) objects diff --git a/plugins/modules/net_tools/nios/nios_aaaa_record.py b/plugins/modules/net_tools/nios/nios_aaaa_record.py index d35b779f10..9b22f86948 100644 --- a/plugins/modules/net_tools/nios/nios_aaaa_record.py +++ b/plugins/modules/net_tools/nios/nios_aaaa_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_aaaa_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS AAAA records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_aaaa_record + removed_in: 5.0.0 description: - Adds and/or removes instances of AAAA record objects from Infoblox NIOS servers. This module manages NIOS C(record:aaaa) objects diff --git a/plugins/modules/net_tools/nios/nios_cname_record.py b/plugins/modules/net_tools/nios/nios_cname_record.py index 2ab38473f3..099cb02572 100644 --- a/plugins/modules/net_tools/nios/nios_cname_record.py +++ b/plugins/modules/net_tools/nios/nios_cname_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_cname_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS CNAME records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_cname_record + removed_in: 5.0.0 description: - Adds and/or removes instances of CNAME record objects from Infoblox NIOS servers. 
This module manages NIOS C(record:cname) objects diff --git a/plugins/modules/net_tools/nios/nios_dns_view.py b/plugins/modules/net_tools/nios/nios_dns_view.py index af5d56d4ca..46c56fc7bb 100644 --- a/plugins/modules/net_tools/nios/nios_dns_view.py +++ b/plugins/modules/net_tools/nios/nios_dns_view.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_dns_view author: "Peter Sprygada (@privateip)" short_description: Configure Infoblox NIOS DNS views +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_dns_view + removed_in: 5.0.0 description: - Adds and/or removes instances of DNS view objects from Infoblox NIOS servers. This module manages NIOS C(view) objects diff --git a/plugins/modules/net_tools/nios/nios_fixed_address.py b/plugins/modules/net_tools/nios/nios_fixed_address.py index cab3b5e1b5..bc2969bbe5 100644 --- a/plugins/modules/net_tools/nios/nios_fixed_address.py +++ b/plugins/modules/net_tools/nios/nios_fixed_address.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_fixed_address author: "Sumit Jaiswal (@sjaiswal)" short_description: Configure Infoblox NIOS DHCP Fixed Address +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. 
+ alternative: infoblox.nios_modules.nios_fixed_address + removed_in: 5.0.0 description: - A fixed address is a specific IP address that a DHCP server always assigns when a lease request comes from a particular diff --git a/plugins/modules/net_tools/nios/nios_host_record.py b/plugins/modules/net_tools/nios/nios_host_record.py index d3e9d3de95..6fed663657 100644 --- a/plugins/modules/net_tools/nios/nios_host_record.py +++ b/plugins/modules/net_tools/nios/nios_host_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_host_record author: "Peter Sprygada (@privateip)" short_description: Configure Infoblox NIOS host records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_host_record + removed_in: 5.0.0 description: - Adds and/or removes instances of host record objects from Infoblox NIOS servers. This module manages NIOS C(record:host) objects diff --git a/plugins/modules/net_tools/nios/nios_member.py b/plugins/modules/net_tools/nios/nios_member.py index f8bf3e2595..186933864a 100644 --- a/plugins/modules/net_tools/nios/nios_member.py +++ b/plugins/modules/net_tools/nios/nios_member.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_member author: "Krishna Vasudevan (@krisvasudevan)" short_description: Configure Infoblox NIOS members +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_member + removed_in: 5.0.0 description: - Adds and/or removes Infoblox NIOS servers. This module manages NIOS C(member) objects using the Infoblox WAPI interface over REST. 
requirements: diff --git a/plugins/modules/net_tools/nios/nios_mx_record.py b/plugins/modules/net_tools/nios/nios_mx_record.py index a5c93b92bf..6e54ff2bda 100644 --- a/plugins/modules/net_tools/nios/nios_mx_record.py +++ b/plugins/modules/net_tools/nios/nios_mx_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_mx_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS MX records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_mx_record + removed_in: 5.0.0 description: - Adds and/or removes instances of MX record objects from Infoblox NIOS servers. This module manages NIOS C(record:mx) objects diff --git a/plugins/modules/net_tools/nios/nios_naptr_record.py b/plugins/modules/net_tools/nios/nios_naptr_record.py index 387dd1dd98..f943d3d6d9 100644 --- a/plugins/modules/net_tools/nios/nios_naptr_record.py +++ b/plugins/modules/net_tools/nios/nios_naptr_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_naptr_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS NAPTR records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_naptr_record + removed_in: 5.0.0 description: - Adds and/or removes instances of NAPTR record objects from Infoblox NIOS servers. 
This module manages NIOS C(record:naptr) objects diff --git a/plugins/modules/net_tools/nios/nios_network.py b/plugins/modules/net_tools/nios/nios_network.py index 98d06a2ede..6a7decb894 100644 --- a/plugins/modules/net_tools/nios/nios_network.py +++ b/plugins/modules/net_tools/nios/nios_network.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_network author: "Peter Sprygada (@privateip)" short_description: Configure Infoblox NIOS network object +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_network + removed_in: 5.0.0 description: - Adds and/or removes instances of network objects from Infoblox NIOS servers. This module manages NIOS C(network) objects diff --git a/plugins/modules/net_tools/nios/nios_network_view.py b/plugins/modules/net_tools/nios/nios_network_view.py index c8925adcfb..a27f8519a0 100644 --- a/plugins/modules/net_tools/nios/nios_network_view.py +++ b/plugins/modules/net_tools/nios/nios_network_view.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_network_view author: "Peter Sprygada (@privateip)" short_description: Configure Infoblox NIOS network views +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_network_view + removed_in: 5.0.0 description: - Adds and/or removes instances of network view objects from Infoblox NIOS servers. 
This module manages NIOS C(networkview) objects diff --git a/plugins/modules/net_tools/nios/nios_nsgroup.py b/plugins/modules/net_tools/nios/nios_nsgroup.py index b56c3f0b8d..8e8cde399c 100644 --- a/plugins/modules/net_tools/nios/nios_nsgroup.py +++ b/plugins/modules/net_tools/nios/nios_nsgroup.py @@ -11,6 +11,10 @@ DOCUMENTATION = ''' --- module: nios_nsgroup short_description: Configure InfoBlox DNS Nameserver Groups +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_nsgroup + removed_in: 5.0.0 extends_documentation_fragment: - community.general.nios diff --git a/plugins/modules/net_tools/nios/nios_ptr_record.py b/plugins/modules/net_tools/nios/nios_ptr_record.py index 04c1370920..22550f129a 100644 --- a/plugins/modules/net_tools/nios/nios_ptr_record.py +++ b/plugins/modules/net_tools/nios/nios_ptr_record.py @@ -11,6 +11,10 @@ DOCUMENTATION = ''' module: nios_ptr_record author: "Trebuchet Clement (@clementtrebuchet)" short_description: Configure Infoblox NIOS PTR records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_ptr_record + removed_in: 5.0.0 description: - Adds and/or removes instances of PTR record objects from Infoblox NIOS servers. This module manages NIOS C(record:ptr) objects diff --git a/plugins/modules/net_tools/nios/nios_srv_record.py b/plugins/modules/net_tools/nios/nios_srv_record.py index 8a12aa7fd3..574a5fcf8b 100644 --- a/plugins/modules/net_tools/nios/nios_srv_record.py +++ b/plugins/modules/net_tools/nios/nios_srv_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_srv_record author: "Blair Rampling (@brampling)" short_description: Configure Infoblox NIOS SRV records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. 
+ alternative: infoblox.nios_modules.nios_srv_record + removed_in: 5.0.0 description: - Adds and/or removes instances of SRV record objects from Infoblox NIOS servers. This module manages NIOS C(record:srv) objects diff --git a/plugins/modules/net_tools/nios/nios_txt_record.py b/plugins/modules/net_tools/nios/nios_txt_record.py index 761a895052..b3267af41f 100644 --- a/plugins/modules/net_tools/nios/nios_txt_record.py +++ b/plugins/modules/net_tools/nios/nios_txt_record.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_txt_record author: "Corey Wanless (@coreywan)" short_description: Configure Infoblox NIOS txt records +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_txt_record + removed_in: 5.0.0 description: - Adds and/or removes instances of txt record objects from Infoblox NIOS servers. This module manages NIOS C(record:txt) objects diff --git a/plugins/modules/net_tools/nios/nios_zone.py b/plugins/modules/net_tools/nios/nios_zone.py index 3c59aab298..f97098351b 100644 --- a/plugins/modules/net_tools/nios/nios_zone.py +++ b/plugins/modules/net_tools/nios/nios_zone.py @@ -10,6 +10,10 @@ DOCUMENTATION = ''' module: nios_zone author: "Peter Sprygada (@privateip)" short_description: Configure Infoblox NIOS DNS zones +deprecated: + why: Please install the infoblox.nios_modules collection and use the corresponding module from it. + alternative: infoblox.nios_modules.nios_zone + removed_in: 5.0.0 description: - Adds and/or removes instances of DNS zone objects from Infoblox NIOS servers. 
This module manages NIOS C(zone_auth) objects diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 5c759d2095..32e13b1a1e 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -41,6 +41,38 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter +plugins/modules/net_tools/nios/nios_a_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_a_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_host_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_host_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_member.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_member.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:deprecation-mismatch 
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_network.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_network.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_network_view.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_network_view.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:invalid-documentation +plugins/modules/net_tools/nios/nios_zone.py validate-modules:deprecation-mismatch +plugins/modules/net_tools/nios/nios_zone.py validate-modules:invalid-documentation plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter plugins/modules/system/launchd.py use-argspec-type-path # False positive From 054eb90ae52b6065caf7d5a52dc887bff06b46a2 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Fri, 14 May 2021 12:30:59 +0430 Subject: [PATCH 0283/3093] gitlab_user: add expires_at option (#2450) * gitlab_user: add expires_at option * Add 
changelog * Add integration test * Add expires_at to addSshKeyToUser function * password is required if state is set to present * Check expires_at will not be added to a present ssh key * add documentation about present ssh key * add expires_at to unit tests * Improve documentation Co-authored-by: Felix Fontein * Only pass expires_at to api when it is not None * Emphasize on SSH public key * Apply felixfontein suggestion Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...450-gitlab_user-add_expires_at_option.yaml | 3 + .../source_control/gitlab/gitlab_user.py | 28 +++- .../targets/gitlab_user/defaults/main.yml | 3 + .../targets/gitlab_user/tasks/main.yml | 12 +- .../targets/gitlab_user/tasks/sshkey.yml | 134 ++++++++++++++++++ .../source_control/gitlab/test_gitlab_user.py | 6 +- 6 files changed, 172 insertions(+), 14 deletions(-) create mode 100644 changelogs/fragments/2450-gitlab_user-add_expires_at_option.yaml create mode 100644 tests/integration/targets/gitlab_user/tasks/sshkey.yml diff --git a/changelogs/fragments/2450-gitlab_user-add_expires_at_option.yaml b/changelogs/fragments/2450-gitlab_user-add_expires_at_option.yaml new file mode 100644 index 0000000000..290e13847a --- /dev/null +++ b/changelogs/fragments/2450-gitlab_user-add_expires_at_option.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - gitlab_user - add ``expires_at`` option (https://github.com/ansible-collections/community.general/issues/2325). diff --git a/plugins/modules/source_control/gitlab/gitlab_user.py b/plugins/modules/source_control/gitlab/gitlab_user.py index 9fefe1aff9..4d300ea842 100644 --- a/plugins/modules/source_control/gitlab/gitlab_user.py +++ b/plugins/modules/source_control/gitlab/gitlab_user.py @@ -57,16 +57,22 @@ options: type: str sshkey_name: description: - - The name of the sshkey + - The name of the SSH public key. type: str sshkey_file: description: - - The ssh key itself. + - The SSH public key itself. 
type: str + sshkey_expires_at: + description: + - The expiration date of the SSH public key in ISO 8601 format C(YYYY-MM-DDTHH:MM:SSZ). + - This is only used when adding new SSH public keys. + type: str + version_added: 3.1.0 group: description: - Id or Full path of parent group in the form of group/name. - - Add user as an member to this group. + - Add user as a member to this group. type: str access_level: description: @@ -254,7 +260,8 @@ class GitLabUser(object): if options['sshkey_name'] and options['sshkey_file']: key_changed = self.addSshKeyToUser(user, { 'name': options['sshkey_name'], - 'file': options['sshkey_file']}) + 'file': options['sshkey_file'], + 'expires_at': options['sshkey_expires_at']}) changed = changed or key_changed # Assign group @@ -295,7 +302,7 @@ class GitLabUser(object): ''' @param user User object - @param sshkey Dict containing sshkey infos {"name": "", "file": ""} + @param sshkey Dict containing sshkey infos {"name": "", "file": "", "expires_at": ""} ''' def addSshKeyToUser(self, user, sshkey): if not self.sshKeyExists(user, sshkey['name']): @@ -303,9 +310,13 @@ class GitLabUser(object): return True try: - user.keys.create({ + parameter = { 'title': sshkey['name'], - 'key': sshkey['file']}) + 'key': sshkey['file'], + } + if sshkey['expires_at'] is not None: + parameter['expires_at'] = sshkey['expires_at'] + user.keys.create(parameter) except gitlab.exceptions.GitlabCreateError as e: self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e)) return True @@ -471,6 +482,7 @@ def main(): email=dict(type='str'), sshkey_name=dict(type='str'), sshkey_file=dict(type='str', no_log=False), + sshkey_expires_at=dict(type='str', no_log=False), group=dict(type='str'), access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]), confirm=dict(type='bool', default=True), @@ -503,6 +515,7 @@ def main(): user_email = module.params['email'] user_sshkey_name = 
module.params['sshkey_name'] user_sshkey_file = module.params['sshkey_file'] + user_sshkey_expires_at = module.params['sshkey_expires_at'] group_path = module.params['group'] access_level = module.params['access_level'] confirm = module.params['confirm'] @@ -549,6 +562,7 @@ def main(): "email": user_email, "sshkey_name": user_sshkey_name, "sshkey_file": user_sshkey_file, + "sshkey_expires_at": user_sshkey_expires_at, "group_path": group_path, "access_level": access_level, "confirm": confirm, diff --git a/tests/integration/targets/gitlab_user/defaults/main.yml b/tests/integration/targets/gitlab_user/defaults/main.yml index a6755cf412..bbe016b0a8 100644 --- a/tests/integration/targets/gitlab_user/defaults/main.yml +++ b/tests/integration/targets/gitlab_user/defaults/main.yml @@ -1,3 +1,6 @@ gitlab_user: ansible_test_user gitlab_user_pass: Secr3tPassw00rd gitlab_user_email: root@localhost +gitlab_sshkey_name: ansibletest +gitlab_sshkey_file: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDI8GIMlrirf+zsvBpxnF0daykP6YEJ5wytZXhDGD2dZXg9Tln0KUSDgreT3FDgoabjlOmG1L/nhu6ML76WCsmc/wnVMlXlDlQpVJSQ2PCxGNs9WRW7Y/Pk6t9KtV/VSYr0LaPgLEU8VkffSUBJezbKa1cssjb4CmRRqcePRNYpgCXdK05TEgFvmXl9qIM8Domf1ak1PlbyMmi/MytzHmnVFzxgUKv5c0Mr+vguCi131gPdh3QSf5AHPLEoO9LcMfu2IO1zvl61wYfsJ0Wn2Fncw+tJQfUin0ffTFgUIsGqki04/YjXyWynjSwQf5Jym4BYM0i2zlDUyRxs4/Tfp4yvJFik42ambzjLK6poq+iCpQReeYih9WZUaZwUQe7zYWhTOuoV7ydsk8+kDRMPidF9K5zWkQnglGrOzdbTqnhxNpwHCg2eSRJ49kPYLOH76g8P7IQvl+zluG0o8Nndir1WcYil4D4CCBskM8WbmrElZH1CRyP/NQMNIf4hFMItTjk= ansible@ansible +gitlab_sshkey_expires_at: 2030-01-01T00:00:00.000Z diff --git a/tests/integration/targets/gitlab_user/tasks/main.yml b/tests/integration/targets/gitlab_user/tasks/main.yml index 6cbcd14c34..dddf7aaea8 100644 --- a/tests/integration/targets/gitlab_user/tasks/main.yml +++ b/tests/integration/targets/gitlab_user/tasks/main.yml @@ -56,7 +56,7 @@ - gitlab_user_state_again.user.is_admin == False -- name: Update User Test => Make User Admin +- name: Update User Test => Make User 
Admin gitlab_user: api_url: "{{ gitlab_host }}" email: "{{ gitlab_user_email }}" @@ -189,8 +189,8 @@ api_url: "{{ gitlab_host }}" validate_certs: False - # note: the only way to check if a password really is what it is expected - # to be is to use it for login, so we use it here instead of the + # note: the only way to check if a password really is what it is expected + # to be is to use it for login, so we use it here instead of the # default token assuming that a user can always change its own password api_username: "{{ gitlab_user }}" api_password: "{{ gitlab_user_pass }}" @@ -205,8 +205,8 @@ - name: Check PW setting return state assert: that: - # note: there is no way to determine if a password has changed or - # not, so it can only be always yellow or always green, we + # note: there is no way to determine if a password has changed or + # not, so it can only be always yellow or always green, we # decided for always green for now - gitlab_user_state is not changed @@ -248,3 +248,5 @@ assert: that: - gitlab_user_state is not changed + +- include_tasks: sshkey.yml diff --git a/tests/integration/targets/gitlab_user/tasks/sshkey.yml b/tests/integration/targets/gitlab_user/tasks/sshkey.yml new file mode 100644 index 0000000000..2d2067e74b --- /dev/null +++ b/tests/integration/targets/gitlab_user/tasks/sshkey.yml @@ -0,0 +1,134 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Create gitlab user with sshkey credentials + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + password: "{{ gitlab_user_pass }}" + validate_certs: false + sshkey_name: "{{ gitlab_sshkey_name }}" + sshkey_file: "{{ 
gitlab_sshkey_file }}" + state: present + register: gitlab_user_sshkey + +- name: Check user has been created correctly + assert: + that: + - gitlab_user_sshkey is changed + +- name: Create gitlab user again + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + password: "{{ gitlab_user_pass }}" + validate_certs: false + sshkey_name: "{{ gitlab_sshkey_name }}" + sshkey_file: "{{ gitlab_sshkey_file }}" + state: present + register: gitlab_user_sshkey_again + +- name: Check state is not changed + assert: + that: + - gitlab_user_sshkey_again is not changed + +- name: Add expires_at to an already created gitlab user with ssh key + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + password: "{{ gitlab_user_pass }}" + validate_certs: false + sshkey_name: "{{ gitlab_sshkey_name }}" + sshkey_file: "{{ gitlab_sshkey_file }}" + sshkey_expires_at: "{{ gitlab_sshkey_expires_at }}" + state: present + register: gitlab_user_created_user_sshkey_expires_at + +- name: Check expires_at will not be added to a present ssh key + assert: + that: + - gitlab_user_created_user_sshkey_expires_at is not changed + +- name: Remove created gitlab user + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + validate_certs: false + state: absent + register: gitlab_user_sshkey_remove + +- name: Check user has been removed correctly + assert: + that: + - gitlab_user_sshkey_remove is changed + +- name: Create gitlab user with sshkey and expires_at + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user 
}}" + password: "{{ gitlab_user_pass }}" + validate_certs: false + sshkey_name: "{{ gitlab_sshkey_name }}" + sshkey_file: "{{ gitlab_sshkey_file }}" + sshkey_expires_at: "{{ gitlab_sshkey_expires_at }}" + state: present + register: gitlab_user_sshkey_expires_at + +- name: Check user has been created correctly + assert: + that: + - gitlab_user_sshkey_expires_at is changed + +- name: Create gitlab user with sshkey and expires_at again + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + password: "{{ gitlab_user_pass }}" + validate_certs: false + sshkey_name: "{{ gitlab_sshkey_name }}" + sshkey_file: "{{ gitlab_sshkey_file }}" + sshkey_expires_at: "{{ gitlab_sshkey_expires_at }}" + state: present + register: gitlab_user_sshkey_expires_at_again + +- name: Check state is not changed + assert: + that: + - gitlab_user_sshkey_expires_at_again is not changed + +- name: Remove created gitlab user + gitlab_user: + api_url: "{{ gitlab_host }}" + api_token: "{{ gitlab_login_token }}" + email: "{{ gitlab_user_email }}" + name: "{{ gitlab_user }}" + username: "{{ gitlab_user }}" + validate_certs: false + state: absent + register: gitlab_user_sshkey_expires_at_remove + +- name: Check user has been removed correctly + assert: + that: + - gitlab_user_sshkey_expires_at_remove is changed diff --git a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py index 4a47654a8c..5722854e17 100644 --- a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py +++ b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py @@ -144,7 +144,8 @@ class TestGitlabUser(GitlabModuleTestCase): 'name': "Public key", 'file': "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJe" 
"jgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4" - "soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0="}) + "soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=", + 'expires_at': ""}) self.assertEqual(rvalue, False) rvalue = self.moduleUtil.addSshKeyToUser(user, { @@ -153,7 +154,8 @@ class TestGitlabUser(GitlabModuleTestCase): "dRuSuA5zszUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+" "xawxKWmI7hJ5S0tOv6MJ+IxyTa4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2j" "TiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH2WOKBw6za0az6XoG75obUdFVdW3qcD0x" - "c809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF"}) + "c809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF", + 'expires_at': "2027-01-01"}) self.assertEqual(rvalue, True) @with_httmock(resp_get_group) From e2dfd42dd49115dfb4b07d484c98dfd7b300da49 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Fri, 14 May 2021 13:03:30 +0430 Subject: [PATCH 0284/3093] proxmox_nic: set mtu on interface even if it's not virtio (#2505) * Set mtu on interface whatsoever * add changelog fragment * Revert "add changelog fragment" This reverts commit 5f2f1e7febd848b1fd095635a85bf5215fbcd17d. 
--- plugins/modules/cloud/misc/proxmox_nic.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/modules/cloud/misc/proxmox_nic.py b/plugins/modules/cloud/misc/proxmox_nic.py index a9c9f14ddc..23be9473eb 100644 --- a/plugins/modules/cloud/misc/proxmox_nic.py +++ b/plugins/modules/cloud/misc/proxmox_nic.py @@ -211,9 +211,8 @@ def update_nic(module, proxmox, vmid, interface, model, **kwargs): config_provided += ',link_down=1' if kwargs['mtu']: - if model == 'virtio': - config_provided += ",mtu={0}".format(kwargs['mtu']) - else: + config_provided += ",mtu={0}".format(kwargs['mtu']) + if model != 'virtio': module.warn( 'Ignoring MTU for nic {0} on VM with vmid {1}, ' 'model should be set to \'virtio\': '.format(interface, vmid)) From 5d0a7f40f2dd87e60b59f4ca7c3390fcc168d9ff Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Fri, 14 May 2021 14:25:27 +0530 Subject: [PATCH 0285/3093] random_pet: Random pet name generator (#2479) A lookup plugin to generate random pet names based upon criteria. 
Signed-off-by: Abhijeet Kasurde --- plugins/lookup/random_pet.py | 99 +++++++++++++++++++ .../targets/lookup_random_pet/aliases | 3 + .../lookup_random_pet/dependencies.yml | 6 ++ .../targets/lookup_random_pet/runme.sh | 9 ++ .../targets/lookup_random_pet/test.yml | 25 +++++ 5 files changed, 142 insertions(+) create mode 100644 plugins/lookup/random_pet.py create mode 100644 tests/integration/targets/lookup_random_pet/aliases create mode 100644 tests/integration/targets/lookup_random_pet/dependencies.yml create mode 100755 tests/integration/targets/lookup_random_pet/runme.sh create mode 100644 tests/integration/targets/lookup_random_pet/test.yml diff --git a/plugins/lookup/random_pet.py b/plugins/lookup/random_pet.py new file mode 100644 index 0000000000..6caf178e4b --- /dev/null +++ b/plugins/lookup/random_pet.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Abhijeet Kasurde +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = r''' + name: random_pet + author: + - Abhijeet Kasurde (@Akasurde) + short_description: Generates random pet names + version_added: '3.1.0' + requirements: + - petname U(https://github.com/dustinkirkland/python-petname) + description: + - Generates random pet names that can be used as unique identifiers for the resources. + options: + words: + description: + - The number of words in the pet name. + default: 2 + type: int + length: + description: + - The maximal length of every component of the pet name. + - Values below 3 will be set to 3 by petname. + default: 6 + type: int + prefix: + description: A string to prefix with the name. + type: str + separator: + description: The character to separate words in the pet name. 
+ default: "-" + type: str +''' + +EXAMPLES = r''' +- name: Generate pet name + ansible.builtin.debug: + var: lookup('community.general.random_pet') + # Example result: 'loving-raptor' + +- name: Generate pet name with 3 words + ansible.builtin.debug: + var: lookup('community.general.random_pet', words=3) + # Example result: 'fully-fresh-macaw' + +- name: Generate pet name with separator + ansible.builtin.debug: + var: lookup('community.general.random_pet', separator="_") + # Example result: 'causal_snipe' + +- name: Generate pet name with length + ansible.builtin.debug: + var: lookup('community.general.random_pet', length=7) + # Example result: 'natural-peacock' +''' + +RETURN = r''' + _raw: + description: A one-element list containing a random pet name + type: list + elements: str +''' + +try: + import petname + + HAS_PETNAME = True +except ImportError: + HAS_PETNAME = False + +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + + if not HAS_PETNAME: + raise AnsibleError('Python petname library is required. 
' + 'Please install using "pip install petname"') + + self.set_options(var_options=variables, direct=kwargs) + words = self.get_option('words') + length = self.get_option('length') + prefix = self.get_option('prefix') + separator = self.get_option('separator') + + values = petname.Generate(words=words, separator=separator, letters=length) + if prefix: + values = "%s%s%s" % (prefix, separator, values) + + return [values] diff --git a/tests/integration/targets/lookup_random_pet/aliases b/tests/integration/targets/lookup_random_pet/aliases new file mode 100644 index 0000000000..bc987654d9 --- /dev/null +++ b/tests/integration/targets/lookup_random_pet/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/lookup_random_pet/dependencies.yml b/tests/integration/targets/lookup_random_pet/dependencies.yml new file mode 100644 index 0000000000..b6b679d966 --- /dev/null +++ b/tests/integration/targets/lookup_random_pet/dependencies.yml @@ -0,0 +1,6 @@ +--- +- hosts: localhost + tasks: + - name: Install Petname Python package + pip: + name: petname \ No newline at end of file diff --git a/tests/integration/targets/lookup_random_pet/runme.sh b/tests/integration/targets/lookup_random_pet/runme.sh new file mode 100755 index 0000000000..afdff7bb9d --- /dev/null +++ b/tests/integration/targets/lookup_random_pet/runme.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +set -eux + +ANSIBLE_ROLES_PATH=../ \ + ansible-playbook dependencies.yml -v "$@" + +ANSIBLE_ROLES_PATH=../ \ + ansible-playbook test.yml -v "$@" diff --git a/tests/integration/targets/lookup_random_pet/test.yml b/tests/integration/targets/lookup_random_pet/test.yml new file mode 100644 index 0000000000..1ab619d2f4 --- /dev/null +++ b/tests/integration/targets/lookup_random_pet/test.yml @@ -0,0 
+1,25 @@ +- hosts: localhost + gather_facts: no + tasks: + - name: Call plugin + set_fact: + result1: "{{ query('community.general.random_pet', words=3) }}" + result2: "{{ query('community.general.random_pet', length=3) }}" + result3: "{{ query('community.general.random_pet', prefix='kubernetes') }}" + result4: "{{ query('community.general.random_pet', separator='_') }}" + result5: "{{ query('community.general.random_pet', words=2, length=6, prefix='kubernetes', separator='_') }}" + + - name: Check results + assert: + that: + - result1 | length == 1 + - result1[0].split('-') | length == 3 + - result2 | length == 1 + - result2[0].split('-')[0] | length <= 3 + - result3 | length == 1 + - result3[0].split('-')[0] == 'kubernetes' + - result4 | length == 1 + - result4[0].split('_') | length == 2 + - result5 | length == 1 + - result5[0].split('_') | length == 3 + - result5[0].split('_')[0] == 'kubernetes' From a385cbb11dd22953451890c9c2157538977972a8 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Fri, 14 May 2021 16:31:44 -0400 Subject: [PATCH 0286/3093] java_keystore: New ssl_backend option for cryptography (#2485) * Adding cryptography as a backend for OpenSSL operations * Updating unit tests and adding changelog fragment * Allowing private key password option when using unprotected key * Incorporating suggestions from initial review * Centralizing module exit path --- ...85-java_keystore-ssl_backend-parameter.yml | 2 + plugins/modules/system/java_keystore.py | 461 ++++++++++++------ .../targets/java_keystore/tasks/main.yml | 13 + .../targets/java_keystore/tasks/tests.yml | 1 + .../modules/system/test_java_keystore.py | 168 ++++--- 5 files changed, 414 insertions(+), 231 deletions(-) create mode 100644 changelogs/fragments/2485-java_keystore-ssl_backend-parameter.yml diff --git a/changelogs/fragments/2485-java_keystore-ssl_backend-parameter.yml b/changelogs/fragments/2485-java_keystore-ssl_backend-parameter.yml new file mode 100644 index 0000000000..b446476f82 --- 
/dev/null +++ b/changelogs/fragments/2485-java_keystore-ssl_backend-parameter.yml @@ -0,0 +1,2 @@ +minor_changes: + - java_keystore - added ``ssl_backend`` parameter for using the cryptography library instead of the OpenSSL binary (https://github.com/ansible-collections/community.general/pull/2485). diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py index ebfe6abdd7..78bcfb6af6 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/system/java_keystore.py @@ -88,9 +88,19 @@ options: description: - Mode the file should be. required: false + ssl_backend: + description: + - Backend for loading private keys and certificates. + type: str + default: openssl + choices: + - openssl + - cryptography + version_added: 3.1.0 requirements: - - openssl in PATH + - openssl in PATH (when I(ssl_backend=openssl)) - keytool in PATH + - cryptography >= 3.0 (when I(ssl_backend=cryptography)) author: - Guillaume Grossetie (@Mogztter) - quidame (@quidame) @@ -164,55 +174,283 @@ import os import re import tempfile -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import PY2 +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text + +try: + from cryptography.hazmat.primitives.serialization.pkcs12 import serialize_key_and_certificates + from cryptography.hazmat.primitives.serialization import ( + BestAvailableEncryption, + NoEncryption, + load_pem_private_key, + load_der_private_key, + ) + from cryptography.x509 import ( + load_pem_x509_certificate, + load_der_x509_certificate, + ) + from cryptography.hazmat.primitives import hashes + from cryptography.exceptions import UnsupportedAlgorithm + from cryptography.hazmat.backends.openssl import backend + HAS_CRYPTOGRAPHY_PKCS12 = True +except ImportError: + HAS_CRYPTOGRAPHY_PKCS12 = False -def read_certificate_fingerprint(module, openssl_bin, 
certificate_path): - current_certificate_fingerprint_cmd = [openssl_bin, "x509", "-noout", "-in", certificate_path, "-fingerprint", "-sha256"] - (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = run_commands(module, current_certificate_fingerprint_cmd) - if rc != 0: - return module.fail_json(msg=current_certificate_fingerprint_out, - err=current_certificate_fingerprint_err, - cmd=current_certificate_fingerprint_cmd, - rc=rc) +class JavaKeystore: + def __init__(self, module): + self.module = module - current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out) - if not current_certificate_match: - return module.fail_json(msg="Unable to find the current certificate fingerprint in %s" % current_certificate_fingerprint_out, - cmd=current_certificate_fingerprint_cmd, - rc=rc) + self.keytool_bin = module.get_bin_path('keytool', True) - return current_certificate_match.group(1) - - -def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_password): - stored_certificate_fingerprint_cmd = [keytool_bin, "-list", "-alias", alias, "-keystore", keystore_path, "-storepass:env", "STOREPASS", "-v"] - (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = run_commands( - module, stored_certificate_fingerprint_cmd, environ_update=dict(STOREPASS=keystore_password)) - if rc != 0: - if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias in stored_certificate_fingerprint_out: - return "alias mismatch" - if re.match(r'keytool error: java\.io\.IOException: [Kk]eystore( was tampered with, or)? 
password was incorrect', - stored_certificate_fingerprint_out): - return "password mismatch" - return module.fail_json(msg=stored_certificate_fingerprint_out, - err=stored_certificate_fingerprint_err, - cmd=stored_certificate_fingerprint_cmd, - rc=rc) - - stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out) - if not stored_certificate_match: - return module.fail_json(msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out, - cmd=stored_certificate_fingerprint_cmd, - rc=rc) - - return stored_certificate_match.group(1) - - -def run_commands(module, cmd, data=None, environ_update=None, check_rc=False): - return module.run_command(cmd, check_rc=check_rc, data=data, environ_update=environ_update) + self.certificate = module.params['certificate'] + self.keypass = module.params['private_key_passphrase'] + self.keystore_path = module.params['dest'] + self.name = module.params['name'] + self.password = module.params['password'] + self.private_key = module.params['private_key'] + self.ssl_backend = module.params['ssl_backend'] + + if self.ssl_backend == 'openssl': + self.openssl_bin = module.get_bin_path('openssl', True) + else: + if not HAS_CRYPTOGRAPHY_PKCS12: + self.module.fail_json(msg=missing_required_lib('cryptography >= 3.0')) + + if module.params['certificate_path'] is None: + self.certificate_path = create_file(self.certificate) + self.module.add_cleanup_file(self.certificate_path) + else: + self.certificate_path = module.params['certificate_path'] + + if module.params['private_key_path'] is None: + self.private_key_path = create_file(self.private_key) + self.module.add_cleanup_file(self.private_key_path) + else: + self.private_key_path = module.params['private_key_path'] + + def update_permissions(self): + try: + file_args = self.module.load_file_common_arguments(self.module.params, path=self.keystore_path) + except TypeError: + # The path argument is only supported in Ansible-base 
2.10+. Fall back to + # pre-2.10 behavior for older Ansible versions. + self.module.params['path'] = self.keystore_path + file_args = self.module.load_file_common_arguments(self.module.params) + return self.module.set_fs_attributes_if_different(file_args, False) + + def read_certificate_fingerprint(self, cert_format='PEM'): + if self.ssl_backend == 'cryptography': + if cert_format == 'PEM': + cert_loader = load_pem_x509_certificate + else: + cert_loader = load_der_x509_certificate + + try: + with open(self.certificate_path, 'rb') as cert_file: + cert = cert_loader( + cert_file.read(), + backend=backend + ) + except (OSError, ValueError) as e: + self.module.fail_json(msg="Unable to read the provided certificate: %s" % to_native(e)) + + fp = hex_decode(cert.fingerprint(hashes.SHA256())).upper() + fingerprint = ':'.join([fp[i:i + 2] for i in range(0, len(fp), 2)]) + else: + current_certificate_fingerprint_cmd = [ + self.openssl_bin, "x509", "-noout", "-in", self.certificate_path, "-fingerprint", "-sha256" + ] + (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = self.module.run_command( + current_certificate_fingerprint_cmd, + environ_update=None, + check_rc=False + ) + if rc != 0: + return self.module.fail_json( + msg=current_certificate_fingerprint_out, + err=current_certificate_fingerprint_err, + cmd=current_certificate_fingerprint_cmd, + rc=rc + ) + + current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out) + if not current_certificate_match: + return self.module.fail_json( + msg="Unable to find the current certificate fingerprint in %s" % ( + current_certificate_fingerprint_out + ), + cmd=current_certificate_fingerprint_cmd, + rc=rc + ) + + fingerprint = current_certificate_match.group(1) + return fingerprint + + def read_stored_certificate_fingerprint(self): + stored_certificate_fingerprint_cmd = [ + self.keytool_bin, "-list", "-alias", self.name, "-keystore", + self.keystore_path, "-storepass:env", 
"STOREPASS", "-v" + ] + (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = self.module.run_command( + stored_certificate_fingerprint_cmd, environ_update=dict(STOREPASS=self.password), check_rc=False) + if rc != 0: + if "keytool error: java.lang.Exception: Alias <%s> does not exist" % self.name \ + in stored_certificate_fingerprint_out: + return "alias mismatch" + if re.match( + r'keytool error: java\.io\.IOException: ' + + '[Kk]eystore( was tampered with, or)? password was incorrect', + stored_certificate_fingerprint_out + ): + return "password mismatch" + return self.module.fail_json( + msg=stored_certificate_fingerprint_out, + err=stored_certificate_fingerprint_err, + cmd=stored_certificate_fingerprint_cmd, + rc=rc + ) + + stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out) + if not stored_certificate_match: + return self.module.fail_json( + msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out, + cmd=stored_certificate_fingerprint_cmd, + rc=rc + ) + + return stored_certificate_match.group(1) + + def cert_changed(self): + current_certificate_fingerprint = self.read_certificate_fingerprint() + stored_certificate_fingerprint = self.read_stored_certificate_fingerprint() + return current_certificate_fingerprint != stored_certificate_fingerprint + + def cryptography_create_pkcs12_bundle(self, keystore_p12_path, key_format='PEM', cert_format='PEM'): + if key_format == 'PEM': + key_loader = load_pem_private_key + else: + key_loader = load_der_private_key + + if cert_format == 'PEM': + cert_loader = load_pem_x509_certificate + else: + cert_loader = load_der_x509_certificate + + try: + with open(self.private_key_path, 'rb') as key_file: + private_key = key_loader( + key_file.read(), + password=to_bytes(self.keypass), + backend=backend + ) + except TypeError: + # Re-attempt with no password to match existing behavior + try: + with open(self.private_key_path, 
'rb') as key_file: + private_key = key_loader( + key_file.read(), + password=None, + backend=backend + ) + except (OSError, TypeError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided private_key: %s" % to_native(e) + ) + except (OSError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided private_key: %s" % to_native(e) + ) + try: + with open(self.certificate_path, 'rb') as cert_file: + cert = cert_loader( + cert_file.read(), + backend=backend + ) + except (OSError, ValueError, UnsupportedAlgorithm) as e: + self.module.fail_json( + msg="The following error occurred while loading the provided certificate: %s" % to_native(e) + ) + + if self.password: + encryption = BestAvailableEncryption(to_bytes(self.password)) + else: + encryption = NoEncryption() + + pkcs12_bundle = serialize_key_and_certificates( + name=to_bytes(self.name), + key=private_key, + cert=cert, + cas=None, + encryption_algorithm=encryption + ) + + with open(keystore_p12_path, 'wb') as p12_file: + p12_file.write(pkcs12_bundle) + + def openssl_create_pkcs12_bundle(self, keystore_p12_path): + export_p12_cmd = [self.openssl_bin, "pkcs12", "-export", "-name", self.name, "-in", self.certificate_path, + "-inkey", self.private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] + + # when keypass is provided, add -passin + cmd_stdin = "" + if self.keypass: + export_p12_cmd.append("-passin") + export_p12_cmd.append("stdin") + cmd_stdin = "%s\n" % self.keypass + cmd_stdin += "%s\n%s" % (self.password, self.password) + + (rc, export_p12_out, dummy) = self.module.run_command( + export_p12_cmd, data=cmd_stdin, environ_update=None, check_rc=False + ) + + if rc != 0: + self.module.fail_json(msg=export_p12_out, cmd=export_p12_cmd, rc=rc) + + def create(self): + if self.module.check_mode: + return {'changed': True} + + if os.path.exists(self.keystore_path): + 
os.remove(self.keystore_path) + + keystore_p12_path = create_path() + self.module.add_cleanup_file(keystore_p12_path) + + if self.ssl_backend == 'cryptography': + self.cryptography_create_pkcs12_bundle(keystore_p12_path) + else: + self.openssl_create_pkcs12_bundle(keystore_p12_path) + + import_keystore_cmd = [self.keytool_bin, "-importkeystore", + "-destkeystore", self.keystore_path, + "-srckeystore", keystore_p12_path, + "-srcstoretype", "pkcs12", + "-alias", self.name, + "-deststorepass:env", "STOREPASS", + "-srcstorepass:env", "STOREPASS", + "-noprompt"] + + (rc, import_keystore_out, dummy) = self.module.run_command( + import_keystore_cmd, data=None, environ_update=dict(STOREPASS=self.password), check_rc=False + ) + if rc != 0: + return self.module.fail_json(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc) + + self.update_permissions() + return { + 'changed': True, + 'msg': import_keystore_out, + 'cmd': import_keystore_cmd, + 'rc': rc + } + + def exists(self): + return os.path.exists(self.keystore_path) +# Utility functions def create_path(): dummy, tmpfile = tempfile.mkstemp() os.remove(tmpfile) @@ -226,123 +464,11 @@ def create_file(content): return tmpfile -def create_tmp_certificate(module): - return create_file(module.params['certificate']) - - -def create_tmp_private_key(module): - return create_file(module.params['private_key']) - - -def cert_changed(module, openssl_bin, keytool_bin, keystore_path, keystore_pass, alias): - certificate_path = module.params['certificate_path'] - if certificate_path is None: - certificate_path = create_tmp_certificate(module) - try: - current_certificate_fingerprint = read_certificate_fingerprint(module, openssl_bin, certificate_path) - stored_certificate_fingerprint = read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_pass) - return current_certificate_fingerprint != stored_certificate_fingerprint - finally: - if module.params['certificate_path'] is None: - 
os.remove(certificate_path) - - -def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass): - if module.check_mode: - return module.exit_json(changed=True) - - certificate_path = module.params['certificate_path'] - if certificate_path is None: - certificate_path = create_tmp_certificate(module) - - private_key_path = module.params['private_key_path'] - if private_key_path is None: - private_key_path = create_tmp_private_key(module) - - keystore_p12_path = create_path() - - try: - if os.path.exists(keystore_path): - os.remove(keystore_path) - - export_p12_cmd = [openssl_bin, "pkcs12", "-export", "-name", name, "-in", certificate_path, - "-inkey", private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] - - # when keypass is provided, add -passin - cmd_stdin = "" - if keypass: - export_p12_cmd.append("-passin") - export_p12_cmd.append("stdin") - cmd_stdin = "%s\n" % keypass - cmd_stdin += "%s\n%s" % (password, password) - - (rc, export_p12_out, dummy) = run_commands(module, export_p12_cmd, data=cmd_stdin) - if rc != 0: - return module.fail_json(msg=export_p12_out, - cmd=export_p12_cmd, - rc=rc) - - import_keystore_cmd = [keytool_bin, "-importkeystore", - "-destkeystore", keystore_path, - "-srckeystore", keystore_p12_path, - "-srcstoretype", "pkcs12", - "-alias", name, - "-deststorepass:env", "STOREPASS", - "-srcstorepass:env", "STOREPASS", - "-noprompt"] - - (rc, import_keystore_out, dummy) = run_commands(module, import_keystore_cmd, data=None, - environ_update=dict(STOREPASS=password)) - if rc != 0: - return module.fail_json(msg=import_keystore_out, - cmd=import_keystore_cmd, - rc=rc) - - update_jks_perm(module, keystore_path) - return module.exit_json(changed=True, - msg=import_keystore_out, - cmd=import_keystore_cmd, - rc=rc) - finally: - if module.params['certificate_path'] is None: - os.remove(certificate_path) - if module.params['private_key_path'] is None: - os.remove(private_key_path) - os.remove(keystore_p12_path) - - 
-def update_jks_perm(module, keystore_path): - try: - file_args = module.load_file_common_arguments(module.params, path=keystore_path) - except TypeError: - # The path argument is only supported in Ansible-base 2.10+. Fall back to - # pre-2.10 behavior for older Ansible versions. - module.params['path'] = keystore_path - file_args = module.load_file_common_arguments(module.params) - module.set_fs_attributes_if_different(file_args, False) - - -def process_jks(module): - name = module.params['name'] - password = module.params['password'] - keypass = module.params['private_key_passphrase'] - keystore_path = module.params['dest'] - force = module.params['force'] - openssl_bin = module.get_bin_path('openssl', True) - keytool_bin = module.get_bin_path('keytool', True) - - if os.path.exists(keystore_path): - if force: - create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass) - else: - if cert_changed(module, openssl_bin, keytool_bin, keystore_path, password, name): - create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass) - else: - if not module.check_mode: - update_jks_perm(module, keystore_path) - module.exit_json(changed=False) +def hex_decode(s): + if PY2: + return s.decode('hex') else: - create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass) + return s.hex() class ArgumentSpec(object): @@ -358,6 +484,7 @@ class ArgumentSpec(object): private_key_path=dict(type='path', no_log=False), private_key_passphrase=dict(type='str', no_log=True), password=dict(type='str', required=True, no_log=True), + ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']), force=dict(type='bool', default=False), ) choose_between = ( @@ -379,7 +506,19 @@ def main(): add_file_common_args=spec.add_file_common_args, ) module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') - process_jks(module) + + result = dict() + jks = JavaKeystore(module) + + if 
jks.exists(): + if module.params['force'] or jks.cert_changed(): + result = jks.create() + else: + result['changed'] = jks.update_permissions() + else: + result = jks.create() + + module.exit_json(**result) if __name__ == '__main__': diff --git a/tests/integration/targets/java_keystore/tasks/main.yml b/tests/integration/targets/java_keystore/tasks/main.yml index 358222aea8..b5f1f01624 100644 --- a/tests/integration/targets/java_keystore/tasks/main.yml +++ b/tests/integration/targets/java_keystore/tasks/main.yml @@ -9,12 +9,22 @@ - name: Include tasks to create ssl materials on the controller include_tasks: prepare.yml +- set_fact: + ssl_backends: ['openssl'] + +- set_fact: + ssl_backends: "{{ ssl_backends + ['cryptography'] }}" + when: cryptography_version.stdout is version('3.0', '>=') + - when: has_java_keytool block: - name: Include tasks to play with 'certificate' and 'private_key' contents include_tasks: tests.yml vars: remote_cert: false + loop: "{{ ssl_backends }}" + loop_control: + loop_var: ssl_backend - name: Include tasks to create ssl materials on the remote host include_tasks: prepare.yml @@ -23,3 +33,6 @@ include_tasks: tests.yml vars: remote_cert: true + loop: "{{ ssl_backends }}" + loop_control: + loop_var: ssl_backend diff --git a/tests/integration/targets/java_keystore/tasks/tests.yml b/tests/integration/targets/java_keystore/tasks/tests.yml index e0de1c6836..b892dd1d29 100644 --- a/tests/integration/targets/java_keystore/tasks/tests.yml +++ b/tests/integration/targets/java_keystore/tasks/tests.yml @@ -23,6 +23,7 @@ private_key_path: "{{ omit if not remote_cert else output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" private_key_passphrase: "{{ item.passphrase | d(omit) }}" password: changeit + ssl_backend: "{{ ssl_backend }}" loop: "{{ java_keystore_certs }}" check_mode: yes register: result_check diff --git a/tests/unit/plugins/modules/system/test_java_keystore.py b/tests/unit/plugins/modules/system/test_java_keystore.py index 
ec14b3734d..5e99074c95 100644 --- a/tests/unit/plugins/modules/system/test_java_keystore.py +++ b/tests/unit/plugins/modules/system/test_java_keystore.py @@ -14,7 +14,7 @@ from ansible_collections.community.general.tests.unit.plugins.modules.utils impo from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible_collections.community.general.tests.unit.compat.mock import Mock from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec +from ansible_collections.community.general.plugins.modules.system.java_keystore import JavaKeystore, ArgumentSpec class TestCreateJavaKeystore(ModuleTestCase): @@ -28,14 +28,16 @@ class TestCreateJavaKeystore(ModuleTestCase): self.spec = ArgumentSpec() self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file') self.mock_create_path = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_path') - self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands') + self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') + self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') self.mock_os_path_exists = patch('os.path.exists', side_effect=lambda path: True if path == '/path/to/keystore.jks' else orig_exists(path)) self.mock_selinux_context = patch('ansible.module_utils.basic.AnsibleModule.selinux_context', side_effect=lambda path: ['unconfined_u', 'object_r', 'user_home_t', 's0']) self.mock_is_special_selinux_path = patch('ansible.module_utils.basic.AnsibleModule.is_special_selinux_path', side_effect=lambda path: (False, None)) - self.run_commands = self.mock_run_commands.start() + self.run_command = self.mock_run_command.start() + self.get_bin_path = 
self.mock_get_bin_path.start() self.create_file = self.mock_create_file.start() self.create_path = self.mock_create_path.start() self.selinux_context = self.mock_selinux_context.start() @@ -47,7 +49,8 @@ class TestCreateJavaKeystore(ModuleTestCase): super(TestCreateJavaKeystore, self).tearDown() self.mock_create_file.stop() self.mock_create_path.stop() - self.mock_run_commands.stop() + self.mock_run_command.stop() + self.mock_get_bin_path.stop() self.mock_selinux_context.stop() self.mock_is_special_selinux_path.stop() self.mock_os_path_exists.stop() @@ -57,7 +60,38 @@ class TestCreateJavaKeystore(ModuleTestCase): certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', - name='foo', + name='test', + password='changeit' + )) + + module = AnsibleModule( + argument_spec=self.spec.argument_spec, + supports_check_mode=self.spec.supports_check_mode + ) + + with patch('os.remove', return_value=True): + self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] + self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp', ''] + self.run_command.side_effect = [(0, '', ''), (0, '', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + assert jks.create() == { + 'changed': True, + 'cmd': ["keytool", "-importkeystore", + "-destkeystore", "/path/to/keystore.jks", + "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", + "-deststorepass:env", "STOREPASS", "-srcstorepass:env", "STOREPASS", "-noprompt"], + 'msg': '', + 'rc': 0 + } + + def test_create_jks_keypass_fail_export_pkcs12(self): + set_module_args(dict( + certificate='cert-foo', + private_key='private-foo', + private_key_passphrase='passphrase-foo', + dest='/path/to/keystore.jks', + name='test', password='changeit' )) @@ -67,44 +101,15 @@ class TestCreateJavaKeystore(ModuleTestCase): ) module.exit_json = Mock() - - with patch('os.remove', return_value=True): - self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] - 
self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp'] - self.run_commands.side_effect = [(0, '', ''), (0, '', '')] - create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "") - module.exit_json.assert_called_once_with( - changed=True, - cmd=["keytool", "-importkeystore", - "-destkeystore", "/path/to/keystore.jks", - "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", - "-deststorepass:env", "STOREPASS", "-srcstorepass:env", "STOREPASS", "-noprompt"], - msg='', - rc=0 - ) - - def test_create_jks_keypass_fail_export_pkcs12(self): - set_module_args(dict( - certificate='cert-foo', - private_key='private-foo', - private_key_passphrase='passphrase-foo', - dest='/path/to/keystore.jks', - name='foo', - password='changeit' - )) - - module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode - ) - module.fail_json = Mock() with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] - self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c'] - self.run_commands.side_effect = [(1, '', ''), (0, '', '')] - create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "passphrase-foo") + self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', ''] + self.run_command.side_effect = [(1, '', ''), (0, '', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + jks.create() module.fail_json.assert_called_once_with( cmd=["openssl", "pkcs12", "-export", "-name", "test", "-in", "/tmp/tmpvalcrt32", @@ -121,7 +126,7 @@ class TestCreateJavaKeystore(ModuleTestCase): certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', - name='foo', + name='test', password='changeit' )) @@ -130,13 +135,16 @@ class TestCreateJavaKeystore(ModuleTestCase): supports_check_mode=self.spec.supports_check_mode ) + 
module.exit_json = Mock() module.fail_json = Mock() with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] - self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c'] - self.run_commands.side_effect = [(1, '', ''), (0, '', '')] - create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "") + self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', ''] + self.run_command.side_effect = [(1, '', ''), (0, '', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + jks.create() module.fail_json.assert_called_once_with( cmd=["openssl", "pkcs12", "-export", "-name", "test", "-in", "/tmp/tmpvalcrt32", @@ -152,7 +160,7 @@ class TestCreateJavaKeystore(ModuleTestCase): certificate='cert-foo', private_key='private-foo', dest='/path/to/keystore.jks', - name='foo', + name='test', password='changeit' )) @@ -161,13 +169,16 @@ class TestCreateJavaKeystore(ModuleTestCase): supports_check_mode=self.spec.supports_check_mode ) + module.exit_json = Mock() module.fail_json = Mock() with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] - self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp'] - self.run_commands.side_effect = [(0, '', ''), (1, '', '')] - create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "") + self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp', ''] + self.run_command.side_effect = [(0, '', ''), (1, '', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + jks.create() module.fail_json.assert_called_once_with( cmd=["keytool", "-importkeystore", "-destkeystore", "/path/to/keystore.jks", @@ -186,15 +197,18 @@ class TestCertChanged(ModuleTestCase): super(TestCertChanged, self).setUp() self.spec = ArgumentSpec() self.mock_create_file = 
patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file') - self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands') - self.run_commands = self.mock_run_commands.start() + self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') + self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.run_command = self.mock_run_command.start() self.create_file = self.mock_create_file.start() + self.get_bin_path = self.mock_get_bin_path.start() def tearDown(self): """Teardown.""" super(TestCertChanged, self).tearDown() self.mock_create_file.stop() - self.mock_run_commands.stop() + self.mock_run_command.stop() + self.mock_get_bin_path.stop() def test_cert_unchanged_same_fingerprint(self): set_module_args(dict( @@ -211,9 +225,11 @@ class TestCertChanged(ModuleTestCase): ) with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder'] - self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')] - result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.create_file.side_effect = ['/tmp/placeholder', ''] + self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + result = jks.cert_changed() self.assertFalse(result, 'Fingerprint is identical') def test_cert_changed_fingerprint_mismatch(self): @@ -231,9 +247,11 @@ class TestCertChanged(ModuleTestCase): ) with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder'] - self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')] - result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + 
self.create_file.side_effect = ['/tmp/placeholder', ''] + self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + result = jks.cert_changed() self.assertTrue(result, 'Fingerprint mismatch') def test_cert_changed_fail_alias_does_not_exist(self): @@ -251,10 +269,12 @@ class TestCertChanged(ModuleTestCase): ) with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder'] - self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), - (1, 'keytool error: java.lang.Exception: Alias does not exist', '')] - result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.create_file.side_effect = ['/tmp/placeholder', ''] + self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), + (1, 'keytool error: java.lang.Exception: Alias does not exist', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + result = jks.cert_changed() self.assertTrue(result, 'Alias mismatch detected') def test_cert_changed_password_mismatch(self): @@ -272,10 +292,12 @@ class TestCertChanged(ModuleTestCase): ) with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder'] - self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), - (1, 'keytool error: java.io.IOException: Keystore password was incorrect', '')] - result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.create_file.side_effect = ['/tmp/placeholder', ''] + self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), + (1, 'keytool error: java.io.IOException: Keystore password was incorrect', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + result = jks.cert_changed() self.assertTrue(result, 'Password mismatch detected') def 
test_cert_changed_fail_read_cert(self): @@ -292,12 +314,15 @@ class TestCertChanged(ModuleTestCase): supports_check_mode=self.spec.supports_check_mode ) + module.exit_json = Mock() module.fail_json = Mock() with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/tmpdj6bvvme'] - self.run_commands.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')] - cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.create_file.side_effect = ['/tmp/tmpdj6bvvme', ''] + self.run_command.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + jks.cert_changed() module.fail_json.assert_called_once_with( cmd=["openssl", "x509", "-noout", "-in", "/tmp/tmpdj6bvvme", "-fingerprint", "-sha256"], msg='', @@ -319,12 +344,15 @@ class TestCertChanged(ModuleTestCase): supports_check_mode=self.spec.supports_check_mode ) + module.exit_json = Mock() module.fail_json = Mock(return_value=True) with patch('os.remove', return_value=True): - self.create_file.side_effect = ['/tmp/placeholder'] - self.run_commands.side_effect = [(0, 'foo: wxyz:9876:stuv', ''), (1, '', 'Oops')] - cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo') + self.create_file.side_effect = ['/tmp/placeholder', ''] + self.run_command.side_effect = [(0, 'foo: wxyz:9876:stuv', ''), (1, '', 'Oops')] + self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + jks = JavaKeystore(module) + jks.cert_changed() module.fail_json.assert_called_with( cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-storepass:env", "STOREPASS", "-v"], msg='', From c8f402806fe1f7d8ee8a0a716c986b1b47860cae Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 16 May 2021 23:24:37 +1200 Subject: [PATCH 0287/3093] Cleanup connections plugins (#2520) * minor refactors 
* minor refactors in plugins/connection/saltstack.py * minor refactors in plugins/connection/qubes.py * minor refactor in plugins/connection/lxc.py * minor refactors in plugins/connection/chroot.py * minor refactors in plugins/connection/funcd.py * minor refactors in plugins/connection/iocage.py * minor refactors in plugins/connection/jail.py * added changelog fragment --- .../fragments/2520-connection-refactors.yml | 9 ++++++ plugins/connection/chroot.py | 27 ++++++++-------- plugins/connection/funcd.py | 18 ++++++----- plugins/connection/iocage.py | 2 +- plugins/connection/jail.py | 26 +++++++-------- plugins/connection/lxc.py | 10 +++--- plugins/connection/qubes.py | 8 +---- plugins/connection/saltstack.py | 32 ++++++++----------- plugins/connection/zone.py | 27 ++++++++-------- 9 files changed, 79 insertions(+), 80 deletions(-) create mode 100644 changelogs/fragments/2520-connection-refactors.yml diff --git a/changelogs/fragments/2520-connection-refactors.yml b/changelogs/fragments/2520-connection-refactors.yml new file mode 100644 index 0000000000..2e5c8273d7 --- /dev/null +++ b/changelogs/fragments/2520-connection-refactors.yml @@ -0,0 +1,9 @@ +minor_changes: + - chroot connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - funcd connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - iocage connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - jail connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - lxc connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). 
+ - qubes connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - saltstack connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). + - zone connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520). diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index ffaea2b198..a18506cb80 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -62,7 +62,7 @@ display = Display() class Connection(ConnectionBase): - ''' Local chroot based connections ''' + """ Local chroot based connections """ transport = 'community.general.chroot' has_pipelining = True @@ -95,7 +95,7 @@ class Connection(ConnectionBase): raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot) def _connect(self): - ''' connect to the chroot ''' + """ connect to the chroot """ if os.path.isabs(self.get_option('chroot_exe')): self.chroot_cmd = self.get_option('chroot_exe') else: @@ -110,17 +110,17 @@ class Connection(ConnectionBase): self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): - ''' run a command on the chroot. This is only needed for implementing + """ run a command on the chroot. This is only needed for implementing put_file() get_file() so that we don't have to read the whole file into memory. compared to exec_command() it looses some niceties like being able to return the process's exit code immediately. 
- ''' + """ executable = self.get_option('executable') local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] - display.vvv("EXEC %s" % (local_cmd), host=self.chroot) + display.vvv("EXEC %s" % local_cmd, host=self.chroot) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -128,16 +128,17 @@ class Connection(ConnectionBase): return p def exec_command(self, cmd, in_data=None, sudoable=False): - ''' run a command on the chroot ''' + """ run a command on the chroot """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) p = self._buffered_exec_command(cmd) stdout, stderr = p.communicate(in_data) - return (p.returncode, stdout, stderr) + return p.returncode, stdout, stderr - def _prefix_login_path(self, remote_path): - ''' Make sure that we put files into a standard path + @staticmethod + def _prefix_login_path(remote_path): + """ Make sure that we put files into a standard path If a path is relative, then we need to choose where to put it. ssh chooses $HOME but we aren't guaranteed that a home dir will @@ -145,13 +146,13 @@ class Connection(ConnectionBase): This also happens to be the former default. 
Can revisit using $HOME instead if it's a problem - ''' + """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) return os.path.normpath(remote_path) def put_file(self, in_path, out_path): - ''' transfer a file from local to chroot ''' + """ transfer a file from local to chroot """ super(Connection, self).put_file(in_path, out_path) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot) @@ -177,7 +178,7 @@ class Connection(ConnectionBase): raise AnsibleError("file or module does not exist at: %s" % in_path) def fetch_file(self, in_path, out_path): - ''' fetch a file from chroot to local ''' + """ fetch a file from chroot to local """ super(Connection, self).fetch_file(in_path, out_path) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot) @@ -201,6 +202,6 @@ class Connection(ConnectionBase): raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) def close(self): - ''' terminate the connection; nothing to do here ''' + """ terminate the connection; nothing to do here """ super(Connection, self).close() self._connected = False diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 3aed7145cb..109e251146 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -44,7 +44,7 @@ display = Display() class Connection(ConnectionBase): - ''' Func-based connections ''' + """ Func-based connections """ has_pipelining = False @@ -53,6 +53,7 @@ class Connection(ConnectionBase): self.host = host # port is unused, this go on func self.port = port + self.client = None def connect(self, port=None): if not HAVE_FUNC: @@ -62,31 +63,32 @@ class Connection(ConnectionBase): return self def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): - ''' run a command on the remote minion ''' + """ run a command on the remote minion """ if in_data: raise AnsibleError("Internal Error: this 
module does not support optimized module pipelining") # totally ignores privlege escalation - display.vvv("EXEC %s" % (cmd), host=self.host) + display.vvv("EXEC %s" % cmd, host=self.host) p = self.client.command.run(cmd)[self.host] - return (p[0], p[1], p[2]) + return p[0], p[1], p[2] - def _normalize_path(self, path, prefix): + @staticmethod + def _normalize_path(path, prefix): if not path.startswith(os.path.sep): path = os.path.join(os.path.sep, path) normpath = os.path.normpath(path) return os.path.join(prefix, normpath[1:]) def put_file(self, in_path, out_path): - ''' transfer a file from local to remote ''' + """ transfer a file from local to remote """ out_path = self._normalize_path(out_path, '/') display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) self.client.local.copyfile.send(in_path, out_path) def fetch_file(self, in_path, out_path): - ''' fetch a file from remote to local ''' + """ fetch a file from remote to local """ in_path = self._normalize_path(in_path, '/') display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) @@ -99,5 +101,5 @@ class Connection(ConnectionBase): shutil.rmtree(tmpdir) def close(self): - ''' terminate the connection; nothing to do here ''' + """ terminate the connection; nothing to do here """ pass diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index 435c789fd2..beb440eae3 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -40,7 +40,7 @@ display = Display() class Connection(Jail): - ''' Local iocage based connections ''' + """ Local iocage based connections """ transport = 'community.general.iocage' diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index 5252e3c4eb..f5d787b62f 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -35,7 +35,6 @@ import os import os.path import subprocess import traceback -import ansible.constants as C from ansible.errors import AnsibleError from ansible.module_utils.six.moves 
import shlex_quote @@ -47,7 +46,7 @@ display = Display() class Connection(ConnectionBase): - ''' Local BSD Jail based connections ''' + """ Local BSD Jail based connections """ modified_jailname_key = 'conn_jail_name' @@ -90,20 +89,20 @@ class Connection(ConnectionBase): return to_text(stdout, errors='surrogate_or_strict').split() def _connect(self): - ''' connect to the jail; nothing to do here ''' + """ connect to the jail; nothing to do here """ super(Connection, self)._connect() if not self._connected: display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): - ''' run a command on the jail. This is only needed for implementing + """ run a command on the jail. This is only needed for implementing put_file() get_file() so that we don't have to read the whole file into memory. compared to exec_command() it looses some niceties like being able to return the process's exit code immediately. - ''' + """ local_cmd = [self.jexec_cmd] set_env = '' @@ -123,16 +122,17 @@ class Connection(ConnectionBase): return p def exec_command(self, cmd, in_data=None, sudoable=False): - ''' run a command on the jail ''' + """ run a command on the jail """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) p = self._buffered_exec_command(cmd) stdout, stderr = p.communicate(in_data) - return (p.returncode, stdout, stderr) + return p.returncode, stdout, stderr - def _prefix_login_path(self, remote_path): - ''' Make sure that we put files into a standard path + @staticmethod + def _prefix_login_path(remote_path): + """ Make sure that we put files into a standard path If a path is relative, then we need to choose where to put it. ssh chooses $HOME but we aren't guaranteed that a home dir will @@ -140,13 +140,13 @@ class Connection(ConnectionBase): This also happens to be the former default. 
Can revisit using $HOME instead if it's a problem - ''' + """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) return os.path.normpath(remote_path) def put_file(self, in_path, out_path): - ''' transfer a file from local to jail ''' + """ transfer a file from local to jail """ super(Connection, self).put_file(in_path, out_path) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail) @@ -172,7 +172,7 @@ class Connection(ConnectionBase): raise AnsibleError("file or module does not exist at: %s" % in_path) def fetch_file(self, in_path, out_path): - ''' fetch a file from jail to local ''' + """ fetch a file from jail to local """ super(Connection, self).fetch_file(in_path, out_path) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail) @@ -196,6 +196,6 @@ class Connection(ConnectionBase): raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr))) def close(self): - ''' terminate the connection; nothing to do here ''' + """ terminate the connection; nothing to do here """ super(Connection, self).close() self._connected = False diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index 8de1acc35d..6512a87c6d 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -42,14 +42,13 @@ try: except ImportError: pass -from ansible import constants as C from ansible import errors from ansible.module_utils._text import to_bytes, to_native from ansible.plugins.connection import ConnectionBase class Connection(ConnectionBase): - ''' Local lxc based connections ''' + """ Local lxc based connections """ transport = 'community.general.lxc' has_pipelining = True @@ -62,7 +61,7 @@ class Connection(ConnectionBase): self.container = None def _connect(self): - ''' connect to the lxc; nothing to do here ''' + """ connect to the lxc; nothing to do here """ super(Connection, self)._connect() if not HAS_LIBLXC: @@ -77,7 +76,8 @@ 
class Connection(ConnectionBase): if self.container.state == "STOPPED": raise errors.AnsibleError("%s is not running" % self.container_name) - def _communicate(self, pid, in_data, stdin, stdout, stderr): + @staticmethod + def _communicate(pid, in_data, stdin, stdout, stderr): buf = {stdout: [], stderr: []} read_fds = [stdout, stderr] if in_data: @@ -111,7 +111,7 @@ class Connection(ConnectionBase): return fd def exec_command(self, cmd, in_data=None, sudoable=False): - ''' run a command on the chroot ''' + """ run a command on the chroot """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) # python2-lxc needs bytes. python3-lxc needs text. diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index aa0075b674..d3f934b601 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -37,15 +37,9 @@ DOCUMENTATION = ''' # - name: hosts ''' -import shlex -import shutil - -import os -import base64 import subprocess -import ansible.constants as C -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils._text import to_bytes from ansible.plugins.connection import ConnectionBase, ensure_connect from ansible.errors import AnsibleConnectionFailure from ansible.utils.display import Display diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index 6be7a79949..f8e3680aea 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -16,14 +16,11 @@ DOCUMENTATION = ''' - This allows you to use existing Saltstack infrastructure to connect to targets. 
''' -import re import os -import pty -import codecs -import subprocess +import base64 -from ansible.module_utils._text import to_bytes, to_text -from ansible.module_utils.six.moves import cPickle +from ansible import errors +from ansible.plugins.connection import ConnectionBase HAVE_SALTSTACK = False try: @@ -32,13 +29,9 @@ try: except ImportError: pass -import os -from ansible import errors -from ansible.plugins.connection import ConnectionBase - class Connection(ConnectionBase): - ''' Salt-based connections ''' + """ Salt-based connections """ has_pipelining = False # while the name of the product is salt, naming that module salt cause @@ -58,29 +51,30 @@ class Connection(ConnectionBase): return self def exec_command(self, cmd, sudoable=False, in_data=None): - ''' run a command on the remote minion ''' + """ run a command on the remote minion """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - self._display.vvv("EXEC %s" % (cmd), host=self.host) + self._display.vvv("EXEC %s" % cmd, host=self.host) # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077 res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd]) if self.host not in res: raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host) p = res[self.host] - return (p['retcode'], p['stdout'], p['stderr']) + return p['retcode'], p['stdout'], p['stderr'] - def _normalize_path(self, path, prefix): + @staticmethod + def _normalize_path(path, prefix): if not path.startswith(os.path.sep): path = os.path.join(os.path.sep, path) normpath = os.path.normpath(path) return os.path.join(prefix, normpath[1:]) def put_file(self, in_path, out_path): - ''' transfer a file from local to remote ''' + """ transfer a file from local to remote """ super(Connection, 
self).put_file(in_path, out_path) @@ -88,11 +82,11 @@ class Connection(ConnectionBase): self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) with open(in_path, 'rb') as in_fh: content = in_fh.read() - self.client.cmd(self.host, 'hashutil.base64_decodefile', [codecs.encode(content, 'base64'), out_path]) + self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path]) # TODO test it def fetch_file(self, in_path, out_path): - ''' fetch a file from remote to local ''' + """ fetch a file from remote to local """ super(Connection, self).fetch_file(in_path, out_path) @@ -102,5 +96,5 @@ class Connection(ConnectionBase): open(out_path, 'wb').write(content) def close(self): - ''' terminate the connection; nothing to do here ''' + """ terminate the connection; nothing to do here """ pass diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index 7a7a36331d..b101ec5cf3 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -31,7 +31,6 @@ import os.path import subprocess import traceback -from ansible import constants as C from ansible.errors import AnsibleError from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils._text import to_bytes @@ -42,7 +41,7 @@ display = Display() class Connection(ConnectionBase): - ''' Local zone based connections ''' + """ Local zone based connections """ transport = 'community.general.zone' has_pipelining = True @@ -75,9 +74,9 @@ class Connection(ConnectionBase): stdout=subprocess.PIPE, stderr=subprocess.PIPE) zones = [] - for l in process.stdout.readlines(): + for line in process.stdout.readlines(): # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared - s = l.split(':') + s = line.split(':') if s[1] != 'global': zones.append(s[1]) @@ -95,20 +94,20 @@ class Connection(ConnectionBase): return path + '/root' def _connect(self): - ''' connect to the zone; nothing to do here ''' + """ connect to the zone; 
nothing to do here """ super(Connection, self)._connect() if not self._connected: display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): - ''' run a command on the zone. This is only needed for implementing + """ run a command on the zone. This is only needed for implementing put_file() get_file() so that we don't have to read the whole file into memory. compared to exec_command() it looses some niceties like being able to return the process's exit code immediately. - ''' + """ # NOTE: zlogin invokes a shell (just like ssh does) so we do not pass # this through /bin/sh -c here. Instead it goes through the shell # that zlogin selects. @@ -122,16 +121,16 @@ class Connection(ConnectionBase): return p def exec_command(self, cmd, in_data=None, sudoable=False): - ''' run a command on the zone ''' + """ run a command on the zone """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) p = self._buffered_exec_command(cmd) stdout, stderr = p.communicate(in_data) - return (p.returncode, stdout, stderr) + return p.returncode, stdout, stderr def _prefix_login_path(self, remote_path): - ''' Make sure that we put files into a standard path + """ Make sure that we put files into a standard path If a path is relative, then we need to choose where to put it. ssh chooses $HOME but we aren't guaranteed that a home dir will @@ -139,13 +138,13 @@ class Connection(ConnectionBase): This also happens to be the former default. 
Can revisit using $HOME instead if it's a problem - ''' + """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) return os.path.normpath(remote_path) def put_file(self, in_path, out_path): - ''' transfer a file from local to zone ''' + """ transfer a file from local to zone """ super(Connection, self).put_file(in_path, out_path) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone) @@ -171,7 +170,7 @@ class Connection(ConnectionBase): raise AnsibleError("file or module does not exist at: %s" % in_path) def fetch_file(self, in_path, out_path): - ''' fetch a file from zone to local ''' + """ fetch a file from zone to local """ super(Connection, self).fetch_file(in_path, out_path) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone) @@ -195,6 +194,6 @@ class Connection(ConnectionBase): raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) def close(self): - ''' terminate the connection; nothing to do here ''' + """ terminate the connection; nothing to do here """ super(Connection, self).close() self._connected = False From 5b7751530819ce066f8edf899ef3ecd2d7791e6d Mon Sep 17 00:00:00 2001 From: iridian <442359+iridian-ks@users.noreply.github.com> Date: Sun, 16 May 2021 22:32:51 -0700 Subject: [PATCH 0288/3093] 1085 updating the hcl whitelist to include all supported options (#2495) * 1085 updating the hcl whitelist to include all supported options * Update changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml Co-authored-by: Felix Fontein Co-authored-by: Dillon Gilmore Co-authored-by: Felix Fontein --- .../1085-consul-acl-hcl-whitelist-update.yml | 2 ++ .../modules/clustering/consul/consul_acl.py | 19 ++++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml diff --git a/changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml 
b/changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml new file mode 100644 index 0000000000..78db43da7d --- /dev/null +++ b/changelogs/fragments/1085-consul-acl-hcl-whitelist-update.yml @@ -0,0 +1,2 @@ +bugfixes: + - consul_acl - update the hcl allowlist to include all supported options (https://github.com/ansible-collections/community.general/pull/2495). diff --git a/plugins/modules/clustering/consul/consul_acl.py b/plugins/modules/clustering/consul/consul_acl.py index cb5395ed31..5a37ca0eb9 100644 --- a/plugins/modules/clustering/consul/consul_acl.py +++ b/plugins/modules/clustering/consul/consul_acl.py @@ -189,7 +189,24 @@ from collections import defaultdict from ansible.module_utils.basic import to_text, AnsibleModule -RULE_SCOPES = ["agent", "event", "key", "keyring", "node", "operator", "query", "service", "session"] +RULE_SCOPES = [ + "agent", + "agent_prefix", + "event", + "event_prefix", + "key", + "key_prefix", + "keyring", + "node", + "node_prefix", + "operator", + "query", + "query_prefix", + "service", + "service_prefix", + "session", + "session_prefix", +] MANAGEMENT_PARAMETER_NAME = "mgmt_token" HOST_PARAMETER_NAME = "host" From ea200c9d8c35ac084a5f4841ff82558572d14ee3 Mon Sep 17 00:00:00 2001 From: sgalea87 <43749726+sgalea87@users.noreply.github.com> Date: Mon, 17 May 2021 07:33:40 +0200 Subject: [PATCH 0289/3093] Update influxdb_user.py Fixed Multiple No Privileges (#2499) * Update influxdb_user.py Fixed Multiple No Privileges * Update influxdb_user.py Fixed line spaces * Update influxdb_user.py Fixed whitespace * Create 2499-influxdb_user-fix-multiple-no-privileges.yml Added changelog --- ...99-influxdb_user-fix-multiple-no-privileges.yml | 2 ++ plugins/modules/database/influxdb/influxdb_user.py | 14 +++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/2499-influxdb_user-fix-multiple-no-privileges.yml diff --git a/changelogs/fragments/2499-influxdb_user-fix-multiple-no-privileges.yml 
b/changelogs/fragments/2499-influxdb_user-fix-multiple-no-privileges.yml new file mode 100644 index 0000000000..d4575ea711 --- /dev/null +++ b/changelogs/fragments/2499-influxdb_user-fix-multiple-no-privileges.yml @@ -0,0 +1,2 @@ +bugfixes: + - influxdb_user - fix bug where an influxdb user has no privileges for 2 or more databases (https://github.com/ansible-collections/community.general/pull/2499). diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py index e17e3753f2..8aec04533b 100644 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ b/plugins/modules/database/influxdb/influxdb_user.py @@ -166,16 +166,16 @@ def set_user_grants(module, client, user_name, grants): try: current_grants = client.get_list_privileges(user_name) + parsed_grants = [] # Fix privileges wording for i, v in enumerate(current_grants): - if v['privilege'] == 'ALL PRIVILEGES': - v['privilege'] = 'ALL' - current_grants[i] = v - elif v['privilege'] == 'NO PRIVILEGES': - del(current_grants[i]) + if v['privilege'] != 'NO PRIVILEGES': + if v['privilege'] == 'ALL PRIVILEGES': + v['privilege'] = 'ALL' + parsed_grants.add(v) # check if the current grants are included in the desired ones - for current_grant in current_grants: + for current_grant in parsed_grants: if current_grant not in grants: if not module.check_mode: client.revoke_privilege(current_grant['privilege'], @@ -185,7 +185,7 @@ def set_user_grants(module, client, user_name, grants): # check if the desired grants are included in the current ones for grant in grants: - if grant not in current_grants: + if grant not in parsed_grants: if not module.check_mode: client.grant_privilege(grant['privilege'], grant['database'], From 448b8cbcda019e1d89eb715dfb6fc2f754440613 Mon Sep 17 00:00:00 2001 From: Dennis Israelsson Date: Mon, 17 May 2021 07:35:15 +0200 Subject: [PATCH 0290/3093] fix error when cache is disabled (#2518) --- 
changelogs/fragments/2518-nmap-fix-cache-disabled.yml | 2 ++ plugins/inventory/nmap.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2518-nmap-fix-cache-disabled.yml diff --git a/changelogs/fragments/2518-nmap-fix-cache-disabled.yml b/changelogs/fragments/2518-nmap-fix-cache-disabled.yml new file mode 100644 index 0000000000..8f4680b6a6 --- /dev/null +++ b/changelogs/fragments/2518-nmap-fix-cache-disabled.yml @@ -0,0 +1,2 @@ +bugfixes: + - nmap inventory plugin - fix local variable error when cache is disabled (https://github.com/ansible-collections/community.general/issues/2512). diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 687317abfa..39a6ff3a67 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -130,7 +130,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated cache_needs_update = True - if cache_needs_update: + if not user_cache_setting or cache_needs_update: # setup command cmd = [self._nmap] if not self._options['ports']: @@ -207,6 +207,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): except Exception as e: raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e))) + if cache_needs_update: self._cache[cache_key] = results self._populate(results) From 2cc848fe1a32959d54b23afe2aa153c2fd79b35c Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 17 May 2021 08:11:17 +0200 Subject: [PATCH 0291/3093] Use --assumeyes with explicit yum call. 
(#2533) --- tests/integration/targets/yum_versionlock/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/targets/yum_versionlock/tasks/main.yml b/tests/integration/targets/yum_versionlock/tasks/main.yml index dda5a11bf0..3ea170b145 100644 --- a/tests/integration/targets/yum_versionlock/tasks/main.yml +++ b/tests/integration/targets/yum_versionlock/tasks/main.yml @@ -24,7 +24,7 @@ register: lock_all_packages - name: Update all packages - command: yum update --setopt=obsoletes=0 + command: yum update --assumeyes --setopt=obsoletes=0 register: update_all_locked_packages changed_when: - '"No packages marked for update" not in update_all_locked_packages.stdout' From da7e4e1dc2306ae047cac87912f5b7c805ee2233 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Mon, 17 May 2021 12:32:20 +0430 Subject: [PATCH 0292/3093] yum_versionlock: disable fedora34 integration test (#2536) * Disable yum_versionlock integration test on Fedora 34 * Remove --assumeyes and add a comment regarding this * Update update task name --- tests/integration/targets/yum_versionlock/tasks/main.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/integration/targets/yum_versionlock/tasks/main.yml b/tests/integration/targets/yum_versionlock/tasks/main.yml index 3ea170b145..4084bdcb91 100644 --- a/tests/integration/targets/yum_versionlock/tasks/main.yml +++ b/tests/integration/targets/yum_versionlock/tasks/main.yml @@ -23,8 +23,9 @@ state: present register: lock_all_packages - - name: Update all packages - command: yum update --assumeyes --setopt=obsoletes=0 + # This should fail when it needs user interaction and missing -y is on purpose. 
+ - name: Update all packages (not really) + command: yum update --setopt=obsoletes=0 register: update_all_locked_packages changed_when: - '"No packages marked for update" not in update_all_locked_packages.stdout' @@ -59,4 +60,4 @@ state: absent when: yum_versionlock_install is changed when: (ansible_distribution in ['CentOS', 'RedHat'] and ansible_distribution_major_version is version('7', '>=')) or - (ansible_distribution == 'Fedora') + (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('33', '<=')) From 350380ba8c91030b69ec4fe2b087fb62ee82c389 Mon Sep 17 00:00:00 2001 From: Jan Baier <7996094+baierjan@users.noreply.github.com> Date: Mon, 17 May 2021 13:50:40 +0200 Subject: [PATCH 0293/3093] Add option missing to passwordstore lookup (#2500) Add ability to ignore error on missing pass file to allow processing the output further via another filters (mainly the default filter) without updating the pass file itself. It also contains the option to create the pass file, like the option create=true does. Finally, it also allows to issue a warning only, if the pass file is not found. --- ...asswordstore-add_option_ignore_missing.yml | 3 + plugins/lookup/passwordstore.py | 56 +++++++++++++++++-- .../lookup_passwordstore/tasks/tests.yml | 31 ++++++++++ 3 files changed, 85 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2500-passwordstore-add_option_ignore_missing.yml diff --git a/changelogs/fragments/2500-passwordstore-add_option_ignore_missing.yml b/changelogs/fragments/2500-passwordstore-add_option_ignore_missing.yml new file mode 100644 index 0000000000..6141ac7747 --- /dev/null +++ b/changelogs/fragments/2500-passwordstore-add_option_ignore_missing.yml @@ -0,0 +1,3 @@ +minor_changes: + - passwordstore lookup - add option ``missing`` to choose what to do if the password file is missing + (https://github.com/ansible-collections/community.general/pull/2500). 
diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 79c69ed962..976dfb837e 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -25,9 +25,9 @@ DOCUMENTATION = ''' env: - name: PASSWORD_STORE_DIR create: - description: Create the password if it does not already exist. + description: Create the password if it does not already exist. Takes precedence over C(missing). type: bool - default: 'no' + default: false overwrite: description: Overwrite the password if it does already exist. type: bool @@ -60,6 +60,22 @@ DOCUMENTATION = ''' description: use alphanumeric characters. type: bool default: 'no' + missing: + description: + - List of preference about what to do if the password file is missing. + - If I(create=true), the value for this option is ignored and assumed to be C(create). + - If set to C(error), the lookup will error out if the passname does not exist. + - If set to C(create), the passname will be created with the provided length I(length) if it does not exist. + - If set to C(empty) or C(warn), will return a C(none) in case the passname does not exist. + When using C(lookup) and not C(query), this will be translated to an empty string. + version_added: 3.1.0 + type: str + default: error + choices: + - error + - warn + - empty + - create ''' EXAMPLES = """ # Debug is used for examples, BAD IDEA to show passwords on screen @@ -67,12 +83,28 @@ EXAMPLES = """ ansible.builtin.debug: msg: "{{ lookup('community.general.passwordstore', 'example/test')}}" +- name: Basic lookup. Warns if example/test does not exist and returns empty string + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test missing=warn')}}" + - name: Create pass with random 16 character password. 
If password exists just give the password ansible.builtin.debug: var: mypassword vars: mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}" +- name: Create pass with random 16 character password. If password exists just give the password + ansible.builtin.debug: + var: mypassword + vars: + mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=create')}}" + +- name: Prints 'abc' if example/test does not exist, just give the password otherwise + ansible.builtin.debug: + var: mypassword + vars: + mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=empty') | default('abc', true) }}" + - name: Different size password ansible.builtin.debug: msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}" @@ -111,10 +143,13 @@ import yaml from distutils import util from ansible.errors import AnsibleError, AnsibleAssertionError from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.utils.display import Display from ansible.utils.encrypt import random_password from ansible.plugins.lookup import LookupBase from ansible import constants as C +display = Display() + # backhacked check_output with input for python 2.7 # http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output @@ -178,12 +213,17 @@ class LookupModule(LookupBase): self.paramvals[key] = util.strtobool(self.paramvals[key]) except (ValueError, AssertionError) as e: raise AnsibleError(e) + if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']: + raise AnsibleError("{0} is not a valid option for missing".format(self.paramvals['missing'])) if not isinstance(self.paramvals['length'], int): if self.paramvals['length'].isdigit(): self.paramvals['length'] = int(self.paramvals['length']) else: raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length'])) + if self.paramvals['create']: + 
self.paramvals['missing'] = 'create' + # Collect pass environment variables from the plugin's parameters. self.env = os.environ.copy() @@ -224,9 +264,11 @@ class LookupModule(LookupBase): if e.returncode != 0 and 'not in the password store' in e.output: # if pass returns 1 and return string contains 'is not in the password store.' # We need to determine if this is valid or Error. - if not self.paramvals['create']: - raise AnsibleError('passname: {0} not found, use create=True'.format(self.passname)) + if self.paramvals['missing'] == 'error': + raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname)) else: + if self.paramvals['missing'] == 'warn': + display.warning('passwordstore: passname {0} not found'.format(self.passname)) return False else: raise AnsibleError(e) @@ -294,6 +336,7 @@ class LookupModule(LookupBase): 'userpass': '', 'length': 16, 'backup': False, + 'missing': 'error', } for term in terms: @@ -304,6 +347,9 @@ class LookupModule(LookupBase): else: result.append(self.get_passresult()) else: # password does not exist - if self.paramvals['create']: + if self.paramvals['missing'] == 'create': result.append(self.generate_password()) + else: + result.append(None) + return result diff --git a/tests/integration/targets/lookup_passwordstore/tasks/tests.yml b/tests/integration/targets/lookup_passwordstore/tasks/tests.yml index aba5457c0a..e69ba5e572 100644 --- a/tests/integration/targets/lookup_passwordstore/tasks/tests.yml +++ b/tests/integration/targets/lookup_passwordstore/tasks/tests.yml @@ -61,6 +61,37 @@ that: - readpass == newpass +- name: Create a password using missing=create + set_fact: + newpass: "{{ lookup('community.general.passwordstore', 'test-missing-create missing=create length=8') }}" + +- name: Fetch password from an existing file + set_fact: + readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create') }}" + +- name: Verify password + assert: + that: + - readpass == 
newpass + +- name: Fetch password from existing file using missing=empty + set_fact: + readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create missing=empty') }}" + +- name: Verify password + assert: + that: + - readpass == newpass + +- name: Fetch password from non-existing file using missing=empty + set_fact: + readpass: "{{ query('community.general.passwordstore', 'test-missing-pass missing=empty') }}" + +- name: Verify password + assert: + that: + - readpass == [ none ] + # As inserting multiline passwords on the commandline would require something # like expect, simply create it by using default gpg on a file with the correct # structure. From 345d5f2dfa8e8ea4d624da18a92ac69d298522b8 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 18 May 2021 00:03:15 +1200 Subject: [PATCH 0294/3093] snap - revamp + implementing enabled/disabled states (#2411) * revamp of snap module * added changelog fragment * fixed description * Update changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...11-snap-revamp-enabled-disabled-states.yml | 2 + plugins/modules/packaging/os/snap.py | 277 ++++++++++-------- 2 files changed, 157 insertions(+), 122 deletions(-) create mode 100644 changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml diff --git a/changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml b/changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml new file mode 100644 index 0000000000..a52b377817 --- /dev/null +++ b/changelogs/fragments/2411-snap-revamp-enabled-disabled-states.yml @@ -0,0 +1,2 @@ +minor_changes: + - snap - added ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1990). 
diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py index 9776b4e50a..fab2558ccf 100644 --- a/plugins/modules/packaging/os/snap.py +++ b/plugins/modules/packaging/os/snap.py @@ -31,7 +31,7 @@ options: - Desired state of the package. required: false default: present - choices: [ absent, present ] + choices: [ absent, present, enabled, disabled ] type: str classic: description: @@ -105,151 +105,184 @@ snaps_removed: returned: When any snaps have been removed ''' -import operator import re -from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.module_helper import ( + CmdStateModuleHelper, ArgFormat, ModuleHelperException +) -def validate_input_snaps(module): - """Ensure that all exist.""" - for snap_name in module.params['name']: - if not snap_exists(module, snap_name): - module.fail_json(msg="No snap matching '%s' available." % snap_name) +__state_map = dict( + present='install', + absent='remove', + info='info', # not public + list='list', # not public + enabled='enable', + disabled='disable', +) -def snap_exists(module, snap_name): - snap_path = module.get_bin_path("snap", True) - cmd_parts = [snap_path, 'info', snap_name] - cmd = ' '.join(cmd_parts) - rc, out, err = module.run_command(cmd, check_rc=False) - - return rc == 0 +def _state_map(value): + return __state_map[value] -def is_snap_installed(module, snap_name): - snap_path = module.get_bin_path("snap", True) - cmd_parts = [snap_path, 'list', snap_name] - cmd = ' '.join(cmd_parts) - rc, out, err = module.run_command(cmd, check_rc=False) +class Snap(CmdStateModuleHelper): + __disable_re = re.compile(r'(?:\S+\s+){5}(?P\S+)') + module = dict( + argument_spec={ + 'name': dict(type='list', elements='str', required=True), + 'state': dict(type='str', required=False, default='present', + choices=['absent', 'present', 'enabled', 'disabled']), + 'classic': dict(type='bool', required=False, default=False), + 'channel': 
dict(type='str', required=False, default='stable'), + }, + supports_check_mode=True, + ) + command = "snap" + command_args_formats = dict( + actionable_snaps=dict(fmt=lambda v: v), + state=dict(fmt=_state_map), + classic=dict(fmt="--classic", style=ArgFormat.BOOLEAN), + channel=dict(fmt=lambda v: [] if v == 'stable' else ['--channel', '{0}']), + ) + check_rc = False - return rc == 0 + @staticmethod + def _first_non_zero(a): + for elem in a: + if elem != 0: + return elem + return 0 -def get_snap_for_action(module): - """Construct a list of snaps to use for current action.""" - snaps = module.params['name'] + def _run_multiple_commands(self, commands): + outputs = [(c,) + self.run_command(params=c) for c in commands] + results = ([], [], [], []) + for output in outputs: + for i in range(4): + results[i].append(output[i]) - is_present_state = module.params['state'] == 'present' - negation_predicate = operator.not_ if is_present_state else bool + return [ + '; '.join(results[0]), + self._first_non_zero(results[1]), + '\n'.join(results[2]), + '\n'.join(results[3]), + ] - def predicate(s): - return negation_predicate(is_snap_installed(module, s)) + def snap_exists(self, snap_name): + return 0 == self.run_command(params=[{'state': 'info'}, {'name': [snap_name]}])[0] - return [s for s in snaps if predicate(s)] + def is_snap_installed(self, snap_name): + return 0 == self.run_command(params=[{'state': 'list'}, {'name': [snap_name]}])[0] + def is_snap_enabled(self, snap_name): + rc, out, err = self.run_command(params=[{'state': 'list'}, {'name': [snap_name]}]) + if rc != 0: + return None + result = out.splitlines()[1] + match = self.__disable_re.match(result) + if not match: + raise ModuleHelperException(msg="Unable to parse 'snap list {0}' output:\n{1}".format(snap_name, out)) + notes = match.group('notes') + return "disabled" not in notes.split(',') -def get_base_cmd_parts(module): - action_map = { - 'present': 'install', - 'absent': 'remove', - } + def 
validate_input_snaps(self): + """Ensure that all exist.""" + for snap_name in self.vars.name: + if not self.snap_exists(snap_name): + raise ModuleHelperException(msg="No snap matching '%s' available." % snap_name) - state = module.params['state'] + def state_present(self): + self.validate_input_snaps() # if snap doesnt exist, it will explode when trying to install + self.vars.meta('classic').set(output=True) + self.vars.meta('channel').set(output=True) + actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)] + if not actionable_snaps: + return + self.changed = True + self.vars.snaps_installed = actionable_snaps + if self.module.check_mode: + return + params = ['classic', 'channel', 'state'] # get base cmd parts + has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable' + has_multiple_snaps = len(actionable_snaps) > 1 + if has_one_pkg_params and has_multiple_snaps: + commands = [params + [s] for s in actionable_snaps] + else: + commands = [params + actionable_snaps] + self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) + if rc == 0: + return - classic = ['--classic'] if module.params['classic'] else [] - channel = ['--channel', module.params['channel']] if module.params['channel'] and module.params['channel'] != 'stable' else [] + classic_snap_pattern = re.compile(r'^error: This revision of snap "(?P\w+)"' + r' was published using classic confinement') + match = classic_snap_pattern.match(err) + if match: + err_pkg = match.group('package_name') + msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg) + else: + msg = "Ooops! 
Snap installation failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + raise ModuleHelperException(msg=msg) - snap_path = module.get_bin_path("snap", True) - snap_action = action_map[state] + def state_absent(self): + self.validate_input_snaps() # if snap doesnt exist, it will be absent by definition + actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)] + if not actionable_snaps: + return + self.changed = True + self.vars.snaps_removed = actionable_snaps + if self.module.check_mode: + return + params = ['classic', 'channel', 'state'] # get base cmd parts + commands = [params + actionable_snaps] + self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) + if rc == 0: + return + msg = "Ooops! Snap removal failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + raise ModuleHelperException(msg=msg) - cmd_parts = [snap_path, snap_action] - if snap_action == 'install': - cmd_parts += classic + channel + def state_enabled(self): + self.validate_input_snaps() + actionable_snaps = [s for s in self.vars.name if self.is_snap_enabled(s) is False] + if not actionable_snaps: + return + self.changed = True + self.vars.snaps_enabled = actionable_snaps + if self.module.check_mode: + return + params = ['classic', 'channel', 'state'] # get base cmd parts + commands = [params + actionable_snaps] + self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) + if rc == 0: + return + msg = "Ooops! 
Snap enabling failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + raise ModuleHelperException(msg=msg) - return cmd_parts - - -def get_cmd_parts(module, snap_names): - """Return list of cmds to run in exec format.""" - is_install_mode = module.params['state'] == 'present' - has_multiple_snaps = len(snap_names) > 1 - - cmd_parts = get_base_cmd_parts(module) - has_one_pkg_params = '--classic' in cmd_parts or '--channel' in cmd_parts - - if not (is_install_mode and has_one_pkg_params and has_multiple_snaps): - return [cmd_parts + snap_names] - - return [cmd_parts + [s] for s in snap_names] - - -def run_cmd_for(module, snap_names): - cmds_parts = get_cmd_parts(module, snap_names) - cmd = '; '.join(' '.join(c) for c in cmds_parts) - cmd = 'sh -c "{0}"'.format(cmd) - - # Actually execute the snap command - return (cmd, ) + module.run_command(cmd, check_rc=False) - - -def execute_action(module): - is_install_mode = module.params['state'] == 'present' - exit_kwargs = { - 'classic': module.params['classic'], - 'channel': module.params['channel'], - } if is_install_mode else {} - - actionable_snaps = get_snap_for_action(module) - if not actionable_snaps: - module.exit_json(changed=False, **exit_kwargs) - - changed_def_args = { - 'changed': True, - 'snaps_{result}'. - format(result='installed' if is_install_mode - else 'removed'): actionable_snaps, - } - - if module.check_mode: - module.exit_json(**dict(changed_def_args, **exit_kwargs)) - - cmd, rc, out, err = run_cmd_for(module, actionable_snaps) - cmd_out_args = { - 'cmd': cmd, - 'rc': rc, - 'stdout': out, - 'stderr': err, - } - - if rc == 0: - module.exit_json(**dict(changed_def_args, **dict(cmd_out_args, **exit_kwargs))) - else: - msg = "Ooops! 
Snap installation failed while executing '{cmd}', please examine logs and error output for more details.".format(cmd=cmd) - if is_install_mode: - m = re.match(r'^error: This revision of snap "(?P\w+)" was published using classic confinement', err) - if m is not None: - err_pkg = m.group('package_name') - msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg) - module.fail_json(msg=msg, **dict(cmd_out_args, **exit_kwargs)) + def state_disabled(self): + self.validate_input_snaps() + actionable_snaps = [s for s in self.vars.name if self.is_snap_enabled(s) is True] + if not actionable_snaps: + return + self.changed = True + self.vars.snaps_enabled = actionable_snaps + if self.module.check_mode: + return + params = ['classic', 'channel', 'state'] # get base cmd parts + commands = [params + actionable_snaps] + self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) + if rc == 0: + return + msg = "Ooops! Snap disabling failed while executing '{cmd}', please examine logs and " \ + "error output for more details.".format(cmd=self.vars.cmd) + raise ModuleHelperException(msg=msg) def main(): - module_args = { - 'name': dict(type='list', elements='str', required=True), - 'state': dict(type='str', required=False, default='present', choices=['absent', 'present']), - 'classic': dict(type='bool', required=False, default=False), - 'channel': dict(type='str', required=False, default='stable'), - } - module = AnsibleModule( - argument_spec=module_args, - supports_check_mode=True, - ) - - validate_input_snaps(module) - - # Apply changes to the snaps - execute_action(module) + snap = Snap() + snap.run() if __name__ == '__main__': From dc0a56141fd41c94ff7c79dc902f97255da295cb Mon Sep 17 00:00:00 2001 From: Lauri Tirkkonen Date: Mon, 17 May 2021 19:55:00 +0300 Subject: [PATCH 0295/3093] zfs_delegate_admin: drop choices from permissions (#2540) instead of whitelisting some subset of known existing permissions, just allow any string to be 
used as permissions. this way, any permission supported by the underlying zfs commands can be used, eg. 'bookmark', 'load-key', 'change-key' and all property permissions, which were missing from the choices list. --- changelogs/fragments/2540-zfs-delegate-choices.yml | 2 ++ plugins/modules/storage/zfs/zfs_delegate_admin.py | 8 +++----- 2 files changed, 5 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2540-zfs-delegate-choices.yml diff --git a/changelogs/fragments/2540-zfs-delegate-choices.yml b/changelogs/fragments/2540-zfs-delegate-choices.yml new file mode 100644 index 0000000000..8e0138420c --- /dev/null +++ b/changelogs/fragments/2540-zfs-delegate-choices.yml @@ -0,0 +1,2 @@ +minor_changes: + - zfs_delegate_admin - drop choices from permissions, allowing any permission supported by the underlying zfs commands (https://github.com/ansible-collections/community.general/pull/2540). diff --git a/plugins/modules/storage/zfs/zfs_delegate_admin.py b/plugins/modules/storage/zfs/zfs_delegate_admin.py index 71225fa155..ead4041150 100644 --- a/plugins/modules/storage/zfs/zfs_delegate_admin.py +++ b/plugins/modules/storage/zfs/zfs_delegate_admin.py @@ -51,8 +51,9 @@ options: permissions: description: - The list of permission(s) to delegate (required if C(state) is C(present)). + - Supported permissions depend on the ZFS version in use. See for example + U(https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html) for OpenZFS. 
type: list - choices: [ allow, clone, create, destroy, diff, hold, mount, promote, readonly, receive, release, rename, rollback, send, share, snapshot, unallow ] elements: str local: description: @@ -248,10 +249,7 @@ def main(): users=dict(type='list', elements='str'), groups=dict(type='list', elements='str'), everyone=dict(type='bool', default=False), - permissions=dict(type='list', elements='str', - choices=['allow', 'clone', 'create', 'destroy', 'diff', 'hold', 'mount', 'promote', - 'readonly', 'receive', 'release', 'rename', 'rollback', 'send', 'share', - 'snapshot', 'unallow']), + permissions=dict(type='list', elements='str'), local=dict(type='bool'), descendents=dict(type='bool'), recursive=dict(type='bool', default=False), From 2b1eff2783b6f6c8b6d4ef0552afc35d5eac9146 Mon Sep 17 00:00:00 2001 From: quidame Date: Mon, 17 May 2021 20:05:24 +0200 Subject: [PATCH 0296/3093] java_keystore: pass in secret to keytool via stdin (#2526) * java_keystore: pass in secret to keytool via stdin * add changelog fragment --- .../2526-java_keystore-password-via-stdin.yml | 4 ++++ plugins/modules/system/java_keystore.py | 10 ++++------ .../unit/plugins/modules/system/test_java_keystore.py | 6 +++--- 3 files changed, 11 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/2526-java_keystore-password-via-stdin.yml diff --git a/changelogs/fragments/2526-java_keystore-password-via-stdin.yml b/changelogs/fragments/2526-java_keystore-password-via-stdin.yml new file mode 100644 index 0000000000..1e45e306af --- /dev/null +++ b/changelogs/fragments/2526-java_keystore-password-via-stdin.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - "java_keystore - replace envvar by stdin to pass secret to ``keytool`` + (https://github.com/ansible-collections/community.general/pull/2526)." 
diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py index 78bcfb6af6..8293801f1b 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/system/java_keystore.py @@ -290,11 +290,11 @@ class JavaKeystore: def read_stored_certificate_fingerprint(self): stored_certificate_fingerprint_cmd = [ - self.keytool_bin, "-list", "-alias", self.name, "-keystore", - self.keystore_path, "-storepass:env", "STOREPASS", "-v" + self.keytool_bin, "-list", "-alias", self.name, + "-keystore", self.keystore_path, "-v" ] (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = self.module.run_command( - stored_certificate_fingerprint_cmd, environ_update=dict(STOREPASS=self.password), check_rc=False) + stored_certificate_fingerprint_cmd, data=self.password, check_rc=False) if rc != 0: if "keytool error: java.lang.Exception: Alias <%s> does not exist" % self.name \ in stored_certificate_fingerprint_out: @@ -428,12 +428,10 @@ class JavaKeystore: "-srckeystore", keystore_p12_path, "-srcstoretype", "pkcs12", "-alias", self.name, - "-deststorepass:env", "STOREPASS", - "-srcstorepass:env", "STOREPASS", "-noprompt"] (rc, import_keystore_out, dummy) = self.module.run_command( - import_keystore_cmd, data=None, environ_update=dict(STOREPASS=self.password), check_rc=False + import_keystore_cmd, data='%s\n%s\n%s' % (self.password, self.password, self.password), check_rc=False ) if rc != 0: return self.module.fail_json(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc) diff --git a/tests/unit/plugins/modules/system/test_java_keystore.py b/tests/unit/plugins/modules/system/test_java_keystore.py index 5e99074c95..7d582a3e99 100644 --- a/tests/unit/plugins/modules/system/test_java_keystore.py +++ b/tests/unit/plugins/modules/system/test_java_keystore.py @@ -80,7 +80,7 @@ class TestCreateJavaKeystore(ModuleTestCase): 'cmd': ["keytool", "-importkeystore", "-destkeystore", "/path/to/keystore.jks", "-srckeystore", 
"/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", - "-deststorepass:env", "STOREPASS", "-srcstorepass:env", "STOREPASS", "-noprompt"], + "-noprompt"], 'msg': '', 'rc': 0 } @@ -183,7 +183,7 @@ class TestCreateJavaKeystore(ModuleTestCase): cmd=["keytool", "-importkeystore", "-destkeystore", "/path/to/keystore.jks", "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", - "-deststorepass:env", "STOREPASS", "-srcstorepass:env", "STOREPASS", "-noprompt"], + "-noprompt"], msg='', rc=1 ) @@ -354,7 +354,7 @@ class TestCertChanged(ModuleTestCase): jks = JavaKeystore(module) jks.cert_changed() module.fail_json.assert_called_with( - cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-storepass:env", "STOREPASS", "-v"], + cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-v"], msg='', err='Oops', rc=1 From 2a376642ddc8be103e57eb688da86bfb71bf790a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 18 May 2021 06:28:21 +1200 Subject: [PATCH 0297/3093] ModuleHelper - better mechanism for customizing "changed" behaviour (#2514) * better mechanism for customizing "changed" behaviour * dont drink and code: silly mistake from late at night * added changelog fragment --- changelogs/fragments/2514-mh-improved-changed.yml | 2 ++ plugins/module_utils/mh/base.py | 8 +++++++- 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2514-mh-improved-changed.yml diff --git a/changelogs/fragments/2514-mh-improved-changed.yml b/changelogs/fragments/2514-mh-improved-changed.yml new file mode 100644 index 0000000000..b540600130 --- /dev/null +++ b/changelogs/fragments/2514-mh-improved-changed.yml @@ -0,0 +1,2 @@ +minor_changes: + - ModuleHelper module utils - improved mechanism for customizing the calculation of ``changed`` (https://github.com/ansible-collections/community.general/pull/2514). 
diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py index 2a2dd88f7b..e0de7f2fdd 100644 --- a/plugins/module_utils/mh/base.py +++ b/plugins/module_utils/mh/base.py @@ -33,9 +33,15 @@ class ModuleHelperBase(object): def __quit_module__(self): pass + def __changed__(self): + raise NotImplementedError() + @property def changed(self): - return self._changed + try: + return self.__changed__() + except NotImplementedError: + return self._changed @changed.setter def changed(self, value): From b89eb87ad6872dcfed1bd2a7969ba5ce091ddf9e Mon Sep 17 00:00:00 2001 From: Xabier Napal Date: Mon, 17 May 2021 21:00:35 +0200 Subject: [PATCH 0298/3093] influxdb_user: allow creation of first user with auth enabled (#2364) (#2368) * influxdb_user: allow creation of first user with auth enabled (#2364) * handle potential exceptions while parsing influxdb client error * fix changelog Co-authored-by: Felix Fontein * influxdb_user: use generic exceptions to be compatible with python 2.7 Co-authored-by: Felix Fontein --- .../2364-influxdb_user-first_user.yml | 5 ++++ .../database/influxdb/influxdb_user.py | 25 +++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2364-influxdb_user-first_user.yml diff --git a/changelogs/fragments/2364-influxdb_user-first_user.yml b/changelogs/fragments/2364-influxdb_user-first_user.yml new file mode 100644 index 0000000000..905688643b --- /dev/null +++ b/changelogs/fragments/2364-influxdb_user-first_user.yml @@ -0,0 +1,5 @@ +bugfixes: + - influxdb_user - allow creation of admin users when InfluxDB authentication + is enabled but no other user exists on the database. In this scenario, + InfluxDB 1.x allows only ``CREATE USER`` queries and rejects any other query + (https://github.com/ansible-collections/community.general/issues/2364). 
diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py index 8aec04533b..d9e6b58051 100644 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ b/plugins/modules/database/influxdb/influxdb_user.py @@ -100,6 +100,8 @@ RETURN = r''' #only defaults ''' +import json + from ansible.module_utils.urls import ConnectionError from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native @@ -115,7 +117,7 @@ def find_user(module, client, user_name): if user['user'] == user_name: user_result = user break - except (ConnectionError, influx.exceptions.InfluxDBClientError) as e: + except ConnectionError as e: module.fail_json(msg=to_native(e)) return user_result @@ -198,6 +200,9 @@ def set_user_grants(module, client, user_name, grants): return changed +INFLUX_AUTH_FIRST_USER_REQUIRED = "error authorizing query: create admin user first or disable authentication" + + def main(): argument_spec = influx.InfluxDb.influxdb_argument_spec() argument_spec.update( @@ -219,7 +224,23 @@ def main(): grants = module.params['grants'] influxdb = influx.InfluxDb(module) client = influxdb.connect_to_influxdb() - user = find_user(module, client, user_name) + + user = None + try: + user = find_user(module, client, user_name) + except influx.exceptions.InfluxDBClientError as e: + if e.code == 403: + reason = None + try: + msg = json.loads(e.content) + reason = msg["error"] + except (KeyError, ValueError): + module.fail_json(msg=to_native(e)) + + if reason != INFLUX_AUTH_FIRST_USER_REQUIRED: + module.fail_json(msg=to_native(e)) + else: + module.fail_json(msg=to_native(e)) changed = False From d24fc92466cc48d8dc436b80a2613635061b8f07 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 18 May 2021 08:44:00 +1200 Subject: [PATCH 0299/3093] ModuleHelper - cmd params now taken from self.vars instead of self.module.params (#2517) * cmd params now taken 
from self.vars instead of self.module.params * added changelog fragment * Update changelogs/fragments/2517-cmd-params-from-vars.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/2517-cmd-params-from-vars.yml | 2 ++ plugins/module_utils/mh/mixins/cmd.py | 6 +++--- plugins/modules/system/xfconf.py | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2517-cmd-params-from-vars.yml diff --git a/changelogs/fragments/2517-cmd-params-from-vars.yml b/changelogs/fragments/2517-cmd-params-from-vars.yml new file mode 100644 index 0000000000..95a2f7165d --- /dev/null +++ b/changelogs/fragments/2517-cmd-params-from-vars.yml @@ -0,0 +1,2 @@ +minor_changes: + - cmd (Module Helper) module utils - ``CmdMixin`` now pulls the value for ``run_command()`` params from ``self.vars``, as opposed to previously retrieving those from ``self.module.params`` (https://github.com/ansible-collections/community.general/pull/2517). diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py index fc66638f69..eb7cc698cc 100644 --- a/plugins/module_utils/mh/mixins/cmd.py +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -120,7 +120,7 @@ class CmdMixin(object): cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True) except ValueError: pass - param_list = params if params else self.module.params.keys() + param_list = params if params else self.vars.keys() for param in param_list: if isinstance(param, dict): @@ -131,9 +131,9 @@ class CmdMixin(object): fmt = find_format(_param) value = param[_param] elif isinstance(param, str): - if param in self.module.argument_spec: + if param in self.vars.keys(): fmt = find_format(param) - value = self.module.params[param] + value = self.vars[param] elif param in extra_params: fmt = find_format(param) value = extra_params[param] diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index f2975df050..dc560e7775 100644 --- 
a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -258,7 +258,7 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): params = ['channel', 'property', {'create': True}] if self.vars.is_array: - params.append({'is_array': True}) + params.append('is_array') params.append({'values_and_types': (self.vars.value, value_type)}) if not self.module.check_mode: From f6db0745fcfb59dc2dff26a1d86fc60b142b65d3 Mon Sep 17 00:00:00 2001 From: quidame Date: Tue, 18 May 2021 06:46:45 +0200 Subject: [PATCH 0300/3093] filesystem: revamp module (#2472) * revamp filesystem module to prepare next steps * pass all commands to module.run_command() as lists * refactor grow() and grow_cmd() to not need to override them so much * refactor all existing get_fs_size() overrides to raise a ValueError if not able to parse command output and return an integer. * override MKFS_FORCE_FLAGS the same way for all fstypes that require it * improve documentation of limitations of the module regarding FreeBSD * fix indentation in DOCUMENTATION * add/update function/method docstrings * fix pylint hints filesystem: refactor integration tests * Include *reiserfs* and *swap* in tests. * Fix reiserfs related code and tests accordingly. * Replace "other fs" (unhandled by this module), from *swap* to *minix* (both mkswap and mkfs.minix being provided by util-linux). * Replace *dd* commands by *filesize* dedicated module. * Use FQCNs and name the tasks. * Update main tests conditionals. 
* add a changelog fragment * Apply suggestions from code review Co-authored-by: Felix Fontein * declare variables as lists when lists are needed * fix construction without useless conversion Co-authored-by: Felix Fontein --- .../2472_filesystem_module_revamp.yml | 9 + plugins/modules/system/filesystem.py | 296 ++++++++++-------- .../targets/filesystem/defaults/main.yml | 2 + .../filesystem/tasks/create_device.yml | 20 +- .../targets/filesystem/tasks/create_fs.yml | 69 ++-- .../targets/filesystem/tasks/main.yml | 50 +-- .../filesystem/tasks/overwrite_another_fs.yml | 37 ++- .../targets/filesystem/tasks/remove_fs.yml | 60 ++-- .../targets/filesystem/tasks/setup.yml | 179 +++++++---- 9 files changed, 434 insertions(+), 288 deletions(-) create mode 100644 changelogs/fragments/2472_filesystem_module_revamp.yml diff --git a/changelogs/fragments/2472_filesystem_module_revamp.yml b/changelogs/fragments/2472_filesystem_module_revamp.yml new file mode 100644 index 0000000000..691c861078 --- /dev/null +++ b/changelogs/fragments/2472_filesystem_module_revamp.yml @@ -0,0 +1,9 @@ +--- +minor_changes: + - "filesystem - cleanup and revamp module, tests and doc. Pass all commands to + ``module.run_command()`` as lists. Move the device-vs-mountpoint logic to + ``grow()`` method. Give to all ``get_fs_size()`` the same logic and error + handling. (https://github.com/ansible-collections/community.general/pull/2472)." +bugfixes: + - "filesystem - repair ``reiserfs`` fstype support after adding it to integration + tests (https://github.com/ansible-collections/community.general/pull/2472)." 
diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/system/filesystem.py index 6944178da1..97fe2dc1ab 100644 --- a/plugins/modules/system/filesystem.py +++ b/plugins/modules/system/filesystem.py @@ -7,10 +7,11 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type + DOCUMENTATION = ''' --- author: -- Alexander Bulimov (@abulimov) + - Alexander Bulimov (@abulimov) module: filesystem short_description: Makes a filesystem description: @@ -18,13 +19,12 @@ description: options: state: description: - - If C(state=present), the filesystem is created if it doesn't already - exist, that is the default behaviour if I(state) is omitted. - - If C(state=absent), filesystem signatures on I(dev) are wiped if it - contains a filesystem (as known by C(blkid)). - - When C(state=absent), all other options but I(dev) are ignored, and the - module doesn't fail if the device I(dev) doesn't actually exist. - - C(state=absent) is not supported and will fail on FreeBSD systems. + - If C(state=present), the filesystem is created if it doesn't already + exist, that is the default behaviour if I(state) is omitted. + - If C(state=absent), filesystem signatures on I(dev) are wiped if it + contains a filesystem (as known by C(blkid)). + - When C(state=absent), all other options but I(dev) are ignored, and the + module doesn't fail if the device I(dev) doesn't actually exist. type: str choices: [ present, absent ] default: present @@ -32,48 +32,56 @@ options: fstype: choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ] description: - - Filesystem type to be created. This option is required with - C(state=present) (or if I(state) is omitted). - - reiserfs support was added in 2.2. - - lvm support was added in 2.5. - - since 2.5, I(dev) can be an image file. 
- - vfat support was added in 2.5 - - ocfs2 support was added in 2.6 - - f2fs support was added in 2.7 - - swap support was added in 2.8 + - Filesystem type to be created. This option is required with + C(state=present) (or if I(state) is omitted). + - reiserfs support was added in 2.2. + - lvm support was added in 2.5. + - since 2.5, I(dev) can be an image file. + - vfat support was added in 2.5 + - ocfs2 support was added in 2.6 + - f2fs support was added in 2.7 + - swap support was added in 2.8 type: str aliases: [type] dev: description: - - Target path to device or image file. + - Target path to block device or regular file. + - On systems not using block devices but character devices instead (as + FreeBSD), this module only works when applying to regular files, aka + disk images. type: path required: yes aliases: [device] force: description: - - If C(yes), allows to create new filesystem on devices that already has filesystem. + - If C(yes), allows to create new filesystem on devices that already has filesystem. type: bool default: 'no' resizefs: description: - - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. - - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs) and C(vfat) filesystems. - Attempts to resize other filesystem types will fail. - - XFS Will only grow if mounted. Currently, the module is based on commands - from C(util-linux) package to perform operations, so resizing of XFS is - not supported on FreeBSD systems. - - vFAT will likely fail if fatresize < 1.04. + - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. + - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs) and C(vfat) filesystems. + Attempts to resize other filesystem types will fail. + - XFS Will only grow if mounted. 
Currently, the module is based on commands + from C(util-linux) package to perform operations, so resizing of XFS is + not supported on FreeBSD systems. + - vFAT will likely fail if fatresize < 1.04. type: bool default: 'no' opts: description: - - List of options to be passed to mkfs command. + - List of options to be passed to mkfs command. type: str requirements: - - Uses tools related to the I(fstype) (C(mkfs)) and C(blkid) command. When I(resizefs) is enabled, C(blockdev) command is required too. + - Uses tools related to the I(fstype) (C(mkfs)) and the C(blkid) command. + - When I(resizefs) is enabled, C(blockdev) command is required too. notes: - - Potential filesystem on I(dev) are checked using C(blkid), in case C(blkid) isn't able to detect an existing filesystem, - this filesystem is overwritten even if I(force) is C(no). + - Potential filesystem on I(dev) are checked using C(blkid). In case C(blkid) + isn't able to detect an existing filesystem, this filesystem is overwritten + even if I(force) is C(no). + - On FreeBSD systems, either C(e2fsprogs) or C(util-linux) packages provide + a C(blkid) command that is compatible with this module, when applied to + regular files. - This module supports I(check_mode). 
''' @@ -102,6 +110,7 @@ import re import stat from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native class Device(object): @@ -114,13 +123,15 @@ class Device(object): statinfo = os.stat(self.path) if stat.S_ISBLK(statinfo.st_mode): blockdev_cmd = self.module.get_bin_path("blockdev", required=True) - dummy, devsize_in_bytes, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) - return int(devsize_in_bytes) + dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) + devsize_in_bytes = int(out) elif os.path.isfile(self.path): - return os.path.getsize(self.path) + devsize_in_bytes = os.path.getsize(self.path) else: self.module.fail_json(changed=False, msg="Target device not supported: %s" % self) + return devsize_in_bytes + def get_mountpoint(self): """Return (first) mountpoint of device. Returns None when not mounted.""" cmd_findmnt = self.module.get_bin_path("findmnt", required=True) @@ -141,9 +152,12 @@ class Device(object): class Filesystem(object): - GROW = None MKFS = None - MKFS_FORCE_FLAGS = '' + MKFS_FORCE_FLAGS = [] + INFO = None + GROW = None + GROW_MAX_SPACE_FLAGS = [] + GROW_MOUNTPOINT_ONLY = False LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'} @@ -155,7 +169,11 @@ class Filesystem(object): return type(self).__name__ def get_fs_size(self, dev): - """ Return size in bytes of filesystem on device. Returns int """ + """Return size in bytes of filesystem on device (integer). + Should query the info with a per-fstype command that can access the + device whenever it is mounted or not, and parse the command output. + Parser must ensure to return an integer, or raise a ValueError. 
+ """ raise NotImplementedError() def create(self, opts, dev): @@ -163,31 +181,27 @@ class Filesystem(object): return mkfs = self.module.get_bin_path(self.MKFS, required=True) - if opts is None: - cmd = "%s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, dev) - else: - cmd = "%s %s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, opts, dev) + cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)] self.module.run_command(cmd, check_rc=True) def wipefs(self, dev): - if platform.system() == 'FreeBSD': - msg = "module param state=absent is currently not supported on this OS (FreeBSD)." - self.module.fail_json(msg=msg) - if self.module.check_mode: return # wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above) - # so it is not supported on FreeBSD. Even the use of dd as a fallback is + # that is ported to FreeBSD. The use of dd as a portable fallback is # not doable here if it needs get_mountpoint() (to prevent corruption of - # a mounted filesystem), since 'findmnt' is not available on FreeBSD. + # a mounted filesystem), since 'findmnt' is not available on FreeBSD, + # even in util-linux port for this OS. wipefs = self.module.get_bin_path('wipefs', required=True) - cmd = [wipefs, "--all", dev.__str__()] + cmd = [wipefs, "--all", str(dev)] self.module.run_command(cmd, check_rc=True) - def grow_cmd(self, dev): - cmd = self.module.get_bin_path(self.GROW, required=True) - return [cmd, str(dev)] + def grow_cmd(self, target): + """Build and return the resizefs commandline as list.""" + cmdline = [self.module.get_bin_path(self.GROW, required=True)] + cmdline += self.GROW_MAX_SPACE_FLAGS + [target] + return cmdline def grow(self, dev): """Get dev and fs size and compare. Returns stdout of used command.""" @@ -196,31 +210,50 @@ class Filesystem(object): try: fssize_in_bytes = self.get_fs_size(dev) except NotImplementedError: - self.module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." 
% self.fstype) + self.module.fail_json(msg="module does not support resizing %s filesystem yet" % self.fstype) + except ValueError as err: + self.module.warn("unable to process %s output '%s'" % (self.INFO, to_native(err))) + self.module.fail_json(msg="unable to process %s output for %s" % (self.INFO, dev)) if not fssize_in_bytes < devsize_in_bytes: self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev)) elif self.module.check_mode: - self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev)) + self.module.exit_json(changed=True, msg="resizing filesystem %s on device %s" % (self.fstype, dev)) + + if self.GROW_MOUNTPOINT_ONLY: + mountpoint = dev.get_mountpoint() + if not mountpoint: + self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype)) + grow_target = mountpoint else: - dummy, out, dummy = self.module.run_command(self.grow_cmd(dev), check_rc=True) - return out + grow_target = str(dev) + + dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True) + return out class Ext(Filesystem): - MKFS_FORCE_FLAGS = '-F' + MKFS_FORCE_FLAGS = ['-F'] + INFO = 'tune2fs' GROW = 'resize2fs' def get_fs_size(self, dev): - cmd = self.module.get_bin_path('tune2fs', required=True) - # Get Block count and Block size - dummy, size, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) - for line in size.splitlines(): + """Get Block count and Block size and return their product.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + + block_count = block_size = None + for line in out.splitlines(): if 'Block count:' in line: block_count = int(line.split(':')[1].strip()) elif 'Block size:' in line: block_size = int(line.split(':')[1].strip()) - return block_size * 
block_count + if None not in (block_size, block_count): + break + else: + raise ValueError(out) + + return block_size * block_count class Ext2(Ext): @@ -237,52 +270,46 @@ class Ext4(Ext): class XFS(Filesystem): MKFS = 'mkfs.xfs' - MKFS_FORCE_FLAGS = '-f' + MKFS_FORCE_FLAGS = ['-f'] + INFO = 'xfs_info' GROW = 'xfs_growfs' + GROW_MOUNTPOINT_ONLY = True def get_fs_size(self, dev): - cmd = self.module.get_bin_path('xfs_info', required=True) + """Get bsize and blocks and return their product.""" + cmdline = [self.module.get_bin_path(self.INFO, required=True)] + # Depending on the versions, xfs_info is able to get info from the + # device, whenever it is mounted or not, or only if unmounted, or + # only if mounted, or not at all. For any version until now, it is + # able to query info from the mountpoint. So try it first, and use + # device as the last resort: it may or may not work. mountpoint = dev.get_mountpoint() if mountpoint: - rc, out, err = self.module.run_command([cmd, str(mountpoint)], environ_update=self.LANG_ENV) + cmdline += [mountpoint] else: - # Recent GNU/Linux distros support access to unmounted XFS filesystems - rc, out, err = self.module.run_command([cmd, str(dev)], environ_update=self.LANG_ENV) - if rc != 0: - self.module.fail_json(msg="Error while attempting to query size of XFS filesystem: %s" % err) + cmdline += [str(dev)] + dummy, out, dummy = self.module.run_command(cmdline, check_rc=True, environ_update=self.LANG_ENV) + block_size = block_count = None for line in out.splitlines(): col = line.split('=') if col[0].strip() == 'data': - if col[1].strip() != 'bsize': - self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "bsize")') - if col[2].split()[1] != 'blocks': - self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "blocks")') - block_size = int(col[2].split()[0]) - block_count = int(col[3].split(',')[0]) - return block_size * block_count + if col[1].strip() == 'bsize': + block_size 
= int(col[2].split()[0]) + if col[2].split()[1] == 'blocks': + block_count = int(col[3].split(',')[0]) + if None not in (block_size, block_count): + break + else: + raise ValueError(out) - def grow_cmd(self, dev): - # Check first if growing is needed, and then if it is doable or not. - devsize_in_bytes = dev.size() - fssize_in_bytes = self.get_fs_size(dev) - if not fssize_in_bytes < devsize_in_bytes: - self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev)) - - mountpoint = dev.get_mountpoint() - if not mountpoint: - # xfs filesystem needs to be mounted - self.module.fail_json(msg="%s needs to be mounted for xfs operations" % dev) - - cmd = self.module.get_bin_path(self.GROW, required=True) - - return [cmd, str(mountpoint)] + return block_size * block_count class Reiserfs(Filesystem): MKFS = 'mkfs.reiserfs' - MKFS_FORCE_FLAGS = '-f' + MKFS_FORCE_FLAGS = ['-q'] class Btrfs(Filesystem): @@ -290,7 +317,8 @@ class Btrfs(Filesystem): def __init__(self, module): super(Btrfs, self).__init__(module) - dummy, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True) + mkfs = self.module.get_bin_path(self.MKFS, required=True) + dummy, stdout, stderr = self.module.run_command([mkfs, '--version'], check_rc=True) match = re.search(r" v([0-9.]+)", stdout) if not match: # v0.20-rc1 use stderr @@ -298,29 +326,27 @@ class Btrfs(Filesystem): if match: # v0.20-rc1 doesn't have --force parameter added in following version v3.12 if LooseVersion(match.group(1)) >= LooseVersion('3.12'): - self.MKFS_FORCE_FLAGS = '-f' - else: - self.MKFS_FORCE_FLAGS = '' + self.MKFS_FORCE_FLAGS = ['-f'] else: # assume version is greater or equal to 3.12 - self.MKFS_FORCE_FLAGS = '-f' + self.MKFS_FORCE_FLAGS = ['-f'] self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr)) class Ocfs2(Filesystem): MKFS = 'mkfs.ocfs2' - MKFS_FORCE_FLAGS = '-Fx' + MKFS_FORCE_FLAGS = ['-Fx'] class F2fs(Filesystem): MKFS 
= 'mkfs.f2fs' + INFO = 'dump.f2fs' GROW = 'resize.f2fs' - @property - def MKFS_FORCE_FLAGS(self): + def __init__(self, module): + super(F2fs, self).__init__(module) mkfs = self.module.get_bin_path(self.MKFS, required=True) - cmd = "%s %s" % (mkfs, os.devnull) - dummy, out, dummy = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV) + dummy, out, dummy = self.module.run_command([mkfs, os.devnull], check_rc=False, environ_update=self.LANG_ENV) # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)" # mkfs.f2fs displays version since v1.2.0 match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out) @@ -328,69 +354,73 @@ class F2fs(Filesystem): # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem # before that version -f switch wasn't used if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'): - return '-f' - - return '' + self.MKFS_FORCE_FLAGS = ['-f'] def get_fs_size(self, dev): - cmd = self.module.get_bin_path('dump.f2fs', required=True) - # Get sector count and sector size - dummy, dump, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) - sector_size = None - sector_count = None - for line in dump.splitlines(): + """Get sector size and total FS sectors and return their product.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) + sector_size = sector_count = None + for line in out.splitlines(): if 'Info: sector size = ' in line: # expected: 'Info: sector size = 512' sector_size = int(line.split()[4]) elif 'Info: total FS sectors = ' in line: # expected: 'Info: total FS sectors = 102400 (50 MB)' sector_count = int(line.split()[5]) - if None not in (sector_size, sector_count): break else: - self.module.warn("Unable to process dump.f2fs output '%s'", '\n'.join(dump)) - self.module.fail_json(msg="Unable to process dump.f2fs output for %s" % dev) + raise 
ValueError(out) return sector_size * sector_count class VFAT(Filesystem): - if platform.system() == 'FreeBSD': - MKFS = "newfs_msdos" - else: - MKFS = 'mkfs.vfat' + INFO = 'fatresize' GROW = 'fatresize' + GROW_MAX_SPACE_FLAGS = ['-s', 'max'] + + def __init__(self, module): + super(VFAT, self).__init__(module) + if platform.system() == 'FreeBSD': + self.MKFS = 'newfs_msdos' + else: + self.MKFS = 'mkfs.vfat' def get_fs_size(self, dev): - cmd = self.module.get_bin_path(self.GROW, required=True) - dummy, output, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) - for line in output.splitlines()[1:]: + """Get and return size of filesystem, in bytes.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV) + fssize = None + for line in out.splitlines()[1:]: param, value = line.split(':', 1) if param.strip() == 'Size': - return int(value.strip()) - self.module.fail_json(msg="fatresize failed to provide filesystem size for %s" % dev) + fssize = int(value.strip()) + break + else: + raise ValueError(out) - def grow_cmd(self, dev): - cmd = self.module.get_bin_path(self.GROW) - return [cmd, "-s", str(dev.size()), str(dev.path)] + return fssize class LVM(Filesystem): MKFS = 'pvcreate' - MKFS_FORCE_FLAGS = '-f' + MKFS_FORCE_FLAGS = ['-f'] + INFO = 'pvs' GROW = 'pvresize' def get_fs_size(self, dev): - cmd = self.module.get_bin_path('pvs', required=True) + """Get and return PV size, in bytes.""" + cmd = self.module.get_bin_path(self.INFO, required=True) dummy, size, dummy = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True) - block_count = int(size) - return block_count + pv_size = int(size) + return pv_size class Swap(Filesystem): MKFS = 'mkswap' - MKFS_FORCE_FLAGS = '-f' + MKFS_FORCE_FLAGS = ['-f'] FILESYSTEMS = { @@ -439,6 +469,10 @@ def 
main(): force = module.params['force'] resizefs = module.params['resizefs'] + mkfs_opts = [] + if opts is not None: + mkfs_opts = opts.split() + changed = False if not os.path.exists(dev): @@ -451,7 +485,7 @@ def main(): dev = Device(module, dev) cmd = module.get_bin_path('blkid', required=True) - rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev)) + rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)]) # In case blkid isn't able to identify an existing filesystem, device is considered as empty, # then this existing filesystem would be overwritten even if force isn't enabled. fs = raw_fs.strip() @@ -481,7 +515,7 @@ def main(): module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err) # create fs - filesystem.create(opts, dev) + filesystem.create(mkfs_opts, dev) changed = True elif fs: diff --git a/tests/integration/targets/filesystem/defaults/main.yml b/tests/integration/targets/filesystem/defaults/main.yml index 764b98b6ba..15ef85aa0e 100644 --- a/tests/integration/targets/filesystem/defaults/main.yml +++ b/tests/integration/targets/filesystem/defaults/main.yml @@ -17,7 +17,9 @@ tested_filesystems: ext2: {fssize: 10, grow: True} xfs: {fssize: 20, grow: False} # grow requires a mounted filesystem btrfs: {fssize: 150, grow: False} # grow not implemented + reiserfs: {fssize: 33, grow: False} # grow not implemented vfat: {fssize: 20, grow: True} ocfs2: {fssize: '{{ ocfs2_fssize }}', grow: False} # grow not implemented f2fs: {fssize: '{{ f2fs_fssize|default(60) }}', grow: 'f2fs_version is version("1.10.0", ">=")'} lvm: {fssize: 20, grow: True} + swap: {fssize: 10, grow: False} # grow not implemented diff --git a/tests/integration/targets/filesystem/tasks/create_device.yml b/tests/integration/targets/filesystem/tasks/create_device.yml index e49861e7ca..30fd62e33a 100644 --- a/tests/integration/targets/filesystem/tasks/create_device.yml +++ 
b/tests/integration/targets/filesystem/tasks/create_device.yml @@ -1,6 +1,9 @@ --- - name: 'Create a "disk" file' - command: 'dd if=/dev/zero of={{ image_file }} bs=1M count={{ fssize }}' + community.general.filesize: + path: '{{ image_file }}' + size: '{{ fssize }}M' + force: true - vars: dev: '{{ image_file }}' @@ -8,26 +11,29 @@ - when: fstype == 'lvm' block: - name: 'Create a loop device for LVM' - command: 'losetup --show -f {{ dev }}' + ansible.builtin.command: + cmd: 'losetup --show -f {{ dev }}' register: loop_device_cmd - - set_fact: + - name: 'Switch to loop device target for further tasks' + ansible.builtin.set_fact: dev: "{{ loop_device_cmd.stdout }}" - include_tasks: '{{ action }}.yml' always: - name: 'Detach loop device used for LVM' - command: 'losetup -d {{ dev }}' - args: + ansible.builtin.command: + cmd: 'losetup -d {{ dev }}' removes: '{{ dev }}' when: fstype == 'lvm' - name: 'Clean correct device for LVM' - set_fact: + ansible.builtin.set_fact: dev: '{{ image_file }}' when: fstype == 'lvm' - - file: + - name: 'Remove disk image file' + ansible.builtin.file: name: '{{ image_file }}' state: absent diff --git a/tests/integration/targets/filesystem/tasks/create_fs.yml b/tests/integration/targets/filesystem/tasks/create_fs.yml index 688a4462db..de1a9f18a0 100644 --- a/tests/integration/targets/filesystem/tasks/create_fs.yml +++ b/tests/integration/targets/filesystem/tasks/create_fs.yml @@ -1,43 +1,58 @@ -- name: filesystem creation - filesystem: +--- +- name: "Create filesystem" + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' register: fs_result -- assert: +- name: "Assert that results are as expected" + ansible.builtin.assert: that: - 'fs_result is changed' - 'fs_result is success' -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: "Get UUID of created filesystem" + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid - name: "Check that 
filesystem isn't created if force isn't used" - filesystem: + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' register: fs2_result -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: "Get UUID of the filesystem" + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid2 -- assert: +- name: "Assert that filesystem UUID is not changed" + ansible.builtin.assert: that: - - 'not (fs2_result is changed)' + - 'fs2_result is not changed' - 'fs2_result is success' - 'uuid.stdout == uuid2.stdout' -- name: Check that filesystem is recreated if force is used - filesystem: +- name: "Check that filesystem is recreated if force is used" + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' force: yes register: fs3_result -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: "Get UUID of the new filesystem" + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid3 -- assert: +- name: "Assert that filesystem UUID is changed" + # libblkid gets no UUID at all for this fstype on FreeBSD + when: not (ansible_system == 'FreeBSD' and fstype == 'reiserfs') + ansible.builtin.assert: that: - 'fs3_result is changed' - 'fs3_result is success' @@ -46,24 +61,31 @@ - when: 'grow|bool and (fstype != "vfat" or resize_vfat)' block: - - name: increase fake device - shell: 'dd if=/dev/zero bs=1M count=1 >> {{ image_file }}' + - name: "Increase fake device" + community.general.filesize: + path: '{{ image_file }}' + size: '{{ fssize | int + 1 }}M' - - name: Resize loop device for LVM - command: losetup -c {{ dev }} + - name: "Resize loop device for LVM" + ansible.builtin.command: + cmd: 'losetup -c {{ dev }}' when: fstype == 'lvm' - - name: Expand filesystem - filesystem: + - name: "Expand filesystem" + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' resizefs: yes register: fs4_result - - 
command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + - name: "Get UUID of the filesystem" + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid4 - - assert: + - name: "Assert that filesystem UUID is not changed" + ansible.builtin.assert: that: - 'fs4_result is changed' - 'fs4_result is success' @@ -74,14 +96,15 @@ (fstype == "xfs" and ansible_system == "Linux" and ansible_distribution not in ["CentOS", "Ubuntu"]) block: - - name: Check that resizefs does nothing if device size is not changed - filesystem: + - name: "Check that resizefs does nothing if device size is not changed" + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' resizefs: yes register: fs5_result - - assert: + - name: "Assert that the state did not change" + ansible.builtin.assert: that: - 'fs5_result is not changed' - 'fs5_result is succeeded' diff --git a/tests/integration/targets/filesystem/tasks/main.yml b/tests/integration/targets/filesystem/tasks/main.yml index 44e8c49f61..d836c8a15d 100644 --- a/tests/integration/targets/filesystem/tasks/main.yml +++ b/tests/integration/targets/filesystem/tasks/main.yml @@ -4,9 +4,9 @@ # and should not be used as examples of how to write Ansible roles # #################################################################### -- debug: +- ansible.builtin.debug: msg: '{{ role_name }}' -- debug: +- ansible.builtin.debug: msg: '{{ role_path|basename }}' - import_tasks: setup.yml @@ -27,29 +27,35 @@ grow: '{{ item.0.value.grow }}' action: '{{ item.1 }}' when: - - 'not (item.0.key == "btrfs" and ansible_system == "FreeBSD")' # btrfs not available on FreeBSD - # On Ubuntu trusty, blkid is unable to identify filesystem smaller than 256Mo, see - # https://www.kernel.org/pub/linux/utils/util-linux/v2.21/v2.21-ChangeLog - # https://anonscm.debian.org/cgit/collab-maint/pkg-util-linux.git/commit/?id=04f7020eadf31efc731558df92daa0a1c336c46c - - 'not (item.0.key == "btrfs" and 
(ansible_distribution == "Ubuntu" and ansible_distribution_release == "trusty"))' - - 'not (item.0.key == "btrfs" and (ansible_facts.os_family == "RedHat" and ansible_facts.distribution_major_version is version("8", ">=")))' - - 'not (item.0.key == "lvm" and ansible_system == "FreeBSD")' # LVM not available on FreeBSD - - 'not (item.0.key == "lvm" and ansible_virtualization_type in ["docker", "container", "containerd"])' # Tests use losetup which can not be used inside unprivileged container - - 'not (item.0.key == "ocfs2" and ansible_os_family != "Debian")' # ocfs2 only available on Debian based distributions - - 'not (item.0.key == "f2fs" and ansible_system == "FreeBSD")' - # f2fs-tools package not available with RHEL/CentOS - - 'not (item.0.key == "f2fs" and ansible_distribution in ["CentOS", "RedHat"])' - # On Ubuntu trusty, blkid (2.20.1) is unable to identify F2FS filesystem. blkid handles F2FS since v2.23, see: - # https://mirrors.edge.kernel.org/pub/linux/utils/util-linux/v2.23/v2.23-ReleaseNotes - - 'not (item.0.key == "f2fs" and ansible_distribution == "Ubuntu" and ansible_distribution_version is version("14.04", "<="))' - - 'not (item.1 == "overwrite_another_fs" and ansible_system == "FreeBSD")' + # FreeBSD limited support + # Not available: btrfs, lvm, f2fs, ocfs2 + # All BSD systems use swap fs, but only Linux needs mkswap + # Supported: ext2/3/4 (e2fsprogs), xfs (xfsprogs), reiserfs (progsreiserfs), vfat + - 'not (ansible_system == "FreeBSD" and item.0.key in ["btrfs", "f2fs", "swap", "lvm", "ocfs2"])' + # Available on FreeBSD but not on testbed (util-linux conflicts with e2fsprogs): wipefs, mkfs.minix + - 'not (ansible_system == "FreeBSD" and item.1 in ["overwrite_another_fs", "remove_fs"])' + + # Other limitations and corner cases + + # f2fs-tools and reiserfs-utils packages not available with RHEL/CentOS on CI + - 'not (ansible_distribution in ["CentOS", "RedHat"] and item.0.key in ["f2fs", "reiserfs"])' + - 'not (ansible_os_family == "RedHat" and 
ansible_distribution_major_version is version("8", ">=") and + item.0.key == "btrfs")' + # ocfs2 only available on Debian based distributions + - 'not (item.0.key == "ocfs2" and ansible_os_family != "Debian")' + # Tests use losetup which can not be used inside unprivileged container + - 'not (item.0.key == "lvm" and ansible_virtualization_type in ["docker", "container", "containerd"])' - - 'not (item.1 == "remove_fs" and ansible_system == "FreeBSD")' # util-linux not available on FreeBSD # On CentOS 6 shippable containers, wipefs seems unable to remove vfat signatures - - 'not (item.1 == "remove_fs" and item.0.key == "vfat" and ansible_distribution == "CentOS" and - ansible_distribution_version is version("7.0", "<"))' + - 'not (ansible_distribution == "CentOS" and ansible_distribution_version is version("7.0", "<") and + item.1 == "remove_fs" and item.0.key == "vfat")' + # On same systems, mkfs.minix (unhandled by the module) can't find the device/file + - 'not (ansible_distribution == "CentOS" and ansible_distribution_version is version("7.0", "<") and + item.1 == "overwrite_another_fs")' # The xfsprogs package on newer versions of OpenSUSE (15+) require Python 3, we skip this on our Python 2 container # OpenSUSE 42.3 Python2 and the other py3 containers are not affected so we will continue to run that - - 'not (item.0.key == "xfs" and ansible_os_family == "Suse" and ansible_python.version.major == 2 and ansible_distribution_major_version|int != 42)' + - 'not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and + item.0.key == "xfs" and ansible_python.version.major == 2)' + loop: "{{ query('dict', tested_filesystems)|product(['create_fs', 'overwrite_another_fs', 'remove_fs'])|list }}" diff --git a/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml b/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml index 671d9b0bea..4bf92836bb 100644 --- 
a/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml +++ b/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml @@ -1,40 +1,55 @@ --- - name: 'Recreate "disk" file' - command: 'dd if=/dev/zero of={{ image_file }} bs=1M count={{ fssize }}' + community.general.filesize: + path: '{{ image_file }}' + size: '{{ fssize }}M' + force: true -- name: 'Create a swap filesystem' - command: 'mkswap {{ dev }}' +- name: 'Create a minix filesystem' + ansible.builtin.command: + cmd: 'mkfs.minix {{ dev }}' -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: 'Get UUID of the new filesystem' + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid - name: "Check that an existing filesystem (not handled by this module) isn't overwritten when force isn't used" - filesystem: + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' register: fs_result ignore_errors: True -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: 'Get UUID of the filesystem' + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid2 -- assert: +- name: 'Assert that module failed and filesystem UUID is not changed' + ansible.builtin.assert: that: - 'fs_result is failed' - 'uuid.stdout == uuid2.stdout' - name: "Check that an existing filesystem (not handled by this module) is overwritten when force is used" - filesystem: + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' force: yes register: fs_result2 -- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}' +- name: 'Get UUID of the new filesystem' + ansible.builtin.command: + cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + changed_when: false register: uuid3 -- assert: +- name: 'Assert that module succeeded and filesystem UUID is changed' + ansible.builtin.assert: that: - - 'fs_result2 is successful' + - 'fs_result2 is success' - 
'fs_result2 is changed' - 'uuid2.stdout != uuid3.stdout' diff --git a/tests/integration/targets/filesystem/tasks/remove_fs.yml b/tests/integration/targets/filesystem/tasks/remove_fs.yml index 7d1ca2a19c..338d439d60 100644 --- a/tests/integration/targets/filesystem/tasks/remove_fs.yml +++ b/tests/integration/targets/filesystem/tasks/remove_fs.yml @@ -1,98 +1,98 @@ --- # We assume 'create_fs' tests have passed. -- name: filesystem creation - filesystem: +- name: "Create filesystem" + community.general.filesystem: dev: '{{ dev }}' fstype: '{{ fstype }}' -- name: get filesystem UUID with 'blkid' - command: +- name: "Get filesystem UUID with 'blkid'" + ansible.builtin.command: cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' changed_when: false register: blkid_ref -- name: Assert that a filesystem exists on top of the device - assert: +- name: "Assert that a filesystem exists on top of the device" + ansible.builtin.assert: that: - blkid_ref.stdout | length > 0 # Test check_mode first -- name: filesystem removal (check mode) - filesystem: +- name: "Remove filesystem (check mode)" + community.general.filesystem: dev: '{{ dev }}' state: absent register: wipefs check_mode: yes -- name: get filesystem UUID with 'blkid' (should remain the same) - command: +- name: "Get filesystem UUID with 'blkid' (should remain the same)" + ansible.builtin.command: cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' changed_when: false register: blkid -- name: Assert that the state changed but the filesystem still exists - assert: +- name: "Assert that the state changed but the filesystem still exists" + ansible.builtin.assert: that: - wipefs is changed - blkid.stdout == blkid_ref.stdout # Do it -- name: filesystem removal - filesystem: +- name: "Remove filesystem" + community.general.filesystem: dev: '{{ dev }}' state: absent register: wipefs -- name: get filesystem UUID with 'blkid' (should be empty) - command: +- name: "Get filesystem UUID with 'blkid' (should be empty)" + 
ansible.builtin.command: cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' changed_when: false failed_when: false register: blkid -- name: Assert that the state changed and the device has no filesystem - assert: +- name: "Assert that the state changed and the device has no filesystem" + ansible.builtin.assert: that: - wipefs is changed - blkid.stdout | length == 0 - blkid.rc == 2 # Do it again -- name: filesystem removal (idempotency) - filesystem: +- name: "Remove filesystem (idempotency)" + community.general.filesystem: dev: '{{ dev }}' state: absent register: wipefs -- name: Assert that the state did not change - assert: +- name: "Assert that the state did not change" + ansible.builtin.assert: that: - wipefs is not changed # and again -- name: filesystem removal (idempotency, check mode) - filesystem: +- name: "Remove filesystem (idempotency, check mode)" + community.general.filesystem: dev: '{{ dev }}' state: absent register: wipefs check_mode: yes -- name: Assert that the state did not change - assert: +- name: "Assert that the state did not change" + ansible.builtin.assert: that: - wipefs is not changed # By the way, test removal of a filesystem on unexistent device -- name: filesystem removal (unexistent device) - filesystem: +- name: "Remove filesystem (unexistent device)" + community.general.filesystem: dev: '/dev/unexistent_device' state: absent register: wipefs -- name: Assert that the state did not change - assert: +- name: "Assert that the state did not change" + ansible.builtin.assert: that: - wipefs is not changed diff --git a/tests/integration/targets/filesystem/tasks/setup.yml b/tests/integration/targets/filesystem/tasks/setup.yml index 82fe7c54e6..9ca4b983d0 100644 --- a/tests/integration/targets/filesystem/tasks/setup.yml +++ b/tests/integration/targets/filesystem/tasks/setup.yml @@ -1,6 +1,9 @@ --- -- name: install filesystem tools - package: +# By installing e2fsprogs on FreeBSD, we get a usable blkid command, but this +# package conflicts 
with util-linux, that provides blkid too, but also wipefs +# (required for filesystem state=absent). +- name: "Install filesystem tools" + ansible.builtin.package: name: '{{ item }}' state: present # xfsprogs on OpenSUSE requires Python 3, skip this for our newer Py2 OpenSUSE builds @@ -9,86 +12,134 @@ - e2fsprogs - xfsprogs -- block: - - name: install btrfs progs - package: - name: btrfs-progs - state: present - when: - - ansible_os_family != 'Suse' - - not (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('16.04', '<=')) - - ansible_system != "FreeBSD" - - not (ansible_facts.os_family == "RedHat" and ansible_facts.distribution_major_version is version('8', '>=')) +- name: "Install btrfs progs" + ansible.builtin.package: + name: btrfs-progs + state: present + when: + - ansible_os_family != 'Suse' + - not (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('16.04', '<=')) + - ansible_system != "FreeBSD" + - not (ansible_facts.os_family == "RedHat" and ansible_facts.distribution_major_version is version('8', '>=')) - - name: install btrfs progs (Ubuntu <= 16.04) - package: - name: btrfs-tools - state: present - when: ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('16.04', '<=') +- name: "Install btrfs tools (Ubuntu <= 16.04)" + ansible.builtin.package: + name: btrfs-tools + state: present + when: + - ansible_distribution == 'Ubuntu' + - ansible_distribution_version is version('16.04', '<=') - - name: install btrfs progs (OpenSuse) - package: - name: '{{ item }}' - state: present - when: ansible_os_family == 'Suse' - with_items: - - python{{ ansible_python.version.major }}-xml - - btrfsprogs +- name: "Install btrfs progs (OpenSuse)" + ansible.builtin.package: + name: '{{ item }}' + state: present + when: ansible_os_family == 'Suse' + with_items: + - python{{ ansible_python.version.major }}-xml + - btrfsprogs - - name: install ocfs2 (Debian) - package: - name: ocfs2-tools - state: 
present - when: ansible_os_family == 'Debian' +- name: "Install reiserfs utils (Fedora)" + ansible.builtin.package: + name: reiserfs-utils + state: present + when: + - ansible_distribution == 'Fedora' - - when: - - ansible_os_family != 'RedHat' or ansible_distribution == 'Fedora' - - ansible_distribution != 'Ubuntu' or ansible_distribution_version is version('16.04', '>=') - - ansible_system != "FreeBSD" - block: - - name: install f2fs - package: - name: f2fs-tools - state: present +- name: "Install reiserfs (OpenSuse)" + ansible.builtin.package: + name: reiserfs + state: present + when: + - ansible_os_family == 'Suse' - - name: fetch f2fs version - command: mkfs.f2fs /dev/null - ignore_errors: yes - register: mkfs_f2fs +- name: "Install reiserfs progs (Debian and more)" + ansible.builtin.package: + name: reiserfsprogs + state: present + when: + - ansible_system == 'Linux' + - ansible_os_family not in ['Suse', 'RedHat'] - - set_fact: - f2fs_version: '{{ mkfs_f2fs.stdout | regex_search("F2FS-tools: mkfs.f2fs Ver:.*") | regex_replace("F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) .*", "\1") }}' +- name: "Install reiserfs progs (FreeBSD)" + ansible.builtin.package: + name: progsreiserfs + state: present + when: + - ansible_system == 'FreeBSD' - - name: install dosfstools and lvm2 (Linux) - package: - name: '{{ item }}' - with_items: - - dosfstools - - lvm2 - when: ansible_system == 'Linux' +- name: "Install ocfs2 (Debian)" + ansible.builtin.package: + name: ocfs2-tools + state: present + when: ansible_os_family == 'Debian' -- block: - - name: install fatresize - package: - name: fatresize - state: present - - command: fatresize --help - register: fatresize - - set_fact: - fatresize_version: '{{ fatresize.stdout_lines[0] | regex_search("[0-9]+\.[0-9]+\.[0-9]+") }}' +- name: "Install f2fs tools and get version" + when: + - ansible_os_family != 'RedHat' or ansible_distribution == 'Fedora' + - ansible_distribution != 'Ubuntu' or ansible_distribution_version is version('16.04', 
'>=') + - ansible_system != "FreeBSD" + block: + - name: "Install f2fs tools" + ansible.builtin.package: + name: f2fs-tools + state: present + + - name: "Fetch f2fs version" + ansible.builtin.command: + cmd: mkfs.f2fs /dev/null + changed_when: false + ignore_errors: true + register: mkfs_f2fs + + - name: "Record f2fs_version" + ansible.builtin.set_fact: + f2fs_version: '{{ mkfs_f2fs.stdout + | regex_search("F2FS-tools: mkfs.f2fs Ver:.*") + | regex_replace("F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) .*", "\1") }}' + +- name: "Install dosfstools and lvm2 (Linux)" + ansible.builtin.package: + name: '{{ item }}' + with_items: + - dosfstools + - lvm2 + when: ansible_system == 'Linux' + +- name: "Install fatresize and get version" when: - ansible_system == 'Linux' - ansible_os_family != 'Suse' - ansible_os_family != 'RedHat' or (ansible_distribution == 'CentOS' and ansible_distribution_version is version('7.0', '==')) + block: + - name: "Install fatresize" + ansible.builtin.package: + name: fatresize + state: present -- command: mke2fs -V + - name: "Fetch fatresize version" + ansible.builtin.command: + cmd: fatresize --help + changed_when: false + register: fatresize + + - name: "Record fatresize_version" + ansible.builtin.set_fact: + fatresize_version: '{{ fatresize.stdout_lines[0] | regex_search("[0-9]+\.[0-9]+\.[0-9]+") }}' + +- name: "Fetch e2fsprogs version" + ansible.builtin.command: + cmd: mke2fs -V + changed_when: false register: mke2fs -- set_fact: +- name: "Record e2fsprogs_version" + ansible.builtin.set_fact: # mke2fs 1.43.6 (29-Aug-2017) e2fsprogs_version: '{{ mke2fs.stderr_lines[0] | regex_search("[0-9]{1,2}\.[0-9]{1,2}(\.[0-9]{1,2})?") }}' -- set_fact: +- name: "Set version-related facts to skip further tasks" + ansible.builtin.set_fact: # http://e2fsprogs.sourceforge.net/e2fsprogs-release.html#1.43 # Mke2fs no longer complains if the user tries to create a file system # using the entire block device. 
From 2c1ab2d384cc44136e76a9177a7e87e4c7d1f96a Mon Sep 17 00:00:00 2001 From: quidame Date: Tue, 18 May 2021 11:51:37 +0200 Subject: [PATCH 0301/3093] iptables_state: fix per-table initialization command (#2525) * refactor initialize_from_null_state() * Use a more neutral command (iptables -L) to load per-table needed modules. * fix 'FutureWarning: Possible nested set at position ...' (re.sub) * fix pylints (module + action plugin) * unsubscriptable-object * superfluous-parens * consider-using-in * unused-variable * unused-import * no-else-break * cleanup other internal module_args if they exist * add changelog fragment * Apply suggestions from code review (changelog fragment) Co-authored-by: Felix Fontein * Remove useless plugin type in changelog fragment Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- ...ables_state-fix-initialization-command.yml | 6 +++ plugins/action/system/iptables_state.py | 19 ++++--- plugins/modules/system/iptables_state.py | 49 +++++++++---------- 3 files changed, 37 insertions(+), 37 deletions(-) create mode 100644 changelogs/fragments/2525-iptables_state-fix-initialization-command.yml diff --git a/changelogs/fragments/2525-iptables_state-fix-initialization-command.yml b/changelogs/fragments/2525-iptables_state-fix-initialization-command.yml new file mode 100644 index 0000000000..552c0b26ab --- /dev/null +++ b/changelogs/fragments/2525-iptables_state-fix-initialization-command.yml @@ -0,0 +1,6 @@ +--- +bugfixes: + - "iptables_state - fix initialization of iptables from null state when adressing + more than one table (https://github.com/ansible-collections/community.general/issues/2523)." + - "iptables_state - fix a 'FutureWarning' in a regex and do some basic code clean up + (https://github.com/ansible-collections/community.general/pull/2525)." 
diff --git a/plugins/action/system/iptables_state.py b/plugins/action/system/iptables_state.py index cc174b3bd7..96b6dc689c 100644 --- a/plugins/action/system/iptables_state.py +++ b/plugins/action/system/iptables_state.py @@ -7,7 +7,7 @@ __metaclass__ = type import time from ansible.plugins.action import ActionBase -from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure +from ansible.errors import AnsibleActionFail, AnsibleConnectionFailure from ansible.utils.vars import merge_hash from ansible.utils.display import Display @@ -46,7 +46,7 @@ class ActionModule(ActionBase): the async wrapper results (those with the ansible_job_id key). ''' # At least one iteration is required, even if timeout is 0. - for i in range(max(1, timeout)): + for dummy in range(max(1, timeout)): async_result = self._execute_module( module_name='ansible.builtin.async_status', module_args=module_args, @@ -76,7 +76,6 @@ class ActionModule(ActionBase): task_async = self._task.async_val check_mode = self._play_context.check_mode max_timeout = self._connection._play_context.timeout - module_name = self._task.action module_args = self._task.args if module_args.get('state', None) == 'restored': @@ -133,7 +132,7 @@ class ActionModule(ActionBase): # The module is aware to not process the main iptables-restore # command before finding (and deleting) the 'starter' cookie on # the host, so the previous query will not reach ssh timeout. - garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE) + dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE) # As the main command is not yet executed on the target, here # 'finished' means 'failed before main command be executed'. @@ -143,7 +142,7 @@ class ActionModule(ActionBase): except AttributeError: pass - for x in range(max_timeout): + for dummy in range(max_timeout): time.sleep(1) remaining_time -= 1 # - AnsibleConnectionFailure covers rejected requests (i.e. 
@@ -151,7 +150,7 @@ class ActionModule(ActionBase): # - ansible_timeout is able to cover dropped requests (due # to a rule or policy DROP) if not lower than async_val. try: - garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE) + dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE) break except AnsibleConnectionFailure: continue @@ -164,12 +163,12 @@ class ActionModule(ActionBase): del result[key] if result.get('invocation', {}).get('module_args'): - if '_timeout' in result['invocation']['module_args']: - del result['invocation']['module_args']['_back'] - del result['invocation']['module_args']['_timeout'] + for key in ('_back', '_timeout', '_async_dir', 'jid'): + if result['invocation']['module_args'].get(key): + del result['invocation']['module_args'][key] async_status_args['mode'] = 'cleanup' - garbage = self._execute_module( + dummy = self._execute_module( module_name='ansible.builtin.async_status', module_args=async_status_args, task_vars=task_vars, diff --git a/plugins/modules/system/iptables_state.py b/plugins/modules/system/iptables_state.py index 5647526819..326db862bc 100644 --- a/plugins/modules/system/iptables_state.py +++ b/plugins/modules/system/iptables_state.py @@ -232,7 +232,7 @@ import filecmp import shutil from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils._text import to_bytes, to_native IPTABLES = dict( @@ -262,7 +262,7 @@ def read_state(b_path): lines = text.splitlines() while '' in lines: lines.remove('') - return (lines) + return lines def write_state(b_path, lines, changed): @@ -282,9 +282,9 @@ def write_state(b_path, lines, changed): if b_destdir and not os.path.exists(b_destdir) and not module.check_mode: try: os.makedirs(b_destdir) - except Exception as e: + except Exception as err: module.fail_json( - msg='Error creating %s. Error code: %s. 
Error description: %s' % (destdir, e[0], e[1]), + msg='Error creating %s: %s' % (destdir, to_native(err)), initial_state=lines) changed = True @@ -295,10 +295,10 @@ def write_state(b_path, lines, changed): if changed and not module.check_mode: try: shutil.copyfile(tmpfile, b_path) - except Exception as e: + except Exception as err: path = to_native(b_path, errors='surrogate_or_strict') module.fail_json( - msg='Error saving state into %s. Error code: %s. Error description: %s' % (path, e[0], e[1]), + msg='Error saving state into %s: %s' % (path, to_native(err)), initial_state=lines) return changed @@ -313,14 +313,11 @@ def initialize_from_null_state(initializer, initcommand, table): if table is None: table = 'filter' - tmpfd, tmpfile = tempfile.mkstemp() - with os.fdopen(tmpfd, 'w') as f: - f.write('*%s\nCOMMIT\n' % table) - - initializer.append(tmpfile) - (rc, out, err) = module.run_command(initializer, check_rc=True) + commandline = list(initializer) + commandline += ['-t', table] + (rc, out, err) = module.run_command(commandline, check_rc=True) (rc, out, err) = module.run_command(initcommand, check_rc=True) - return (rc, out, err) + return rc, out, err def filter_and_format_state(string): @@ -328,13 +325,13 @@ def filter_and_format_state(string): Remove timestamps to ensure idempotence between runs. Also remove counters by default. And return the result as a list. 
''' - string = re.sub('((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', '\\1', string) + string = re.sub(r'((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', r'\1', string) if not module.params['counters']: - string = re.sub('[[][0-9]+:[0-9]+[]]', '[0:0]', string) + string = re.sub(r'\[[0-9]+:[0-9]+\]', r'[0:0]', string) lines = string.splitlines() while '' in lines: lines.remove('') - return (lines) + return lines def per_table_state(command, state): @@ -347,14 +344,14 @@ def per_table_state(command, state): COMMAND = list(command) if '*%s' % t in state.splitlines(): COMMAND.extend(['--table', t]) - (rc, out, err) = module.run_command(COMMAND, check_rc=True) - out = re.sub('(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, '', out) - out = re.sub(' *[[][0-9]+:[0-9]+[]] *', '', out) + dummy, out, dummy = module.run_command(COMMAND, check_rc=True) + out = re.sub(r'(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, r'', out) + out = re.sub(r' *\[[0-9]+:[0-9]+\] *', r'', out) table = out.splitlines() while '' in table: table.remove('') tables[t] = table - return (tables) + return tables def main(): @@ -402,7 +399,7 @@ def main(): changed = False COMMANDARGS = [] INITCOMMAND = [bin_iptables_save] - INITIALIZER = [bin_iptables_restore] + INITIALIZER = [bin_iptables, '-L', '-n'] TESTCOMMAND = [bin_iptables_restore, '--test'] if counters: @@ -502,7 +499,7 @@ def main(): if _back is not None: b_back = to_bytes(_back, errors='surrogate_or_strict') - garbage = write_state(b_back, initref_state, changed) + dummy = write_state(b_back, initref_state, changed) BACKCOMMAND = list(MAINCOMMAND) BACKCOMMAND.append(_back) @@ -559,9 +556,7 @@ def main(): if os.path.exists(b_starter): os.remove(b_starter) break - else: - time.sleep(0.01) - continue + time.sleep(0.01) (rc, stdout, stderr) = module.run_command(MAINCOMMAND) if 'Another app is currently holding the xtables lock' in stderr: @@ -579,7 +574,7 @@ def main(): (rc, stdout, stderr) = module.run_command(SAVECOMMAND, 
check_rc=True) restored_state = filter_and_format_state(stdout) - if restored_state != initref_state and restored_state != initial_state: + if restored_state not in (initref_state, initial_state): if module.check_mode: changed = True else: @@ -609,7 +604,7 @@ def main(): # timeout # * task attribute 'poll' equals 0 # - for x in range(_timeout): + for dummy in range(_timeout): if os.path.exists(b_back): time.sleep(1) continue From 31687a524ea22ac313e18af067b413c313f8c714 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 18 May 2021 11:57:59 +0200 Subject: [PATCH 0302/3093] Next planned release is 3.2.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index a4b4cad7e0..ba1969d712 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.1.0 +version: 3.2.0 readme: README.md authors: - Ansible (https://github.com/ansible) From c4624d3ad8db66a3f7d21656fef8a8f60a907aeb Mon Sep 17 00:00:00 2001 From: Andre Lehmann Date: Tue, 18 May 2021 12:59:11 +0200 Subject: [PATCH 0303/3093] pacman: add 'executable' option to use an alternative pacman binary (#2524) * Add 'bin' option to use an alternative pacman binary * Add changelog entry * Incorporate recommendations * Update plugins/modules/packaging/os/pacman.py * Apply suggestions from code review Co-authored-by: Felix Fontein --- .../fragments/2524-pacman_add_bin_option.yml | 2 ++ plugins/modules/packaging/os/pacman.py | 26 ++++++++++++++++--- 2 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2524-pacman_add_bin_option.yml diff --git a/changelogs/fragments/2524-pacman_add_bin_option.yml b/changelogs/fragments/2524-pacman_add_bin_option.yml new file mode 100644 index 0000000000..1a7c78f7ec --- /dev/null +++ b/changelogs/fragments/2524-pacman_add_bin_option.yml @@ -0,0 +1,2 @@ +minor_changes: + - pacman - add ``executable`` option to use an alternative pacman binary 
(https://github.com/ansible-collections/community.general/issues/2524). diff --git a/plugins/modules/packaging/os/pacman.py b/plugins/modules/packaging/os/pacman.py index b19528ba9e..859c90a6c4 100644 --- a/plugins/modules/packaging/os/pacman.py +++ b/plugins/modules/packaging/os/pacman.py @@ -44,6 +44,14 @@ options: default: no type: bool + executable: + description: + - Name of binary to use. This can either be C(pacman) or a pacman compatible AUR helper. + - Beware that AUR helpers might behave unexpectedly and are therefore not recommended. + default: pacman + type: str + version_added: 3.1.0 + extra_args: description: - Additional option to pass to pacman when enforcing C(state). @@ -79,8 +87,10 @@ options: type: str notes: - - When used with a `loop:` each package will be processed individually, - it is much more efficient to pass the list directly to the `name` option. + - When used with a C(loop:) each package will be processed individually, + it is much more efficient to pass the list directly to the I(name) option. + - To use an AUR helper (I(executable) option), a few extra setup steps might be required beforehand. + For example, a dedicated build user with permissions to install packages could be necessary. 
''' RETURN = ''' @@ -109,6 +119,13 @@ EXAMPLES = ''' - ~/bar-1.0-1-any.pkg.tar.xz state: present +- name: Install package from AUR using a Pacman compatible AUR helper + community.general.pacman: + name: foo + state: present + executable: yay + extra_args: --builddir /var/cache/yay + - name: Upgrade package foo community.general.pacman: name: foo @@ -419,6 +436,7 @@ def main(): name=dict(type='list', elements='str', aliases=['pkg', 'package']), state=dict(type='str', default='present', choices=['present', 'installed', 'latest', 'absent', 'removed']), force=dict(type='bool', default=False), + executable=dict(type='str', default='pacman'), extra_args=dict(type='str', default=''), upgrade=dict(type='bool', default=False), upgrade_extra_args=dict(type='str', default=''), @@ -432,11 +450,13 @@ def main(): supports_check_mode=True, ) - pacman_path = module.get_bin_path('pacman', True) module.run_command_environ_update = dict(LC_ALL='C') p = module.params + # find pacman binary + pacman_path = module.get_bin_path(p['executable'], True) + # normalize the state parameter if p['state'] in ['present', 'installed']: p['state'] = 'present' From 452a185a2364b6c404093a1d5c6a6efa0e092c18 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 20 May 2021 05:38:11 +1200 Subject: [PATCH 0304/3093] removed supporting code for testing module "nuage" - no longer exists here (#2559) --- .../prepare_nuage_tests/tasks/main.yml | 24 ------------------- 1 file changed, 24 deletions(-) delete mode 100644 tests/integration/targets/prepare_nuage_tests/tasks/main.yml diff --git a/tests/integration/targets/prepare_nuage_tests/tasks/main.yml b/tests/integration/targets/prepare_nuage_tests/tasks/main.yml deleted file mode 100644 index 2a902dc828..0000000000 --- a/tests/integration/targets/prepare_nuage_tests/tasks/main.yml +++ /dev/null @@ -1,24 +0,0 @@ -#################################################################### -# WARNING: These are designed 
specifically for Ansible tests # -# and should not be used as examples of how to write Ansible roles # -#################################################################### - -- block: - - name: Install Nuage VSD API Simulator - pip: - name: nuage-vsd-sim - - - name: Start Nuage VSD API Simulator - shell: "(cd /; nuage-vsd-sim >/dev/null 2>&1)" - async: 1800 - poll: 0 - - - name: Wait for API to be ready - uri: - url: http://localhost:5000 - register: api - delay: 3 - retries: 10 - until: api.status == 200 - - when: "ansible_python_version is version('2.7', '>=')" From 1403f5edccd34027b25dfda9fa61309e16b0f3d2 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 21 May 2021 05:43:16 +1200 Subject: [PATCH 0305/3093] ModuleHelper: CmdMixin custom function for processing cmd results (#2564) * MH: custom function for processing cmd results * added changelog fragment * removed case of process_output being a str --- changelogs/fragments/2564-mh-cmd-process-output.yml | 2 ++ plugins/module_utils/mh/mixins/cmd.py | 9 +++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2564-mh-cmd-process-output.yml diff --git a/changelogs/fragments/2564-mh-cmd-process-output.yml b/changelogs/fragments/2564-mh-cmd-process-output.yml new file mode 100644 index 0000000000..717c0d7fbb --- /dev/null +++ b/changelogs/fragments/2564-mh-cmd-process-output.yml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper module utils - method ``CmdMixin.run_command()`` now accepts ``process_output`` specifying a function to process the outcome of the underlying ``module.run_command()`` (https://github.com/ansible-collections/community.general/pull/2564). 
diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py index eb7cc698cc..724708868e 100644 --- a/plugins/module_utils/mh/mixins/cmd.py +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -152,7 +152,7 @@ class CmdMixin(object): def process_command_output(self, rc, out, err): return rc, out, err - def run_command(self, extra_params=None, params=None, *args, **kwargs): + def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs): self.vars.cmd_args = self._calculate_args(extra_params, params) options = dict(self.run_command_fixed_options) env_update = dict(options.get('environ_update', {})) @@ -164,4 +164,9 @@ class CmdMixin(object): options.update(kwargs) rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options) self.update_output(rc=rc, stdout=out, stderr=err) - return self.process_command_output(rc, out, err) + if process_output is None: + _process = self.process_command_output + else: + _process = process_output + + return _process(rc, out, err) From 7a169af0534d21142e2ddd5b89a882aedd2b6256 Mon Sep 17 00:00:00 2001 From: momcilo78 Date: Thu, 20 May 2021 22:06:00 +0200 Subject: [PATCH 0306/3093] Add comment_visibility parameter for comment operation for jira module (#2556) * Add comment_visibility parameter for comment operation for jira module Co-authored-by: felixfontein * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein * addressed pep8 E711 * Added missing parameter. * params is not in use anymore. * It appears other modules are using options, where in documentation they use suboptions. Inconsistancy? 
* adjusted indentation * tweaked suboptions, fixed documentation * Added fragment * Update changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml Co-authored-by: Felix Fontein * Update plugins/modules/web_infrastructure/jira.py Co-authored-by: Felix Fontein Co-authored-by: felixfontein --- ...r-for-comment-operation-of-jira-module.yml | 2 + plugins/modules/web_infrastructure/jira.py | 39 +++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml diff --git a/changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml b/changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml new file mode 100644 index 0000000000..e31fad744a --- /dev/null +++ b/changelogs/fragments/2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml @@ -0,0 +1,2 @@ +minor_changes: + - jira - add comment visibility parameter for comment operation (https://github.com/ansible-collections/community.general/pull/2556). diff --git a/plugins/modules/web_infrastructure/jira.py b/plugins/modules/web_infrastructure/jira.py index 6acf0c7f51..4c10974126 100644 --- a/plugins/modules/web_infrastructure/jira.py +++ b/plugins/modules/web_infrastructure/jira.py @@ -86,6 +86,25 @@ options: - The comment text to add. - Note that JIRA may not allow changing field values on specific transitions or states. + comment_visibility: + type: dict + description: + - Used to specify comment comment visibility. + - See U(https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-comments/#api-rest-api-2-issue-issueidorkey-comment-post) for details. + suboptions: + type: + description: + - Use type to specify which of the JIRA visibility restriction types will be used. 
+ type: str + required: true + choices: [group, role] + value: + description: + - Use value to specify value corresponding to the type of visibility restriction. For example name of the group or role. + type: str + required: true + version_added: '3.2.0' + status: type: str required: false @@ -223,6 +242,18 @@ EXAMPLES = r""" operation: comment comment: A comment added by Ansible +- name: Comment on issue with restricted visibility + community.general.jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: comment + comment: A comment added by Ansible + comment_visibility: + type: role + value: Developers + # Assign an existing issue using edit - name: Assign an issue using free-form fields community.general.jira: @@ -385,6 +416,10 @@ class JIRA(StateModuleHelper): issuetype=dict(type='str', ), issue=dict(type='str', aliases=['ticket']), comment=dict(type='str', ), + comment_visibility=dict(type='dict', options=dict( + type=dict(type='str', choices=['group', 'role'], required=True), + value=dict(type='str', required=True) + )), status=dict(type='str', ), assignee=dict(type='str', ), fields=dict(default={}, type='dict'), @@ -445,6 +480,10 @@ class JIRA(StateModuleHelper): data = { 'body': self.vars.comment } + # if comment_visibility is specified restrict visibility + if self.vars.comment_visibility is not None: + data['visibility'] = self.vars.comment_visibility + url = self.vars.restbase + '/issue/' + self.vars.issue + '/comment' self.vars.meta = self.post(url, data) From 852e2405256b661a1a306a3e7656a60e7fba6803 Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Fri, 21 May 2021 22:15:22 +0530 Subject: [PATCH 0307/3093] Add missing author name (#2570) Signed-off-by: Abhijeet Kasurde --- plugins/inventory/stackpath_compute.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index fb879e869e..8e6b5bf953 100644 --- 
a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -10,6 +10,8 @@ DOCUMENTATION = ''' name: stackpath_compute short_description: StackPath Edge Computing inventory source version_added: 1.2.0 + author: + - UNKNOWN (@shayrybak) extends_documentation_fragment: - inventory_cache - constructed From 8f083d5d85ddf4f98aee8221bf4cb3c4a721e7d6 Mon Sep 17 00:00:00 2001 From: absynth76 <58172580+absynth76@users.noreply.github.com> Date: Sat, 22 May 2021 13:33:27 +0200 Subject: [PATCH 0308/3093] java_cert - fix incorrect certificate alias on pkcs12 import (#2560) * fix wrong certificate alias used when importing pkcs12, modify error output, stdout is more relevant than stderr * add changelog fragment * fix changelog fragment --- .../2560-java_cert-pkcs12-alias-bugfix.yml | 2 + plugins/modules/system/java_cert.py | 4 +- .../targets/java_cert/tasks/state_change.yml | 138 ++++++++++++------ 3 files changed, 98 insertions(+), 46 deletions(-) create mode 100644 changelogs/fragments/2560-java_cert-pkcs12-alias-bugfix.yml diff --git a/changelogs/fragments/2560-java_cert-pkcs12-alias-bugfix.yml b/changelogs/fragments/2560-java_cert-pkcs12-alias-bugfix.yml new file mode 100644 index 0000000000..471962d74f --- /dev/null +++ b/changelogs/fragments/2560-java_cert-pkcs12-alias-bugfix.yml @@ -0,0 +1,2 @@ +bugfixes: + - "java_cert - fix issue with incorrect alias used on PKCS#12 certificate import (https://github.com/ansible-collections/community.general/pull/2560)." 
diff --git a/plugins/modules/system/java_cert.py b/plugins/modules/system/java_cert.py index ad56358034..1c507f9277 100644 --- a/plugins/modules/system/java_cert.py +++ b/plugins/modules/system/java_cert.py @@ -278,7 +278,7 @@ def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, passwo (export_rc, export_stdout, export_err) = module.run_command(export_cmd, data=password, check_rc=False) if export_rc != 0: - module.fail_json(msg="Internal module failure, cannot extract public certificate from pkcs12, error: %s" % export_err, + module.fail_json(msg="Internal module failure, cannot extract public certificate from pkcs12, error: %s" % export_stdout, rc=export_rc) with open(dest, 'w') as f: @@ -498,7 +498,7 @@ def main(): if pkcs12_path: # Extracting certificate with openssl - _export_public_cert_from_pkcs12(module, executable, pkcs12_path, cert_alias, pkcs12_pass, new_certificate) + _export_public_cert_from_pkcs12(module, executable, pkcs12_path, pkcs12_alias, pkcs12_pass, new_certificate) elif path: # Extracting the X509 digest is a bit easier. 
Keytool will print the PEM diff --git a/tests/integration/targets/java_cert/tasks/state_change.yml b/tests/integration/targets/java_cert/tasks/state_change.yml index 3c37fc6727..8cee41106f 100644 --- a/tests/integration/targets/java_cert/tasks/state_change.yml +++ b/tests/integration/targets/java_cert/tasks/state_change.yml @@ -4,52 +4,11 @@ args: creates: "{{ test_key_path }}" -- name: Create the test keystore - java_keystore: - name: placeholder - dest: "{{ test_keystore2_path }}" - password: "{{ test_keystore2_password }}" - private_key: "{{ lookup('file', '{{ test_key_path }}') }}" - certificate: "{{ lookup('file', '{{ test_cert_path }}') }}" - - name: Generate the self signed cert we will use for testing command: openssl req -x509 -newkey rsa:4096 -keyout '{{ test_key2_path }}' -out '{{ test_cert2_path }}' -days 365 -nodes -subj '/CN=localhost' args: creates: "{{ test_key2_path }}" -- name: | - Import the newly created certificate. This is our main test. - If the java_cert has been updated properly, then this task will report changed each time - since the module will be comparing the hash of the certificate instead of validating that the alias - simply exists - java_cert: - cert_alias: test_cert - cert_path: "{{ test_cert2_path }}" - keystore_path: "{{ test_keystore2_path }}" - keystore_pass: "{{ test_keystore2_password }}" - state: present - register: result_x509_changed - -- name: Verify the x509 status has changed - assert: - that: - - result_x509_changed is changed - -- name: | - We also want to make sure that the status doesnt change if we import the same cert - java_cert: - cert_alias: test_cert - cert_path: "{{ test_cert2_path }}" - keystore_path: "{{ test_keystore2_path }}" - keystore_pass: "{{ test_keystore2_password }}" - state: present - register: result_x509_succeeded - -- name: Verify the x509 status is ok - assert: - that: - - result_x509_succeeded is succeeded - - name: Create the pkcs12 archive from the test x509 cert command: > openssl pkcs12 
@@ -70,6 +29,97 @@ -out {{ test_pkcs2_path }} -passout pass:"{{ test_keystore2_password }}" +- name: try to create the test keystore based on the just created pkcs12, keystore_create flag not enabled + java_cert: + cert_alias: test_pkcs12_cert + pkcs12_alias: test_pkcs12_cert + pkcs12_path: "{{ test_pkcs_path }}" + pkcs12_password: "{{ test_keystore2_password }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + ignore_errors: true + register: result_x509_changed + +- name: Verify the x509 status is failed + assert: + that: + - result_x509_changed is failed + +- name: Create the test keystore based on the just created pkcs12 + java_cert: + cert_alias: test_pkcs12_cert + pkcs12_alias: test_pkcs12_cert + pkcs12_path: "{{ test_pkcs_path }}" + pkcs12_password: "{{ test_keystore2_password }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + keystore_create: yes + +- name: try to import from pkcs12 a non existing alias + java_cert: + cert_alias: test_pkcs12_cert + pkcs12_alias: non_existing_alias + pkcs12_path: "{{ test_pkcs_path }}" + pkcs12_password: "{{ test_keystore2_password }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + keystore_create: yes + ignore_errors: yes + register: result_x509_changed + +- name: Verify the x509 status is failed + assert: + that: + - result_x509_changed is failed + +- name: import initial test certificate from file path + java_cert: + cert_alias: test_cert + cert_path: "{{ test_cert_path }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + keystore_create: yes + state: present + register: result_x509_changed + +- name: Verify the x509 status is changed + assert: + that: + - result_x509_changed is changed + +- name: | + Import the newly created certificate. This is our main test. 
+ If the java_cert has been updated properly, then this task will report changed each time + since the module will be comparing the hash of the certificate instead of validating that the alias + simply exists + java_cert: + cert_alias: test_cert + cert_path: "{{ test_cert2_path }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + state: present + register: result_x509_changed + +- name: Verify the x509 status is changed + assert: + that: + - result_x509_changed is changed + +- name: | + We also want to make sure that the status doesnt change if we import the same cert + java_cert: + cert_alias: test_cert + cert_path: "{{ test_cert2_path }}" + keystore_path: "{{ test_keystore2_path }}" + keystore_pass: "{{ test_keystore2_password }}" + state: present + register: result_x509_succeeded + +- name: Verify the x509 status is ok + assert: + that: + - result_x509_succeeded is succeeded + - name: > Ensure the original pkcs12 cert is in the keystore java_cert: @@ -83,7 +133,7 @@ - name: | Perform the same test, but we will now be testing the pkcs12 functionality - If we add a different pkcs12 cert with the same alias, we should have a chnaged result, NOT the same + If we add a different pkcs12 cert with the same alias, we should have a changed result, NOT the same java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert @@ -94,7 +144,7 @@ state: present register: result_pkcs12_changed -- name: Verify the pkcs12 status has changed +- name: Verify the pkcs12 status is changed assert: that: - result_pkcs12_changed is changed @@ -155,7 +205,7 @@ that: - result_x509_absent is changed -- name: Ensure we can remove the pkcs12 archive +- name: Ensure we can remove the certificate imported from pkcs12 archive java_cert: cert_alias: test_pkcs12_cert keystore_path: "{{ test_keystore2_path }}" From 3100c32a00d6a350274884aba06afe51a71d5253 Mon Sep 17 00:00:00 2001 From: abikouo <79859644+abikouo@users.noreply.github.com> Date: 
Sat, 22 May 2021 13:34:19 +0200 Subject: [PATCH 0309/3093] ovir4 inventory script (#2461) * update configparser * changelog * handle multiple python version * Update changelogs/fragments/2461-ovirt4-fix-configparser.yml Co-authored-by: Felix Fontein * Update ovirt4.py Co-authored-by: Felix Fontein --- .../2461-ovirt4-fix-configparser.yml | 3 +++ scripts/inventory/ovirt4.py | 27 +++++++++++++------ 2 files changed, 22 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2461-ovirt4-fix-configparser.yml diff --git a/changelogs/fragments/2461-ovirt4-fix-configparser.yml b/changelogs/fragments/2461-ovirt4-fix-configparser.yml new file mode 100644 index 0000000000..6e3845b21a --- /dev/null +++ b/changelogs/fragments/2461-ovirt4-fix-configparser.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - ovir4 inventory script - improve configparser creation to avoid crashes for options without values (https://github.com/ansible-collections/community.general/issues/674). diff --git a/scripts/inventory/ovirt4.py b/scripts/inventory/ovirt4.py index afff18dbdb..84b68a1258 100755 --- a/scripts/inventory/ovirt4.py +++ b/scripts/inventory/ovirt4.py @@ -56,6 +56,7 @@ import sys from collections import defaultdict from ansible.module_utils.six.moves import configparser +from ansible.module_utils.six import PY2 import json @@ -106,14 +107,24 @@ def create_connection(): config_path = os.environ.get('OVIRT_INI_PATH', default_path) # Create parser and add ovirt section if it doesn't exist: - config = configparser.SafeConfigParser( - defaults={ - 'ovirt_url': os.environ.get('OVIRT_URL'), - 'ovirt_username': os.environ.get('OVIRT_USERNAME'), - 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), - 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''), - } - ) + if PY2: + config = configparser.SafeConfigParser( + defaults={ + 'ovirt_url': os.environ.get('OVIRT_URL'), + 'ovirt_username': os.environ.get('OVIRT_USERNAME'), + 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), + 'ovirt_ca_file': 
os.environ.get('OVIRT_CAFILE', ''), + }, allow_no_value=True + ) + else: + config = configparser.ConfigParser( + defaults={ + 'ovirt_url': os.environ.get('OVIRT_URL'), + 'ovirt_username': os.environ.get('OVIRT_USERNAME'), + 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), + 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''), + }, allow_no_value=True + ) if not config.has_section('ovirt'): config.add_section('ovirt') config.read(config_path) From d7e55db99b331be30301b9d1f027be63504007be Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 22 May 2021 23:51:36 +1200 Subject: [PATCH 0310/3093] Massive adjustment in integration tests for changed and failed (#2577) * Replaced ".changed ==" with "is [not] changed". Same for failed * Mr Quote refused to go --- .../targets/archive/tasks/main.yml | 16 ++++++------- .../targets/flatpak/tasks/check_mode.yml | 24 +++++++++---------- .../targets/flatpak/tasks/main.yml | 4 ++-- .../targets/flatpak/tasks/test.yml | 16 ++++++------- .../flatpak_remote/tasks/check_mode.yml | 12 +++++----- .../targets/flatpak_remote/tasks/main.yml | 4 ++-- .../targets/flatpak_remote/tasks/test.yml | 10 ++++---- .../git_config/tasks/get_set_no_state.yml | 4 ++-- .../tasks/get_set_state_present.yml | 4 ++-- .../precedence_between_unset_and_value.yml | 2 +- .../git_config/tasks/unset_check_mode.yml | 2 +- .../git_config/tasks/unset_no_value.yml | 2 +- .../targets/git_config/tasks/unset_value.yml | 2 +- .../targets/github_issue/tasks/main.yml | 10 ++++---- .../targets/hwc_ecs_instance/tasks/main.yml | 8 +++---- .../targets/hwc_evs_disk/tasks/main.yml | 10 ++++---- .../targets/hwc_network_vpc/tasks/main.yml | 8 +++---- .../targets/hwc_smn_topic/tasks/main.yml | 8 +++---- .../targets/hwc_vpc_eip/tasks/main.yml | 8 +++---- .../hwc_vpc_peering_connect/tasks/main.yml | 8 +++---- .../targets/hwc_vpc_port/tasks/main.yml | 8 +++---- .../targets/hwc_vpc_private_ip/tasks/main.yml | 8 +++---- 
.../targets/hwc_vpc_route/tasks/main.yml | 8 +++---- .../hwc_vpc_security_group/tasks/main.yml | 8 +++---- .../tasks/main.yml | 8 +++---- .../targets/hwc_vpc_subnet/tasks/main.yml | 8 +++---- .../targets/influxdb_user/tasks/tests.yml | 22 ++++++++--------- .../targets/ipify_facts/tasks/main.yml | 6 ++--- .../targets/iso_create/tasks/main.yml | 16 ++++++------- .../targets/iso_extract/tasks/tests.yml | 6 ++--- .../targets/one_host/tasks/main.yml | 2 +- .../test_buildah_synchronize/tasks/main.yml | 4 ++-- .../test-add-children-elements-unicode.yml | 4 ++-- .../xml/tasks/test-add-children-elements.yml | 4 ++-- .../test-add-children-from-groupvars.yml | 4 ++-- .../tasks/test-add-children-insertafter.yml | 4 ++-- .../tasks/test-add-children-insertbefore.yml | 4 ++-- ...t-add-children-with-attributes-unicode.yml | 4 ++-- .../test-add-children-with-attributes.yml | 4 ++-- .../xml/tasks/test-add-element-implicitly.yml | 2 +- .../test-add-namespaced-children-elements.yml | 6 ++--- .../xml/tasks/test-children-elements-xml.yml | 4 ++-- .../targets/xml/tasks/test-count-unicode.yml | 2 +- .../targets/xml/tasks/test-count.yml | 2 +- .../test-get-element-content-unicode.yml | 4 ++-- .../xml/tasks/test-get-element-content.yml | 4 ++-- .../test-mutually-exclusive-attributes.yml | 4 ++-- .../xml/tasks/test-pretty-print-only.yml | 4 ++-- .../targets/xml/tasks/test-pretty-print.yml | 4 ++-- .../tasks/test-remove-attribute-nochange.yml | 4 ++-- .../xml/tasks/test-remove-attribute.yml | 4 ++-- .../tasks/test-remove-element-nochange.yml | 4 ++-- .../targets/xml/tasks/test-remove-element.yml | 4 ++-- ...t-remove-namespaced-attribute-nochange.yml | 4 ++-- .../test-remove-namespaced-attribute.yml | 4 ++-- ...est-remove-namespaced-element-nochange.yml | 4 ++-- .../tasks/test-remove-namespaced-element.yml | 4 ++-- .../test-set-attribute-value-unicode.yml | 4 ++-- .../xml/tasks/test-set-attribute-value.yml | 4 ++-- .../test-set-children-elements-level.yml | 8 +++---- 
.../test-set-children-elements-unicode.yml | 8 +++---- .../xml/tasks/test-set-children-elements.yml | 8 +++---- .../tasks/test-set-element-value-empty.yml | 4 ++-- .../tasks/test-set-element-value-unicode.yml | 6 ++--- .../xml/tasks/test-set-element-value.yml | 6 ++--- .../test-set-namespaced-attribute-value.yml | 4 ++-- .../test-set-namespaced-children-elements.yml | 6 ++--- .../test-set-namespaced-element-value.yml | 6 ++--- .../targets/xml/tasks/test-xmlstring.yml | 14 +++++------ 69 files changed, 220 insertions(+), 220 deletions(-) diff --git a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml index 19a1f6af0c..2267268715 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -174,7 +174,7 @@ - name: Test that the file modes were changed assert: that: - - "archive_02_gz_stat.changed == False " + - archive_02_gz_stat is not changed - "archive_02_gz_stat.stat.mode == '0600'" - "'archived' in archive_bz2_result_02" - "{{ archive_bz2_result_02['archived']| length}} == 3" @@ -199,7 +199,7 @@ - name: Test that the file modes were changed assert: that: - - "archive_02_zip_stat.changed == False" + - archive_02_zip_stat is not changed - "archive_02_zip_stat.stat.mode == '0600'" - "'archived' in archive_zip_result_02" - "{{ archive_zip_result_02['archived']| length}} == 3" @@ -224,7 +224,7 @@ - name: Test that the file modes were changed assert: that: - - "archive_02_bz2_stat.changed == False" + - archive_02_bz2_stat is not changed - "archive_02_bz2_stat.stat.mode == '0600'" - "'archived' in archive_bz2_result_02" - "{{ archive_bz2_result_02['archived']| length}} == 3" @@ -248,7 +248,7 @@ - name: Test that the file modes were changed assert: that: - - "archive_02_xz_stat.changed == False" + - archive_02_xz_stat is not changed - "archive_02_xz_stat.stat.mode == '0600'" - "'archived' in archive_xz_result_02" - "{{ archive_xz_result_02['archived']| length}} == 
3" @@ -294,7 +294,7 @@ - name: Assert that nonascii tests succeeded assert: that: - - "nonascii_result_0.changed == true" + - nonascii_result_0 is changed - "nonascii_stat0.stat.exists == true" - name: remove nonascii test @@ -315,7 +315,7 @@ - name: Assert that nonascii tests succeeded assert: that: - - "nonascii_result_1.changed == true" + - nonascii_result_1 is changed - "nonascii_stat_1.stat.exists == true" - name: remove nonascii test @@ -336,7 +336,7 @@ - name: Assert that nonascii tests succeeded assert: that: - - "nonascii_result_1.changed == true" + - nonascii_result_1 is changed - "nonascii_stat_1.stat.exists == true" - name: remove nonascii test @@ -357,7 +357,7 @@ - name: Assert that nonascii tests succeeded assert: that: - - "nonascii_result_2.changed == true" + - nonascii_result_2 is changed - "nonascii_stat_2.stat.exists == true" - name: remove nonascii test diff --git a/tests/integration/targets/flatpak/tasks/check_mode.yml b/tests/integration/targets/flatpak/tasks/check_mode.yml index 065f10dfa7..3186fd2830 100644 --- a/tests/integration/targets/flatpak/tasks/check_mode.yml +++ b/tests/integration/targets/flatpak/tasks/check_mode.yml @@ -13,7 +13,7 @@ - name: Verify addition of absent flatpak test result (check mode) assert: that: - - "addition_result.changed == true" + - addition_result is changed msg: "Adding an absent flatpak shall mark module execution as changed" - name: Test non-existent idempotency of addition of absent flatpak (check mode) @@ -27,7 +27,7 @@ - name: Verify non-existent idempotency of addition of absent flatpak test result (check mode) assert: that: - - "double_addition_result.changed == true" + - double_addition_result is changed msg: | Adding an absent flatpak a second time shall still mark module execution as changed in check mode @@ -44,7 +44,7 @@ - name: Verify removal of absent flatpak test result (check mode) assert: that: - - "removal_result.changed == false" + - removal_result is not changed msg: "Removing an absent 
flatpak shall mark module execution as not changed" # state=present with url on absent flatpak @@ -60,7 +60,7 @@ - name: Verify addition of absent flatpak with url test result (check mode) assert: that: - - "url_addition_result.changed == true" + - url_addition_result is changed msg: "Adding an absent flatpak from URL shall mark module execution as changed" - name: Test non-existent idempotency of addition of absent flatpak with url (check mode) @@ -76,7 +76,7 @@ result (check mode) assert: that: - - "double_url_addition_result.changed == true" + - double_url_addition_result is changed msg: | Adding an absent flatpak from URL a second time shall still mark module execution as changed in check mode @@ -93,7 +93,7 @@ - name: Verify removal of absent flatpak with url test result (check mode) assert: that: - - "url_removal_result.changed == false" + - url_removal_result is not changed msg: "Removing an absent flatpak shall mark module execution as not changed" @@ -112,7 +112,7 @@ - name: Verify addition test result of present flatpak (check mode) assert: that: - - "addition_present_result.changed == false" + - addition_present_result is not changed msg: "Adding an present flatpak shall mark module execution as not changed" # state=absent on present flatpak @@ -127,7 +127,7 @@ - name: Verify removal of present flatpak test result (check mode) assert: that: - - "removal_present_result.changed == true" + - removal_present_result is changed msg: "Removing a present flatpak shall mark module execution as changed" - name: Test non-existent idempotency of removal (check mode) @@ -140,7 +140,7 @@ - name: Verify non-existent idempotency of removal (check mode) assert: that: - - "double_removal_present_result.changed == true" + - double_removal_present_result is changed msg: | Removing a present flatpak a second time shall still mark module execution as changed in check mode @@ -158,7 +158,7 @@ - name: Verify addition with url of present flatpak test result (check mode) assert: 
that: - - "url_addition_present_result.changed == false" + - url_addition_present_result is not changed msg: "Adding a present flatpak from URL shall mark module execution as not changed" # state=absent with url on present flatpak @@ -173,7 +173,7 @@ - name: Verify removal with url of present flatpak test result (check mode) assert: that: - - "url_removal_present_result.changed == true" + - url_removal_present_result is changed msg: "Removing an absent flatpak shall mark module execution as not changed" - name: Test non-existent idempotency of removal with url of present flatpak (check mode) @@ -189,5 +189,5 @@ flatpak test result (check mode) assert: that: - - "double_url_removal_present_result.changed == true" + - double_url_removal_present_result is changed msg: Removing an absent flatpak a second time shall still mark module execution as changed diff --git a/tests/integration/targets/flatpak/tasks/main.yml b/tests/integration/targets/flatpak/tasks/main.yml index 45f9ecd501..a1d1bda8a4 100644 --- a/tests/integration/targets/flatpak/tasks/main.yml +++ b/tests/integration/targets/flatpak/tasks/main.yml @@ -40,8 +40,8 @@ - name: Verify executable override test result assert: that: - - "executable_override_result.failed == true" - - "executable_override_result.changed == false" + - executable_override_result is failed + - executable_override_result is not changed msg: "Specifying non-existing executable shall fail module execution" - import_tasks: check_mode.yml diff --git a/tests/integration/targets/flatpak/tasks/test.yml b/tests/integration/targets/flatpak/tasks/test.yml index 20d864a84d..1e7d888bb5 100644 --- a/tests/integration/targets/flatpak/tasks/test.yml +++ b/tests/integration/targets/flatpak/tasks/test.yml @@ -11,7 +11,7 @@ - name: Verify addition test result - {{ method }} assert: that: - - "addition_result.changed == true" + - addition_result is changed msg: "state=present shall add flatpak when absent" - name: Test idempotency of addition - {{ method }} 
@@ -25,7 +25,7 @@ - name: Verify idempotency of addition test result - {{ method }} assert: that: - - "double_addition_result.changed == false" + - double_addition_result is not changed msg: "state=present shall not do anything when flatpak is already present" # state=absent @@ -40,7 +40,7 @@ - name: Verify removal test result - {{ method }} assert: that: - - "removal_result.changed == true" + - removal_result is changed msg: "state=absent shall remove flatpak when present" - name: Test idempotency of removal - {{ method }} @@ -53,7 +53,7 @@ - name: Verify idempotency of removal test result - {{ method }} assert: that: - - "double_removal_result.changed == false" + - double_removal_result is not changed msg: "state=absent shall not do anything when flatpak is not present" # state=present with url as name @@ -69,7 +69,7 @@ - name: Verify addition test result - {{ method }} assert: that: - - "url_addition_result.changed == true" + - url_addition_result is changed msg: "state=present with url as name shall add flatpak when absent" - name: Test idempotency of addition with url - {{ method }} @@ -83,7 +83,7 @@ - name: Verify idempotency of addition with url test result - {{ method }} assert: that: - - "double_url_addition_result.changed == false" + - double_url_addition_result is not changed msg: "state=present with url as name shall not do anything when flatpak is already present" # state=absent with url as name @@ -98,7 +98,7 @@ - name: Verify removal test result - {{ method }} assert: that: - - "url_removal_result.changed == true" + - url_removal_result is changed msg: "state=absent with url as name shall remove flatpak when present" - name: Test idempotency of removal with url - {{ method }} @@ -111,5 +111,5 @@ - name: Verify idempotency of removal with url test result - {{ method }} assert: that: - - "double_url_removal_result.changed == false" + - double_url_removal_result is not changed msg: "state=absent with url as name shall not do anything when flatpak is not 
present" diff --git a/tests/integration/targets/flatpak_remote/tasks/check_mode.yml b/tests/integration/targets/flatpak_remote/tasks/check_mode.yml index 7ce89a8c15..1f4def86d9 100644 --- a/tests/integration/targets/flatpak_remote/tasks/check_mode.yml +++ b/tests/integration/targets/flatpak_remote/tasks/check_mode.yml @@ -13,7 +13,7 @@ - name: Verify addition of absent flatpak remote test result (check mode) assert: that: - - "addition_result.changed == true" + - addition_result is changed msg: "Adding an absent flatpak remote shall mark module execution as changed" - name: Test non-existent idempotency of addition of absent flatpak remote (check mode) @@ -29,7 +29,7 @@ test result (check mode) assert: that: - - "double_addition_result.changed == true" + - double_addition_result is changed msg: | Adding an absent flatpak remote a second time shall still mark module execution as changed in check mode @@ -46,7 +46,7 @@ - name: Verify removal of absent flatpak remote test result (check mode) assert: that: - - "removal_result.changed == false" + - removal_result is not changed msg: "Removing an absent flatpak remote shall mark module execution as not changed" @@ -65,7 +65,7 @@ - name: Verify addition of present flatpak remote test result (check mode) assert: that: - - "addition_result.changed == false" + - addition_result is not changed msg: "Adding a present flatpak remote shall mark module execution as not changed" # state=absent @@ -80,7 +80,7 @@ - name: Verify removal of present flatpak remote test result (check mode) assert: that: - - "removal_result.changed == true" + - removal_result is changed msg: "Removing a present flatpak remote shall mark module execution as changed" - name: Test non-existent idempotency of removal of present flatpak remote (check mode) @@ -95,7 +95,7 @@ test result (check mode) assert: that: - - "double_removal_result.changed == true" + - double_removal_result is changed msg: | Removing a present flatpak remote a second time shall still 
mark module execution as changed in check mode diff --git a/tests/integration/targets/flatpak_remote/tasks/main.yml b/tests/integration/targets/flatpak_remote/tasks/main.yml index aa2219e181..91fa7262df 100644 --- a/tests/integration/targets/flatpak_remote/tasks/main.yml +++ b/tests/integration/targets/flatpak_remote/tasks/main.yml @@ -40,8 +40,8 @@ - name: Verify executable override test result assert: that: - - "executable_override_result.failed == true" - - "executable_override_result.changed == false" + - executable_override_result is failed + - executable_override_result is not changed msg: "Specifying non-existing executable shall fail module execution" - import_tasks: check_mode.yml diff --git a/tests/integration/targets/flatpak_remote/tasks/test.yml b/tests/integration/targets/flatpak_remote/tasks/test.yml index 9570f623a1..66c43649b4 100644 --- a/tests/integration/targets/flatpak_remote/tasks/test.yml +++ b/tests/integration/targets/flatpak_remote/tasks/test.yml @@ -11,7 +11,7 @@ - name: Verify addition test result - {{ method }} assert: that: - - "addition_result.changed == true" + - addition_result is changed msg: "state=present shall add flatpak when absent" - name: Test idempotency of addition - {{ method }} @@ -25,7 +25,7 @@ - name: Verify idempotency of addition test result - {{ method }} assert: that: - - "double_addition_result.changed == false" + - double_addition_result is not changed msg: "state=present shall not do anything when flatpak is already present" - name: Test updating remote url does not do anything - {{ method }} @@ -39,7 +39,7 @@ - name: Verify updating remote url does not do anything - {{ method }} assert: that: - - "url_update_result.changed == false" + - url_update_result is not changed msg: "Trying to update the URL of an existing flatpak remote shall not do anything" @@ -55,7 +55,7 @@ - name: Verify removal test result - {{ method }} assert: that: - - "removal_result.changed == true" + - removal_result is changed msg: 
"state=absent shall remove flatpak when present" - name: Test idempotency of removal - {{ method }} @@ -68,5 +68,5 @@ - name: Verify idempotency of removal test result - {{ method }} assert: that: - - "double_removal_result.changed == false" + - double_removal_result is not changed msg: "state=absent shall not do anything when flatpak is not present" diff --git a/tests/integration/targets/git_config/tasks/get_set_no_state.yml b/tests/integration/targets/git_config/tasks/get_set_no_state.yml index 149a9b2d93..7e9714a75e 100644 --- a/tests/integration/targets/git_config/tasks/get_set_no_state.yml +++ b/tests/integration/targets/git_config/tasks/get_set_no_state.yml @@ -17,9 +17,9 @@ - name: assert set changed and value is correct assert: that: - - set_result.changed == true + - set_result is changed - set_result.diff.before == "\n" - set_result.diff.after == option_value + "\n" - - get_result.changed == false + - get_result is not changed - get_result.config_value == option_value ... diff --git a/tests/integration/targets/git_config/tasks/get_set_state_present.yml b/tests/integration/targets/git_config/tasks/get_set_state_present.yml index 59f3c9c0ee..52d986d633 100644 --- a/tests/integration/targets/git_config/tasks/get_set_state_present.yml +++ b/tests/integration/targets/git_config/tasks/get_set_state_present.yml @@ -19,9 +19,9 @@ - name: assert set changed and value is correct with state=present assert: that: - - set_result.changed == true + - set_result is changed - set_result.diff.before == "\n" - set_result.diff.after == option_value + "\n" - - get_result.changed == false + - get_result is not changed - get_result.config_value == option_value ... 
diff --git a/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml b/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml index 24ef292015..9eb4ca4034 100644 --- a/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml +++ b/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml @@ -18,7 +18,7 @@ - name: assert unset changed and deleted value assert: that: - - unset_result.changed == true + - unset_result is changed - unset_result.diff.before == option_value + "\n" - unset_result.diff.after == "\n" - get_result.config_value == '' diff --git a/tests/integration/targets/git_config/tasks/unset_check_mode.yml b/tests/integration/targets/git_config/tasks/unset_check_mode.yml index c8fe00c0b7..43b9905373 100644 --- a/tests/integration/targets/git_config/tasks/unset_check_mode.yml +++ b/tests/integration/targets/git_config/tasks/unset_check_mode.yml @@ -18,7 +18,7 @@ - name: assert unset changed but dit not delete value assert: that: - - unset_result.changed == true + - unset_result is changed - unset_result.diff.before == option_value + "\n" - unset_result.diff.after == "\n" - get_result.config_value == option_value diff --git a/tests/integration/targets/git_config/tasks/unset_no_value.yml b/tests/integration/targets/git_config/tasks/unset_no_value.yml index 71568e3aa4..5fb6b6bcb6 100644 --- a/tests/integration/targets/git_config/tasks/unset_no_value.yml +++ b/tests/integration/targets/git_config/tasks/unset_no_value.yml @@ -17,7 +17,7 @@ - name: assert unsetting didn't change assert: that: - - unset_result.changed == false + - unset_result is not changed - unset_result.msg == 'no setting to unset' - get_result.config_value == '' ... 
diff --git a/tests/integration/targets/git_config/tasks/unset_value.yml b/tests/integration/targets/git_config/tasks/unset_value.yml index a2308156aa..6dda37736e 100644 --- a/tests/integration/targets/git_config/tasks/unset_value.yml +++ b/tests/integration/targets/git_config/tasks/unset_value.yml @@ -17,7 +17,7 @@ - name: assert unset changed and deleted value assert: that: - - unset_result.changed == true + - unset_result is changed - unset_result.diff.before == option_value + "\n" - unset_result.diff.after == "\n" - get_result.config_value == '' diff --git a/tests/integration/targets/github_issue/tasks/main.yml b/tests/integration/targets/github_issue/tasks/main.yml index 24266128ae..7731a7a955 100644 --- a/tests/integration/targets/github_issue/tasks/main.yml +++ b/tests/integration/targets/github_issue/tasks/main.yml @@ -18,8 +18,8 @@ - assert: that: - - "{{ get_status_0002.changed == True }}" - - "{{ get_status_0002.issue_status == 'closed' }}" + - get_status_0002 is changed + - get_status_0002.issue_status == 'closed' - name: Check if GitHub issue is closed or not github_issue: @@ -32,6 +32,6 @@ - assert: that: - - "{{ get_status_0003.changed == False }}" - - "{{ get_status_0003.failed == True }}" - - "{{ 'Failed' in get_status_0003.msg }}" + - get_status_0003 is not changed + - get_status_0003 is failed + - "'Failed' in get_status_0003.msg" diff --git a/tests/integration/targets/hwc_ecs_instance/tasks/main.yml b/tests/integration/targets/hwc_ecs_instance/tasks/main.yml index 8c8ea2eb3d..4d36c11286 100644 --- a/tests/integration/targets/hwc_ecs_instance/tasks/main.yml +++ b/tests/integration/targets/hwc_ecs_instance/tasks/main.yml @@ -167,8 +167,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a instance (check mode) hwc_ecs_instance: @@ -277,8 +277,8 @@ - name: assert changed is 
false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a disk diff --git a/tests/integration/targets/hwc_evs_disk/tasks/main.yml b/tests/integration/targets/hwc_evs_disk/tasks/main.yml index 79e67d0dc9..e2380450cd 100644 --- a/tests/integration/targets/hwc_evs_disk/tasks/main.yml +++ b/tests/integration/targets/hwc_evs_disk/tasks/main.yml @@ -50,8 +50,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a disk (check mode) hwc_evs_disk: @@ -92,7 +92,7 @@ - name: assert changed is false assert: that: - - result.changed == false + - result is not changed # ---------------------------------------------------------------------------- - name: delete a disk that does not exist hwc_evs_disk: @@ -105,5 +105,5 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed diff --git a/tests/integration/targets/hwc_network_vpc/tasks/main.yml b/tests/integration/targets/hwc_network_vpc/tasks/main.yml index 5c01cf7ad8..e3b979d0b5 100644 --- a/tests/integration/targets/hwc_network_vpc/tasks/main.yml +++ b/tests/integration/targets/hwc_network_vpc/tasks/main.yml @@ -62,8 +62,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a vpc hwc_network_vpc: @@ -97,5 +97,5 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed diff --git a/tests/integration/targets/hwc_smn_topic/tasks/main.yml 
b/tests/integration/targets/hwc_smn_topic/tasks/main.yml index 180f8fad3e..a9879aea54 100644 --- a/tests/integration/targets/hwc_smn_topic/tasks/main.yml +++ b/tests/integration/targets/hwc_smn_topic/tasks/main.yml @@ -44,8 +44,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a smn topic hwc_smn_topic: @@ -77,5 +77,5 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed diff --git a/tests/integration/targets/hwc_vpc_eip/tasks/main.yml b/tests/integration/targets/hwc_vpc_eip/tasks/main.yml index 57de832418..bdf5d763a7 100644 --- a/tests/integration/targets/hwc_vpc_eip/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_eip/tasks/main.yml @@ -96,8 +96,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a eip (check mode) hwc_vpc_eip: @@ -159,8 +159,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a port diff --git a/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml b/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml index 2316a4b25c..cb6a15f750 100644 --- a/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml @@ -78,8 +78,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed 
#---------------------------------------------------------- - name: delete a peering connect (check mode) hwc_vpc_peering_connect: @@ -133,8 +133,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a vpc diff --git a/tests/integration/targets/hwc_vpc_port/tasks/main.yml b/tests/integration/targets/hwc_vpc_port/tasks/main.yml index b7f28360c1..00f5ae8b2e 100644 --- a/tests/integration/targets/hwc_vpc_port/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_port/tasks/main.yml @@ -69,8 +69,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a port (check mode) hwc_vpc_port: @@ -116,8 +116,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a subnet diff --git a/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml b/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml index efd6765c80..5531d575f8 100644 --- a/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml @@ -70,8 +70,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a private ip (check mode) hwc_vpc_private_ip: @@ -117,8 +117,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed 
#--------------------------------------------------------- # Post-test teardown - name: delete a subnet diff --git a/tests/integration/targets/hwc_vpc_route/tasks/main.yml b/tests/integration/targets/hwc_vpc_route/tasks/main.yml index b281000b7a..9c9c37e8c0 100644 --- a/tests/integration/targets/hwc_vpc_route/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_route/tasks/main.yml @@ -81,8 +81,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a route (check mode) hwc_vpc_route: @@ -127,8 +127,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a peering connect diff --git a/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml b/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml index 6b21f8b9a4..9f853ca8e7 100644 --- a/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml @@ -51,8 +51,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a security group (check mode) hwc_vpc_security_group: @@ -83,5 +83,5 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed diff --git a/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml b/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml index 2d774101bf..04213e7162 100644 --- a/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml +++ 
b/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml @@ -85,8 +85,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a security group rule (check mode) hwc_vpc_security_group_rule: @@ -151,8 +151,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a security group diff --git a/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml b/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml index 3b3cf65478..c16ff85241 100644 --- a/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml +++ b/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml @@ -77,8 +77,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #---------------------------------------------------------- - name: delete a subnet (check mode) hwc_vpc_subnet: @@ -136,8 +136,8 @@ - name: assert changed is false assert: that: - - result.failed == 0 - - result.changed == false + - result is not failed + - result is not changed #--------------------------------------------------------- # Post-test teardown - name: delete a vpc diff --git a/tests/integration/targets/influxdb_user/tasks/tests.yml b/tests/integration/targets/influxdb_user/tasks/tests.yml index b980e29094..ad3396642b 100644 --- a/tests/integration/targets/influxdb_user/tasks/tests.yml +++ b/tests/integration/targets/influxdb_user/tasks/tests.yml @@ -13,7 +13,7 @@ - name: Check that admin user adding succeeds with a change assert: that: - - add_admin_user.changed == true + - add_admin_user is changed - name: Test add admin user block: @@ -24,7 +24,7 @@ - name: 
Check that admin user adding succeeds with a change assert: that: - - add_admin_user.changed == true + - add_admin_user is changed - name: Test add admin user idempotence block: @@ -35,7 +35,7 @@ - name: Check that admin user adding succeeds without a change assert: that: - - add_admin_user.changed == false + - add_admin_user is not changed - name: Enable authentication and restart service block: @@ -58,7 +58,7 @@ - name: Check that adding user with enabled authentication succeeds with a change assert: that: - - add_user_with_auth_enabled.changed == true + - add_user_with_auth_enabled is changed - name: Test add user when authentication enabled block: @@ -69,7 +69,7 @@ - name: Check that adding user with enabled authentication succeeds with a change assert: that: - - add_user_with_auth_enabled.changed == true + - add_user_with_auth_enabled is changed - name: Test add user when authentication enabled idempotence block: @@ -80,7 +80,7 @@ - name: Check that adding same user succeeds without a change assert: that: - - same_user.changed == false + - same_user is not changed - name: Test change user password in check mode block: @@ -92,7 +92,7 @@ - name: Check that password changing succeeds with a change assert: that: - - change_password.changed == true + - change_password is changed - name: Test change user password block: @@ -103,7 +103,7 @@ - name: Check that password changing succeeds with a change assert: that: - - change_password.changed == true + - change_password is changed - name: Test remove user in check mode block: @@ -115,7 +115,7 @@ - name: Check that removing user succeeds with a change assert: that: - - remove_user.changed == true + - remove_user is changed - name: Test remove user block: @@ -126,7 +126,7 @@ - name: Check that removing user succeeds with a change assert: that: - - remove_user.changed == true + - remove_user is changed - name: Test remove user idempotence block: @@ -137,4 +137,4 @@ - name: Check that removing user succeeds without a 
change assert: that: - - remove_user.changed == false + - remove_user is not changed diff --git a/tests/integration/targets/ipify_facts/tasks/main.yml b/tests/integration/targets/ipify_facts/tasks/main.yml index 4fbd5ab696..7b620ff9ec 100644 --- a/tests/integration/targets/ipify_facts/tasks/main.yml +++ b/tests/integration/targets/ipify_facts/tasks/main.yml @@ -41,6 +41,6 @@ - name: check if task was successful assert: that: - - "{{ external_ip.changed == false }}" - - "{{ external_ip['ansible_facts'] is defined }}" - - "{{ external_ip['ansible_facts']['ipify_public_ip'] is defined }}" + - external_ip is not changed + - external_ip.ansible_facts is defined + - external_ip.ansible_facts.ipify_public_ip is defined diff --git a/tests/integration/targets/iso_create/tasks/main.yml b/tests/integration/targets/iso_create/tasks/main.yml index de46276743..4a0df3b818 100644 --- a/tests/integration/targets/iso_create/tasks/main.yml +++ b/tests/integration/targets/iso_create/tasks/main.yml @@ -35,7 +35,7 @@ - debug: var=iso_file - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == False - name: Create iso file with a specified file @@ -54,7 +54,7 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True - name: Create iso file with a specified file and folder @@ -74,10 +74,10 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True -- name: Create iso file with volume identification string +- name: Create iso file with volume identification string iso_create: src_files: - "{{ role_path }}/files/test1.cfg" @@ -93,7 +93,7 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True - name: Create iso file with Rock Ridge extention @@ -112,7 +112,7 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True - name: Create iso file with Joliet 
extention @@ -131,7 +131,7 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True - name: Create iso file with UDF enabled @@ -150,5 +150,5 @@ - assert: that: - - iso_result.changed == True + - iso_result is changed - iso_file.stat.exists == True diff --git a/tests/integration/targets/iso_extract/tasks/tests.yml b/tests/integration/targets/iso_extract/tasks/tests.yml index f9182ba6ae..18f22422ce 100644 --- a/tests/integration/targets/iso_extract/tasks/tests.yml +++ b/tests/integration/targets/iso_extract/tasks/tests.yml @@ -28,7 +28,7 @@ - assert: that: - - iso_extract_test0 is changed == true + - iso_extract_test0 is changed - name: Extract the iso again iso_extract: @@ -42,11 +42,11 @@ - name: Test iso_extract_test0_again (normal mode) assert: that: - - iso_extract_test0_again is changed == false + - iso_extract_test0_again is not changed when: not in_check_mode - name: Test iso_extract_test0_again (check-mode) assert: that: - - iso_extract_test0_again is changed == true + - iso_extract_test0_again is changed when: in_check_mode diff --git a/tests/integration/targets/one_host/tasks/main.yml b/tests/integration/targets/one_host/tasks/main.yml index a3cea768af..7d38c2a890 100644 --- a/tests/integration/targets/one_host/tasks/main.yml +++ b/tests/integration/targets/one_host/tasks/main.yml @@ -177,7 +177,7 @@ - name: "assert test_{{test_number}} worked" assert: that: - - result.changed == false + - result is not changed # HOST DISABLEMENT diff --git a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml b/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml index 92fd0830c4..a80e218921 100644 --- a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml +++ b/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml @@ -40,7 +40,7 @@ - assert: that: - "'changed' in 
sync_result" - - "sync_result.changed == true" + - sync_result is changed - "'cmd' in sync_result" - "'rsync' in sync_result.cmd" - "'msg' in sync_result" @@ -63,7 +63,7 @@ - assert: that: - - "sync_result.changed == False" + - sync_result is not changed - name: cleanup old files file: diff --git a/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml b/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml index 8ad91501c3..d89c29ae27 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml @@ -24,6 +24,6 @@ - name: Test expected result assert: that: - - add_children_elements_unicode.changed == true - - comparison.changed == false # identical + - add_children_elements_unicode is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-children-elements-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-elements.yml b/tests/integration/targets/xml/tasks/test-add-children-elements.yml index 8d9b06866d..3c439c7ac2 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-elements.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-elements.yml @@ -24,6 +24,6 @@ - name: Test expected result assert: that: - - add_children_elements.changed == true - - comparison.changed == false # identical + - add_children_elements is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml b/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml index e062de8d14..818fdf09b9 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml +++ 
b/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - add_children_from_groupvars.changed == true - - comparison.changed == false # identical + - add_children_from_groupvars is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-children-from-groupvars.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml b/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml index 2d42e2d54e..479052ebdd 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml @@ -28,5 +28,5 @@ - name: Test expected result assert: that: - - add_children_insertafter.changed == true - - comparison.changed == false # identical + - add_children_insertafter is changed + - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml b/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml index 8550f12cf7..9839d7cc91 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml @@ -28,5 +28,5 @@ - name: Test expected result assert: that: - - add_children_insertbefore.changed == true - - comparison.changed == false # identical + - add_children_insertbefore is changed + - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml b/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml index d4a2329f69..585157c970 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml @@ -26,6 +26,6 @@ - name: Test 
expected result assert: that: - - add_children_with_attributes_unicode.changed == true - - comparison.changed == false # identical + - add_children_with_attributes_unicode is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-children-with-attributes-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml b/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml index 91e92637fc..c3704801d9 100644 --- a/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml +++ b/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml @@ -29,7 +29,7 @@ - name: Test expected result assert: that: - - add_children_with_attributes.changed == true - - comparison.changed == false # identical + - add_children_with_attributes is changed + - comparison is not changed # identical when: lxml_predictable_attribute_order #command: diff -u {{ role_path }}/results/test-add-children-with-attributes.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml b/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml index db674ba4fc..6166cd46b9 100644 --- a/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml +++ b/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml @@ -108,7 +108,7 @@ - name: Test expected result assert: that: - - comparison.changed == false # identical + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-element-implicitly.yml /tmp/ansible-xml-beers-implicit.xml diff --git a/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml b/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml index 25eca47f5b..2cac73e65c 100644 --- a/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml +++ 
b/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml @@ -21,12 +21,12 @@ src: results/test-add-namespaced-children-elements.xml dest: /tmp/ansible-xml-namespaced-beers.xml check_mode: yes - diff: yes + diff: yes register: comparison - name: Test expected result assert: that: - - add_namespaced_children_elements.changed == true - - comparison.changed == false # identical + - add_namespaced_children_elements is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-namespaced-children-elements.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-children-elements-xml.yml b/tests/integration/targets/xml/tasks/test-children-elements-xml.yml index e63100c47c..6b50d819c3 100644 --- a/tests/integration/targets/xml/tasks/test-children-elements-xml.yml +++ b/tests/integration/targets/xml/tasks/test-children-elements-xml.yml @@ -25,6 +25,6 @@ - name: Test expected result assert: that: - - children_elements.changed == true - - comparison.changed == false # identical + - children_elements is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-count-unicode.yml b/tests/integration/targets/xml/tasks/test-count-unicode.yml index 47a806bf98..a9a462b5da 100644 --- a/tests/integration/targets/xml/tasks/test-count-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-count-unicode.yml @@ -15,5 +15,5 @@ - name: Test expected result assert: that: - - beers.changed == false + - beers is not changed - beers.count == 2 diff --git a/tests/integration/targets/xml/tasks/test-count.yml b/tests/integration/targets/xml/tasks/test-count.yml index cbc97e323c..b8a21870f7 100644 --- a/tests/integration/targets/xml/tasks/test-count.yml +++ b/tests/integration/targets/xml/tasks/test-count.yml @@ -15,5 +15,5 @@ - name: Test expected 
result assert: that: - - beers.changed == false + - beers is not changed - beers.count == 3 diff --git a/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml b/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml index 73ae96674f..718f12d640 100644 --- a/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml @@ -15,7 +15,7 @@ - name: Test expected result assert: that: - - get_element_attribute.changed == false + - get_element_attribute is not changed - get_element_attribute.matches[0]['rating'] is defined and get_element_attribute.matches[0]['rating']['subjective'] == 'да' - name: Get element text @@ -28,5 +28,5 @@ - name: Test expected result assert: that: - - get_element_text.changed == false + - get_element_text is not changed - get_element_text.matches[0]['rating'] == 'десять' diff --git a/tests/integration/targets/xml/tasks/test-get-element-content.yml b/tests/integration/targets/xml/tasks/test-get-element-content.yml index 4a40b42dcf..d38aa70d95 100644 --- a/tests/integration/targets/xml/tasks/test-get-element-content.yml +++ b/tests/integration/targets/xml/tasks/test-get-element-content.yml @@ -15,7 +15,7 @@ - name: Test expected result assert: that: - - get_element_attribute.changed == false + - get_element_attribute is not changed - get_element_attribute.matches[0]['rating'] is defined - get_element_attribute.matches[0]['rating']['subjective'] == 'true' @@ -43,5 +43,5 @@ - name: Test expected result assert: that: - - get_element_text.changed == false + - get_element_text is not changed - get_element_text.matches[0]['rating'] == '10' diff --git a/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml b/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml index 3f24b0ac84..07a71f9153 100644 --- a/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml +++ 
b/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml @@ -18,5 +18,5 @@ - name: Test expected result assert: that: - - module_output.changed == false - - module_output.failed == true + - module_output is not changed + - module_output is failed diff --git a/tests/integration/targets/xml/tasks/test-pretty-print-only.yml b/tests/integration/targets/xml/tasks/test-pretty-print-only.yml index 7c0f7d5fd6..16fcf629c5 100644 --- a/tests/integration/targets/xml/tasks/test-pretty-print-only.yml +++ b/tests/integration/targets/xml/tasks/test-pretty-print-only.yml @@ -24,6 +24,6 @@ - name: Test expected result assert: that: - - pretty_print_only.changed == true - - comparison.changed == false # identical + - pretty_print_only is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-pretty-print.yml b/tests/integration/targets/xml/tasks/test-pretty-print.yml index 88b618b25d..fd47ff3d82 100644 --- a/tests/integration/targets/xml/tasks/test-pretty-print.yml +++ b/tests/integration/targets/xml/tasks/test-pretty-print.yml @@ -25,6 +25,6 @@ - name: Test expected result assert: that: - - pretty_print.changed == true - - comparison.changed == false # identical + - pretty_print is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml index d09dee405c..fbd73237f1 100644 --- a/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - remove_attribute.changed == false - - comparison.changed == false # identical + - 
remove_attribute is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-attribute.yml b/tests/integration/targets/xml/tasks/test-remove-attribute.yml index 9aa395e666..52b5214213 100644 --- a/tests/integration/targets/xml/tasks/test-remove-attribute.yml +++ b/tests/integration/targets/xml/tasks/test-remove-attribute.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - remove_attribute.changed == true - - comparison.changed == false # identical + - remove_attribute is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml index 2debc80d51..e548bfabf8 100644 --- a/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - remove_element.changed == false - - comparison.changed == false # identical + - remove_element is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-element.yml b/tests/integration/targets/xml/tasks/test-remove-element.yml index f2e20ea220..092ca3e033 100644 --- a/tests/integration/targets/xml/tasks/test-remove-element.yml +++ b/tests/integration/targets/xml/tasks/test-remove-element.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - remove_element.changed == true - - comparison.changed == false # identical + - remove_element is changed + - comparison is not changed # identical #command: diff -u {{ role_path 
}}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml index 291536d3bf..19c14dec8d 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml @@ -28,6 +28,6 @@ - name: Test expected result assert: that: - - remove_namespaced_attribute.changed == false - - comparison.changed == false # identical + - remove_namespaced_attribute is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml index a7ccdac4e3..9e54911ba5 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml @@ -28,6 +28,6 @@ - name: Test expected result assert: that: - - remove_namespaced_attribute.changed == true - - comparison.changed == false # identical + - remove_namespaced_attribute is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml index b1938e45b7..b96f2a7819 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml @@ -28,6 +28,6 @@ - name: Test expected result assert: that: - - 
remove_namespaced_element.changed == false - - comparison.changed == false # identical + - remove_namespaced_element is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml index be78af6803..660baa9840 100644 --- a/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml +++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml @@ -28,6 +28,6 @@ - name: Test expected result assert: that: - - remove_namespaced_element.changed == true - - comparison.changed == false # identical + - remove_namespaced_element is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml b/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml index dabf72a1b7..b72d502f12 100644 --- a/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml @@ -24,6 +24,6 @@ - name: Test expected result assert: that: - - set_attribute_value_unicode.changed == true - - comparison.changed == false # identical + - set_attribute_value_unicode is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-attribute-value-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-attribute-value.yml b/tests/integration/targets/xml/tasks/test-set-attribute-value.yml index 2aa39fe22f..6a2aa6c511 100644 --- a/tests/integration/targets/xml/tasks/test-set-attribute-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-attribute-value.yml @@ -24,6 +24,6 
@@ - name: Test expected result assert: that: - - set_attribute_value.changed == true - - comparison.changed == false # identical + - set_attribute_value is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-attribute-value.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml b/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml index 3e2c0adb6f..7fa926e879 100644 --- a/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml +++ b/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml @@ -47,8 +47,8 @@ - name: Test expected result assert: that: - - set_children_elements_level.changed == true - - comparison.changed == false # identical + - set_children_elements_level is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-children-elements-level.xml /tmp/ansible-xml-beers.xml @@ -70,5 +70,5 @@ - name: Test expected result assert: that: - - set_children_again.changed == false - - comparison.changed == false # identical + - set_children_again is not changed + - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml b/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml index 240b894ac7..3cc25cd999 100644 --- a/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml @@ -25,8 +25,8 @@ - name: Test expected result assert: that: - - set_children_elements_unicode.changed == true - - comparison.changed == false # identical + - set_children_elements_unicode is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml @@ -41,6 +41,6 @@ - name: Test expected result assert: 
that: - - set_children_again.changed == false - - comparison.changed == false # identical + - set_children_again is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements.yml b/tests/integration/targets/xml/tasks/test-set-children-elements.yml index 7b0f3247ad..7c305ead74 100644 --- a/tests/integration/targets/xml/tasks/test-set-children-elements.yml +++ b/tests/integration/targets/xml/tasks/test-set-children-elements.yml @@ -25,8 +25,8 @@ - name: Test expected result assert: that: - - set_children_elements.changed == true - - comparison.changed == false # identical + - set_children_elements is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml @@ -48,6 +48,6 @@ - name: Test expected result assert: that: - - set_children_again.changed == false - - comparison.changed == false # identical + - set_children_again is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml b/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml index 5814803cb7..4575d5e75f 100644 --- a/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml +++ b/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml @@ -23,6 +23,6 @@ - name: Test expected result assert: that: - - set_element_value_empty.changed == true - - comparison.changed == false # identical + - set_element_value_empty is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-element-value-empty.xml /tmp/ansible-xml-beers.xml diff --git 
a/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml b/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml index c3a40b7d93..139087fcd9 100644 --- a/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml +++ b/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml @@ -37,7 +37,7 @@ - name: Test expected result assert: that: - - set_element_first_run.changed == true - - set_element_second_run.changed == false - - comparison.changed == false # identical + - set_element_first_run is changed + - set_element_second_run is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-element-value-unicode.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-element-value.yml b/tests/integration/targets/xml/tasks/test-set-element-value.yml index dbd070f139..2f845e949b 100644 --- a/tests/integration/targets/xml/tasks/test-set-element-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-element-value.yml @@ -37,7 +37,7 @@ - name: Test expected result assert: that: - - set_element_first_run.changed == true - - set_element_second_run.changed == false - - comparison.changed == false # identical + - set_element_first_run is changed + - set_element_second_run is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-element-value.xml /tmp/ansible-xml-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml b/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml index e0086efe3a..2ba83a8330 100644 --- a/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml @@ -29,6 +29,6 @@ - name: Test expected result assert: that: - - set_namespaced_attribute_value.changed == true - - comparison.changed == false # identical + 
- set_namespaced_attribute_value is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-set-namespaced-attribute-value.xml /tmp/ansible-xml-namespaced-beers.xml diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml b/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml index 8e66e70eeb..6204c8c74d 100644 --- a/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml +++ b/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml @@ -52,6 +52,6 @@ - name: Test expected result assert: that: - - set_children_again.changed == false # idempotency - - set_namespaced_attribute_value.changed == true - - comparison.changed == false # identical + - set_children_again is not changed # idempotency + - set_namespaced_attribute_value is changed + - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml b/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml index f77d7537e9..cf6a8a7eb0 100644 --- a/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml +++ b/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml @@ -41,6 +41,6 @@ - name: Test expected result assert: that: - - set_element_first_run.changed == true - - set_element_second_run.changed == false - - comparison.changed == false # identical + - set_element_first_run is changed + - set_element_second_run is not changed + - comparison is not changed # identical diff --git a/tests/integration/targets/xml/tasks/test-xmlstring.yml b/tests/integration/targets/xml/tasks/test-xmlstring.yml index 4620d984fa..82781fa94d 100644 --- a/tests/integration/targets/xml/tasks/test-xmlstring.yml +++ b/tests/integration/targets/xml/tasks/test-xmlstring.yml @@ -25,8 +25,8 @@ - name: Test expected result assert: that: - - xmlresponse.changed == false - - 
comparison.changed == false # identical + - xmlresponse is not changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml @@ -49,8 +49,8 @@ - name: Test expected result assert: that: - - xmlresponse.changed == true - - comparison.changed == false # identical + - xmlresponse is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml @@ -63,7 +63,7 @@ add_children: - beer: Old Rasputin register: xmlresponse_modification - + - name: Compare to expected result copy: content: '{{ xmlresponse_modification.xmlstring }}' @@ -76,6 +76,6 @@ - name: Test expected result assert: that: - - xmlresponse_modification.changed == true - - comparison.changed == false # identical + - xmlresponse_modification is changed + - comparison is not changed # identical #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml From cc293f90a245aad5c2eae4b1c28b49101563b134 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 23 May 2021 08:20:37 +1200 Subject: [PATCH 0311/3093] ini_file - opening file as utf-8-sig (#2578) * opening file as utf-8-sig * added changelog fragment * using io.open() * Update tests/integration/targets/ini_file/tasks/main.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2578-ini-file-utf8-bom.yml | 2 ++ plugins/modules/files/ini_file.py | 3 +- .../targets/ini_file/tasks/main.yml | 34 +++++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2578-ini-file-utf8-bom.yml diff --git a/changelogs/fragments/2578-ini-file-utf8-bom.yml b/changelogs/fragments/2578-ini-file-utf8-bom.yml new file mode 100644 index 0000000000..00640c0b23 --- /dev/null +++ b/changelogs/fragments/2578-ini-file-utf8-bom.yml @@ -0,0 +1,2 @@ +minor_changes: + - ini_file 
- opening file with encoding ``utf-8-sig`` (https://github.com/ansible-collections/community.general/issues/2189). diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py index ac4c6d0cf3..ea857cefa9 100644 --- a/plugins/modules/files/ini_file.py +++ b/plugins/modules/files/ini_file.py @@ -104,6 +104,7 @@ EXAMPLES = r''' backup: yes ''' +import io import os import re import tempfile @@ -141,7 +142,7 @@ def do_ini(module, filename, section=None, option=None, value=None, os.makedirs(destpath) ini_lines = [] else: - with open(filename, 'r') as ini_file: + with io.open(filename, 'r', encoding="utf-8-sig") as ini_file: ini_lines = ini_file.readlines() if module._diff: diff --git a/tests/integration/targets/ini_file/tasks/main.yml b/tests/integration/targets/ini_file/tasks/main.yml index 2e84147c72..be5835669b 100644 --- a/tests/integration/targets/ini_file/tasks/main.yml +++ b/tests/integration/targets/ini_file/tasks/main.yml @@ -480,3 +480,37 @@ assert: that: - content15 == expected15 + +- name: Create starting ini file + copy: + # The content below is the following text file with BOM: + # [section1] + # var1=aaa + # var2=bbb + # [section2] + # var3=ccc + content: !!binary | + 77u/W3NlY3Rpb24xXQp2YXIxPWFhYQp2YXIyPWJiYgpbc2VjdGlvbjJdCnZhcjM9Y2NjCg== + dest: "{{ output_file }}" +- name: Test ini breakage + ini_file: + path: "{{ output_file }}" + section: section1 + option: var4 + value: 0 + +- name: read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: set expected content and get current ini file content + set_fact: + expected16: "[section1]\nvar1=aaa\nvar2=bbb\nvar4 = 0\n[section2]\nvar3=ccc\n" + content16: "{{ output_content.content | b64decode }}" +- debug: + var: content16 +- name: Verify content of ini file is as expected + assert: + that: + - content16 == expected16 From 593d622438dd2a7aada0ccb762446df4ebb1a6ac Mon Sep 17 00:00:00 2001 From: Tong He 
<68936428+unnecessary-username@users.noreply.github.com> Date: Tue, 25 May 2021 03:59:52 +0800 Subject: [PATCH 0312/3093] rhsm_release: Fix the issue that rhsm_release module considers 8, 7Client and 7Workstation as invalid releases (#2571) * rhsm_release: Fix the issue that rhsm_release module considers 8, 7Client and 7Workstation as invalid releases. * Fix the unit test error: The new release_matcher could pass a wider range of patterns but that would not cause extra issue to the whole module. * Submit the changelog fragment. * Update changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml Co-authored-by: Amin Vakil Co-authored-by: Amin Vakil --- .../fragments/2571-rhsm_release-fix-release_matcher.yaml | 2 ++ plugins/modules/packaging/os/rhsm_release.py | 6 +++--- .../unit/plugins/modules/packaging/os/test_rhsm_release.py | 5 ++--- 3 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml diff --git a/changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml b/changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml new file mode 100644 index 0000000000..764743303f --- /dev/null +++ b/changelogs/fragments/2571-rhsm_release-fix-release_matcher.yaml @@ -0,0 +1,2 @@ +bugfixes: + - rhsm_release - fix the issue that module considers 8, 7Client and 7Workstation as invalid releases (https://github.com/ansible-collections/community.general/pull/2571). diff --git a/plugins/modules/packaging/os/rhsm_release.py b/plugins/modules/packaging/os/rhsm_release.py index 22b280f1fc..a4d8f71197 100644 --- a/plugins/modules/packaging/os/rhsm_release.py +++ b/plugins/modules/packaging/os/rhsm_release.py @@ -56,9 +56,9 @@ from ansible.module_utils.basic import AnsibleModule import re -# Matches release-like values such as 7.2, 6.10, 10Server, -# but rejects unlikely values, like 100Server, 100.0, 1.100, etc. 
-release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server)\b') +# Matches release-like values such as 7.2, 5.10, 6Server, 8 +# but rejects unlikely values, like 100Server, 1.100, 7server etc. +release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server|Client|Workstation|)\b') def _sm_release(module, *args): diff --git a/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py b/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py index a75ec69448..98db6e2840 100644 --- a/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py +++ b/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py @@ -125,13 +125,12 @@ class RhsmRepositoryReleaseModuleTestCase(ModuleTestCase): def test_release_matcher(self): # throw a few values at the release matcher -- only sane_values should match - sane_values = ['1Server', '10Server', '1.10', '10.0'] + sane_values = ['1Server', '1Client', '10Server', '1.10', '10.0', '9'] insane_values = [ '6server', # lowercase 's' '100Server', # excessively long 'x' component - '100.0', # excessively long 'x' component - '6.100', # excessively long 'y' component '100.100', # excessively long 'x' and 'y' components + '+.-', # illegal characters ] matches = self.module.release_matcher.findall(' '.join(sane_values + insane_values)) From 63012eef82ad127e06e7a3e5a51eeb7a0f30a0c3 Mon Sep 17 00:00:00 2001 From: DasSkelett Date: Tue, 25 May 2021 12:58:20 +0200 Subject: [PATCH 0313/3093] Use str() to get exception message (#2590) --- .../fragments/2590-netcup_dns-exception-no-message-attr.yml | 2 ++ plugins/modules/net_tools/netcup_dns.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2590-netcup_dns-exception-no-message-attr.yml diff --git a/changelogs/fragments/2590-netcup_dns-exception-no-message-attr.yml b/changelogs/fragments/2590-netcup_dns-exception-no-message-attr.yml new file mode 100644 index 0000000000..06cac9ad1b --- /dev/null +++ 
b/changelogs/fragments/2590-netcup_dns-exception-no-message-attr.yml @@ -0,0 +1,2 @@ +bugfixes: + - netcup_dns - use ``str(ex)`` instead of unreliable ``ex.message`` in exception handling to fix ``AttributeError`` in error cases (https://github.com/ansible-collections/community.general/pull/2590). diff --git a/plugins/modules/net_tools/netcup_dns.py b/plugins/modules/net_tools/netcup_dns.py index 5d63a5b38e..5ec5cbb246 100644 --- a/plugins/modules/net_tools/netcup_dns.py +++ b/plugins/modules/net_tools/netcup_dns.py @@ -255,7 +255,7 @@ def main(): has_changed = True except Exception as ex: - module.fail_json(msg=ex.message) + module.fail_json(msg=str(ex)) module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]}) From d8713992209ccce44b884093967749249bca960f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 25 May 2021 07:04:19 -0400 Subject: [PATCH 0314/3093] json_query, no more 'unknown type' errors (#2607) Signed-off-by: Abhijeet Kasurde --- changelogs/fragments/json_query_more_types.yml | 3 +++ plugins/filter/json_query.py | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/json_query_more_types.yml diff --git a/changelogs/fragments/json_query_more_types.yml b/changelogs/fragments/json_query_more_types.yml new file mode 100644 index 0000000000..4ac69b67c0 --- /dev/null +++ b/changelogs/fragments/json_query_more_types.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - json_query filter plugin - avoid 'unknown type' errors for more Ansible internal types (https://github.com/ansible-collections/community.general/pull/2607). 
diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py index 972109a045..673cafa587 100644 --- a/plugins/filter/json_query.py +++ b/plugins/filter/json_query.py @@ -35,9 +35,11 @@ def json_query(data, expr): raise AnsibleError('You need to install "jmespath" prior to running ' 'json_query filter') - # Hack to handle Ansible String Types + # Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence # See issue: https://github.com/ansible-collections/community.general/issues/320 jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', ) + jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', ) + jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', ) try: return jmespath.search(expr, data) except jmespath.exceptions.JMESPathError as e: From 6df3685d42f35147b73f08237b3dea73e8d36e9a Mon Sep 17 00:00:00 2001 From: Alexander Moiseenko Date: Wed, 26 May 2021 08:00:53 +0300 Subject: [PATCH 0315/3093] jenkins_plugin: HTTP Error 405: Method Not Allowed on disable/enable plugin #2510 (#2511) * define POST method for pluginManager api requests Jenkins makeEnable/makeDisable api requests requires to use POST method * add changelog fragment * fix my yoda lang thx to aminvakil Co-authored-by: Amin Vakil * update changelog fragment Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein --- changelogs/fragments/2510-jenkins_plugin_use_post_method.yml | 2 ++ plugins/modules/web_infrastructure/jenkins_plugin.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2510-jenkins_plugin_use_post_method.yml diff --git a/changelogs/fragments/2510-jenkins_plugin_use_post_method.yml b/changelogs/fragments/2510-jenkins_plugin_use_post_method.yml new file mode 100644 index 
0000000000..b310e27061 --- /dev/null +++ b/changelogs/fragments/2510-jenkins_plugin_use_post_method.yml @@ -0,0 +1,2 @@ +bugfixes: + - jenkins_plugin - use POST method for sending request to jenkins API when ``state`` option is one of ``enabled``, ``disabled``, ``pinned``, ``unpinned``, or ``absent`` (https://github.com/ansible-collections/community.general/issues/2510). diff --git a/plugins/modules/web_infrastructure/jenkins_plugin.py b/plugins/modules/web_infrastructure/jenkins_plugin.py index c9946023ac..be335fcfd3 100644 --- a/plugins/modules/web_infrastructure/jenkins_plugin.py +++ b/plugins/modules/web_infrastructure/jenkins_plugin.py @@ -696,7 +696,8 @@ class JenkinsPlugin(object): self._get_url_data( url, msg_status="Plugin not found. %s" % url, - msg_exception="%s has failed." % msg) + msg_exception="%s has failed." % msg, + method="POST") def main(): From aa74cf4d61b1e22e55ca1fa0b0d18744da493b4f Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 26 May 2021 17:06:23 +1200 Subject: [PATCH 0316/3093] ini_file - added note in documentation for utf-8 bom (#2599) * added note in documentation for utf-8 bom * Update plugins/modules/files/ini_file.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- plugins/modules/files/ini_file.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py index ea857cefa9..d318d04d57 100644 --- a/plugins/modules/files/ini_file.py +++ b/plugins/modules/files/ini_file.py @@ -79,6 +79,7 @@ options: notes: - While it is possible to add an I(option) without specifying a I(value), this makes no sense. - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well. + - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files. 
author: - Jan-Piet Mens (@jpmens) - Ales Nosek (@noseka1) From 4764a5deba6b44c988ab21b1b8b2951e71b8499b Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 26 May 2021 17:07:09 +1200 Subject: [PATCH 0317/3093] redis cache - better parsing of connection uri (#2579) * better parsing of connection uri * added changelog fragment * fixed tests for ansible 2.9 * Update tests/unit/plugins/cache/test_redis.py Co-authored-by: Felix Fontein * Update tests/unit/plugins/cache/test_redis.py Co-authored-by: Felix Fontein * Adjustments from PR * Update test_redis.py * Update test_redis.py * Update plugins/cache/redis.py Co-authored-by: Felix Fontein * Update plugins/cache/redis.py Co-authored-by: Felix Fontein * Update tests/unit/plugins/cache/test_redis.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/2579-redis-cache-ipv6.yml | 2 ++ plugins/cache/redis.py | 14 ++++++++++++-- tests/unit/plugins/cache/test_redis.py | 15 ++++++++++++++- 3 files changed, 28 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2579-redis-cache-ipv6.yml diff --git a/changelogs/fragments/2579-redis-cache-ipv6.yml b/changelogs/fragments/2579-redis-cache-ipv6.yml new file mode 100644 index 0000000000..aaa5e78b34 --- /dev/null +++ b/changelogs/fragments/2579-redis-cache-ipv6.yml @@ -0,0 +1,2 @@ +bugfixes: + - redis cache - improved connection string parsing (https://github.com/ansible-collections/community.general/issues/497). diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 7a376d6d7c..6af7c731e4 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -61,6 +61,7 @@ DOCUMENTATION = ''' type: integer ''' +import re import time import json @@ -91,6 +92,8 @@ class CacheModule(BaseCacheModule): performance. 
""" _sentinel_service_name = None + re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$') + re_sent_conn = re.compile(r'^(.*):(\d+)$') def __init__(self, *args, **kwargs): uri = '' @@ -130,11 +133,18 @@ class CacheModule(BaseCacheModule): self._db = self._get_sentinel_connection(uri, kw) # normal connection else: - connection = uri.split(':') + connection = self._parse_connection(self.re_url_conn, uri) self._db = StrictRedis(*connection, **kw) display.vv('Redis connection: %s' % self._db) + @staticmethod + def _parse_connection(re_patt, uri): + match = re_patt.match(uri) + if not match: + raise AnsibleError("Unable to parse connection string") + return match.groups() + def _get_sentinel_connection(self, uri, kw): """ get sentinel connection details from _uri @@ -158,7 +168,7 @@ class CacheModule(BaseCacheModule): except IndexError: pass # password is optional - sentinels = [tuple(shost.split(':')) for shost in connections] + sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections] display.vv('\nUsing redis sentinels: %s' % sentinels) scon = Sentinel(sentinels, **kw) try: diff --git a/tests/unit/plugins/cache/test_redis.py b/tests/unit/plugins/cache/test_redis.py index e665826769..ee7e1f7913 100644 --- a/tests/unit/plugins/cache/test_redis.py +++ b/tests/unit/plugins/cache/test_redis.py @@ -23,10 +23,23 @@ import pytest pytest.importorskip('redis') +from ansible import constants as C from ansible.plugins.loader import cache_loader +from ansible.release import __version__ as ansible_version from ansible_collections.community.general.plugins.cache.redis import CacheModule as RedisCache def test_redis_cachemodule(): # The _uri option is required for the redis plugin - assert isinstance(cache_loader.get('community.general.redis', **{'_uri': '127.0.0.1:6379:1'}), RedisCache) + connection = '127.0.0.1:6379:1' + if ansible_version.startswith('2.9.'): + C.CACHE_PLUGIN_CONNECTION = connection + assert 
isinstance(cache_loader.get('community.general.redis', **{'_uri': connection}), RedisCache) + + +def test_redis_cachemodule(): + # The _uri option is required for the redis plugin + connection = '[::1]:6379:1' + if ansible_version.startswith('2.9.'): + C.CACHE_PLUGIN_CONNECTION = connection + assert isinstance(cache_loader.get('community.general.redis', **{'_uri': connection}), RedisCache) From d0f8eac7fdf264ba04ce536d4de8b146dc3f86e4 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Wed, 26 May 2021 12:12:21 +0430 Subject: [PATCH 0318/3093] Add CONTRIBUTING.md (#2602) * Initial file shamelessly copied from community.mysql * Add some notes on pull requests * Add CONTRIBUTING.md link to README.md * Add quick-start development guide link * Apply felixfontein's suggestions Co-authored-by: Felix Fontein * add note about rebasing and merge commits Co-authored-by: Felix Fontein * add note about easyfix and waiting_on_contributor tags Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- CONTRIBUTING.md | 32 ++++++++++++++++++++++++++++++++ README.md | 2 ++ 2 files changed, 34 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..959d363236 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,32 @@ +# Contributing + +We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our contributions and interactions within this repository. + +If you are a committer, also refer to the [collection's committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md). + +## Issue tracker + +Whether you are looking for an opportunity to contribute or you found a bug and already know how to solve it, please go to the [issue tracker](https://github.com/ansible-collections/community.general/issues). 
+There you can find feature ideas to implement, reports about bugs to solve, or submit an issue to discuss your idea before implementing it which can help choose a right direction at the beginning of your work and potentially save a lot of time and effort. +Also somebody may already have started discussing or working on implementing the same or a similar idea, +so you can cooperate to create a better solution together. + +* If you are interested in starting with an easy issue, look for [issues with an `easyfix` label](https://github.com/ansible-collections/community.general/labels/easyfix). +* Often issues that are waiting for contributors to pick up have [the `waiting_on_contributor` label](https://github.com/ansible-collections/community.general/labels/waiting_on_contributor). + +## Open pull requests + +Look through currently [open pull requests](https://github.com/ansible-collections/community.general/pulls). +You can help by reviewing them. Reviews help move pull requests to merge state. Some good pull requests cannot be merged only due to a lack of reviews. And it is always worth saying that good reviews are often more valuable than pull requests themselves. +Note that reviewing does not only mean code review, but also offering comments on new interfaces added to existing plugins/modules, interfaces of new plugins/modules, improving language (not everyone is a native english speaker), or testing bugfixes and new features! + +Also, consider taking up a valuable, reviewed, but abandoned pull request which you could politely ask the original authors to complete yourself. + +* Try committing your changes with an informative but short commit message. +* All commits of a pull request branch will be squashed into one commit at last. That does not mean you must have only one commit on your pull request, though! +* Please try not to force-push if it is not needed, so reviewers and other users looking at your pull request later can see the pull request commit history. 
+* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the respository checkout. + +You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst). + +If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it. diff --git a/README.md b/README.md index 306f307128..e6e4eb880e 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,8 @@ export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html). +Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md). + ### Running tests See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections). From 0b4a2bea01ef4f7a5d1f78e3f38f0d2b55955d39 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 26 May 2021 10:34:35 +0200 Subject: [PATCH 0319/3093] Use become test framework for sudosu tests. 
(#2629) --- tests/unit/plugins/become/test_sudosu.py | 37 ++++++++++++++---------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/tests/unit/plugins/become/test_sudosu.py b/tests/unit/plugins/become/test_sudosu.py index 4e5c998f09..6adf200d8e 100644 --- a/tests/unit/plugins/become/test_sudosu.py +++ b/tests/unit/plugins/become/test_sudosu.py @@ -10,36 +10,41 @@ __metaclass__ = type import re from ansible import context -from ansible.playbook.play_context import PlayContext -from ansible.plugins.loader import become_loader + +from .helper import call_become_plugin def test_sudosu(mocker, parser, reset_cli_args): options = parser.parse_args([]) context._init_global_context(options) - play_context = PlayContext() default_cmd = "/bin/foo" default_exe = "/bin/bash" sudo_exe = 'sudo' sudo_flags = '-H -s -n' - cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) - assert cmd == default_cmd - success = 'BECOME-SUCCESS-.+?' - play_context.become = True - play_context.become_user = 'foo' - play_context.set_become_plugin(become_loader.get('community.general.sudosu')) - play_context.become_flags = sudo_flags - cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) - - assert (re.match("""%s %s su -l %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags, play_context.become_user, + task = { + 'become_user': 'foo', + 'become_method': 'community.general.sudosu', + 'become_flags': sudo_flags, + } + var_options = {} + cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) + print(cmd) + assert (re.match("""%s %s su -l %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags, task['become_user'], default_exe, success, default_cmd), cmd) is not None) - play_context.become_pass = 'testpass' - cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) + task = { + 'become_user': 'foo', + 'become_method': 'community.general.sudosu', + 'become_flags': sudo_flags, + 'become_pass': 'testpass', + 
} + var_options = {} + cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe) + print(cmd) assert (re.match("""%s %s -p "%s" su -l %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags.replace('-n', ''), - r"\[sudo via ansible, key=.+?\] password:", play_context.become_user, + r"\[sudo via ansible, key=.+?\] password:", task['become_user'], default_exe, success, default_cmd), cmd) is not None) From 26757edfb27b8d871963aae45e41eddf05a06775 Mon Sep 17 00:00:00 2001 From: Sylvia van Os Date: Thu, 27 May 2021 07:57:06 +0200 Subject: [PATCH 0320/3093] Add one-liner lookup example (#2615) * Add one-liner lookup example * Remove trailing whitespace * Update plugins/lookup/tss.py Co-authored-by: Felix Fontein * Update plugins/lookup/tss.py Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- plugins/lookup/tss.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index 2c25532699..b7b7cd85e0 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -103,6 +103,14 @@ EXAMPLES = r""" | items2dict(key_name='slug', value_name='itemValue'))['password'] }} + +- hosts: localhost + vars: + secret_password: >- + {{ ((lookup('community.general.tss', 1) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] }}" + tasks: + - ansible.builtin.debug: + msg: the password is {{ secret_password }} """ from ansible.errors import AnsibleError, AnsibleOptionsError From 4aa50962cb54b903c807b6a000cb41c28d4b1806 Mon Sep 17 00:00:00 2001 From: sgalea87 <43749726+sgalea87@users.noreply.github.com> Date: Thu, 27 May 2021 08:01:28 +0200 Subject: [PATCH 0321/3093] influxdb_user: Fix bug introduced by PR 2499 (#2614) * Update influxdb_user.py Fixed function name * Create 2614-influxdb_user-fix-issue-introduced-in-PR#2499 Added changelog * Rename 2614-influxdb_user-fix-issue-introduced-in-PR#2499 to 
2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml Fixed extension * Update changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml Co-authored-by: Amin Vakil Co-authored-by: Amin Vakil --- .../2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml | 2 ++ plugins/modules/database/influxdb/influxdb_user.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml diff --git a/changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml b/changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml new file mode 100644 index 0000000000..dfae3f2bdf --- /dev/null +++ b/changelogs/fragments/2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml @@ -0,0 +1,2 @@ +bugfixes: + - influxdb_user - fix bug which removed current privileges instead of appending them to existing ones (https://github.com/ansible-collections/community.general/issues/2609, https://github.com/ansible-collections/community.general/pull/2614). diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py index d9e6b58051..cb35ea7ce6 100644 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ b/plugins/modules/database/influxdb/influxdb_user.py @@ -174,7 +174,7 @@ def set_user_grants(module, client, user_name, grants): if v['privilege'] != 'NO PRIVILEGES': if v['privilege'] == 'ALL PRIVILEGES': v['privilege'] = 'ALL' - parsed_grants.add(v) + parsed_grants.append(v) # check if the current grants are included in the desired ones for current_grant in parsed_grants: From b45298bc4355d0ab95ed37e0eade699b2b665289 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 27 May 2021 08:23:35 +0200 Subject: [PATCH 0322/3093] Temporarily disable iptables_state tests. 
(#2641) --- tests/integration/targets/iptables_state/aliases | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/targets/iptables_state/aliases b/tests/integration/targets/iptables_state/aliases index 3cac4af522..12765cec47 100644 --- a/tests/integration/targets/iptables_state/aliases +++ b/tests/integration/targets/iptables_state/aliases @@ -5,3 +5,4 @@ skip/freebsd # no iptables/netfilter (Linux specific) skip/osx # no iptables/netfilter (Linux specific) skip/macos # no iptables/netfilter (Linux specific) skip/aix # no iptables/netfilter (Linux specific) +disabled # FIXME From 909e9fe9508804b2e18b755ef36060861cde5228 Mon Sep 17 00:00:00 2001 From: quidame Date: Thu, 27 May 2021 08:47:16 +0200 Subject: [PATCH 0323/3093] fix a regression in initialization_from_null_state() (iptables-nft > 1.8.2) (#2604) --- plugins/modules/system/iptables_state.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/plugins/modules/system/iptables_state.py b/plugins/modules/system/iptables_state.py index 326db862bc..66ba2c9b20 100644 --- a/plugins/modules/system/iptables_state.py +++ b/plugins/modules/system/iptables_state.py @@ -304,7 +304,7 @@ def write_state(b_path, lines, changed): return changed -def initialize_from_null_state(initializer, initcommand, table): +def initialize_from_null_state(initializer, initcommand, fallbackcmd, table): ''' This ensures iptables-state output is suitable for iptables-restore to roll back to it, i.e. iptables-save output is not empty. This also works for the @@ -315,8 +315,14 @@ def initialize_from_null_state(initializer, initcommand, table): commandline = list(initializer) commandline += ['-t', table] - (rc, out, err) = module.run_command(commandline, check_rc=True) + dummy = module.run_command(commandline, check_rc=True) (rc, out, err) = module.run_command(initcommand, check_rc=True) + if '*%s' % table not in out.splitlines(): + # The last resort. 
+ iptables_input = '*%s\n:OUTPUT ACCEPT\nCOMMIT\n' % table + dummy = module.run_command(fallbackcmd, data=iptables_input, check_rc=True) + (rc, out, err) = module.run_command(initcommand, check_rc=True) + return rc, out, err @@ -401,6 +407,7 @@ def main(): INITCOMMAND = [bin_iptables_save] INITIALIZER = [bin_iptables, '-L', '-n'] TESTCOMMAND = [bin_iptables_restore, '--test'] + FALLBACKCMD = [bin_iptables_restore] if counters: COMMANDARGS.append('--counters') @@ -425,6 +432,7 @@ def main(): INITIALIZER.extend(['--modprobe', modprobe]) INITCOMMAND.extend(['--modprobe', modprobe]) TESTCOMMAND.extend(['--modprobe', modprobe]) + FALLBACKCMD.extend(['--modprobe', modprobe]) SAVECOMMAND = list(COMMANDARGS) SAVECOMMAND.insert(0, bin_iptables_save) @@ -458,15 +466,15 @@ def main(): for t in TABLES: if '*%s' % t in state_to_restore: if len(stdout) == 0 or '*%s' % t not in stdout.splitlines(): - (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, t) + (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, t) elif len(stdout) == 0: - (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, 'filter') + (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, 'filter') elif state == 'restored' and '*%s' % table not in state_to_restore: module.fail_json(msg="Table %s to restore not defined in %s" % (table, path)) elif len(stdout) == 0 or '*%s' % table not in stdout.splitlines(): - (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, table) + (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, table) initial_state = filter_and_format_state(stdout) if initial_state is None: From b79969da68ddaa73cfabdb866f80ac7f414b9f62 Mon Sep 17 00:00:00 2001 From: rainerleber <39616583+rainerleber@users.noreply.github.com> Date: Thu, 27 May 2021 18:46:12 +0200 Subject: [PATCH 0324/3093] Add module hana_query to make SAP HANA 
administration easier. (#2623) * new * move link * Apply suggestions from code review Co-authored-by: Felix Fontein * add more interesting return value in test * remove unused objects * removed unneeded function * extend test output * Update tests/unit/plugins/modules/database/saphana/test_hana_query.py Co-authored-by: Felix Fontein Co-authored-by: Rainer Leber Co-authored-by: Felix Fontein --- .../modules/database/saphana/hana_query.py | 187 ++++++++++++++++++ plugins/modules/hana_query.py | 1 + .../modules/database/saphana/__init__.py | 0 .../database/saphana/test_hana_query.py | 66 +++++++ 4 files changed, 254 insertions(+) create mode 100644 plugins/modules/database/saphana/hana_query.py create mode 120000 plugins/modules/hana_query.py create mode 100644 tests/unit/plugins/modules/database/saphana/__init__.py create mode 100644 tests/unit/plugins/modules/database/saphana/test_hana_query.py diff --git a/plugins/modules/database/saphana/hana_query.py b/plugins/modules/database/saphana/hana_query.py new file mode 100644 index 0000000000..ab147ef3fe --- /dev/null +++ b/plugins/modules/database/saphana/hana_query.py @@ -0,0 +1,187 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Rainer Leber +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: hana_query +short_description: Execute SQL on HANA +version_added: 3.2.0 +description: This module executes SQL statements on HANA with hdbsql. +options: + sid: + description: The system ID. + type: str + required: true + instance: + description: The instance number. + type: str + required: true + user: + description: A dedicated username. Defaults to C(SYSTEM). + type: str + default: SYSTEM + password: + description: The password to connect to the database. + type: str + required: true + autocommit: + description: Autocommit the statement. 
+ type: bool + default: true + host: + description: The Host IP address. The port can be defined as well. + type: str + database: + description: Define the database on which to connect. + type: str + encrypted: + description: Use encrypted connection. Defaults to C(false). + type: bool + default: false + filepath: + description: + - One or more files each containing one SQL query to run. + - Must be a string or list containing strings. + type: list + elements: path + query: + description: + - SQL query to run. + - Must be a string or list containing strings. Please note that if you supply a string, it will be split by commas (C(,)) to a list. + It is better to supply a one-element list instead to avoid mangled input. + type: list + elements: str +notes: + - Does not support C(check_mode). +author: + - Rainer Leber (@rainerleber) +''' + +EXAMPLES = r''' +- name: Simple select query + community.general.hana_query: + sid: "hdb" + instance: "01" + password: "Test123" + query: "select user_name from users" + +- name: Run several queries + community.general.hana_query: + sid: "hdb" + instance: "01" + password: "Test123" + query: + - "select user_name from users;" + - select * from SYSTEM; + host: "localhost" + autocommit: False + +- name: Run several queries from file + community.general.hana_query: + sid: "hdb" + instance: "01" + password: "Test123" + filepath: + - /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt + - /tmp/HANA.txt + host: "localhost" +''' + +RETURN = r''' +query_result: + description: List containing results of all queries executed (one sublist for every query). 
+ returned: on success + type: list + elements: list + sample: [[{"Column": "Value1"}, {"Column": "Value2"}], [{"Column": "Value1"}, {"Column": "Value2"}]] +''' + +import csv +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import StringIO +from ansible.module_utils._text import to_native + + +def csv_to_list(rawcsv): + reader_raw = csv.DictReader(StringIO(rawcsv)) + reader = [dict((k, v.strip()) for k, v in row.items()) for row in reader_raw] + return list(reader) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + sid=dict(type='str', required=True), + instance=dict(type='str', required=True), + encrypted=dict(type='bool', required=False, default=False), + host=dict(type='str', required=False), + user=dict(type='str', required=False, default="SYSTEM"), + password=dict(type='str', required=True, no_log=True), + database=dict(type='str', required=False), + query=dict(type='list', elements='str', required=False), + filepath=dict(type='list', elements='path', required=False), + autocommit=dict(type='bool', required=False, default=True), + ), + required_one_of=[('query', 'filepath')], + supports_check_mode=False, + ) + rc, out, err, out_raw = [0, [], "", ""] + + params = module.params + + sid = (params['sid']).upper() + instance = params['instance'] + user = params['user'] + password = params['password'] + autocommit = params['autocommit'] + host = params['host'] + database = params['database'] + encrypted = params['encrypted'] + + filepath = params['filepath'] + query = params['query'] + + bin_path = "/usr/sap/{sid}/HDB{instance}/exe/hdbsql".format(sid=sid, instance=instance) + + try: + command = [module.get_bin_path(bin_path, required=True)] + except Exception as e: + module.fail_json(msg='Failed to find hdbsql at the expected path "{0}". 
Please check SID and instance number: "{1}"'.format(bin_path, to_native(e))) + + if encrypted is True: + command.extend(['-attemptencrypt']) + if autocommit is False: + command.extend(['-z']) + if host is not None: + command.extend(['-n', host]) + if database is not None: + command.extend(['-d', database]) + # -x Suppresses additional output, such as the number of selected rows in a result set. + command.extend(['-x', '-i', instance, '-u', user, '-p', password]) + + if filepath is not None: + command.extend(['-I']) + for p in filepath: + # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# -I /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt, + # iterates through files and append the output to var out. + query_command = command + [p] + (rc, out_raw, err) = module.run_command(query_command) + out.append(csv_to_list(out_raw)) + if query is not None: + for q in query: + # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# "select user_name from users", + # iterates through multiple commands and append the output to var out. 
+ query_command = command + [q] + (rc, out_raw, err) = module.run_command(query_command) + out.append(csv_to_list(out_raw)) + changed = True + + module.exit_json(changed=changed, rc=rc, query_result=out, stderr=err) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/hana_query.py b/plugins/modules/hana_query.py new file mode 120000 index 0000000000..ea869eb7a4 --- /dev/null +++ b/plugins/modules/hana_query.py @@ -0,0 +1 @@ +./database/saphana/hana_query.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/database/saphana/__init__.py b/tests/unit/plugins/modules/database/saphana/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/plugins/modules/database/saphana/test_hana_query.py b/tests/unit/plugins/modules/database/saphana/test_hana_query.py new file mode 100644 index 0000000000..4d158c028e --- /dev/null +++ b/tests/unit/plugins/modules/database/saphana/test_hana_query.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Rainer Leber (@rainerleber) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible_collections.community.general.plugins.modules import hana_query +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ( + AnsibleExitJson, + AnsibleFailJson, + ModuleTestCase, + set_module_args, +) +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible.module_utils import basic + + +def get_bin_path(*args, **kwargs): + """Function to return path of hdbsql""" + return "/usr/sap/HDB/HDB01/exe/hdbsql" + + +class Testhana_query(ModuleTestCase): + """Main class for testing hana_query module.""" + + def setUp(self): + """Setup.""" + super(Testhana_query, self).setUp() + self.module = hana_query + self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', 
get_bin_path) + self.mock_get_bin_path.start() + self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone' + + def tearDown(self): + """Teardown.""" + super(Testhana_query, self).tearDown() + + def test_without_required_parameters(self): + """Failure must occurs when all parameters are missing.""" + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + self.module.main() + + def test_hana_query(self): + """Check that result is processed.""" + set_module_args({ + 'sid': "HDB", + 'instance': "01", + 'encrypted': False, + 'host': "localhost", + 'user': "SYSTEM", + 'password': "1234Qwer", + 'database': "HDB", + 'query': "SELECT * FROM users;" + }) + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.return_value = 0, 'username,name\n testuser,test user \n myuser, my user \n', '' + with self.assertRaises(AnsibleExitJson) as result: + hana_query.main() + self.assertEqual(result.exception.args[0]['query_result'], [[ + {'username': 'testuser', 'name': 'test user'}, + {'username': 'myuser', 'name': 'my user'}, + ]]) + self.assertEqual(run_command.call_count, 1) From dc793ea32b7246904c969eb1768fb5c8aef87990 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Thu, 27 May 2021 19:46:38 +0300 Subject: [PATCH 0325/3093] hana_query module: add a maintainer (#2647) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index cdef437f90..994de0621f 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -347,6 +347,8 @@ files: $modules/database/mssql/mssql_db.py: maintainers: vedit Jmainguy kenichi-ogawa-1988 labels: mssql_db + $modules/database/saphana/hana_query.py: + maintainers: rainerleber $modules/database/vertica/: maintainers: dareko $modules/files/archive.py: From 7cd96d963efe4e6bf7ac9080fdf933dc23664dcf Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 27 May 2021 18:49:26 +0200 Subject: [PATCH 0326/3093] meta/runtime.yml and 
__init__.py cleanup (#2632) * Remove superfluous __init__.py files. * Reformat and sort meta/runtime.yml. * The ovirt modules have been removed. * Add changelog entry. --- changelogs/fragments/2632-cleanup.yml | 2 + meta/runtime.yml | 244 +++++++++--------- plugins/action/__init__.py | 0 plugins/become/__init__.py | 0 plugins/cache/__init__.py | 0 plugins/callback/__init__.py | 0 plugins/connection/__init__.py | 0 plugins/doc_fragments/__init__.py | 0 plugins/filter/__init__.py | 0 plugins/inventory/__init__.py | 0 plugins/lookup/__init__.py | 0 plugins/module_utils/__init__.py | 0 plugins/module_utils/identity/__init__.py | 0 .../identity/keycloak/__init__.py | 0 plugins/module_utils/mh/__init__.py | 0 plugins/module_utils/mh/mixins/__init__.py | 0 plugins/module_utils/net_tools/__init__.py | 0 .../module_utils/net_tools/nios/__init__.py | 0 .../net_tools/pritunl/__init__.py | 0 plugins/module_utils/oracle/__init__.py | 0 .../remote_management/__init__.py | 0 .../remote_management/lxca/__init__.py | 0 .../module_utils/source_control/__init__.py | 0 plugins/module_utils/storage/__init__.py | 0 plugins/module_utils/storage/emc/__init__.py | 0 .../module_utils/storage/hpe3par/__init__.py | 0 plugins/modules/__init__.py | 0 plugins/modules/net_tools/pritunl/__init__.py | 0 28 files changed, 118 insertions(+), 128 deletions(-) create mode 100644 changelogs/fragments/2632-cleanup.yml delete mode 100644 plugins/action/__init__.py delete mode 100644 plugins/become/__init__.py delete mode 100644 plugins/cache/__init__.py delete mode 100644 plugins/callback/__init__.py delete mode 100644 plugins/connection/__init__.py delete mode 100644 plugins/doc_fragments/__init__.py delete mode 100644 plugins/filter/__init__.py delete mode 100644 plugins/inventory/__init__.py delete mode 100644 plugins/lookup/__init__.py delete mode 100644 plugins/module_utils/__init__.py delete mode 100644 plugins/module_utils/identity/__init__.py delete mode 100644 
plugins/module_utils/identity/keycloak/__init__.py delete mode 100644 plugins/module_utils/mh/__init__.py delete mode 100644 plugins/module_utils/mh/mixins/__init__.py delete mode 100644 plugins/module_utils/net_tools/__init__.py delete mode 100644 plugins/module_utils/net_tools/nios/__init__.py delete mode 100644 plugins/module_utils/net_tools/pritunl/__init__.py delete mode 100644 plugins/module_utils/oracle/__init__.py delete mode 100644 plugins/module_utils/remote_management/__init__.py delete mode 100644 plugins/module_utils/remote_management/lxca/__init__.py delete mode 100644 plugins/module_utils/source_control/__init__.py delete mode 100644 plugins/module_utils/storage/__init__.py delete mode 100644 plugins/module_utils/storage/emc/__init__.py delete mode 100644 plugins/module_utils/storage/hpe3par/__init__.py delete mode 100644 plugins/modules/__init__.py delete mode 100644 plugins/modules/net_tools/pritunl/__init__.py diff --git a/changelogs/fragments/2632-cleanup.yml b/changelogs/fragments/2632-cleanup.yml new file mode 100644 index 0000000000..def89de634 --- /dev/null +++ b/changelogs/fragments/2632-cleanup.yml @@ -0,0 +1,2 @@ +minor_changes: +- "Remove unnecessary ``__init__.py`` files from ``plugins/`` (https://github.com/ansible-collections/community.general/pull/2632)." 
diff --git a/meta/runtime.yml b/meta/runtime.yml index e5b59bc046..8b2a0c0ad6 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -1,31 +1,5 @@ --- requires_ansible: '>=2.9.10' -action_groups: - ovirt: - - ovirt_affinity_label_facts - - ovirt_api_facts - - ovirt_cluster_facts - - ovirt_datacenter_facts - - ovirt_disk_facts - - ovirt_event_facts - - ovirt_external_provider_facts - - ovirt_group_facts - - ovirt_host_facts - - ovirt_host_storage_facts - - ovirt_network_facts - - ovirt_nic_facts - - ovirt_permission_facts - - ovirt_quota_facts - - ovirt_scheduling_policy_facts - - ovirt_snapshot_facts - - ovirt_storage_domain_facts - - ovirt_storage_template_facts - - ovirt_storage_vm_facts - - ovirt_tag_facts - - ovirt_template_facts - - ovirt_user_facts - - ovirt_vm_facts - - ovirt_vmpool_facts plugin_routing: connection: docker: @@ -40,15 +14,18 @@ plugin_routing: nios: deprecation: removal_version: 5.0.0 - warning_text: The community.general.nios lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_lookup instead. + warning_text: The community.general.nios lookup plugin has been deprecated. + Please use infoblox.nios_modules.nios_lookup instead. nios_next_ip: deprecation: removal_version: 5.0.0 - warning_text: The community.general.nios_next_ip lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_next_ip instead. + warning_text: The community.general.nios_next_ip lookup plugin has been deprecated. + Please use infoblox.nios_modules.nios_next_ip instead. nios_next_network: deprecation: removal_version: 5.0.0 - warning_text: The community.general.nios_next_network lookup plugin has been deprecated. Please use infoblox.nios_modules.nios_next_network instead. + warning_text: The community.general.nios_next_network lookup plugin has been + deprecated. Please use infoblox.nios_modules.nios_next_network instead. 
modules: ali_instance_facts: tombstone: @@ -153,11 +130,13 @@ plugin_routing: gcp_forwarding_rule: tombstone: removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule instead. + warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule + instead. gcp_healthcheck: tombstone: removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check or google.cloud.gcp_compute_https_health_check instead. + warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check + or google.cloud.gcp_compute_https_health_check instead. gcp_target_proxy: tombstone: removal_version: 2.0.0 @@ -168,37 +147,22 @@ plugin_routing: warning_text: Use google.cloud.gcp_compute_url_map instead. gcpubsub: redirect: community.google.gcpubsub - gcpubsub_info: - redirect: community.google.gcpubsub_info gcpubsub_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.google.gcpubsub_info instead. + gcpubsub_info: + redirect: community.google.gcpubsub_info gcspanner: tombstone: removal_version: 2.0.0 - warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance instead. + warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance + instead. github_hooks: tombstone: removal_version: 2.0.0 - warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead. - # Adding tombstones burns the old name, so we simply remove the entries: - # gluster_heal_info: - # tombstone: - # removal_version: 3.0.0 - # warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_heal_info instead. - # gluster_peer: - # tombstone: - # removal_version: 3.0.0 - # warning_text: The gluster modules have migrated to the gluster.gluster collection. 
Use gluster.gluster.gluster_peer instead. - # gluster_volume: - # tombstone: - # removal_version: 3.0.0 - # warning_text: The gluster modules have migrated to the gluster.gluster collection. Use gluster.gluster.gluster_volume instead. - # helm: - # tombstone: - # removal_version: 3.0.0 - # warning_text: Use community.kubernetes.helm instead. + warning_text: Use community.general.github_webhook and community.general.github_webhook_info + instead. hetzner_failover_ip: redirect: community.hrobot.failover_ip hetzner_failover_ip_info: @@ -246,11 +210,13 @@ plugin_routing: logicmonitor: tombstone: removal_version: 1.0.0 - warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017. + warning_text: The logicmonitor_facts module is no longer maintained and the + API used has been disabled in 2017. logicmonitor_facts: tombstone: removal_version: 1.0.0 - warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017. + warning_text: The logicmonitor_facts module is no longer maintained and the + API used has been disabled in 2017. memset_memstore_facts: tombstone: removal_version: 3.0.0 @@ -295,74 +261,90 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use netapp.ontap.na_ontap_info instead. - nios_a_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_a_record module has been deprecated. Please use infoblox.nios_modules.nios_a_record instead. - nios_aaaa_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_aaaa_record module has been deprecated. Please use infoblox.nios_modules.nios_aaaa_record instead. - nios_cname_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_cname_record module has been deprecated. Please use infoblox.nios_modules.nios_cname_record instead. 
- nios_dns_view: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_dns_view module has been deprecated. Please use infoblox.nios_modules.nios_dns_view instead. - nios_fixed_address: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_fixed_address module has been deprecated. Please use infoblox.nios_modules.nios_fixed_address instead. - nios_host_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_host_record module has been deprecated. Please use infoblox.nios_modules.nios_host_record instead. - nios_member: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_member module has been deprecated. Please use infoblox.nios_modules.nios_member instead. - nios_mx_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_mx_record module has been deprecated. Please use infoblox.nios_modules.nios_mx_record instead. - nios_naptr_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_naptr_record module has been deprecated. Please use infoblox.nios_modules.nios_naptr_record instead. - nios_network: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_network module has been deprecated. Please use infoblox.nios_modules.nios_network instead. - nios_network_view: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_network_view module has been deprecated. Please use infoblox.nios_modules.nios_network_view instead. - nios_nsgroup: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_nsgroup module has been deprecated. Please use infoblox.nios_modules.nios_nsgroup instead. - nios_ptr_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_ptr_record module has been deprecated. Please use infoblox.nios_modules.nios_ptr_record instead. 
- nios_srv_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_srv_record module has been deprecated. Please use infoblox.nios_modules.nios_srv_record instead. - nios_txt_record: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_txt_record module has been deprecated. Please use infoblox.nios_modules.nios_txt_record instead. - nios_zone: - deprecation: - removal_version: 5.0.0 - warning_text: The community.general.nios_zone module has been deprecated. Please use infoblox.nios_modules.nios_zone instead. nginx_status_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.nginx_status_info instead. + nios_a_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_a_record module has been deprecated. + Please use infoblox.nios_modules.nios_a_record instead. + nios_aaaa_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_aaaa_record module has been deprecated. + Please use infoblox.nios_modules.nios_aaaa_record instead. + nios_cname_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_cname_record module has been deprecated. + Please use infoblox.nios_modules.nios_cname_record instead. + nios_dns_view: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_dns_view module has been deprecated. + Please use infoblox.nios_modules.nios_dns_view instead. + nios_fixed_address: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_fixed_address module has been deprecated. + Please use infoblox.nios_modules.nios_fixed_address instead. + nios_host_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_host_record module has been deprecated. + Please use infoblox.nios_modules.nios_host_record instead. 
+ nios_member: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_member module has been deprecated. + Please use infoblox.nios_modules.nios_member instead. + nios_mx_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_mx_record module has been deprecated. + Please use infoblox.nios_modules.nios_mx_record instead. + nios_naptr_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_naptr_record module has been deprecated. + Please use infoblox.nios_modules.nios_naptr_record instead. + nios_network: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_network module has been deprecated. + Please use infoblox.nios_modules.nios_network instead. + nios_network_view: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_network_view module has been deprecated. + Please use infoblox.nios_modules.nios_network_view instead. + nios_nsgroup: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_nsgroup module has been deprecated. + Please use infoblox.nios_modules.nios_nsgroup instead. + nios_ptr_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_ptr_record module has been deprecated. + Please use infoblox.nios_modules.nios_ptr_record instead. + nios_srv_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_srv_record module has been deprecated. + Please use infoblox.nios_modules.nios_srv_record instead. + nios_txt_record: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_txt_record module has been deprecated. + Please use infoblox.nios_modules.nios_txt_record instead. + nios_zone: + deprecation: + removal_version: 5.0.0 + warning_text: The community.general.nios_zone module has been deprecated. + Please use infoblox.nios_modules.nios_zone instead. 
ome_device_info: redirect: dellemc.openmanage.ome_device_info one_image_facts: @@ -396,7 +378,8 @@ plugin_routing: oneview_logical_interconnect_group_facts: tombstone: removal_version: 3.0.0 - warning_text: Use community.general.oneview_logical_interconnect_group_info instead. + warning_text: Use community.general.oneview_logical_interconnect_group_info + instead. oneview_network_set_facts: tombstone: removal_version: 3.0.0 @@ -553,10 +536,10 @@ plugin_routing: redirect: community.postgresql.postgresql_table postgresql_tablespace: redirect: community.postgresql.postgresql_tablespace - postgresql_user_obj_stat_info: - redirect: community.postgresql.postgresql_user_obj_stat_info postgresql_user: redirect: community.postgresql.postgresql_user + postgresql_user_obj_stat_info: + redirect: community.postgresql.postgresql_user_obj_stat_info purefa_facts: tombstone: removal_version: 3.0.0 @@ -647,7 +630,8 @@ plugin_routing: nios: deprecation: removal_version: 5.0.0 - warning_text: The community.general.nios document fragment has been deprecated. Please use infoblox.nios_modules.nios instead. + warning_text: The community.general.nios document fragment has been deprecated. + Please use infoblox.nios_modules.nios instead. postgresql: redirect: community.postgresql.postgresql module_utils: @@ -668,26 +652,30 @@ plugin_routing: net_tools.nios.api: deprecation: removal_version: 5.0.0 - warning_text: The community.general.net_tools.nios.api module_utils has been deprecated. Please use infoblox.nios_modules.api instead. + warning_text: The community.general.net_tools.nios.api module_utils has been + deprecated. Please use infoblox.nios_modules.api instead. 
+ postgresql: + redirect: community.postgresql.postgresql remote_management.dellemc.dellemc_idrac: redirect: dellemc.openmanage.dellemc_idrac remote_management.dellemc.ome: redirect: dellemc.openmanage.ome - postgresql: - redirect: community.postgresql.postgresql callback: actionable: tombstone: removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options. + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' and 'display_ok_hosts = no' options. full_skip: tombstone: removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts = no' option. + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' option. stderr: tombstone: removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_failed_stderr = yes' option. + warning_text: Use the 'default' callback plugin with 'display_failed_stderr + = yes' option. 
inventory: docker_machine: redirect: community.docker.docker_machine diff --git a/plugins/action/__init__.py b/plugins/action/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/become/__init__.py b/plugins/become/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/cache/__init__.py b/plugins/cache/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/callback/__init__.py b/plugins/callback/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/connection/__init__.py b/plugins/connection/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/doc_fragments/__init__.py b/plugins/doc_fragments/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/filter/__init__.py b/plugins/filter/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/inventory/__init__.py b/plugins/inventory/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/lookup/__init__.py b/plugins/lookup/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/__init__.py b/plugins/module_utils/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/identity/__init__.py b/plugins/module_utils/identity/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/identity/keycloak/__init__.py b/plugins/module_utils/identity/keycloak/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/mh/__init__.py b/plugins/module_utils/mh/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/mh/mixins/__init__.py b/plugins/module_utils/mh/mixins/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/plugins/module_utils/net_tools/__init__.py b/plugins/module_utils/net_tools/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/net_tools/nios/__init__.py b/plugins/module_utils/net_tools/nios/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/net_tools/pritunl/__init__.py b/plugins/module_utils/net_tools/pritunl/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/oracle/__init__.py b/plugins/module_utils/oracle/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/remote_management/__init__.py b/plugins/module_utils/remote_management/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/remote_management/lxca/__init__.py b/plugins/module_utils/remote_management/lxca/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/source_control/__init__.py b/plugins/module_utils/source_control/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/storage/__init__.py b/plugins/module_utils/storage/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/storage/emc/__init__.py b/plugins/module_utils/storage/emc/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/module_utils/storage/hpe3par/__init__.py b/plugins/module_utils/storage/hpe3par/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/modules/__init__.py b/plugins/modules/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plugins/modules/net_tools/pritunl/__init__.py b/plugins/modules/net_tools/pritunl/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 From 285639a4f94e6000f61e8298e26589b39e5f8d8f Mon Sep 17 00:00:00 2001 From: 
christophemorio <49184206+christophemorio@users.noreply.github.com> Date: Thu, 27 May 2021 19:03:39 +0200 Subject: [PATCH 0327/3093] Terraform overwrite init (#2573) * feat: implement overwrite_init option * chore: changelog --- .../fragments/2573-terraform-overwrite-init.yml | 2 ++ plugins/modules/cloud/misc/terraform.py | 11 ++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2573-terraform-overwrite-init.yml diff --git a/changelogs/fragments/2573-terraform-overwrite-init.yml b/changelogs/fragments/2573-terraform-overwrite-init.yml new file mode 100644 index 0000000000..f2dad6a7ee --- /dev/null +++ b/changelogs/fragments/2573-terraform-overwrite-init.yml @@ -0,0 +1,2 @@ +minor_changes: + - terraform - add option ``overwrite_init`` to skip init if exists (https://github.com/ansible-collections/community.general/pull/2573). diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py index 0a4e41b5f0..9bf36c8c81 100644 --- a/plugins/modules/cloud/misc/terraform.py +++ b/plugins/modules/cloud/misc/terraform.py @@ -107,6 +107,12 @@ options: you intend to provision an entirely new Terraform deployment. default: false type: bool + overwrite_init: + description: + - Run init even if C(.terraform/terraform.tfstate) already exists in I(project_path). + default: true + type: bool + version_added: '3.2.0' backend_config: description: - A group of key-values to provide at init stage to the -backend-config parameter. 
@@ -348,6 +354,7 @@ def main(): backend_config=dict(type='dict', default=None), backend_config_files=dict(type='list', elements='path', default=None), init_reconfigure=dict(required=False, type='bool', default=False), + overwrite_init=dict(type='bool', default=True), ), required_if=[('state', 'planned', ['plan_file'])], supports_check_mode=True, @@ -367,6 +374,7 @@ def main(): backend_config = module.params.get('backend_config') backend_config_files = module.params.get('backend_config_files') init_reconfigure = module.params.get('init_reconfigure') + overwrite_init = module.params.get('overwrite_init') if bin_path is not None: command = [bin_path] @@ -383,7 +391,8 @@ def main(): APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve') if force_init: - init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths) + if overwrite_init or not os.path.isfile(os.path.join(project_path, ".terraform", "terraform.tfstate")): + init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths) workspace_ctx = get_workspace_context(command[0], project_path) if workspace_ctx["current"] != workspace: From 795125fec4d4b9875ea1c29a6ccd81c30432b4c7 Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Thu, 27 May 2021 22:34:52 +0530 Subject: [PATCH 0328/3093] xml: Add an example for absent (#2644) Element node can be deleted based upon the attribute value. 
Signed-off-by: Abhijeet Kasurde --- plugins/modules/files/xml.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/plugins/modules/files/xml.py b/plugins/modules/files/xml.py index f93c8e4dc4..e7c6ca3f1e 100644 --- a/plugins/modules/files/xml.py +++ b/plugins/modules/files/xml.py @@ -301,6 +301,23 @@ EXAMPLES = r''' - floor: Grog storage - construction_date: "1990" # Only strings are valid - building: Grog factory + +# Consider this XML for following example - +# +# +# +# part to remove +# +# +# part to keep +# +# + +- name: Delete element node based upon attribute + community.general.xml: + path: bar.xml + xpath: /config/element[@name='test1'] + state: absent ''' RETURN = r''' From 95794f31e34552628cd648d27672eadabe4154ec Mon Sep 17 00:00:00 2001 From: Merouane Atig Date: Thu, 27 May 2021 19:08:35 +0200 Subject: [PATCH 0329/3093] Fix drain example with correct wait values (#2603) --- plugins/modules/net_tools/haproxy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/net_tools/haproxy.py b/plugins/modules/net_tools/haproxy.py index 8efb59ed2e..a3320b45c5 100644 --- a/plugins/modules/net_tools/haproxy.py +++ b/plugins/modules/net_tools/haproxy.py @@ -150,7 +150,7 @@ EXAMPLES = r''' backend: www wait: yes drain: yes - wait_interval: 1 + wait_interval: 60 wait_retries: 60 - name: Disable backend server in 'www' backend pool and drop open sessions to it From 43c12b82fa9cf63d2258565f1d62d5dc0a0075ff Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Thu, 27 May 2021 22:39:26 +0530 Subject: [PATCH 0330/3093] random_string: a new lookup plugin (#2572) New lookup plugin to generate random string based upon constraints. 
Signed-off-by: Abhijeet Kasurde --- plugins/lookup/random_string.py | 220 ++++++++++++++++++ .../targets/lookup_random_string/aliases | 3 + .../targets/lookup_random_string/runme.sh | 6 + .../targets/lookup_random_string/test.yml | 48 ++++ 4 files changed, 277 insertions(+) create mode 100644 plugins/lookup/random_string.py create mode 100644 tests/integration/targets/lookup_random_string/aliases create mode 100755 tests/integration/targets/lookup_random_string/runme.sh create mode 100644 tests/integration/targets/lookup_random_string/test.yml diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py new file mode 100644 index 0000000000..6a05cfd041 --- /dev/null +++ b/plugins/lookup/random_string.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +# Copyright: (c) 2021, Abhijeet Kasurde +# Copyright: (c) 2018, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r""" + name: random_string + author: + - Abhijeet Kasurde (@Akasurde) + short_description: Generates random string + version_added: '3.2.0' + description: + - Generates random string based upon the given constraints. + options: + length: + description: The length of the string. + default: 8 + type: int + upper: + description: + - Include uppercase letters in the string. + default: true + type: bool + lower: + description: + - Include lowercase letters in the string. + default: true + type: bool + numbers: + description: + - Include numbers in the string. + default: true + type: bool + special: + description: + - Include special characters in the string. + - Special characters are taken from Python standard library C(string). + See L(the documentation of string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation) + for which characters will be used. 
+ - The choice of special characters can be changed to setting I(override_special). + default: true + type: bool + min_numeric: + description: + - Minimum number of numeric characters in the string. + - If set, overrides I(numbers=false). + default: 0 + type: int + min_upper: + description: + - Minimum number of uppercase alphabets in the string. + - If set, overrides I(upper=false). + default: 0 + type: int + min_lower: + description: + - Minimum number of lowercase alphabets in the string. + - If set, overrides I(lower=false). + default: 0 + type: int + min_special: + description: + - Minimum number of special character in the string. + default: 0 + type: int + override_special: + description: + - Overide a list of special characters to use in the string. + - If set I(min_special) should be set to a non-default value. + type: str + override_all: + description: + - Override all values of I(numbers), I(upper), I(lower), and I(special) with + the given list of characters. + type: str + base64: + description: + - Returns base64 encoded string. 
+ type: bool + default: false +""" + +EXAMPLES = r""" +- name: Generate random string + ansible.builtin.debug: + var: lookup('community.general.random_string') + # Example result: ['DeadBeeF'] + +- name: Generate random string with length 12 + ansible.builtin.debug: + var: lookup('community.general.random_string', length=12) + # Example result: ['Uan0hUiX5kVG'] + +- name: Generate base64 encoded random string + ansible.builtin.debug: + var: lookup('community.general.random_string', base64=True) + # Example result: ['NHZ6eWN5Qk0='] + +- name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (atleast) + ansible.builtin.debug: + var: lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1) + # Example result: ['&Qw2|E[-'] + +- name: Generate a random string with all lower case characters + debug: + var: query('community.general.random_string', upper=false, numbers=false, special=false) + # Example result: ['exolxzyz'] + +- name: Generate random hexadecimal string + debug: + var: query('community.general.random_string', upper=false, lower=false, override_special=hex_chars, numbers=false) + vars: + hex_chars: '0123456789ABCDEF' + # Example result: ['D2A40737'] + +- name: Generate random hexadecimal string with override_all + debug: + var: query('community.general.random_string', override_all=hex_chars) + vars: + hex_chars: '0123456789ABCDEF' + # Example result: ['D2A40737'] +""" + +RETURN = r""" + _raw: + description: A one-element list containing a random string + type: list + elements: str +""" + +import base64 +import random +import string + +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase +from ansible.module_utils._text import to_bytes, to_text + + +class LookupModule(LookupBase): + @staticmethod + def get_random(random_generator, chars, length): + if not chars: + raise AnsibleLookupError( + "Available characters cannot be None, please change 
constraints" + ) + return "".join(random_generator.choice(chars) for dummy in range(length)) + + @staticmethod + def b64encode(string_value, encoding="utf-8"): + return to_text( + base64.b64encode( + to_bytes(string_value, encoding=encoding, errors="surrogate_or_strict") + ) + ) + + def run(self, terms, variables=None, **kwargs): + number_chars = string.digits + lower_chars = string.ascii_lowercase + upper_chars = string.ascii_uppercase + special_chars = string.punctuation + random_generator = random.SystemRandom() + + self.set_options(var_options=variables, direct=kwargs) + + length = self.get_option("length") + base64_flag = self.get_option("base64") + override_all = self.get_option("override_all") + values = "" + available_chars_set = "" + + if override_all: + # Override all the values + available_chars_set = override_all + else: + upper = self.get_option("upper") + lower = self.get_option("lower") + numbers = self.get_option("numbers") + special = self.get_option("special") + override_special = self.get_option("override_special") + + if override_special: + special_chars = override_special + + if upper: + available_chars_set += upper_chars + if lower: + available_chars_set += lower_chars + if numbers: + available_chars_set += number_chars + if special: + available_chars_set += special_chars + + mapping = { + "min_numeric": number_chars, + "min_lower": lower_chars, + "min_upper": upper_chars, + "min_special": special_chars, + } + + for m in mapping: + if self.get_option(m): + values += self.get_random(random_generator, mapping[m], self.get_option(m)) + + remaining_pass_len = length - len(values) + values += self.get_random(random_generator, available_chars_set, remaining_pass_len) + + # Get pseudo randomization + shuffled_values = list(values) + # Randomize the order + random.shuffle(shuffled_values) + + if base64_flag: + return [self.b64encode("".join(shuffled_values))] + + return ["".join(shuffled_values)] diff --git 
a/tests/integration/targets/lookup_random_string/aliases b/tests/integration/targets/lookup_random_string/aliases new file mode 100644 index 0000000000..bc987654d9 --- /dev/null +++ b/tests/integration/targets/lookup_random_string/aliases @@ -0,0 +1,3 @@ +shippable/posix/group2 +skip/aix +skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/lookup_random_string/runme.sh b/tests/integration/targets/lookup_random_string/runme.sh new file mode 100755 index 0000000000..8ed6373823 --- /dev/null +++ b/tests/integration/targets/lookup_random_string/runme.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +set -eux + +ANSIBLE_ROLES_PATH=../ \ + ansible-playbook test.yml -v "$@" diff --git a/tests/integration/targets/lookup_random_string/test.yml b/tests/integration/targets/lookup_random_string/test.yml new file mode 100644 index 0000000000..52a572379b --- /dev/null +++ b/tests/integration/targets/lookup_random_string/test.yml @@ -0,0 +1,48 @@ +- hosts: localhost + gather_facts: no + tasks: + - name: Call plugin + set_fact: + result1: "{{ query('community.general.random_string') }}" + result2: "{{ query('community.general.random_string', length=0) }}" + result3: "{{ query('community.general.random_string', length=10) }}" + result4: "{{ query('community.general.random_string', length=-1) }}" + result5: "{{ query('community.general.random_string', override_special='_', min_special=1) }}" + result6: "{{ query('community.general.random_string', upper=false, special=false) }}" # lower case only + result7: "{{ query('community.general.random_string', lower=false) }}" # upper case only + result8: "{{ query('community.general.random_string', lower=false, upper=false, special=false) }}" # number only + result9: "{{ query('community.general.random_string', lower=false, upper=false, special=false, min_numeric=1, 
length=1) }}" # single digit only + result10: "{{ query('community.general.random_string', numbers=false, upper=false, special=false, min_lower=1, length=1) }}" # single lowercase character only + result11: "{{ query('community.general.random_string', base64=true, length=8) }}" + result12: "{{ query('community.general.random_string', upper=false, numbers=false, special=false) }}" # all lower case + result13: "{{ query('community.general.random_string', override_all='0', length=2) }}" + + - name: Raise error when impossible constraints are provided + set_fact: + impossible: "{{ query('community.general.random_string', upper=false, lower=false, special=false, numbers=false) }}" + ignore_errors: yes + register: impossible_result + + - name: Check results + assert: + that: + - result1[0] | length == 8 + - result2[0] | length == 0 + - result3[0] | length == 10 + - result4[0] | length == 0 + - result5[0] | length == 8 + - "'_' in result5[0]" + - result6[0] is lower + - result7[0] is upper + - result8[0] | regex_replace('^(\d+)$', '') == '' + - result9[0] | regex_replace('^(\d+)$', '') == '' + - result9[0] | length == 1 + - result10[0] | length == 1 + - result10[0] is lower + # if input string is not multiple of 3, base64 encoded string will be padded with = + - result11[0].endswith('=') + - result12[0] is lower + - result13[0] | length == 2 + - result13[0] == '00' + - impossible_result is failed + - "'Available characters cannot' in impossible_result.msg" From 3afcf7e75db37d4c6e24bb4ef25999d95013d4e3 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 28 May 2021 05:13:21 +1200 Subject: [PATCH 0331/3093] minor refactors on plugins/modules/cloud/misc (#2557) * minor refactors on plugins/modules/cloud/misc * added changelog fragment * removed unreachable statement * Update plugins/modules/cloud/misc/terraform.py Co-authored-by: Felix Fontein * Update plugins/modules/cloud/misc/rhevm.py Co-authored-by: Felix Fontein * adjusted 
per PR comment Co-authored-by: Felix Fontein --- .../fragments/2557-cloud-misc-refactor.yml | 7 +++++ .../cloud/misc/cloud_init_data_facts.py | 4 +-- .../modules/cloud/misc/proxmox_group_info.py | 2 +- plugins/modules/cloud/misc/proxmox_kvm.py | 31 +++++++++---------- plugins/modules/cloud/misc/rhevm.py | 4 +-- plugins/modules/cloud/misc/serverless.py | 11 +++---- plugins/modules/cloud/misc/terraform.py | 2 +- 7 files changed, 32 insertions(+), 29 deletions(-) create mode 100644 changelogs/fragments/2557-cloud-misc-refactor.yml diff --git a/changelogs/fragments/2557-cloud-misc-refactor.yml b/changelogs/fragments/2557-cloud-misc-refactor.yml new file mode 100644 index 0000000000..82e56dc942 --- /dev/null +++ b/changelogs/fragments/2557-cloud-misc-refactor.yml @@ -0,0 +1,7 @@ +minor_changes: + - cloud_init_data_facts - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). + - proxmox_group_info - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). + - proxmox_kvm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). + - rhevm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). + - serverless - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). + - terraform - minor refactor (https://github.com/ansible-collections/community.general/pull/2557). 
diff --git a/plugins/modules/cloud/misc/cloud_init_data_facts.py b/plugins/modules/cloud/misc/cloud_init_data_facts.py index 2efb90cfeb..5774fa6f39 100644 --- a/plugins/modules/cloud/misc/cloud_init_data_facts.py +++ b/plugins/modules/cloud/misc/cloud_init_data_facts.py @@ -88,7 +88,7 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -CLOUD_INIT_PATH = "/var/lib/cloud/data/" +CLOUD_INIT_PATH = "/var/lib/cloud/data" def gather_cloud_init_data_facts(module): @@ -100,7 +100,7 @@ def gather_cloud_init_data_facts(module): filter = module.params.get('filter') if filter is None or filter == i: res['cloud_init_data_facts'][i] = dict() - json_file = CLOUD_INIT_PATH + i + '.json' + json_file = os.path.join(CLOUD_INIT_PATH, i + '.json') if os.path.exists(json_file): f = open(json_file, 'rb') diff --git a/plugins/modules/cloud/misc/proxmox_group_info.py b/plugins/modules/cloud/misc/proxmox_group_info.py index bf88659656..3d60e7e214 100644 --- a/plugins/modules/cloud/misc/proxmox_group_info.py +++ b/plugins/modules/cloud/misc/proxmox_group_info.py @@ -95,7 +95,7 @@ class ProxmoxGroup: self.group = dict() # Data representation is not the same depending on API calls for k, v in group.items(): - if k == 'users' and type(v) == str: + if k == 'users' and isinstance(v, str): self.group['users'] = v.split(',') elif k == 'members': self.group['users'] = group['members'] diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 2dcb1ab573..0ad75a45bd 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -808,23 +808,23 @@ def get_vminfo(module, proxmox, node, vmid, **kwargs): # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) - # Convert all dict in kwargs to elements. 
For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] + # Convert all dict in kwargs to elements. + # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] for k in list(kwargs.keys()): if isinstance(kwargs[k], dict): kwargs.update(kwargs[k]) del kwargs[k] # Split information by type + re_net = re.compile(r'net[0-9]') + re_dev = re.compile(r'(virtio|ide|scsi|sata)[0-9]') for k, v in kwargs.items(): - if re.match(r'net[0-9]', k) is not None: + if re_net.match(k): interface = k k = vm[k] k = re.search('=(.*?),', k).group(1) mac[interface] = k - if (re.match(r'virtio[0-9]', k) is not None or - re.match(r'ide[0-9]', k) is not None or - re.match(r'scsi[0-9]', k) is not None or - re.match(r'sata[0-9]', k) is not None): + elif re_dev.match(k): device = k k = vm[k] k = re.search('(.*?),', k).group(1) @@ -835,16 +835,13 @@ def get_vminfo(module, proxmox, node, vmid, **kwargs): results['vmid'] = int(vmid) -def settings(module, proxmox, vmid, node, name, **kwargs): +def settings(proxmox, vmid, node, **kwargs): proxmox_node = proxmox.nodes(node) # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) - if proxmox_node.qemu(vmid).config.set(**kwargs) is None: - return True - else: - return False + return proxmox_node.qemu(vmid).config.set(**kwargs) is None def wait_for_task(module, proxmox, node, taskid): @@ -915,7 +912,8 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock if 'pool' in kwargs: del kwargs['pool'] - # Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n] + # Convert all dict in kwargs to elements. 
+ # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n] for k in list(kwargs.keys()): if isinstance(kwargs[k], dict): kwargs.update(kwargs[k]) @@ -938,8 +936,9 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock # VM tags are expected to be valid and presented as a comma/semi-colon delimited string if 'tags' in kwargs: + re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$') for tag in kwargs['tags']: - if not re.match(r'^[a-z0-9_][a-z0-9_\-\+\.]*$', tag): + if not re_tag.match(tag): module.fail_json(msg='%s is not a valid tag' % tag) kwargs['tags'] = ",".join(kwargs['tags']) @@ -971,7 +970,7 @@ def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sock if not wait_for_task(module, proxmox, node, taskid): module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' % - proxmox_node.tasks(taskid).log.get()[:1]) + proxmox_node.tasks(taskid).log.get()[:1]) return False return True @@ -1209,14 +1208,14 @@ def main(): if delete is not None: try: - settings(module, proxmox, vmid, node, name, delete=delete) + settings(proxmox, vmid, node, delete=delete) module.exit_json(changed=True, vmid=vmid, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid)) except Exception as e: module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e)) if revert is not None: try: - settings(module, proxmox, vmid, node, name, revert=revert) + settings(proxmox, vmid, node, revert=revert) module.exit_json(changed=True, vmid=vmid, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid)) except Exception as e: module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... 
'.format(name, vmid) + str(e)) diff --git a/plugins/modules/cloud/misc/rhevm.py b/plugins/modules/cloud/misc/rhevm.py index cc6c1252bf..77b40248b3 100644 --- a/plugins/modules/cloud/misc/rhevm.py +++ b/plugins/modules/cloud/misc/rhevm.py @@ -547,7 +547,7 @@ class RHEVConn(object): def set_Memory_Policy(self, name, memory_policy): VM = self.get_VM(name) - VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024) + VM.memory_policy.guaranteed = int(memory_policy) * 1024 * 1024 * 1024 try: VM.update() setMsg("The memory policy has been updated.") @@ -1260,7 +1260,7 @@ def core(module): r = RHEV(module) - state = module.params.get('state', 'present') + state = module.params.get('state') if state == 'ping': r.test() diff --git a/plugins/modules/cloud/misc/serverless.py b/plugins/modules/cloud/misc/serverless.py index 912d4226a8..1b2f8b62a6 100644 --- a/plugins/modules/cloud/misc/serverless.py +++ b/plugins/modules/cloud/misc/serverless.py @@ -139,16 +139,14 @@ from ansible.module_utils.basic import AnsibleModule def read_serverless_config(module): path = module.params.get('service_path') + full_path = os.path.join(path, 'serverless.yml') try: - with open(os.path.join(path, 'serverless.yml')) as sls_config: + with open(full_path) as sls_config: config = yaml.safe_load(sls_config.read()) return config except IOError as e: - module.fail_json(msg="Could not open serverless.yml in {0}. err: {1}".format(path, str(e))) - - module.fail_json(msg="Failed to open serverless config at {0}".format( - os.path.join(path, 'serverless.yml'))) + module.fail_json(msg="Could not open serverless.yml in {0}. 
err: {1}".format(full_path, str(e))) def get_service_name(module, stage): @@ -182,7 +180,6 @@ def main(): service_path = module.params.get('service_path') state = module.params.get('state') - functions = module.params.get('functions') region = module.params.get('region') stage = module.params.get('stage') deploy = module.params.get('deploy', True) @@ -193,7 +190,7 @@ def main(): if serverless_bin_path is not None: command = serverless_bin_path + " " else: - command = "serverless " + command = module.get_bin_path("serverless") + " " if state == 'present': command += 'deploy ' diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py index 9bf36c8c81..8a34f9699b 100644 --- a/plugins/modules/cloud/misc/terraform.py +++ b/plugins/modules/cloud/misc/terraform.py @@ -233,7 +233,7 @@ def get_version(bin_path): def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None): - if project_path in [None, ''] or '/' not in project_path: + if project_path is None or '/' not in project_path: module.fail_json(msg="Path for Terraform project can not be None or ''.") if not os.path.exists(bin_path): module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path)) From 14f13904d63dcffab2069b5be69ebe46a2945fef Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 27 May 2021 22:59:42 +0200 Subject: [PATCH 0332/3093] Add extra docs tests (#2663) * Add extra docs tests. * Linting. * Fix copy'n'paste error. 
--- tests/sanity/extra/extra-docs.json | 10 ++++++++++ tests/sanity/extra/extra-docs.py | 23 +++++++++++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 tests/sanity/extra/extra-docs.json create mode 100755 tests/sanity/extra/extra-docs.py diff --git a/tests/sanity/extra/extra-docs.json b/tests/sanity/extra/extra-docs.json new file mode 100644 index 0000000000..a62ef37e63 --- /dev/null +++ b/tests/sanity/extra/extra-docs.json @@ -0,0 +1,10 @@ +{ + "include_symlinks": false, + "prefixes": [ + "docs/docsite/" + ], + "output": "path-line-column-message", + "requirements": [ + "antsibull" + ] +} diff --git a/tests/sanity/extra/extra-docs.py b/tests/sanity/extra/extra-docs.py new file mode 100755 index 0000000000..f4b7f59d3c --- /dev/null +++ b/tests/sanity/extra/extra-docs.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Check extra collection docs with antsibull-lint.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys +import subprocess + + +def main(): + """Main entry point.""" + if not os.path.isdir(os.path.join('docs', 'docsite')): + return + p = subprocess.run(['antsibull-lint', 'collection-docs', '.'], check=False) + if p.returncode not in (0, 3): + print('{0}:0:0: unexpected return code {1}'.format(sys.argv[0], p.returncode)) + + +if __name__ == '__main__': + main() From 14813a6287af016d2b1823ce8e29bc2cc1dd10e5 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 28 May 2021 07:09:57 +0200 Subject: [PATCH 0333/3093] Stop mentioning Freenode. We're on Libera.chat. 
(#2666) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index e6e4eb880e..a874a3e929 100644 --- a/README.md +++ b/README.md @@ -60,10 +60,10 @@ See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collectio We have a dedicated Working Group for Ansible development. -You can find other people interested on the following Freenode IRC channels - +You can find other people interested on the following [Libera.chat](https://libera.chat/) IRC channels - - `#ansible` - For general use questions and support. -- `#ansible-devel` - For discussions on developer topics and code related to features or bugs. -- `#ansible-community` - For discussions on community topics and community meetings. +- `#ansible-devel` - For discussions on developer topics and code related to features or bugs in ansible-core. +- `#ansible-community` - For discussions on community topics and community meetings, and for general development questions for community collections. For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community). 
From c3cab7c68c4eb0d0f80d49356c8be52d0bb849ef Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Fri, 28 May 2021 15:19:29 +0430 Subject: [PATCH 0334/3093] composer: add composer_executable (#2650) * composer: add composer_executable * Add changelog * Improve documentation thanks to felixfontein Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2650-composer-add_composer_executable.yml | 3 +++ plugins/modules/packaging/language/composer.py | 14 ++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2650-composer-add_composer_executable.yml diff --git a/changelogs/fragments/2650-composer-add_composer_executable.yml b/changelogs/fragments/2650-composer-add_composer_executable.yml new file mode 100644 index 0000000000..b1cccc689c --- /dev/null +++ b/changelogs/fragments/2650-composer-add_composer_executable.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - composer - add ``composer_executable`` option (https://github.com/ansible-collections/community.general/issues/2649). diff --git a/plugins/modules/packaging/language/composer.py b/plugins/modules/packaging/language/composer.py index 64157cb685..86fe7bdea3 100644 --- a/plugins/modules/packaging/language/composer.py +++ b/plugins/modules/packaging/language/composer.py @@ -117,9 +117,14 @@ options: default: false type: bool aliases: [ ignore-platform-reqs ] + composer_executable: + type: path + description: + - Path to composer executable on the remote host, if composer is not in C(PATH) or a custom composer is needed. + version_added: 3.2.0 requirements: - php - - composer installed in bin path (recommended /usr/local/bin) + - composer installed in bin path (recommended /usr/local/bin) or specified in I(composer_executable) notes: - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available. - We received reports about issues on macOS if composer was installed by Homebrew. 
Please use the official install method to avoid issues. @@ -187,7 +192,11 @@ def composer_command(module, command, arguments="", options=None, global_command else: php_path = module.params['executable'] - composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) + if module.params['composer_executable'] is None: + composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) + else: + composer_path = module.params['composer_executable'] + cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments) return module.run_command(cmd) @@ -231,6 +240,7 @@ def main(): ignore_platform_reqs=dict( default=False, type="bool", aliases=["ignore-platform-reqs"], deprecated_aliases=[dict(name='ignore-platform-reqs', version='5.0.0', collection_name='community.general')]), + composer_executable=dict(type="path"), ), required_if=[('global_command', False, ['working_dir'])], supports_check_mode=True From b281d3d699433a0e0dda7d6db01d22855a2a4cd5 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sat, 29 May 2021 03:00:12 -0400 Subject: [PATCH 0335/3093] proxmox_kvm - Fixed vmid result when VM with name exists (#2648) * Fixed vmid result when VM with name exists * Adding changelog fragment --- changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml | 2 ++ plugins/modules/cloud/misc/proxmox_kvm.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml diff --git a/changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml b/changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml new file mode 100644 index 0000000000..7971fc24eb --- /dev/null +++ b/changelogs/fragments/2648-proxmox_kvm-fix-vmid-return-value.yml @@ -0,0 +1,2 @@ +bugfixes: + - proxmox_kvm - fixed ``vmid`` return value when VM with ``name`` already exists (https://github.com/ansible-collections/community.general/issues/2648). 
diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 0ad75a45bd..a664279e57 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -1225,7 +1225,7 @@ def main(): if get_vm(proxmox, vmid) and not (update or clone): module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid) elif get_vmid(proxmox, name) and not (update or clone): - module.exit_json(changed=False, vmid=vmid, msg="VM with name <%s> already exists" % name) + module.exit_json(changed=False, vmid=get_vmid(proxmox, name)[0], msg="VM with name <%s> already exists" % name) elif not (node, name): module.fail_json(msg='node, name is mandatory for creating/updating vm') elif not node_check(proxmox, node): From f09c39b71e84eb15481a9c2b4fd08beabfb17cff Mon Sep 17 00:00:00 2001 From: quidame Date: Sat, 29 May 2021 10:50:24 +0200 Subject: [PATCH 0336/3093] iptables_state: fix broken query of `async_status` result (#2671) * use get() rather than querying the key directly * add a changelog fragment * re-enable CI tests * Update changelog fragment Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2671-fix-broken-query-of-async_status-result.yml | 6 ++++++ plugins/action/system/iptables_state.py | 2 +- tests/integration/targets/iptables_state/aliases | 1 - 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2671-fix-broken-query-of-async_status-result.yml diff --git a/changelogs/fragments/2671-fix-broken-query-of-async_status-result.yml b/changelogs/fragments/2671-fix-broken-query-of-async_status-result.yml new file mode 100644 index 0000000000..993caaa323 --- /dev/null +++ b/changelogs/fragments/2671-fix-broken-query-of-async_status-result.yml @@ -0,0 +1,6 @@ +--- +bugfixes: + - "iptables_state - fix a broken query of ``async_status`` result + with current ansible-core development version + 
(https://github.com/ansible-collections/community.general/issues/2627, + https://github.com/ansible-collections/community.general/pull/2671)." diff --git a/plugins/action/system/iptables_state.py b/plugins/action/system/iptables_state.py index 96b6dc689c..887f3f47f9 100644 --- a/plugins/action/system/iptables_state.py +++ b/plugins/action/system/iptables_state.py @@ -52,7 +52,7 @@ class ActionModule(ActionBase): module_args=module_args, task_vars=task_vars, wrap_async=False) - if async_result['finished'] == 1: + if async_result.get('finished', 0) == 1: break time.sleep(min(1, timeout)) diff --git a/tests/integration/targets/iptables_state/aliases b/tests/integration/targets/iptables_state/aliases index 12765cec47..3cac4af522 100644 --- a/tests/integration/targets/iptables_state/aliases +++ b/tests/integration/targets/iptables_state/aliases @@ -5,4 +5,3 @@ skip/freebsd # no iptables/netfilter (Linux specific) skip/osx # no iptables/netfilter (Linux specific) skip/macos # no iptables/netfilter (Linux specific) skip/aix # no iptables/netfilter (Linux specific) -disabled # FIXME From bef3c04d1c7dfaf87d00f3777dd844c1e09e2a99 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 30 May 2021 02:48:59 +1200 Subject: [PATCH 0337/3093] Fixed sanity checks for cloud/online/ modules (#2677) * fixed validation-modules for plugins/modules/cloud/online/online_server_info.py * fixed validation-modules for plugins/modules/cloud/online/online_user_info.py * sanity fix --- plugins/modules/cloud/online/online_server_info.py | 8 +++++--- plugins/modules/cloud/online/online_user_info.py | 9 ++++----- tests/sanity/ignore-2.10.txt | 2 -- tests/sanity/ignore-2.11.txt | 2 -- tests/sanity/ignore-2.12.txt | 2 -- tests/sanity/ignore-2.9.txt | 2 -- 6 files changed, 9 insertions(+), 16 deletions(-) diff --git a/plugins/modules/cloud/online/online_server_info.py b/plugins/modules/cloud/online/online_server_info.py index f0e73aea16..f33a44d30f 100644 
--- a/plugins/modules/cloud/online/online_server_info.py +++ b/plugins/modules/cloud/online/online_server_info.py @@ -32,11 +32,13 @@ EXAMPLES = r''' ''' RETURN = r''' ---- online_server_info: - description: Response from Online API + description: + - Response from Online API. + - "For more details please refer to: U(https://console.online.net/en/api/)." returned: success - type: complex + type: list + elements: dict sample: "online_server_info": [ { diff --git a/plugins/modules/cloud/online/online_user_info.py b/plugins/modules/cloud/online/online_user_info.py index 093a2c687f..4125ccb63d 100644 --- a/plugins/modules/cloud/online/online_user_info.py +++ b/plugins/modules/cloud/online/online_user_info.py @@ -7,7 +7,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = r''' ---- module: online_user_info short_description: Gather information about Online user. description: @@ -16,7 +15,6 @@ author: - "Remy Leone (@sieben)" extends_documentation_fragment: - community.general.online - ''' EXAMPLES = r''' @@ -29,11 +27,12 @@ EXAMPLES = r''' ''' RETURN = r''' ---- online_user_info: - description: Response from Online API + description: + - Response from Online API. + - "For more details please refer to: U(https://console.online.net/en/api/)." 
returned: success - type: complex + type: dict sample: "online_user_info": { "company": "foobar LLC", diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index da611904bb..16c94a2c09 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -4,8 +4,6 @@ plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index a7d85904ae..db731736c0 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -3,8 +3,6 @@ plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 
cf5d588e9a..de3634ae40 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -3,8 +3,6 @@ plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice -plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 32e13b1a1e..9cb31a442d 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -3,8 +3,6 @@ plugins/module_utils/_mount.py future-import-boilerplate plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen -plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error plugins/modules/cloud/rackspace/rax.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values From b6c0cc0b610e8a23d4b8c7353475fce0f4315947 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Mon, 31 May 2021 01:51:29 -0400 Subject: [PATCH 0338/3093] archive - Adding exclusion_patterns option (#2616) * Adding exclusion_patterns option * Adding changelog fragment and Python 2.6 
compatability * Minor refactoring for readability * Removing unneccessary conditional * Applying initial review suggestions * Adding missed review suggestion --- ...2616-archive-exclusion_patterns-option.yml | 2 + plugins/modules/files/archive.py | 105 ++++++++++++++---- .../targets/archive/tasks/main.yml | 13 +++ 3 files changed, 100 insertions(+), 20 deletions(-) create mode 100644 changelogs/fragments/2616-archive-exclusion_patterns-option.yml diff --git a/changelogs/fragments/2616-archive-exclusion_patterns-option.yml b/changelogs/fragments/2616-archive-exclusion_patterns-option.yml new file mode 100644 index 0000000000..86ef806b63 --- /dev/null +++ b/changelogs/fragments/2616-archive-exclusion_patterns-option.yml @@ -0,0 +1,2 @@ +minor_changes: + - archive - added ``exclusion_patterns`` option to exclude files or subdirectories from archives (https://github.com/ansible-collections/community.general/pull/2616). diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 8b8088dae1..8d4afa58a5 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -41,8 +41,16 @@ options: exclude_path: description: - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion. + - Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list. type: list elements: path + exclusion_patterns: + description: + - Glob style patterns to exclude files or directories from the resulting archive. + - This differs from I(exclude_path) which applies only to the source paths from I(path). + type: list + elements: path + version_added: 3.2.0 force_archive: description: - Allows you to force the module to treat this as an archive even if only a single file is specified. 
@@ -163,6 +171,8 @@ import re import shutil import tarfile import zipfile +from fnmatch import fnmatch +from sys import version_info from traceback import format_exc from ansible.module_utils.basic import AnsibleModule, missing_required_lib @@ -186,6 +196,8 @@ else: LZMA_IMP_ERR = format_exc() HAS_LZMA = False +PY27 = version_info[0:2] >= (2, 7) + def to_b(s): return to_bytes(s, errors='surrogate_or_strict') @@ -214,6 +226,59 @@ def expand_paths(paths): return expanded_path, is_globby +def matches_exclusion_patterns(path, exclusion_patterns): + return any(fnmatch(path, p) for p in exclusion_patterns) + + +def get_filter(exclusion_patterns, format): + def zip_filter(path): + return matches_exclusion_patterns(path, exclusion_patterns) + + def tar_filter(tarinfo): + return None if matches_exclusion_patterns(tarinfo.name, exclusion_patterns) else tarinfo + + return zip_filter if format == 'zip' or not PY27 else tar_filter + + +def get_archive_contains(format): + def archive_contains(archive, name): + try: + if format == 'zip': + archive.getinfo(name) + else: + archive.getmember(name) + except KeyError: + return False + + return True + + return archive_contains + + +def get_add_to_archive(format, filter): + def add_to_zip_archive(archive_file, path, archive_name): + try: + if not filter(path): + archive_file.write(path, archive_name) + except Exception as e: + return e + + return None + + def add_to_tar_archive(archive_file, path, archive_name): + try: + if PY27: + archive_file.add(path, archive_name, recursive=False, filter=filter) + else: + archive_file.add(path, archive_name, recursive=False, exclude=filter) + except Exception as e: + return e + + return None + + return add_to_zip_archive if format == 'zip' else add_to_tar_archive + + def main(): module = AnsibleModule( argument_spec=dict( @@ -221,6 +286,7 @@ def main(): format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), dest=dict(type='path'), exclude_path=dict(type='list', 
elements='path'), + exclusion_patterns=dict(type='list', elements='path'), force_archive=dict(type='bool', default=False), remove=dict(type='bool', default=False), ), @@ -242,6 +308,8 @@ def main(): changed = False state = 'absent' + exclusion_patterns = params['exclusion_patterns'] or [] + # Simple or archive file compression (inapplicable with 'zip' since it's always an archive) b_successes = [] @@ -262,6 +330,10 @@ def main(): # Only attempt to expand the exclude paths if it exists b_expanded_exclude_paths = expand_paths(exclude_paths)[0] if exclude_paths else [] + filter = get_filter(exclusion_patterns, fmt) + archive_contains = get_archive_contains(fmt) + add_to_archive = get_add_to_archive(fmt, filter) + # Only try to determine if we are working with an archive or not if we haven't set archive to true if not force_archive: # If we actually matched multiple files or TRIED to, then @@ -384,38 +456,31 @@ def main(): n_fullpath = to_na(b_fullpath) n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict') - try: - if fmt == 'zip': - arcfile.write(n_fullpath, n_arcname) - else: - arcfile.add(n_fullpath, n_arcname, recursive=False) - - except Exception as e: - errors.append('%s: %s' % (n_fullpath, to_native(e))) + err = add_to_archive(arcfile, n_fullpath, n_arcname) + if err: + errors.append('%s: %s' % (n_fullpath, to_native(err))) for b_filename in b_filenames: b_fullpath = b_dirpath + b_filename n_fullpath = to_na(b_fullpath) n_arcname = to_n(b_match_root.sub(b'', b_fullpath)) - try: - if fmt == 'zip': - arcfile.write(n_fullpath, n_arcname) - else: - arcfile.add(n_fullpath, n_arcname, recursive=False) + err = add_to_archive(arcfile, n_fullpath, n_arcname) + if err: + errors.append('Adding %s: %s' % (to_native(b_path), to_native(err))) + if archive_contains(arcfile, n_arcname): b_successes.append(b_fullpath) - except Exception as e: - errors.append('Adding %s: %s' % (to_native(b_path), to_native(e))) else: path = to_na(b_path) arcname = 
to_n(b_match_root.sub(b'', b_path)) - if fmt == 'zip': - arcfile.write(path, arcname) - else: - arcfile.add(path, arcname, recursive=False) - b_successes.append(b_path) + err = add_to_archive(arcfile, path, arcname) + if err: + errors.append('Adding %s: %s' % (to_native(b_path), to_native(err))) + + if archive_contains(arcfile, arcname): + b_successes.append(b_path) except Exception as e: expanded_fmt = 'zip' if fmt == 'zip' else ('tar.' + fmt) diff --git a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml index 2267268715..761f9eb7b8 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -363,6 +363,19 @@ - name: remove nonascii test file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.zip" state=absent +- name: Test exclusion_patterns option + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/test-archive-exclustion-patterns.tgz" + exclusion_patterns: b?r.* + register: exclusion_patterns_result + +- name: Assert that exclusion_patterns only archives included files + assert: + that: + - exclusion_patterns_result is changed + - "'bar.txt' not in exclusion_patterns_result.archived" + - name: Remove backports.lzma if previously installed (pip) pip: name=backports.lzma state=absent when: backports_lzma_pip is changed From 3516acf8d402de721887cd10e16293d747fbb29e Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 1 Jun 2021 19:03:07 +0200 Subject: [PATCH 0339/3093] Add filter docs (#2680) * Began with filter docs. * Add more filters. * Add time unit filters. * Add TOC and filters to create identifiers. * Add more filters. * Add documentation from ansible/ansible for json_query and random_mac. 
* Update docs/docsite/rst/filter_guide.rst Co-authored-by: Abhijeet Kasurde Co-authored-by: Abhijeet Kasurde --- docs/docsite/extra-docs.yml | 5 + docs/docsite/rst/filter_guide.rst | 753 ++++++++++++++++++++++++++++++ 2 files changed, 758 insertions(+) create mode 100644 docs/docsite/extra-docs.yml create mode 100644 docs/docsite/rst/filter_guide.rst diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml new file mode 100644 index 0000000000..22ae7b58f5 --- /dev/null +++ b/docs/docsite/extra-docs.yml @@ -0,0 +1,5 @@ +--- +sections: + - title: Guides + toctree: + - filter_guide diff --git a/docs/docsite/rst/filter_guide.rst b/docs/docsite/rst/filter_guide.rst new file mode 100644 index 0000000000..201b275aae --- /dev/null +++ b/docs/docsite/rst/filter_guide.rst @@ -0,0 +1,753 @@ +.. _ansible_collections.community.general.docsite.filter_guide: + +community.general Filter Guide +============================== + +The :ref:`community.general collection ` offers several useful filter plugins. + +.. contents:: Topics + +Paths +----- + +The ``path_join`` filter has been added in ansible-base 2.10. If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``. This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9. + +.. code-block:: yaml+jinja + + # ansible-base 2.10 or newer: + path: {{ ('/etc', path, 'subdir', file) | path_join }} + + # Also works with Ansible 2.9: + path: {{ ('/etc', path, 'subdir', file) | community.general.path_join }} + +.. versionadded:: 3.0.0 + +Abstract transformations +------------------------ + +Dictionaries +^^^^^^^^^^^^ + +You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``: + +.. 
code-block:: yaml+jinja + + - name: Create a single-entry dictionary + debug: + msg: "{{ myvar | community.general.dict_kv('thatsmyvar') }}" + vars: + myvar: myvalue + + - name: Create a list of dictionaries where the 'server' field is taken from a list + debug: + msg: >- + {{ myservers | map('community.general.dict_kv', 'server') + | map('combine', common_config) }} + vars: + common_config: + type: host + database: all + myservers: + - server1 + - server2 + +This produces: + +.. code-block:: ansible-output + + TASK [Create a single-entry dictionary] ************************************************** + ok: [localhost] => { + "msg": { + "thatsmyvar": "myvalue" + } + } + + TASK [Create a list of dictionaries where the 'server' field is taken from a list] ******* + ok: [localhost] => { + "msg": [ + { + "database": "all", + "server": "server1", + "type": "host" + }, + { + "database": "all", + "server": "server2", + "type": "host" + } + ] + } + +.. versionadded:: 2.0.0 + +If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the ``community.general.dict`` filter can be used: + +.. code-block:: yaml+jinja + + - name: Create a dictionary with the dict function + debug: + msg: "{{ dict([[1, 2], ['a', 'b']]) }}" + + - name: Create a dictionary with the community.general.dict filter + debug: + msg: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}" + + - name: Create a list of dictionaries with map and the community.general.dict filter + debug: + msg: >- + {{ values | map('zip', ['k1', 'k2', 'k3']) + | map('map', 'reverse') + | map('community.general.dict') }} + vars: + values: + - - foo + - 23 + - a + - - bar + - 42 + - b + +This produces: + +.. 
code-block:: ansible-output + + TASK [Create a dictionary with the dict function] **************************************** + ok: [localhost] => { + "msg": { + "1": 2, + "a": "b" + } + } + + TASK [Create a dictionary with the community.general.dict filter] ************************ + ok: [localhost] => { + "msg": { + "1": 2, + "a": "b" + } + } + + TASK [Create a list of dictionaries with map and the community.general.dict filter] ****** + ok: [localhost] => { + "msg": [ + { + "k1": "foo", + "k2": 23, + "k3": "a" + }, + { + "k1": "bar", + "k2": 42, + "k3": "b" + } + ] + } + +.. versionadded:: 3.0.0 + +Grouping +^^^^^^^^ + +If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be a most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary. + +One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information: + +.. code-block:: yaml+jinja + + - name: Output mount facts grouped by device name + debug: + var: ansible_facts.mounts | community.general.groupby_as_dict('device') + + - name: Output mount facts grouped by mount point + debug: + var: ansible_facts.mounts | community.general.groupby_as_dict('mount') + +This produces: + +.. 
code-block:: ansible-output + + TASK [Output mount facts grouped by device name] ****************************************** + ok: [localhost] => { + "ansible_facts.mounts | community.general.groupby_as_dict('device')": { + "/dev/sda1": { + "block_available": 2000, + "block_size": 4096, + "block_total": 2345, + "block_used": 345, + "device": "/dev/sda1", + "fstype": "ext4", + "inode_available": 500, + "inode_total": 512, + "inode_used": 12, + "mount": "/boot", + "options": "rw,relatime,data=ordered", + "size_available": 56821, + "size_total": 543210, + "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a" + }, + "/dev/sda2": { + "block_available": 1234, + "block_size": 4096, + "block_total": 12345, + "block_used": 11111, + "device": "/dev/sda2", + "fstype": "ext4", + "inode_available": 1111, + "inode_total": 1234, + "inode_used": 123, + "mount": "/", + "options": "rw,relatime", + "size_available": 42143, + "size_total": 543210, + "uuid": "abcdef01-2345-6789-0abc-def012345678" + } + } + } + + TASK [Output mount facts grouped by mount point] ****************************************** + ok: [localhost] => { + "ansible_facts.mounts | community.general.groupby_as_dict('mount')": { + "/": { + "block_available": 1234, + "block_size": 4096, + "block_total": 12345, + "block_used": 11111, + "device": "/dev/sda2", + "fstype": "ext4", + "inode_available": 1111, + "inode_total": 1234, + "inode_used": 123, + "mount": "/", + "options": "rw,relatime", + "size_available": 42143, + "size_total": 543210, + "uuid": "bdf50b7d-4859-40af-8665-c637ee7a7808" + }, + "/boot": { + "block_available": 2000, + "block_size": 4096, + "block_total": 2345, + "block_used": 345, + "device": "/dev/sda1", + "fstype": "ext4", + "inode_available": 500, + "inode_total": 512, + "inode_used": 12, + "mount": "/boot", + "options": "rw,relatime,data=ordered", + "size_available": 56821, + "size_total": 543210, + "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a" + } + } + } + +.. 
versionadded: 3.0.0 + +Merging lists of dictionaries +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you have two lists of dictionaries and want to combine them into a list of merged dictionaries, where two dictionaries are merged if they coincide in one attribute, you can use the ``lists_mergeby`` filter. + +.. code-block:: yaml+jinja + + - name: Merge two lists by common attribute 'name' + debug: + var: list1 | community.general.lists_mergeby(list2, 'name') + vars: + list1: + - name: foo + extra: true + - name: bar + extra: false + - name: meh + extra: true + list2: + - name: foo + path: /foo + - name: baz + path: /bazzz + +This produces: + +.. code-block:: ansible-output + + TASK [Merge two lists by common attribute 'name'] **************************************** + ok: [localhost] => { + "list1 | community.general.lists_mergeby(list2, 'name')": [ + { + "extra": false, + "name": "bar" + }, + { + "name": "baz", + "path": "/bazzz" + }, + { + "extra": true, + "name": "foo", + "path": "/foo" + }, + { + "extra": true, + "name": "meh" + } + ] + } + +.. versionadded: 2.0.0 + +Working with times +------------------ + +The ``to_time_unit`` filter allows to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds. + +There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used: + +.. 
list-table:: Units + :widths: 25 25 25 25 + :header-rows: 1 + + * - Unit name + - Unit value in seconds + - Unit strings for filter + - Shorthand filter + * - Millisecond + - 1/1000 second + - ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds`` + - ``to_milliseconds`` + * - Second + - 1 second + - ``s``, ``sec``, ``secs``, ``second``, ``seconds`` + - ``to_seconds`` + * - Minute + - 60 seconds + - ``m``, ``min``, ``mins``, ``minute``, ``minutes`` + - ``to_minutes`` + * - Hour + - 60*60 seconds + - ``h``, ``hour``, ``hours`` + - ``to_hours`` + * - Day + - 24*60*60 seconds + - ``d``, ``day``, ``days`` + - ``to_days`` + * - Week + - 7*24*60*60 seconds + - ``w``, ``week``, ``weeks`` + - ``to_weeks`` + * - Month + - 30*24*60*60 seconds + - ``mo``, ``month``, ``months`` + - ``to_months`` + * - Year + - 365*24*60*60 seconds + - ``y``, ``year``, ``years`` + - ``to_years`` + +Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to ``to_time_unit`` and to all shorthand filters. + +.. code-block:: yaml+jinja + + - name: Convert string to seconds + debug: + msg: "{{ '30h 20m 10s 123ms' | community.general.to_time_unit('seconds') }}" + + - name: Convert string to hours + debug: + msg: "{{ '30h 20m 10s 123ms' | community.general.to_hours }}" + + - name: Convert string to years (using 365.25 days == 1 year) + debug: + msg: "{{ '400d 15h' | community.general.to_years(year=365.25) }}" + +This produces: + +.. 
code-block:: ansible-output + + TASK [Convert string to seconds] ********************************************************** + ok: [localhost] => { + "msg": "109210.123" + } + + TASK [Convert string to hours] ************************************************************ + ok: [localhost] => { + "msg": "30.336145277778" + } + + TASK [Convert string to years (using 365.25 days == 1 year)] ****************************** + ok: [localhost] => { + "msg": "1.096851471595" + } + +.. versionadded: 0.2.0 + +Working with versions +--------------------- + +If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter: + +.. code-block:: yaml+jinja + + - name: Sort list by version number + debug: + var: ansible_versions | community.general.version_sort + vars: + ansible_versions: + - '2.8.0' + - '2.11.0' + - '2.7.0' + - '2.10.0' + - '2.9.0' + +This produces: + +.. code-block:: ansible-output + + TASK [Sort list by version number] ******************************************************** + ok: [localhost] => { + "ansible_versions | community.general.version_sort": [ + "2.7.0", + "2.8.0", + "2.9.0", + "2.10.0", + "2.11.0" + ] + } + +.. versionadded: 2.2.0 + +Creating identifiers +-------------------- + +The following filters allow to create identifiers. + +Hashids +^^^^^^^ + +`Hashids `_ allow to convert sequences of integers to short unique string identifiers. This filter needs the `hashids Python library `_ installed on the controller. + +.. code-block:: yaml+jinja + + - name: "Create hashid" + debug: + msg: "{{ [1234, 5, 6] | community.general.hashids_encode }}" + + - name: "Decode hashid" + debug: + msg: "{{ 'jm2Cytn' | community.general.hashids_decode }}" + +This produces: + +.. 
code-block:: ansible-output + + TASK [Create hashid] ********************************************************************** + ok: [localhost] => { + "msg": "jm2Cytn" + } + + TASK [Decode hashid] ********************************************************************** + ok: [localhost] => { + "msg": [ + 1234, + 5, + 6 + ] + } + +The hashids filters accept keyword arguments to allow fine-tuning the hashids generated: + +:salt: String to use as salt when hashing. +:alphabet: String of 16 or more unique characters to produce a hash. +:min_length: Minimum length of hash produced. + +.. versionadded: 3.0.0 + +Random MACs +^^^^^^^^^^^ + +You can use the ``random_mac`` filter to complete a partial `MAC address `_ to a random 6-byte MAC address. + +.. code-block:: yaml+jinja + + - name: "Create a random MAC starting with ff:" + debug: + msg: "{{ 'FF' | community.general.random_mac }}" + + - name: "Create a random MAC starting with 00:11:22:" + debug: + msg: "{{ '00:11:22' | community.general.random_mac }}" + +This produces: + +.. code-block:: ansible-output + + TASK [Create a random MAC starting with ff:] ********************************************** + ok: [localhost] => { + "msg": "ff:69:d3:78:7f:b4" + } + + TASK [Create a random MAC starting with 00:11:22:] **************************************** + ok: [localhost] => { + "msg": "00:11:22:71:5d:3b" + } + +You can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses: + +.. code-block:: yaml+jinja + + "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}" + +Conversions +----------- + +Parsing CSV files +^^^^^^^^^^^^^^^^^ + +Ansible offers the :ref:`community.general.read_csv module ` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the ``from_csv`` filter exists. + +.. 
code-block:: yaml+jinja + + - name: "Parse CSV from string" + debug: + msg: "{{ csv_string | community.general.from_csv }}" + vars: + csv_string: | + foo,bar,baz + 1,2,3 + you,this,then + +This produces: + +.. code-block:: ansible-output + + TASK [Parse CSV from string] ************************************************************** + ok: [localhost] => { + "msg": [ + { + "bar": "2", + "baz": "3", + "foo": "1" + }, + { + "bar": "this", + "baz": "then", + "foo": "you" + } + ] + } + +The ``from_csv`` filter has several keyword arguments to control its behavior: + +:dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored. +:fieldnames: A set of column names to use. If not provided, the first line of the CSV is assumed to contain the column names. +:delimiter: Sets the delimiter to use. Default depends on the dialect used. +:skipinitialspace: Set to ``true`` to ignore space directly after the delimiter. Default depends on the dialect used (usually ``false``). +:strict: Set to ``true`` to error out on invalid CSV input. + +.. versionadded: 3.0.0 + +Converting to JSON +^^^^^^^^^^^^^^^^^^ + +`JC `_ is a CLI tool and Python library which allows to interpret output of various CLI programs as JSON. It is also available as a filter in community.general. This filter needs the `jc Python library `_ installed on the controller. + +.. code-block:: yaml+jinja + + - name: Run 'ls' to list files in / + command: ls / + register: result + + - name: Parse the ls output + debug: + msg: "{{ result.stdout | community.general.jc('ls') }}" + +This produces: + +.. 
code-block:: ansible-output + + TASK [Run 'ls' to list files in /] ******************************************************** + changed: [localhost] + + TASK [Parse the ls output] **************************************************************** + ok: [localhost] => { + "msg": [ + { + "filename": "bin" + }, + { + "filename": "boot" + }, + { + "filename": "dev" + }, + { + "filename": "etc" + }, + { + "filename": "home" + }, + { + "filename": "lib" + }, + { + "filename": "proc" + }, + { + "filename": "root" + }, + { + "filename": "run" + }, + { + "filename": "tmp" + } + ] + } + +.. versionadded: 2.0.0 + +.. _ansible_collections.community.general.docsite.json_query_filter: + +Selecting JSON data: JSON queries +--------------------------------- + +To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure. + +.. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples `_. + +Consider this data structure: + +.. code-block:: yaml+jinja + + { + "domain_definition": { + "domain": { + "cluster": [ + { + "name": "cluster1" + }, + { + "name": "cluster2" + } + ], + "server": [ + { + "name": "server11", + "cluster": "cluster1", + "port": "8080" + }, + { + "name": "server12", + "cluster": "cluster1", + "port": "8090" + }, + { + "name": "server21", + "cluster": "cluster2", + "port": "9080" + }, + { + "name": "server22", + "cluster": "cluster2", + "port": "9090" + } + ], + "library": [ + { + "name": "lib1", + "target": "cluster1" + }, + { + "name": "lib2", + "target": "cluster2" + } + ] + } + } + } + +To extract all clusters from this structure, you can use the following query: + +.. 
code-block:: yaml+jinja + + - name: Display all cluster names + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}" + +To extract all server names: + +.. code-block:: yaml+jinja + + - name: Display all server names + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}" + +To extract ports from cluster1: + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" + vars: + server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port" + +.. note:: You can use a variable to make the query more readable. + +To print out the ports from cluster1 in a comma separated string: + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 as a string + ansible.builtin.debug: + msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}" + +.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability. + +You can use YAML `single quote escaping `_: + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}" + +.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote. + +To get a hash map with all ports and names of a cluster: + +.. 
code-block:: yaml+jinja + + - name: Display all server ports and names from cluster1 + ansible.builtin.debug: + var: item + loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" + vars: + server_name_cluster1_query: "domain.server[?cluster=='cluster2'].{name: name, port: port}" + +To extract ports from all clusters with name starting with 'server1': + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 + ansible.builtin.debug: + msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}" + vars: + server_name_query: "domain.server[?starts_with(name,'server1')].port" + +To extract ports from all clusters with name containing 'server1': + +.. code-block:: yaml+jinja + + - name: Display all ports from cluster1 + ansible.builtin.debug: + msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}" + vars: + server_name_query: "domain.server[?contains(name,'server1')].port" + +.. note:: while using ``starts_with`` and ``contains``, you have to use `` to_json | from_json `` filter for correct parsing of data structure. 
From 1ad85849afa9a4c2d89678deecb7513edbee6164 Mon Sep 17 00:00:00 2001 From: Chih-Hsuan Yen Date: Wed, 2 Jun 2021 04:04:09 +0800 Subject: [PATCH 0340/3093] nmcli: new arguments to ignore automatic dns servers and gateways (#2635) * nmcli: new arguments to ignore automatic dns servers and gateways Closes #1087 * Add changelog fragment * Address review comments --- .../2635-nmcli-add-ignore-auto-arguments.yml | 2 + plugins/modules/net_tools/nmcli.py | 42 ++++++++++++++++++- .../plugins/modules/net_tools/test_nmcli.py | 32 ++++++++++++++ 3 files changed, 75 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2635-nmcli-add-ignore-auto-arguments.yml diff --git a/changelogs/fragments/2635-nmcli-add-ignore-auto-arguments.yml b/changelogs/fragments/2635-nmcli-add-ignore-auto-arguments.yml new file mode 100644 index 0000000000..e75ceb6a1b --- /dev/null +++ b/changelogs/fragments/2635-nmcli-add-ignore-auto-arguments.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - add new options to ignore automatic DNS servers and gateways (https://github.com/ansible-collections/community.general/issues/1087). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 929d88c654..399d15267a 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -77,6 +77,12 @@ options: - Use the format C(192.0.2.1). - This parameter is mutually_exclusive with never_default4 parameter. type: str + gw4_ignore_auto: + description: + - Ignore automatically configured IPv4 routes. + type: bool + default: false + version_added: 3.2.0 routes4: description: - The list of ipv4 routes. @@ -107,6 +113,12 @@ options: - A list of DNS search domains. elements: str type: list + dns4_ignore_auto: + description: + - Ignore automatically configured IPv4 name servers. + type: bool + default: false + version_added: 3.2.0 method4: description: - Configuration method to be used for IPv4. 
@@ -125,6 +137,12 @@ options: - The IPv6 gateway for this interface. - Use the format C(2001:db8::1). type: str + gw6_ignore_auto: + description: + - Ignore automatically configured IPv6 routes. + type: bool + default: false + version_added: 3.2.0 dns6: description: - A list of up to 3 dns servers. @@ -136,6 +154,12 @@ options: - A list of DNS search domains. elements: str type: list + dns6_ignore_auto: + description: + - Ignore automatically configured IPv6 name servers. + type: bool + default: false + version_added: 3.2.0 method6: description: - Configuration method to be used for IPv6 @@ -648,16 +672,20 @@ class Nmcli(object): self.type = module.params['type'] self.ip4 = module.params['ip4'] self.gw4 = module.params['gw4'] + self.gw4_ignore_auto = module.params['gw4_ignore_auto'] self.routes4 = module.params['routes4'] self.route_metric4 = module.params['route_metric4'] self.never_default4 = module.params['never_default4'] self.dns4 = module.params['dns4'] self.dns4_search = module.params['dns4_search'] + self.dns4_ignore_auto = module.params['dns4_ignore_auto'] self.method4 = module.params['method4'] self.ip6 = module.params['ip6'] self.gw6 = module.params['gw6'] + self.gw6_ignore_auto = module.params['gw6_ignore_auto'] self.dns6 = module.params['dns6'] self.dns6_search = module.params['dns6_search'] + self.dns6_ignore_auto = module.params['dns6_ignore_auto'] self.method6 = module.params['method6'] self.mtu = module.params['mtu'] self.stp = module.params['stp'] @@ -729,7 +757,9 @@ class Nmcli(object): 'ipv4.dhcp-client-id': self.dhcp_client_id, 'ipv4.dns': self.dns4, 'ipv4.dns-search': self.dns4_search, + 'ipv4.ignore-auto-dns': self.dns4_ignore_auto, 'ipv4.gateway': self.gw4, + 'ipv4.ignore-auto-routes': self.gw4_ignore_auto, 'ipv4.routes': self.routes4, 'ipv4.route-metric': self.route_metric4, 'ipv4.never-default': self.never_default4, @@ -737,7 +767,9 @@ class Nmcli(object): 'ipv6.addresses': self.ip6, 'ipv6.dns': self.dns6, 'ipv6.dns-search': 
self.dns6_search, + 'ipv6.ignore-auto-dns': self.dns6_ignore_auto, 'ipv6.gateway': self.gw6, + 'ipv6.ignore-auto-routes': self.gw6_ignore_auto, 'ipv6.method': self.ipv6_method, }) @@ -900,7 +932,11 @@ class Nmcli(object): if setting in ('bridge.stp', 'bridge-port.hairpin-mode', 'connection.autoconnect', - 'ipv4.never-default'): + 'ipv4.never-default', + 'ipv4.ignore-auto-dns', + 'ipv4.ignore-auto-routes', + 'ipv6.ignore-auto-dns', + 'ipv6.ignore-auto-routes'): return bool elif setting in ('ipv4.dns', 'ipv4.dns-search', @@ -1116,17 +1152,21 @@ def main(): ]), ip4=dict(type='str'), gw4=dict(type='str'), + gw4_ignore_auto=dict(type='bool', default=False), routes4=dict(type='list', elements='str'), route_metric4=dict(type='int'), never_default4=dict(type='bool', default=False), dns4=dict(type='list', elements='str'), dns4_search=dict(type='list', elements='str'), + dns4_ignore_auto=dict(type='bool', default=False), method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), dhcp_client_id=dict(type='str'), ip6=dict(type='str'), gw6=dict(type='str'), + gw6_ignore_auto=dict(type='bool', default=False), dns6=dict(type='list', elements='str'), dns6_search=dict(type='list', elements='str'), + dns6_ignore_auto=dict(type='bool', default=False), method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared']), # Bond Specific vars mode=dict(type='str', default='balance-rr', diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index dceb5e5f3f..5b3f96937b 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -95,8 +95,12 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no """ 
TESTCASE_GENERIC_DNS4_SEARCH = [ @@ -120,10 +124,14 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv4.dns-search: search.redhat.com ipv6.dns-search: search6.redhat.com ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no """ TESTCASE_GENERIC_ZONE = [ @@ -147,8 +155,12 @@ connection.zone: external ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no """ TESTCASE_BOND = [ @@ -172,8 +184,12 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no bond.options: mode=active-backup,primary=non_existent_primary """ @@ -199,8 +215,12 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no bridge.mac-address: 52:54:00:AB:CD:EF bridge.stp: yes bridge.max-age: 100 @@ -252,8 +272,12 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no vlan.id: 10 """ @@ -343,8 +367,12 @@ connection.autoconnect: yes 802-3-ethernet.mtu: auto ipv4.method: auto ipv4.dhcp-client-id: 00:11:22:AA:BB:CC:DD +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no """ 
TESTCASE_ETHERNET_STATIC = [ @@ -368,9 +396,13 @@ connection.autoconnect: yes ipv4.method: manual ipv4.addresses: 10.10.10.10/24 ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no ipv4.never-default: no ipv4.dns: 1.1.1.1,8.8.8.8 ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no """ From ca1506fb267d2592ebc6e1fc5df87024ac98ce80 Mon Sep 17 00:00:00 2001 From: Gene Gotimer Date: Tue, 1 Jun 2021 16:06:26 -0400 Subject: [PATCH 0341/3093] Added SHA1 option to maven_artifact (#2662) * Added SHA1 option * Add changelog fragment * Update plugins/modules/packaging/language/maven_artifact.py Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/maven_artifact.py Co-authored-by: Felix Fontein * Combined hash functions Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/maven_artifact.py Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/maven_artifact.py Co-authored-by: Felix Fontein * Removed unused functions (rolled into _local_checksum) * Update changelogs/fragments/2661-maven_artifact-add-sha1-option.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2661-maven_artifact-add-sha1-option.yml | 2 + .../packaging/language/maven_artifact.py | 72 +++++++++++-------- 2 files changed, 46 insertions(+), 28 deletions(-) create mode 100644 changelogs/fragments/2661-maven_artifact-add-sha1-option.yml diff --git a/changelogs/fragments/2661-maven_artifact-add-sha1-option.yml b/changelogs/fragments/2661-maven_artifact-add-sha1-option.yml new file mode 100644 index 0000000000..827942200b --- /dev/null +++ b/changelogs/fragments/2661-maven_artifact-add-sha1-option.yml @@ -0,0 +1,2 @@ +minor_changes: + - maven_artifact - added ``checksum_alg`` option to support SHA1 checksums in order to support FIPS systems (https://github.com/ansible-collections/community.general/pull/2662). 
diff --git a/plugins/modules/packaging/language/maven_artifact.py b/plugins/modules/packaging/language/maven_artifact.py index 50b808f57a..83833b0480 100644 --- a/plugins/modules/packaging/language/maven_artifact.py +++ b/plugins/modules/packaging/language/maven_artifact.py @@ -129,10 +129,10 @@ options: verify_checksum: type: str description: - - If C(never), the md5 checksum will never be downloaded and verified. - - If C(download), the md5 checksum will be downloaded and verified only after artifact download. This is the default. - - If C(change), the md5 checksum will be downloaded and verified if the destination already exist, - to verify if they are identical. This was the behaviour before 2.6. Since it downloads the md5 before (maybe) + - If C(never), the MD5/SHA1 checksum will never be downloaded and verified. + - If C(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default. + - If C(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exist, + to verify if they are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe) downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error if the artifact has not been cached yet, it may fail unexpectedly. If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to @@ -141,6 +141,15 @@ options: required: false default: 'download' choices: ['never', 'download', 'change', 'always'] + checksum_alg: + type: str + description: + - If C(md5), checksums will use the MD5 algorithm. This is the default. + - If C(sha1), checksums will use the SHA1 algorithm. This can be used on systems configured to use + FIPS-compliant algorithms, since MD5 will be blocked on such systems. 
+ default: 'md5' + choices: ['md5', 'sha1'] + version_added: 3.2.0 directory_mode: type: str description: @@ -507,7 +516,7 @@ class MavenDownloader: raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use) return None - def download(self, tmpdir, artifact, verify_download, filename=None): + def download(self, tmpdir, artifact, verify_download, filename=None, checksum_alg='md5'): if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest": artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None, artifact.classifier, artifact.extension) @@ -528,11 +537,11 @@ class MavenDownloader: shutil.copyfileobj(response, f) if verify_download: - invalid_md5 = self.is_invalid_md5(tempname, url) - if invalid_md5: + invalid_checksum = self.is_invalid_checksum(tempname, url, checksum_alg) + if invalid_checksum: # if verify_change was set, the previous file would be deleted os.remove(tempname) - return invalid_md5 + return invalid_checksum except Exception as e: os.remove(tempname) raise e @@ -541,40 +550,45 @@ class MavenDownloader: shutil.move(tempname, artifact.get_filename(filename)) return None - def is_invalid_md5(self, file, remote_url): + def is_invalid_checksum(self, file, remote_url, checksum_alg='md5'): if os.path.exists(file): - local_md5 = self._local_md5(file) + local_checksum = self._local_checksum(checksum_alg, file) if self.local: parsed_url = urlparse(remote_url) - remote_md5 = self._local_md5(parsed_url.path) + remote_checksum = self._local_checksum(checksum_alg, parsed_url.path) else: try: - remote_md5 = to_text(self._getContent(remote_url + '.md5', "Failed to retrieve MD5", False), errors='strict') + remote_checksum = to_text(self._getContent(remote_url + '.' 
+ checksum_alg, "Failed to retrieve checksum", False), errors='strict') except UnicodeError as e: - return "Cannot retrieve a valid md5 from %s: %s" % (remote_url, to_native(e)) - if(not remote_md5): - return "Cannot find md5 from " + remote_url + return "Cannot retrieve a valid %s checksum from %s: %s" % (checksum_alg, remote_url, to_native(e)) + if not remote_checksum: + return "Cannot find %s checksum from %s" % (checksum_alg, remote_url) try: - # Check if remote md5 only contains md5 or md5 + filename - _remote_md5 = remote_md5.split(None)[0] - remote_md5 = _remote_md5 - # remote_md5 is empty so we continue and keep original md5 string - # This should not happen since we check for remote_md5 before + # Check if remote checksum only contains md5/sha1 or md5/sha1 + filename + _remote_checksum = remote_checksum.split(None)[0] + remote_checksum = _remote_checksum + # remote_checksum is empty so we continue and keep original checksum string + # This should not happen since we check for remote_checksum before except IndexError: pass - if local_md5.lower() == remote_md5.lower(): + if local_checksum.lower() == remote_checksum.lower(): return None else: - return "Checksum does not match: we computed " + local_md5 + " but the repository states " + remote_md5 + return "Checksum does not match: we computed " + local_checksum + " but the repository states " + remote_checksum return "Path does not exist: " + file - def _local_md5(self, file): - md5 = hashlib.md5() + def _local_checksum(self, checksum_alg, file): + if checksum_alg.lower() == 'md5': + hash = hashlib.md5() + elif checksum_alg.lower() == 'sha1': + hash = hashlib.sha1() + else: + raise ValueError("Unknown checksum_alg %s" % checksum_alg) with io.open(file, 'rb') as f: for chunk in iter(lambda: f.read(8192), b''): - md5.update(chunk) - return md5.hexdigest() + hash.update(chunk) + return hash.hexdigest() def main(): @@ -599,6 +613,7 @@ def main(): client_key=dict(type="path", required=False), 
keep_name=dict(required=False, default=False, type='bool'), verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']), + checksum_alg=dict(required=False, default='md5', choices=['md5', 'sha1']), directory_mode=dict(type='str'), ), add_file_common_args=True, @@ -639,6 +654,7 @@ def main(): verify_checksum = module.params["verify_checksum"] verify_download = verify_checksum in ['download', 'always'] verify_change = verify_checksum in ['change', 'always'] + checksum_alg = module.params["checksum_alg"] downloader = MavenDownloader(module, repository_url, local, headers) @@ -683,12 +699,12 @@ def main(): b_dest = to_bytes(dest, errors='surrogate_or_strict') - if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_md5(dest, downloader.find_uri_for_artifact(artifact))): + if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_checksum(dest, downloader.find_uri_for_artifact(artifact), checksum_alg)): prev_state = "present" if prev_state == "absent": try: - download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest) + download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest, checksum_alg) if download_error is None: changed = True else: From fe5717c1aa1deab9ac487a2903c725ac2ac2cb27 Mon Sep 17 00:00:00 2001 From: Benjamin Schubert Date: Thu, 3 Jun 2021 20:42:05 +0100 Subject: [PATCH 0342/3093] keycloak_realm.py: Mark 'reset_password_allowed' as no_log=False (#2694) * keycloak_realm.py: Mark 'reset_password_allowed' as no_log=False This value is not sensitive but Ansible will complain about it otherwise * fixup! 
keycloak_realm.py: Mark 'reset_password_allowed' as no_log=False * Apply all suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/keycloak-realm-no-log-password-reset.yml | 2 ++ plugins/modules/identity/keycloak/keycloak_realm.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/keycloak-realm-no-log-password-reset.yml diff --git a/changelogs/fragments/keycloak-realm-no-log-password-reset.yml b/changelogs/fragments/keycloak-realm-no-log-password-reset.yml new file mode 100644 index 0000000000..104bf4179b --- /dev/null +++ b/changelogs/fragments/keycloak-realm-no-log-password-reset.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_realm - remove warning that ``reset_password_allowed`` needs to be marked as ``no_log`` (https://github.com/ansible-collections/community.general/pull/2694). diff --git a/plugins/modules/identity/keycloak/keycloak_realm.py b/plugins/modules/identity/keycloak/keycloak_realm.py index 7e80bd3d3d..509fcab7bc 100644 --- a/plugins/modules/identity/keycloak/keycloak_realm.py +++ b/plugins/modules/identity/keycloak/keycloak_realm.py @@ -654,7 +654,7 @@ def main(): registration_flow=dict(type='str', aliases=['registrationFlow']), remember_me=dict(type='bool', aliases=['rememberMe']), reset_credentials_flow=dict(type='str', aliases=['resetCredentialsFlow']), - reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed']), + reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False), revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), smtp_server=dict(type='dict', aliases=['smtpServer']), ssl_required=dict(type='bool', aliases=['sslRequired']), From efbda2389d02dbefd887bac505b320c100b66b1a Mon Sep 17 00:00:00 2001 From: Benjamin Schubert Date: Thu, 3 Jun 2021 20:44:54 +0100 Subject: [PATCH 0343/3093] keycloak_realm.py: Fix the `ssl_required` parameter according to the API (#2693) * 
keycloak_realm.py: Fix the `ssl_required` parameter according to the API The `ssl_required` parameter is a string and must be one of 'all', 'external' or 'none'. Passing a bool will make the server return a 500. * fixup! keycloak_realm.py: Fix the `ssl_required` parameter according to the API * Update changelogs/fragments/keycloak_realm_ssl_required.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/keycloak_realm_ssl_required.yml | 3 +++ plugins/modules/identity/keycloak/keycloak_realm.py | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/keycloak_realm_ssl_required.yml diff --git a/changelogs/fragments/keycloak_realm_ssl_required.yml b/changelogs/fragments/keycloak_realm_ssl_required.yml new file mode 100644 index 0000000000..7476612e2f --- /dev/null +++ b/changelogs/fragments/keycloak_realm_ssl_required.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - keycloak_realm - ``ssl_required`` changed from a boolean type to accept the strings ``none``, ``external`` or ``all``. This is not a breaking change since the module always failed when a boolean was supplied (https://github.com/ansible-collections/community.general/pull/2693). diff --git a/plugins/modules/identity/keycloak/keycloak_realm.py b/plugins/modules/identity/keycloak/keycloak_realm.py index 509fcab7bc..95f79704ef 100644 --- a/plugins/modules/identity/keycloak/keycloak_realm.py +++ b/plugins/modules/identity/keycloak/keycloak_realm.py @@ -439,9 +439,10 @@ options: ssl_required: description: - The realm ssl required option. + choices: ['all', 'external', 'none'] aliases: - sslRequired - type: bool + type: str sso_session_idle_timeout: description: - The realm sso session idle timeout. 
@@ -657,7 +658,7 @@ def main(): reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False), revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']), smtp_server=dict(type='dict', aliases=['smtpServer']), - ssl_required=dict(type='bool', aliases=['sslRequired']), + ssl_required=dict(choices=["external", "all", "none"], aliases=['sslRequired']), sso_session_idle_timeout=dict(type='int', aliases=['ssoSessionIdleTimeout']), sso_session_idle_timeout_remember_me=dict(type='int', aliases=['ssoSessionIdleTimeoutRememberMe']), sso_session_max_lifespan=dict(type='int', aliases=['ssoSessionMaxLifespan']), From d93bc039b274d1af837fa7fe869956a3be1d878c Mon Sep 17 00:00:00 2001 From: Matthias Vogelgesang Date: Thu, 3 Jun 2021 22:54:19 +0200 Subject: [PATCH 0344/3093] BOTMETA.yml: remove myself from zypper_repository (#2701) --- .github/BOTMETA.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 994de0621f..a3fb8e1f35 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -717,8 +717,9 @@ files: labels: zypper ignore: dirtyharrycallahan robinro $modules/packaging/os/zypper_repository.py: - maintainers: $team_suse matze + maintainers: $team_suse labels: zypper + ignore: matze $modules/remote_management/cobbler/: maintainers: dagwieers $modules/remote_management/hpilo/: From 5ddf0041ecc733ed6f1f6ab938af584683c6e862 Mon Sep 17 00:00:00 2001 From: George Rawlinson Date: Fri, 4 Jun 2021 17:08:54 +1200 Subject: [PATCH 0345/3093] add module pacman_key (#778) * add module pacman_key * add symlink and fix documentation for pacman_key * documentation fix for pacman_key * improve logic around user input * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update 
plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Andrew Klychkov * Improve parameter checking required_one_of=[] is neat. Co-authored-by: Alexei Znamensky * Revert "Improve parameter checking" This reverts commit 044b0cbc854744480ad1e17753e33f0371c7d0eb. * Simplify a bunch of code. * fix typos pointed out by yan12125 * replaced manual checks with required-if invocation * added default keyring to documentation * some initial tests * updated metadata * refactored to make sanity tests pass * refactor to make sanity tests pass ... part deux * refactor: simplify run_command invocations * test: cover check-mode and some normal operation * docs: fix grammatical errors * rip out fingerprint code a full length (40 characters) key ID is equivalent to the fingerprint. 
* refactor tests, add a couple more * test: added testcase for method: data * Update plugins/modules/packaging/os/pacman_key.py Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * docs: correct yaml boolean type Co-authored-by: Felix Fontein Co-authored-by: Andrew Klychkov Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- plugins/modules/packaging/os/pacman_key.py | 314 ++++++++++ plugins/modules/pacman_key.py | 1 + .../modules/packaging/os/test_pacman_key.py | 576 ++++++++++++++++++ 3 files changed, 891 insertions(+) create mode 100644 plugins/modules/packaging/os/pacman_key.py create mode 120000 plugins/modules/pacman_key.py create mode 100644 tests/unit/plugins/modules/packaging/os/test_pacman_key.py diff --git a/plugins/modules/packaging/os/pacman_key.py b/plugins/modules/packaging/os/pacman_key.py new file mode 100644 index 0000000000..85896c211d --- /dev/null +++ b/plugins/modules/packaging/os/pacman_key.py @@ -0,0 +1,314 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, George Rawlinson +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: pacman_key +author: +- George Rawlinson (@grawlinson) +version_added: "3.2.0" +short_description: Manage pacman's list of trusted keys +description: +- Add or remove gpg keys from the pacman keyring. +notes: +- Use full-length key ID (40 characters). +- Keys will be verified when using I(data), I(file), or I(url) unless I(verify) is overridden. +- Keys will be locally signed after being imported into the keyring. +- If the key ID exists in the keyring, the key will not be added unless I(force_update) is specified. +- I(data), I(file), I(url), and I(keyserver) are mutually exclusive. +- Supports C(check_mode). 
+requirements: +- gpg +- pacman-key +options: + id: + description: + - The 40 character identifier of the key. + - Including this allows check mode to correctly report the changed state. + - Do not specify a subkey ID, instead specify the primary key ID. + required: true + type: str + data: + description: + - The keyfile contents to add to the keyring. + - Must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + file: + description: + - The path to a keyfile on the remote server to add to the keyring. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: path + url: + description: + - The URL to retrieve keyfile from. + - Remote file must be of C(PGP PUBLIC KEY BLOCK) type. + type: str + keyserver: + description: + - The keyserver used to retrieve key from. + type: str + verify: + description: + - Whether or not to verify the keyfile's key ID against specified key ID. + type: bool + default: true + force_update: + description: + - This forces the key to be updated if it already exists in the keyring. + type: bool + default: false + keyring: + description: + - The full path to the keyring folder on the remote server. + - If not specified, module will use pacman's default (C(/etc/pacman.d/gnupg)). + - Useful if the remote system requires an alternative gnupg directory. + type: path + default: /etc/pacman.d/gnupg + state: + description: + - Ensures that the key is present (added) or absent (revoked). 
+ default: present + choices: [ absent, present ] + type: str +''' + +EXAMPLES = ''' +- name: Import a key via local file + community.general.pacman_key: + data: "{{ lookup('file', 'keyfile.asc') }}" + state: present + +- name: Import a key via remote file + community.general.pacman_key: + file: /tmp/keyfile.asc + state: present + +- name: Import a key via url + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + url: https://domain.tld/keys/keyfile.asc + state: present + +- name: Import a key via keyserver + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + keyserver: keyserver.domain.tld + +- name: Import a key into an alternative keyring + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + file: /tmp/keyfile.asc + keyring: /etc/pacman.d/gnupg-alternative + +- name: Remove a key from the keyring + community.general.pacman_key: + id: 01234567890ABCDE01234567890ABCDE12345678 + state: absent +''' + +RETURN = r''' # ''' + +import os.path +import tempfile +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url +from ansible.module_utils._text import to_native + + +class PacmanKey(object): + def __init__(self, module): + self.module = module + # obtain binary paths for gpg & pacman-key + self.gpg = module.get_bin_path('gpg', required=True) + self.pacman_key = module.get_bin_path('pacman-key', required=True) + + # obtain module parameters + keyid = module.params['id'] + url = module.params['url'] + data = module.params['data'] + file = module.params['file'] + keyserver = module.params['keyserver'] + verify = module.params['verify'] + force_update = module.params['force_update'] + keyring = module.params['keyring'] + state = module.params['state'] + self.keylength = 40 + + # sanitise key ID & check if key exists in the keyring + keyid = self.sanitise_keyid(keyid) + key_present = self.key_in_keyring(keyring, keyid) + + # check mode + if 
module.check_mode: + if state == "present": + changed = (key_present and force_update) or not key_present + module.exit_json(changed=changed) + elif state == "absent": + if key_present: + module.exit_json(changed=True) + module.exit_json(changed=False) + + if state == "present": + if key_present and not force_update: + module.exit_json(changed=False) + + if data: + file = self.save_key(data) + self.add_key(keyring, file, keyid, verify) + module.exit_json(changed=True) + elif file: + self.add_key(keyring, file, keyid, verify) + module.exit_json(changed=True) + elif url: + data = self.fetch_key(url) + file = self.save_key(data) + self.add_key(keyring, file, keyid, verify) + module.exit_json(changed=True) + elif keyserver: + self.recv_key(keyring, keyid, keyserver) + module.exit_json(changed=True) + elif state == "absent": + if key_present: + self.remove_key(keyring, keyid) + module.exit_json(changed=True) + module.exit_json(changed=False) + + def is_hexadecimal(self, string): + """Check if a given string is valid hexadecimal""" + try: + int(string, 16) + except ValueError: + return False + return True + + def sanitise_keyid(self, keyid): + """Sanitise given key ID. + + Strips whitespace, uppercases all characters, and strips leading `0X`. 
+ """ + sanitised_keyid = keyid.strip().upper().replace(' ', '').replace('0X', '') + if len(sanitised_keyid) != self.keylength: + self.module.fail_json(msg="key ID is not full-length: %s" % sanitised_keyid) + if not self.is_hexadecimal(sanitised_keyid): + self.module.fail_json(msg="key ID is not hexadecimal: %s" % sanitised_keyid) + return sanitised_keyid + + def fetch_key(self, url): + """Downloads a key from url""" + response, info = fetch_url(self.module, url) + if info['status'] != 200: + self.module.fail_json(msg="failed to fetch key at %s, error was %s" % (url, info['msg'])) + return to_native(response.read()) + + def recv_key(self, keyring, keyid, keyserver): + """Receives key via keyserver""" + cmd = [self.pacman_key, '--gpgdir', keyring, '--keyserver', keyserver, '--recv-keys', keyid] + self.module.run_command(cmd, check_rc=True) + self.lsign_key(keyring, keyid) + + def lsign_key(self, keyring, keyid): + """Locally sign key""" + cmd = [self.pacman_key, '--gpgdir', keyring] + self.module.run_command(cmd + ['--lsign-key', keyid], check_rc=True) + + def save_key(self, data): + "Saves key data to a temporary file" + tmpfd, tmpname = tempfile.mkstemp() + self.module.add_cleanup_file(tmpname) + tmpfile = os.fdopen(tmpfd, "w") + tmpfile.write(data) + tmpfile.close() + return tmpname + + def add_key(self, keyring, keyfile, keyid, verify): + """Add key to pacman's keyring""" + if verify: + self.verify_keyfile(keyfile, keyid) + cmd = [self.pacman_key, '--gpgdir', keyring, '--add', keyfile] + self.module.run_command(cmd, check_rc=True) + self.lsign_key(keyring, keyid) + + def remove_key(self, keyring, keyid): + """Remove key from pacman's keyring""" + cmd = [self.pacman_key, '--gpgdir', keyring, '--delete', keyid] + self.module.run_command(cmd, check_rc=True) + + def verify_keyfile(self, keyfile, keyid): + """Verify that keyfile matches the specified key ID""" + if keyfile is None: + self.module.fail_json(msg="expected a key, got none") + elif keyid is None: + 
self.module.fail_json(msg="expected a key ID, got none") + + rc, stdout, stderr = self.module.run_command( + [ + self.gpg, + '--with-colons', + '--with-fingerprint', + '--batch', + '--no-tty', + '--show-keys', + keyfile + ], + check_rc=True, + ) + + extracted_keyid = None + for line in stdout.splitlines(): + if line.startswith('fpr:'): + extracted_keyid = line.split(':')[9] + break + + if extracted_keyid != keyid: + self.module.fail_json(msg="key ID does not match. expected %s, got %s" % (keyid, extracted_keyid)) + + def key_in_keyring(self, keyring, keyid): + "Check if the key ID is in pacman's keyring" + rc, stdout, stderr = self.module.run_command( + [ + self.gpg, + '--with-colons', + '--batch', + '--no-tty', + '--no-default-keyring', + '--keyring=%s/pubring.gpg' % keyring, + '--list-keys', keyid + ], + check_rc=False, + ) + if rc != 0: + if stderr.find("No public key") >= 0: + return False + else: + self.module.fail_json(msg="gpg returned an error: %s" % stderr) + return True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + id=dict(type='str', required=True), + data=dict(type='str'), + file=dict(type='path'), + url=dict(type='str'), + keyserver=dict(type='str'), + verify=dict(type='bool', default=True), + force_update=dict(type='bool', default=False), + keyring=dict(type='path', default='/etc/pacman.d/gnupg'), + state=dict(type='str', default='present', choices=['absent', 'present']), + ), + supports_check_mode=True, + mutually_exclusive=(('data', 'file', 'url', 'keyserver'),), + required_if=[('state', 'present', ('data', 'file', 'url', 'keyserver'), True)], + ) + PacmanKey(module) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/pacman_key.py b/plugins/modules/pacman_key.py new file mode 120000 index 0000000000..ac0f448232 --- /dev/null +++ b/plugins/modules/pacman_key.py @@ -0,0 +1 @@ +packaging/os/pacman_key.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/packaging/os/test_pacman_key.py 
b/tests/unit/plugins/modules/packaging/os/test_pacman_key.py new file mode 100644 index 0000000000..757fee4e87 --- /dev/null +++ b/tests/unit/plugins/modules/packaging/os/test_pacman_key.py @@ -0,0 +1,576 @@ +# Copyright: (c) 2019, George Rawlinson +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.modules.packaging.os import pacman_key +import pytest +import json + +# path used for mocking get_bin_path() +MOCK_BIN_PATH = '/mocked/path' + +# Key ID used for tests +TESTING_KEYID = '14F26682D0916CDD81E37B6D61B7B526D98F0353' +TESTING_KEYFILE_PATH = '/tmp/pubkey.asc' + +# gpg --{show,list}-key output (key present) +GPG_SHOWKEY_OUTPUT = '''tru::1:1616373715:0:3:1:5 +pub:-:4096:1:61B7B526D98F0353:1437155332:::-:::scSC::::::23::0: +fpr:::::::::14F26682D0916CDD81E37B6D61B7B526D98F0353: +uid:-::::1437155332::E57D1F9BFF3B404F9F30333629369B08DF5E2161::Mozilla Software Releases ::::::::::0: +sub:e:4096:1:1C69C4E55E9905DB:1437155572:1500227572:::::s::::::23: +fpr:::::::::F2EF4E6E6AE75B95F11F1EB51C69C4E55E9905DB: +sub:e:4096:1:BBBEBDBB24C6F355:1498143157:1561215157:::::s::::::23: +fpr:::::::::DCEAC5D96135B91C4EA672ABBBBEBDBB24C6F355: +sub:e:4096:1:F1A6668FBB7D572E:1559247338:1622319338:::::s::::::23: +fpr:::::::::097B313077AE62A02F84DA4DF1A6668FBB7D572E:''' + +# gpg --{show,list}-key output (key absent) +GPG_NOKEY_OUTPUT = '''gpg: error reading key: No public key +tru::1:1616373715:0:3:1:5''' + +# pacman-key output (successful invocation) +PACMAN_KEY_SUCCESS = '''==> Updating trust database... 
+gpg: next trustdb check due at 2021-08-02''' + +# expected command for gpg --list-keys KEYID +RUN_CMD_LISTKEYS = [ + MOCK_BIN_PATH, + '--with-colons', + '--batch', + '--no-tty', + '--no-default-keyring', + '--keyring=/etc/pacman.d/gnupg/pubring.gpg', + '--list-keys', + TESTING_KEYID, +] + +# expected command for gpg --show-keys KEYFILE +RUN_CMD_SHOW_KEYFILE = [ + MOCK_BIN_PATH, + '--with-colons', + '--with-fingerprint', + '--batch', + '--no-tty', + '--show-keys', + TESTING_KEYFILE_PATH, +] + +# expected command for pacman-key --lsign-key KEYID +RUN_CMD_LSIGN_KEY = [ + MOCK_BIN_PATH, + '--gpgdir', + '/etc/pacman.d/gnupg', + '--lsign-key', + TESTING_KEYID, +] + + +TESTCASES = [ + # + # invalid user input + # + # state: present, id: absent + [ + { + 'state': 'present', + }, + { + 'id': 'param_missing_id', + 'msg': 'missing required arguments: id', + 'failed': True, + }, + ], + # state: present, required parameters: missing + [ + { + 'state': 'present', + 'id': '0xDOESNTMATTER', + }, + { + 'id': 'param_missing_method', + 'msg': 'state is present but any of the following are missing: data, file, url, keyserver', + 'failed': True, + }, + ], + # state: present, id: invalid (not full-length) + [ + { + 'id': '0xDOESNTMATTER', + 'data': 'FAKEDATA', + }, + { + 'id': 'param_id_not_full', + 'msg': 'key ID is not full-length: DOESNTMATTER', + 'failed': True, + }, + ], + # state: present, id: invalid (not hexadecimal) + [ + { + 'state': 'present', + 'id': '01234567890ABCDE01234567890ABCDE1234567M', + 'data': 'FAKEDATA', + }, + { + 'id': 'param_id_not_hex', + 'msg': 'key ID is not hexadecimal: 01234567890ABCDE01234567890ABCDE1234567M', + 'failed': True, + }, + ], + # state: absent, id: absent + [ + { + 'state': 'absent', + }, + { + 'id': 'param_absent_state_missing_id', + 'msg': 'missing required arguments: id', + 'failed': True, + }, + ], + # + # check mode + # + # state & key present + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'data': 'FAKEDATA', + 
'_ansible_check_mode': True, + }, + { + 'id': 'checkmode_state_and_key_present', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ], + 'changed': False, + }, + ], + # state present, key absent + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'data': 'FAKEDATA', + '_ansible_check_mode': True, + }, + { + 'id': 'checkmode_state_present_key_absent', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ], + 'changed': True, + }, + ], + # state & key absent + [ + { + 'state': 'absent', + 'id': TESTING_KEYID, + '_ansible_check_mode': True, + }, + { + 'id': 'checkmode_state_and_key_absent', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ], + 'changed': False, + }, + ], + # state absent, key present + [ + { + 'state': 'absent', + 'id': TESTING_KEYID, + '_ansible_check_mode': True, + }, + { + 'id': 'check_mode_state_absent_key_present', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ], + 'changed': True, + }, + ], + # + # normal operation + # + # state & key present + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'data': 'FAKEDATA', + }, + { + 'id': 'state_and_key_present', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ], + 'changed': False, + }, + ], + # state absent, key present + [ + { + 'state': 'absent', + 'id': TESTING_KEYID, + }, + { + 'id': 'state_absent_key_present', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ( + [ + MOCK_BIN_PATH, + '--gpgdir', + '/etc/pacman.d/gnupg', + '--delete', + TESTING_KEYID, + ], + {'check_rc': True}, + ( + 0, + PACMAN_KEY_SUCCESS, + '', + ), + ), + ], + 'changed': True, + }, + ], + # state & key 
absent + [ + { + 'state': 'absent', + 'id': TESTING_KEYID, + }, + { + 'id': 'state_and_key_absent', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ], + 'changed': False, + }, + ], + # state: present, key: absent, method: file + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'file': TESTING_KEYFILE_PATH, + }, + { + 'id': 'state_present_key_absent_method_file', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ( + RUN_CMD_SHOW_KEYFILE, + {'check_rc': True}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ( + [ + MOCK_BIN_PATH, + '--gpgdir', + '/etc/pacman.d/gnupg', + '--add', + '/tmp/pubkey.asc', + ], + {'check_rc': True}, + ( + 0, + PACMAN_KEY_SUCCESS, + '', + ), + ), + ( + RUN_CMD_LSIGN_KEY, + {'check_rc': True}, + ( + 0, + PACMAN_KEY_SUCCESS, + '', + ), + ), + ], + 'changed': True, + }, + ], + # state: present, key: absent, method: file + # failure: keyid & keyfile don't match + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'file': TESTING_KEYFILE_PATH, + }, + { + 'id': 'state_present_key_absent_verify_failed', + 'msg': 'key ID does not match. 
expected 14F26682D0916CDD81E37B6D61B7B526D98F0353, got 14F26682D0916CDD81E37B6D61B7B526D98F0354', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ( + RUN_CMD_SHOW_KEYFILE, + {'check_rc': True}, + ( + 0, + GPG_SHOWKEY_OUTPUT.replace('61B7B526D98F0353', '61B7B526D98F0354'), + '', + ), + ), + ], + 'failed': True, + }, + ], + # state: present, key: absent, method: keyserver + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'keyserver': 'pgp.mit.edu', + }, + { + 'id': 'state_present_key_absent_method_keyserver', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ( + [ + MOCK_BIN_PATH, + '--gpgdir', + '/etc/pacman.d/gnupg', + '--keyserver', + 'pgp.mit.edu', + '--recv-keys', + TESTING_KEYID, + ], + {'check_rc': True}, + ( + 0, + ''' +gpg: key 0x61B7B526D98F0353: 32 signatures not checked due to missing keys +gpg: key 0x61B7B526D98F0353: public key "Mozilla Software Releases " imported +gpg: marginals needed: 3 completes needed: 1 trust model: pgp +gpg: depth: 0 valid: 1 signed: 0 trust: 0-, 0q, 0n, 0m, 0f, 1u +gpg: Total number processed: 1 +gpg: imported: 1 +''', + '', + ), + ), + ( + RUN_CMD_LSIGN_KEY, + {'check_rc': True}, + ( + 0, + PACMAN_KEY_SUCCESS, + '', + ), + ), + ], + 'changed': True, + }, + ], + # state: present, key: absent, method: data + [ + { + 'state': 'present', + 'id': TESTING_KEYID, + 'data': 'PGP_DATA', + }, + { + 'id': 'state_present_key_absent_method_data', + 'run_command.calls': [ + ( + RUN_CMD_LISTKEYS, + {'check_rc': False}, + ( + 2, + '', + GPG_NOKEY_OUTPUT, + ), + ), + ( + RUN_CMD_SHOW_KEYFILE, + {'check_rc': True}, + ( + 0, + GPG_SHOWKEY_OUTPUT, + '', + ), + ), + ( + [ + MOCK_BIN_PATH, + '--gpgdir', + '/etc/pacman.d/gnupg', + '--add', + '/tmp/pubkey.asc', + ], + {'check_rc': True}, + ( + 0, + PACMAN_KEY_SUCCESS, + '', + ), + ), + ( + RUN_CMD_LSIGN_KEY, + {'check_rc': True}, + ( + 0, + 
PACMAN_KEY_SUCCESS, + '', + ), + ), + ], + 'save_key_output': TESTING_KEYFILE_PATH, + 'changed': True, + }, + ], +] + + +@pytest.fixture +def patch_get_bin_path(mocker): + get_bin_path = mocker.patch.object( + AnsibleModule, + 'get_bin_path', + return_value=MOCK_BIN_PATH, + ) + + +@pytest.mark.parametrize( + 'patch_ansible_module, expected', + TESTCASES, + ids=[item[1]['id'] for item in TESTCASES], + indirect=['patch_ansible_module'] +) +@pytest.mark.usefixtures('patch_ansible_module') +def test_operation(mocker, capfd, patch_get_bin_path, expected): + # patch run_command invocations with mock data + if 'run_command.calls' in expected: + mock_run_command = mocker.patch.object( + AnsibleModule, + 'run_command', + side_effect=[item[2] for item in expected['run_command.calls']], + ) + + # patch save_key invocations with mock data + if 'save_key_output' in expected: + mock_save_key = mocker.patch.object( + pacman_key.PacmanKey, + 'save_key', + return_value=expected['save_key_output'], + ) + + # invoke module + with pytest.raises(SystemExit): + pacman_key.main() + + # capture std{out,err} + out, err = capfd.readouterr() + results = json.loads(out) + + # assertion time! 
+ if 'msg' in expected: + assert results['msg'] == expected['msg'] + if 'changed' in expected: + assert results['changed'] == expected['changed'] + if 'failed' in expected: + assert results['failed'] == expected['failed'] + + if 'run_command.calls' in expected: + assert AnsibleModule.run_command.call_count == len(expected['run_command.calls']) + call_args_list = [(item[0][0], item[1]) for item in AnsibleModule.run_command.call_args_list] + expected_call_args_list = [(item[0], item[1]) for item in expected['run_command.calls']] + assert call_args_list == expected_call_args_list From a4f46b881ac4596ff32e8581df4c794301dacd6e Mon Sep 17 00:00:00 2001 From: rainerleber <39616583+rainerleber@users.noreply.github.com> Date: Fri, 4 Jun 2021 07:36:35 +0200 Subject: [PATCH 0346/3093] Add module sapcar_extract to make SAP administration easier. (#2596) * add sapcar * integrate test * test integration * Revert "integrate test" This reverts commit 17cbff4f0227e4c27e1e25671d993823559d94bd. * add requiered * change test * change binary * test * add bin bath * change future * change download logic * change logic * sanity * Apply suggestions from code review Co-authored-by: Felix Fontein * add url and error handling * sanity * Apply suggestions from code review Co-authored-by: Andrew Klychkov * Apply suggestions from code review Co-authored-by: Felix Fontein * cleanup and fixes * sanity * add sec library * add description * remove blanks * sanity * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Rainer Leber Co-authored-by: Felix Fontein Co-authored-by: Andrew Klychkov --- plugins/modules/files/sapcar_extract.py | 219 ++++++++++++++++++ plugins/modules/sapcar_extract.py | 1 + tests/unit/plugins/modules/files/__init__.py | 0 .../modules/files/test_sapcar_extract.py | 53 +++++ 4 files changed, 273 insertions(+) create mode 100644 plugins/modules/files/sapcar_extract.py create mode 120000 plugins/modules/sapcar_extract.py create mode 100644 
tests/unit/plugins/modules/files/__init__.py create mode 100644 tests/unit/plugins/modules/files/test_sapcar_extract.py diff --git a/plugins/modules/files/sapcar_extract.py b/plugins/modules/files/sapcar_extract.py new file mode 100644 index 0000000000..db0f5f9ea8 --- /dev/null +++ b/plugins/modules/files/sapcar_extract.py @@ -0,0 +1,219 @@ +#!/usr/bin/python + +# Copyright: (c) 2021, Rainer Leber +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: sapcar_extract +short_description: Manages SAP SAPCAR archives +version_added: "3.2.0" +description: + - Provides support for unpacking C(sar)/C(car) files with the SAPCAR binary from SAP and pulling + information back into Ansible. +options: + path: + description: The path to the SAR/CAR file. + type: path + required: true + dest: + description: + - The destination where SAPCAR extracts the SAR file. Missing folders will be created. + If this parameter is not provided it will unpack in the same folder as the SAR file. + type: path + binary_path: + description: + - The path to the SAPCAR binary, for example, C(/home/dummy/sapcar) or C(https://myserver/SAPCAR). + If this parameter is not provided the module will look in C(PATH). + type: path + signature: + description: + - If C(true) the signature will be extracted. + default: false + type: bool + security_library: + description: + - The path to the security library, for example, C(/usr/sap/hostctrl/exe/libsapcrytp.so), for signature operations. + type: path + manifest: + description: + - The name of the manifest. + default: "SIGNATURE.SMF" + type: str + remove: + description: + - If C(true) the SAR/CAR file will be removed. B(This should be used with caution!) + default: false + type: bool +author: + - Rainer Leber (@RainerLeber) +notes: + - Always returns C(changed=true) in C(check_mode). 
+''' + +EXAMPLES = """ +- name: Extract SAR file + community.general.sapcar_extract: + path: "~/source/hana.sar" + +- name: Extract SAR file with destination + community.general.sapcar_extract: + path: "~/source/hana.sar" + dest: "~/test/" + +- name: Extract SAR file with destination and download from webserver can be a fileshare as well + community.general.sapcar_extract: + path: "~/source/hana.sar" + dest: "~/dest/" + binary_path: "https://myserver/SAPCAR" + +- name: Extract SAR file and delete SAR after extract + community.general.sapcar_extract: + path: "~/source/hana.sar" + remove: true + +- name: Extract SAR file with manifest + community.general.sapcar_extract: + path: "~/source/hana.sar" + signature: true + +- name: Extract SAR file with manifest and rename it + community.general.sapcar_extract: + path: "~/source/hana.sar" + manifest: "MyNewSignature.SMF" + signature: true +""" + +import os +from tempfile import NamedTemporaryFile +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import open_url +from ansible.module_utils._text import to_native + + +def get_list_of_files(dir_name): + # create a list of file and directories + # names in the given directory + list_of_file = os.listdir(dir_name) + allFiles = list() + # Iterate over all the entries + for entry in list_of_file: + # Create full path + fullPath = os.path.join(dir_name, entry) + # If entry is a directory then get the list of files in this directory + if os.path.isdir(fullPath): + allFiles = allFiles + [fullPath] + allFiles = allFiles + get_list_of_files(fullPath) + else: + allFiles.append(fullPath) + return allFiles + + +def download_SAPCAR(binary_path, module): + bin_path = None + # download sapcar binary if url is provided otherwise path is returned + if binary_path is not None: + if binary_path.startswith('https://') or binary_path.startswith('http://'): + random_file = NamedTemporaryFile(delete=False) + with open_url(binary_path) as response: + with 
random_file as out_file: + data = response.read() + out_file.write(data) + os.chmod(out_file.name, 0o700) + bin_path = out_file.name + module.add_cleanup_file(bin_path) + else: + bin_path = binary_path + return bin_path + + +def check_if_present(command, path, dest, signature, manifest, module): + # manipuliating output from SAR file for compare with already extracted files + iter_command = [command, '-tvf', path] + sar_out = module.run_command(iter_command)[1] + sar_raw = sar_out.split("\n")[1:] + if dest[-1] != "/": + dest = dest + "/" + sar_files = [dest + x.split(" ")[-1] for x in sar_raw if x] + # remove any SIGNATURE.SMF from list because it will not unpacked if signature is false + if not signature: + sar_files = [item for item in sar_files if '.SMF' not in item] + # if signature is renamed manipulate files in list of sar file for compare. + if manifest != "SIGNATURE.SMF": + sar_files = [item for item in sar_files if '.SMF' not in item] + sar_files = sar_files + [manifest] + # get extracted files if present + files_extracted = get_list_of_files(dest) + # compare extracted files with files in sar file + present = all(elem in files_extracted for elem in sar_files) + return present + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(type='path', required=True), + dest=dict(type='path'), + binary_path=dict(type='path'), + signature=dict(type='bool', default=False), + security_library=dict(type='path'), + manifest=dict(type='str', default="SIGNATURE.SMF"), + remove=dict(type='bool', default=False), + ), + supports_check_mode=True, + ) + rc, out, err = [0, "", ""] + params = module.params + check_mode = module.check_mode + + path = params['path'] + dest = params['dest'] + signature = params['signature'] + security_library = params['security_library'] + manifest = params['manifest'] + remove = params['remove'] + + bin_path = download_SAPCAR(params['binary_path'], module) + + if dest is None: + dest_head_tail = os.path.split(path) + dest = 
dest_head_tail[0] + '/' + else: + if not os.path.exists(dest): + os.makedirs(dest, 0o755) + + if bin_path is not None: + command = [module.get_bin_path(bin_path, required=True)] + else: + try: + command = [module.get_bin_path('sapcar', required=True)] + except Exception as e: + module.fail_json(msg='Failed to find SAPCAR at the expected path or URL "{0}". Please check whether it is available: {1}' + .format(bin_path, to_native(e))) + + present = check_if_present(command[0], path, dest, signature, manifest, module) + + if not present: + command.extend(['-xvf', path, '-R', dest]) + if security_library: + command.extend(['-L', security_library]) + if signature: + command.extend(['-manifest', manifest]) + if not check_mode: + (rc, out, err) = module.run_command(command, check_rc=True) + changed = True + else: + changed = False + out = "allready unpacked" + + if remove: + os.remove(path) + + module.exit_json(changed=changed, message=rc, stdout=out, + stderr=err, command=' '.join(command)) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/sapcar_extract.py b/plugins/modules/sapcar_extract.py new file mode 120000 index 0000000000..7bb47b10c1 --- /dev/null +++ b/plugins/modules/sapcar_extract.py @@ -0,0 +1 @@ +./files/sapcar_extract.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/files/__init__.py b/tests/unit/plugins/modules/files/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/plugins/modules/files/test_sapcar_extract.py b/tests/unit/plugins/modules/files/test_sapcar_extract.py new file mode 100644 index 0000000000..05946e8217 --- /dev/null +++ b/tests/unit/plugins/modules/files/test_sapcar_extract.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Rainer Leber (@rainerleber) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from 
ansible_collections.community.general.plugins.modules.files import sapcar_extract +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible.module_utils import basic + + +def get_bin_path(*args, **kwargs): + """Function to return path of SAPCAR""" + return "/tmp/sapcar" + + +class Testsapcar_extract(ModuleTestCase): + """Main class for testing sapcar_extract module.""" + + def setUp(self): + """Setup.""" + super(Testsapcar_extract, self).setUp() + self.module = sapcar_extract + self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path) + self.mock_get_bin_path.start() + self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone' + + def tearDown(self): + """Teardown.""" + super(Testsapcar_extract, self).tearDown() + + def test_without_required_parameters(self): + """Failure must occurs when all parameters are missing.""" + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + self.module.main() + + def test_sapcar_extract(self): + """Check that result is changed.""" + set_module_args({ + 'path': "/tmp/HANA_CLIENT_REV2_00_053_00_LINUX_X86_64.SAR", + 'dest': "/tmp/test2", + 'binary_path': "/tmp/sapcar" + }) + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.return_value = 0, '', '' # successful execution, no output + with self.assertRaises(AnsibleExitJson) as result: + sapcar_extract.main() + self.assertTrue(result.exception.args[0]['changed']) + self.assertEqual(run_command.call_count, 1) From 2e8746a8aadc1af2ddc5a9e140851a9c0cf27092 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 4 Jun 2021 09:53:34 +0200 Subject: [PATCH 0347/3093] Fix spurious test errors. 
(#2709) --- tests/integration/targets/lookup_random_string/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/targets/lookup_random_string/test.yml b/tests/integration/targets/lookup_random_string/test.yml index 52a572379b..edbf9fd035 100644 --- a/tests/integration/targets/lookup_random_string/test.yml +++ b/tests/integration/targets/lookup_random_string/test.yml @@ -9,7 +9,7 @@ result4: "{{ query('community.general.random_string', length=-1) }}" result5: "{{ query('community.general.random_string', override_special='_', min_special=1) }}" result6: "{{ query('community.general.random_string', upper=false, special=false) }}" # lower case only - result7: "{{ query('community.general.random_string', lower=false) }}" # upper case only + result7: "{{ query('community.general.random_string', lower=false, special=false) }}" # upper case only result8: "{{ query('community.general.random_string', lower=false, upper=false, special=false) }}" # number only result9: "{{ query('community.general.random_string', lower=false, upper=false, special=false, min_numeric=1, length=1) }}" # single digit only result10: "{{ query('community.general.random_string', numbers=false, upper=false, special=false, min_lower=1, length=1) }}" # single lowercase character only From d49783280e9a3ba1df47ed999e3e8ec05b7206d0 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 4 Jun 2021 10:34:27 +0200 Subject: [PATCH 0348/3093] Add new module/plugin maintainers to BOTMETA. 
(#2708) --- .github/BOTMETA.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index a3fb8e1f35..a31ce91a4e 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -120,6 +120,8 @@ files: $lookups/nios: maintainers: $team_networking sganesh-infoblox labels: infoblox networking + $lookups/random_string.py: + maintainers: Akasurde $module_utils/: labels: module_utils $module_utils/gitlab.py: @@ -652,6 +654,9 @@ files: maintainers: elasticdog indrajitr tchernomax labels: pacman ignore: elasticdog + $modules/packaging/os/pacman_key.py: + maintainers: grawlinson + labels: pacman $modules/packaging/os/pkgin.py: maintainers: $team_solaris L2G jasperla szinck martinm82 labels: pkgin solaris From 4396ec9631065ad85154f272193e58d289f21876 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 4 Jun 2021 10:35:35 +0200 Subject: [PATCH 0349/3093] Fix action plugin BOTMETA entries. (#2707) --- .github/BOTMETA.yml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index a31ce91a4e..74b53db418 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -6,12 +6,9 @@ files: support: community $actions: labels: action - $actions/aireos.py: - labels: aireos cisco networking - $actions/ironware.py: - maintainers: paulquack - labels: ironware networking - $actions/shutdown.py: + $actions/system/iptables_state.py: + maintainers: quidame + $actions/system/shutdown.py: maintainers: nitzmahone samdoran aminvakil $becomes/: labels: become @@ -853,6 +850,8 @@ files: labels: interfaces_file $modules/system/iptables_state.py: maintainers: quidame + $modules/system/shutdown.py: + maintainers: nitzmahone samdoran aminvakil $modules/system/java_cert.py: maintainers: haad absynth76 $modules/system/java_keystore.py: From a343756e6f3a9ed24f1cb3c16a97dfbae2273bf3 Mon Sep 17 00:00:00 2001 From: Alex Willmer Date: Fri, 4 Jun 2021 18:11:46 +0100 Subject: [PATCH 0350/3093] Fix 
repeated word in description of fs_type (#2717) --- plugins/modules/system/parted.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/system/parted.py b/plugins/modules/system/parted.py index bbb8c1408b..3796cfc40b 100644 --- a/plugins/modules/system/parted.py +++ b/plugins/modules/system/parted.py @@ -100,7 +100,7 @@ options: fs_type: description: - If specified and the partition does not exist, will set filesystem type to given partition. - - Parameter optional, but see notes below about negative negative C(part_start) values. + - Parameter optional, but see notes below about negative C(part_start) values. type: str version_added: '0.2.0' resize: From c49a384a6522dd9d9b80fd7810df9a8b829e5127 Mon Sep 17 00:00:00 2001 From: christophemorio <49184206+christophemorio@users.noreply.github.com> Date: Fri, 4 Jun 2021 19:12:29 +0200 Subject: [PATCH 0351/3093] Terraform: ensure workspace is reset to current value (#2634) * fix: ensure workspace is reset to current value * chore: linter * chore: changelog --- changelogs/fragments/2634-terraform-switch-workspace.yml | 2 ++ plugins/modules/cloud/misc/terraform.py | 9 ++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2634-terraform-switch-workspace.yml diff --git a/changelogs/fragments/2634-terraform-switch-workspace.yml b/changelogs/fragments/2634-terraform-switch-workspace.yml new file mode 100644 index 0000000000..247447b3a8 --- /dev/null +++ b/changelogs/fragments/2634-terraform-switch-workspace.yml @@ -0,0 +1,2 @@ +bugfixes: + - terraform - ensure the workspace is set back to its previous value when the apply fails (https://github.com/ansible-collections/community.general/pull/2634). 
diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py index 8a34f9699b..86521ed264 100644 --- a/plugins/modules/cloud/misc/terraform.py +++ b/plugins/modules/cloud/misc/terraform.py @@ -447,7 +447,14 @@ def main(): command.append(plan_file) if needs_application and not module.check_mode and not state == 'planned': - rc, out, err = module.run_command(command, check_rc=True, cwd=project_path) + rc, out, err = module.run_command(command, check_rc=False, cwd=project_path) + if rc != 0: + if workspace_ctx["current"] != workspace: + select_workspace(command[0], project_path, workspace_ctx["current"]) + module.fail_json(msg=err.rstrip(), rc=rc, stdout=out, + stdout_lines=out.splitlines(), stderr=err, + stderr_lines=err.splitlines(), + cmd=' '.join(command)) # checks out to decide if changes were made during execution if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out: changed = True From 1a4af9bfc34e417d65e0eb81990d0f023a03c606 Mon Sep 17 00:00:00 2001 From: Anton Nikolaev Date: Sat, 5 Jun 2021 05:53:02 -0700 Subject: [PATCH 0352/3093] Reduce stormssh searches based on host (#2568) * Reduce stormssh searches based on host Due to the stormssh searches in the whole config values, we need to reduce the search results based on the full matching of the hosts * Removed whitespaces in the blank line * Added changelog fragment and tests for the fix. 
* Added newline at the end of the changelog fragment * Added newline at the end of the tests * Fixed bug with name in tests * Changed assertion for the existing host * Update changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml Co-authored-by: Felix Fontein * Adjusted tests * New line at the end of the tests Co-authored-by: Anton Nikolaev Co-authored-by: Felix Fontein --- ...reduce-stormssh-searches-based-on-host.yml | 2 ++ plugins/modules/system/ssh_config.py | 2 ++ .../targets/ssh_config/tasks/main.yml | 36 +++++++++++++++++++ 3 files changed, 40 insertions(+) create mode 100644 changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml diff --git a/changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml b/changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml new file mode 100644 index 0000000000..2f3e400e7e --- /dev/null +++ b/changelogs/fragments/2568-ssh_config-reduce-stormssh-searches-based-on-host.yml @@ -0,0 +1,2 @@ +bugfixes: + - ssh_config - reduce stormssh searches based on host (https://github.com/ansible-collections/community.general/pull/2568/). 
diff --git a/plugins/modules/system/ssh_config.py b/plugins/modules/system/ssh_config.py index 943f6b44fc..be177baaaf 100644 --- a/plugins/modules/system/ssh_config.py +++ b/plugins/modules/system/ssh_config.py @@ -209,6 +209,8 @@ class SSHConfig(): hosts_removed = [] hosts_added = [] + hosts_result = [host for host in hosts_result if host['host'] == self.host] + if hosts_result: for host in hosts_result: if state == 'absent': diff --git a/tests/integration/targets/ssh_config/tasks/main.yml b/tests/integration/targets/ssh_config/tasks/main.yml index 12f277b455..bd5acc9e04 100644 --- a/tests/integration/targets/ssh_config/tasks/main.yml +++ b/tests/integration/targets/ssh_config/tasks/main.yml @@ -183,3 +183,39 @@ that: - not mut_ex.changed - "'parameters are mutually exclusive' in mut_ex.msg" + +- name: Add a full name host + community.general.ssh_config: + ssh_config_file: "{{ ssh_config_test }}" + host: "full_name" + hostname: full_name.com + identity_file: '{{ ssh_private_key }}' + port: '2223' + state: present + register: full_name + +- name: Check if changes are made + assert: + that: + - full_name is changed + - full_name.hosts_added == ["full_name"] + - full_name.hosts_changed == [] + - full_name.hosts_removed == [] + +- name: Add a host with name which is contained in full name host + community.general.ssh_config: + ssh_config_file: "{{ ssh_config_test }}" + host: "full" + hostname: full.com + identity_file: '{{ ssh_private_key }}' + port: '2223' + state: present + register: short_name + +- name: Check that short name host is added and full name host is not updated + assert: + that: + - short_name is changed + - short_name.hosts_added == ["full"] + - short_name.hosts_changed == [] + - short_name.hosts_removed == [] From 0e6d70697c57889c7af66757dd501f38422cf0b8 Mon Sep 17 00:00:00 2001 From: fkuep Date: Sat, 5 Jun 2021 22:38:42 +0200 Subject: [PATCH 0353/3093] Wire token param into consul_api #2124 (#2126) * Wire token param into consul_api #2124 * Update 
changelogs/fragments/2124-consul_kv-pass-token.yml Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * #2124 renamed release fragment to match pr, removed parse_params. * putting look back in, do some linting #2124 * try more linting * linting * try overwriting defaults in parse_params with get_option vals, instead of removing that function completely. * Revert "back to start, from 2nd approach: allow keyword arguments via parse_params for compatibility." This reverts commit 748be8e366d46b43cc63b740cb78cde519274342. * Revert " linting" This reverts commit 1d57374c3e539a2cb640bf1482496d80f654b7d8. * Revert " try more linting" This reverts commit 91c8d06e6af442bd130859a64afbf5d558528e74. * Revert "putting look back in, do some linting #2124" This reverts commit 87eeec71803929f08e2dbfc1bfa3c76c79ea55d0. * Revert " #2124 renamed release fragment to match pr, removed parse_params." This reverts commit d2869b2f22ad64d84945ed91145de5b52bff2676. * Revert "Update changelogs/fragments/2124-consul_kv-pass-token.yml" This reverts commit c50b1cf9d4a53fbbfaa8332ba3a7acca33909f09. * Revert "Wire token param into consul_api #2124" This reverts commit b60b6433a8000459b40c4fdcee1da4fe436729a9. * minimal chnages for this PR relative to current upstream. * superfluous newline in changlog fragment. 
Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- changelogs/fragments/2126-consul_kv-pass-token.yml | 4 ++++ plugins/lookup/consul_kv.py | 8 ++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2126-consul_kv-pass-token.yml diff --git a/changelogs/fragments/2126-consul_kv-pass-token.yml b/changelogs/fragments/2126-consul_kv-pass-token.yml new file mode 100644 index 0000000000..a60fd2efcd --- /dev/null +++ b/changelogs/fragments/2126-consul_kv-pass-token.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - consul_kv lookup plugin - allow to set ``recurse``, ``index``, ``datacenter`` and ``token`` as keyword arguments + (https://github.com/ansible-collections/community.general/issues/2124). diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index 7ba7e5ac90..d567b7f687 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -171,10 +171,10 @@ class LookupModule(LookupBase): paramvals = { 'key': params[0], - 'token': None, - 'recurse': False, - 'index': None, - 'datacenter': None + 'token': self.get_option('token'), + 'recurse': self.get_option('recurse'), + 'index': self.get_option('index'), + 'datacenter': self.get_option('datacenter') } # parameters specified? 
From 9d8bea9d36c1896ce7fff26bbed175ad7b96d601 Mon Sep 17 00:00:00 2001 From: The Binary Date: Sun, 6 Jun 2021 02:25:49 +0545 Subject: [PATCH 0354/3093] open_iscsi: allow same target selected portals login and override (#2684) * fix: include portal and port for logged on check * refactor: remove extra space * fix: allow None portal and port on target_loggedon test * add auto_portal_startup argument * fix: change param name for automatic_portal * add changelog fragment * refactor: Update changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml Co-authored-by: Felix Fontein * add version added info to auto_portal_startup arg * add example for auto_portal_startup * fix: remove alias for auto_portal form arg_spec as well * refactor: elaborate in fragment changelogs Elaborate change Co-authored-by: Amin Vakil * open_iscsi: elaborate changelog fragment * Update plugins/modules/system/open_iscsi.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- ...ingle-target-multiple-portal-overrides.yml | 3 + plugins/modules/system/open_iscsi.py | 67 +++++++++++++++++-- 2 files changed, 64 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml diff --git a/changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml b/changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml new file mode 100644 index 0000000000..cb14a08ba0 --- /dev/null +++ b/changelogs/fragments/2684-open_iscsi-single-target-multiple-portal-overrides.yml @@ -0,0 +1,3 @@ +minor_changes: + - open_iscsi - also consider ``portal`` and ``port`` to check if already logged in or not (https://github.com/ansible-collections/community.general/issues/2683). + - open_iscsi - add ``auto_portal_startup`` parameter to allow ``node.startup`` setting per portal (https://github.com/ansible-collections/community.general/issues/2685). 
diff --git a/plugins/modules/system/open_iscsi.py b/plugins/modules/system/open_iscsi.py index 222bb82f3d..570925f6a4 100644 --- a/plugins/modules/system/open_iscsi.py +++ b/plugins/modules/system/open_iscsi.py @@ -57,6 +57,11 @@ options: - Whether the target node should be automatically connected at startup. type: bool aliases: [ automatic ] + auto_portal_startup: + description: + - Whether the target node portal should be automatically connected at startup. + type: bool + version_added: 3.2.0 discover: description: - Whether the list of target nodes on the portal should be @@ -102,10 +107,18 @@ EXAMPLES = r''' community.general.open_iscsi: login: no target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d + +- name: Override and disable automatic portal login on specific portal + community.general.open_iscsi: + login: false + portal: 10.1.1.250 + auto_portal_startup: false + target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d ''' import glob import os +import re import socket import time @@ -158,12 +171,18 @@ def iscsi_discover(module, portal, port): module.fail_json(cmd=cmd, rc=rc, msg=err) -def target_loggedon(module, target): +def target_loggedon(module, target, portal=None, port=None): cmd = '%s --mode session' % iscsiadm_cmd (rc, out, err) = module.run_command(cmd) + if portal is None: + portal = "" + if port is None: + port = "" + if rc == 0: - return target in out + search_re = "%s:%s.*%s" % (re.escape(portal), port, re.escape(target)) + return re.search(search_re, out) is not None elif rc == 21: return False else: @@ -219,8 +238,14 @@ def target_device_node(module, target): return devdisks -def target_isauto(module, target): +def target_isauto(module, target, portal=None, port=None): cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target) + + if portal is not None: + if port is not None: + portal = '%s:%s' % (portal, port) + cmd = '%s --portal %s' % (cmd, portal) + (rc, out, err) = module.run_command(cmd) if rc == 0: @@ 
-233,16 +258,28 @@ def target_isauto(module, target): module.fail_json(cmd=cmd, rc=rc, msg=err) -def target_setauto(module, target): +def target_setauto(module, target, portal=None, port=None): cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target) + + if portal is not None: + if port is not None: + portal = '%s:%s' % (portal, port) + cmd = '%s --portal %s' % (cmd, portal) + (rc, out, err) = module.run_command(cmd) if rc > 0: module.fail_json(cmd=cmd, rc=rc, msg=err) -def target_setmanual(module, target): +def target_setmanual(module, target, portal=None, port=None): cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target) + + if portal is not None: + if port is not None: + portal = '%s:%s' % (portal, port) + cmd = '%s --portal %s' % (cmd, portal) + (rc, out, err) = module.run_command(cmd) if rc > 0: @@ -265,6 +302,7 @@ def main(): # actions login=dict(type='bool', aliases=['state']), auto_node_startup=dict(type='bool', aliases=['automatic']), + auto_portal_startup=dict(type='bool'), discover=dict(type='bool', default=False), show_nodes=dict(type='bool', default=False), ), @@ -288,6 +326,7 @@ def main(): port = module.params['port'] login = module.params['login'] automatic = module.params['auto_node_startup'] + automatic_portal = module.params['auto_portal_startup'] discover = module.params['discover'] show_nodes = module.params['show_nodes'] @@ -333,7 +372,7 @@ def main(): result['nodes'] = nodes if login is not None: - loggedon = target_loggedon(module, target) + loggedon = target_loggedon(module, target, portal, port) if (login and loggedon) or (not login and not loggedon): result['changed'] |= False if login: @@ -368,6 +407,22 @@ def main(): result['changed'] |= True result['automatic_changed'] = True + if automatic_portal is not None: + isauto = target_isauto(module, target, portal, port) + if (automatic_portal and isauto) or (not automatic_portal 
and not isauto): + result['changed'] |= False + result['automatic_portal_changed'] = False + elif not check: + if automatic_portal: + target_setauto(module, target, portal, port) + else: + target_setmanual(module, target, portal, port) + result['changed'] |= True + result['automatic_portal_changed'] = True + else: + result['changed'] |= True + result['automatic_portal_changed'] = True + module.exit_json(**result) From 463c576a67acdd101ecc1d181ad184742a22bcaa Mon Sep 17 00:00:00 2001 From: quidame Date: Sun, 6 Jun 2021 08:20:52 +0200 Subject: [PATCH 0355/3093] iptables_state: fix async status call (-> action plugin) (#2711) * fix call to async_status (-> action plugin) * add changelog fragment * Apply suggestions from code review Co-authored-by: Felix Fontein * rename a local variable for readability Co-authored-by: Felix Fontein --- ...-iptables_state-2700-async_status-call.yml | 4 +++ plugins/action/system/iptables_state.py | 28 +++++++++++-------- 2 files changed, 20 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/2711-fix-iptables_state-2700-async_status-call.yml diff --git a/changelogs/fragments/2711-fix-iptables_state-2700-async_status-call.yml b/changelogs/fragments/2711-fix-iptables_state-2700-async_status-call.yml new file mode 100644 index 0000000000..8f94cf5178 --- /dev/null +++ b/changelogs/fragments/2711-fix-iptables_state-2700-async_status-call.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - "iptables_state - call ``async_status`` action plugin rather than its module + (https://github.com/ansible-collections/community.general/issues/2700)." 
diff --git a/plugins/action/system/iptables_state.py b/plugins/action/system/iptables_state.py index 887f3f47f9..6884e77713 100644 --- a/plugins/action/system/iptables_state.py +++ b/plugins/action/system/iptables_state.py @@ -40,18 +40,26 @@ class ActionModule(ActionBase): "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " "'ansible_timeout' (=%s) (recommended).") - def _async_result(self, module_args, task_vars, timeout): + def _async_result(self, async_status_args, task_vars, timeout): ''' Retrieve results of the asynchonous task, and display them in place of the async wrapper results (those with the ansible_job_id key). ''' + async_status = self._task.copy() + async_status.args = async_status_args + async_status.action = 'ansible.builtin.async_status' + async_status.async_val = 0 + async_action = self._shared_loader_obj.action_loader.get( + async_status.action, task=async_status, connection=self._connection, + play_context=self._play_context, loader=self._loader, templar=self._templar, + shared_loader_obj=self._shared_loader_obj) + + if async_status.args['mode'] == 'cleanup': + return async_action.run(task_vars=task_vars) + # At least one iteration is required, even if timeout is 0. for dummy in range(max(1, timeout)): - async_result = self._execute_module( - module_name='ansible.builtin.async_status', - module_args=module_args, - task_vars=task_vars, - wrap_async=False) + async_result = async_action.run(task_vars=task_vars) if async_result.get('finished', 0) == 1: break time.sleep(min(1, timeout)) @@ -106,7 +114,7 @@ class ActionModule(ActionBase): # longer on the controller); and set a backup file path. 
module_args['_timeout'] = task_async module_args['_back'] = '%s/iptables.state' % async_dir - async_status_args = dict(_async_dir=async_dir) + async_status_args = dict(mode='status') confirm_cmd = 'rm -f %s' % module_args['_back'] starter_cmd = 'touch %s.starter' % module_args['_back'] remaining_time = max(task_async, max_timeout) @@ -168,11 +176,7 @@ class ActionModule(ActionBase): del result['invocation']['module_args'][key] async_status_args['mode'] = 'cleanup' - dummy = self._execute_module( - module_name='ansible.builtin.async_status', - module_args=async_status_args, - task_vars=task_vars, - wrap_async=False) + dummy = self._async_result(async_status_args, task_vars, 0) if not wrap_async: # remove a temporary path we created From f74b83663bcc1f6269a2bb56ba646f24f6218578 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 7 Jun 2021 17:58:26 +1200 Subject: [PATCH 0356/3093] Bugfix + sanity checks for stacki_host (#2681) * fixed validation-modules for plugins/modules/remote_management/stacki/stacki_host.py * sanity fix * added changelog fragment * extra fix to the documentation * Update plugins/modules/remote_management/stacki/stacki_host.py Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/stacki/stacki_host.py Co-authored-by: Felix Fontein * rollback params Co-authored-by: Felix Fontein --- .../fragments/2681-stacki-host-bugfix.yml | 4 + .../remote_management/stacki/stacki_host.py | 84 +++++++++++-------- tests/sanity/ignore-2.10.txt | 3 - tests/sanity/ignore-2.11.txt | 3 - tests/sanity/ignore-2.12.txt | 3 - tests/sanity/ignore-2.9.txt | 3 - 6 files changed, 54 insertions(+), 46 deletions(-) create mode 100644 changelogs/fragments/2681-stacki-host-bugfix.yml diff --git a/changelogs/fragments/2681-stacki-host-bugfix.yml b/changelogs/fragments/2681-stacki-host-bugfix.yml new file mode 100644 index 0000000000..3403bfbfbe --- /dev/null +++ b/changelogs/fragments/2681-stacki-host-bugfix.yml @@ 
-0,0 +1,4 @@ +bugfixes: + - stacki_host - when adding a new server, ``rack`` and ``rank`` must be passed, and network parameters are optional (https://github.com/ansible-collections/community.general/pull/2681). +minor_changes: + - stacki_host - minor refactoring (https://github.com/ansible-collections/community.general/pull/2681). diff --git a/plugins/modules/remote_management/stacki/stacki_host.py b/plugins/modules/remote_management/stacki/stacki_host.py index 8bdc0f82f6..fda0c5d318 100644 --- a/plugins/modules/remote_management/stacki/stacki_host.py +++ b/plugins/modules/remote_management/stacki/stacki_host.py @@ -12,46 +12,48 @@ DOCUMENTATION = ''' module: stacki_host short_description: Add or remove host to stacki front-end description: - - Use this module to add or remove hosts to a stacki front-end via API. - - U(https://github.com/StackIQ/stacki) + - Use this module to add or remove hosts to a stacki front-end via API. + - Information on stacki can be found at U(https://github.com/StackIQ/stacki). options: name: description: - - Name of the host to be added to Stacki. + - Name of the host to be added to Stacki. required: True type: str stacki_user: description: - - Username for authenticating with Stacki API, but if not - specified, the environment variable C(stacki_user) is used instead. + - Username for authenticating with Stacki API, but if not specified, the environment variable C(stacki_user) is used instead. required: True type: str stacki_password: description: - - Password for authenticating with Stacki API, but if not + - Password for authenticating with Stacki API, but if not specified, the environment variable C(stacki_password) is used instead. required: True type: str stacki_endpoint: description: - - URL for the Stacki API Endpoint. + - URL for the Stacki API Endpoint. required: True type: str prim_intf_mac: description: - - MAC Address for the primary PXE boot network interface. + - MAC Address for the primary PXE boot network interface. 
+ - Currently not used by the module. type: str prim_intf_ip: description: - - IP Address for the primary network interface. + - IP Address for the primary network interface. + - Currently not used by the module. type: str prim_intf: description: - - Name of the primary network interface. + - Name of the primary network interface. + - Currently not used by the module. type: str force_install: description: - - Set value to True to force node into install state if it already exists in stacki. + - Set value to C(true) to force node into install state if it already exists in stacki. type: bool default: no state: @@ -59,6 +61,30 @@ options: - Set value to the desired state for the specified host. type: str choices: [ absent, present ] + default: present + appliance: + description: + - Applicance to be used in host creation. + - Required if I(state) is C(present) and host does not yet exist. + type: str + default: backend + rack: + description: + - Rack to be used in host creation. + - Required if I(state) is C(present) and host does not yet exist. + type: int + rank: + description: + - Rank to be used in host creation. + - In Stacki terminology, the rank is the position of the machine in a rack. + - Required if I(state) is C(present) and host does not yet exist. + type: int + network: + description: + - Network to be configured in the host. + - Currently not used by the module. 
+ type: str + default: private author: - Hugh Ma (@bbyhuy) ''' @@ -128,7 +154,7 @@ class StackiHost(object): 'PASSWORD': module.params['stacki_password']} # Get Initial CSRF - cred_a = self.do_request(self.module, self.endpoint, method="GET") + cred_a = self.do_request(self.endpoint, method="GET") cookie_a = cred_a.headers.get('Set-Cookie').split(';') init_csrftoken = None for c in cookie_a: @@ -145,8 +171,7 @@ class StackiHost(object): login_endpoint = self.endpoint + "/login" # Get Final CSRF and Session ID - login_req = self.do_request(self.module, login_endpoint, headers=header, - payload=urlencode(auth_creds), method='POST') + login_req = self.do_request(login_endpoint, headers=header, payload=urlencode(auth_creds), method='POST') cookie_f = login_req.headers.get('Set-Cookie').split(';') csrftoken = None @@ -163,8 +188,8 @@ class StackiHost(object): 'Content-type': 'application/json', 'Cookie': login_req.headers.get('Set-Cookie')} - def do_request(self, module, url, payload=None, headers=None, method=None): - res, info = fetch_url(module, url, data=payload, headers=headers, method=method) + def do_request(self, url, payload=None, headers=None, method=None): + res, info = fetch_url(self.module, url, data=payload, headers=headers, method=method) if info['status'] != 200: self.module.fail_json(changed=False, msg=info['msg']) @@ -172,24 +197,16 @@ class StackiHost(object): return res def stack_check_host(self): - res = self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST") - - if self.hostname in res.read(): - return True - else: - return False + res = self.do_request(self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST") + return self.hostname in res.read() def stack_sync(self): - self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST") - self.do_request(self.module, self.endpoint, 
payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST") + self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST") + self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST") def stack_force_install(self, result): - data = dict() - changed = False - - data['cmd'] = "set host boot {0} action=install" \ - .format(self.hostname) - self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") + data = {'cmd': "set host boot {0} action=install".format(self.hostname)} + self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") changed = True self.stack_sync() @@ -203,7 +220,7 @@ class StackiHost(object): data['cmd'] = "add host {0} rack={1} rank={2} appliance={3}"\ .format(self.hostname, self.rack, self.rank, self.appliance) - self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") + self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") self.stack_sync() @@ -215,7 +232,7 @@ class StackiHost(object): data['cmd'] = "remove host {0}"\ .format(self.hostname) - self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") + self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST") self.stack_sync() @@ -258,8 +275,7 @@ def main(): .format(module.params['name']) # Otherwise, state is present, but host doesn't exists, require more params to add host elif module.params['state'] == 'present' and not host_exists: - for param in ['appliance', 'prim_intf', - 'prim_intf_ip', 'network', 'prim_intf_mac']: + for param in ['appliance', 'rack', 'rank', 'prim_intf', 'prim_intf_ip', 'network', 'prim_intf_mac']: if not module.params[param]: missing_params.append(param) if len(missing_params) > 0: # @FIXME 
replace with required_if diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 16c94a2c09..1855fc963f 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -46,9 +46,6 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index db731736c0..4727b8d6df 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -45,9 +45,6 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/stacki/stacki_host.py 
validate-modules:doc-default-does-not-match-spec -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index de3634ae40..74b1ea16f6 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -45,9 +45,6 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 9cb31a442d..2dac082311 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -36,9 +36,6 @@ plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules 
plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc -plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter plugins/modules/net_tools/nios/nios_a_record.py validate-modules:deprecation-mismatch plugins/modules/net_tools/nios/nios_a_record.py validate-modules:invalid-documentation plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:deprecation-mismatch From 6a41fba2f89c4b2f0d63b5b3b34b5b649101dde1 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 7 Jun 2021 23:06:23 +1200 Subject: [PATCH 0357/3093] ModuleHelper - also uses LC_ALL to force language (#2731) * also uses LC_ALL to force language * adjusted test_xfconf and test_cpanm * added changelog fragment * Update changelogs/fragments/2731-mh-cmd-locale.yml Co-authored-by: Felix Fontein * adjusted chglog frag per PR Co-authored-by: Felix Fontein --- changelogs/fragments/2731-mh-cmd-locale.yml | 5 ++++ plugins/module_utils/mh/mixins/cmd.py | 9 ++++-- .../modules/packaging/language/test_cpanm.py | 26 ++++++++-------- .../plugins/modules/system/test_xfconf.py | 30 +++++++++---------- 4 files changed, 39 insertions(+), 31 deletions(-) create mode 100644 changelogs/fragments/2731-mh-cmd-locale.yml diff --git a/changelogs/fragments/2731-mh-cmd-locale.yml b/changelogs/fragments/2731-mh-cmd-locale.yml new file mode 100644 index 0000000000..ea905cce4b --- /dev/null +++ 
b/changelogs/fragments/2731-mh-cmd-locale.yml @@ -0,0 +1,5 @@ +bugfixes: + - module_helper module utils - ``CmdMixin`` must also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). + - xfconf - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/issues/2715). + - cpanm - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). + - snap - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731). diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py index 724708868e..0367b6173c 100644 --- a/plugins/module_utils/mh/mixins/cmd.py +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -155,13 +155,16 @@ class CmdMixin(object): def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs): self.vars.cmd_args = self._calculate_args(extra_params, params) options = dict(self.run_command_fixed_options) - env_update = dict(options.get('environ_update', {})) options['check_rc'] = options.get('check_rc', self.check_rc) + options.update(kwargs) + env_update = dict(options.get('environ_update', {})) if self.force_lang: - env_update.update({'LANGUAGE': self.force_lang}) + env_update.update({ + 'LANGUAGE': self.force_lang, + 'LC_ALL': self.force_lang, + }) self.update_output(force_lang=self.force_lang) options['environ_update'] = env_update - options.update(kwargs) rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options) self.update_output(rc=rc, stdout=out, stderr=err) if process_output is None: diff --git a/tests/unit/plugins/modules/packaging/language/test_cpanm.py b/tests/unit/plugins/modules/packaging/language/test_cpanm.py index fd52fc1cc9..10a2955019 100644 --- a/tests/unit/plugins/modules/packaging/language/test_cpanm.py +++ 
b/tests/unit/plugins/modules/packaging/language/test_cpanm.py @@ -38,7 +38,7 @@ TEST_CASES = [ ), ( ['/testbin/cpanm', 'Dancer'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err ), ], @@ -65,7 +65,7 @@ TEST_CASES = [ 'id': 'install_dancer', 'run_command.calls': [( ['/testbin/cpanm', 'Dancer'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -77,7 +77,7 @@ TEST_CASES = [ 'id': 'install_distribution_file_compatibility', 'run_command.calls': [( ['/testbin/cpanm', 'MIYAGAWA/Plack-0.99_05.tar.gz'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -89,7 +89,7 @@ TEST_CASES = [ 'id': 'install_distribution_file', 'run_command.calls': [( ['/testbin/cpanm', 'MIYAGAWA/Plack-0.99_05.tar.gz'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -101,7 +101,7 @@ TEST_CASES = [ 'id': 'install_into_locallib', 'run_command.calls': [( ['/testbin/cpanm', '--local-lib', '/srv/webapps/my_app/extlib', 'Dancer'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -113,7 +113,7 @@ TEST_CASES = [ 'id': 'install_from_local_directory', 'run_command.calls': [( ['/testbin/cpanm', '/srv/webapps/my_app/src/'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -125,7 +125,7 @@ 
TEST_CASES = [ 'id': 'install_into_locallib_no_unit_testing', 'run_command.calls': [( ['/testbin/cpanm', '--notest', '--local-lib', '/srv/webapps/my_app/extlib', 'Dancer'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -137,7 +137,7 @@ TEST_CASES = [ 'id': 'install_from_mirror', 'run_command.calls': [( ['/testbin/cpanm', '--mirror', 'http://cpan.cpantesters.org/', 'Dancer'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -158,7 +158,7 @@ TEST_CASES = [ 'id': 'install_minversion_implicit', 'run_command.calls': [( ['/testbin/cpanm', 'Dancer~1.0'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -170,7 +170,7 @@ TEST_CASES = [ 'id': 'install_minversion_explicit', 'run_command.calls': [( ['/testbin/cpanm', 'Dancer~1.5'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -182,7 +182,7 @@ TEST_CASES = [ 'id': 'install_specific_version', 'run_command.calls': [( ['/testbin/cpanm', 'Dancer@1.7'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, @@ -215,7 +215,7 @@ TEST_CASES = [ 'id': 'install_specific_version_from_git_url_explicit', 'run_command.calls': [( ['/testbin/cpanm', 'git://github.com/plack/Plack.git@1.7'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # 
output rc, out, err )], 'changed': True, @@ -228,7 +228,7 @@ TEST_CASES = [ 'id': 'install_specific_version_from_git_url_implicit', 'run_command.calls': [( ['/testbin/cpanm', 'git://github.com/plack/Plack.git@2.5'], - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': True}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, (0, '', '',), # output rc, out, err )], 'changed': True, diff --git a/tests/unit/plugins/modules/system/test_xfconf.py b/tests/unit/plugins/modules/system/test_xfconf.py index dee387bd7d..d8c9a30a9a 100644 --- a/tests/unit/plugins/modules/system/test_xfconf.py +++ b/tests/unit/plugins/modules/system/test_xfconf.py @@ -49,7 +49,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '100\n', '',), ), @@ -69,7 +69,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/i_dont_exist'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (1, '', 'Property "/general/i_dont_exist" does not exist on channel "xfwm4".\n',), ), @@ -89,7 +89,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'], # Was return code checked? 
- {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'Value is an array with 3 items:\n\nMain\nWork\nTmp\n', '',), ), @@ -109,7 +109,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/use_compositing'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'true', '',), ), @@ -129,7 +129,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/use_compositing'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'false', '',), ), @@ -155,7 +155,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '100\n', '',), ), @@ -164,7 +164,7 @@ TEST_CASES = [ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity', '--create', '--type', 'int', '--set', '90'], # Was return code checked? 
- {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '', '',), ), @@ -190,7 +190,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '90\n', '',), ), @@ -199,7 +199,7 @@ TEST_CASES = [ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity', '--create', '--type', 'int', '--set', '90'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '', '',), ), @@ -225,7 +225,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'Value is an array with 3 items:\n\nMain\nWork\nTmp\n', '',), ), @@ -235,7 +235,7 @@ TEST_CASES = [ '--create', '--force-array', '--type', 'string', '--set', 'A', '--type', 'string', '--set', 'B', '--type', 'string', '--set', 'C'], # Was return code checked? 
- {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '', '',), ), @@ -261,7 +261,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'Value is an array with 3 items:\n\nA\nB\nC\n', '',), ), @@ -271,7 +271,7 @@ TEST_CASES = [ '--create', '--force-array', '--type', 'string', '--set', 'A', '--type', 'string', '--set', 'B', '--type', 'string', '--set', 'C'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '', '',), ), @@ -295,7 +295,7 @@ TEST_CASES = [ # Calling of following command will be asserted ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'], # Was return code checked? - {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, 'Value is an array with 3 items:\n\nA\nB\nC\n', '',), ), @@ -304,7 +304,7 @@ TEST_CASES = [ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names', '--reset'], # Was return code checked? 
- {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False}, + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False}, # Mock of returned code, stdout and stderr (0, '', '',), ), From 1e34df7ca05c729b29dd27d45d1fa68b0bc87640 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Mon, 7 Jun 2021 17:47:57 +0430 Subject: [PATCH 0358/3093] Add aminvakil to committers (#2739) --- commit-rights.md | 1 + 1 file changed, 1 insertion(+) diff --git a/commit-rights.md b/commit-rights.md index d10bea9af7..7aae8617fb 100644 --- a/commit-rights.md +++ b/commit-rights.md @@ -67,6 +67,7 @@ Individuals who have been asked to become a part of this group have generally be | Name | GitHub ID | IRC Nick | Other | | ------------------- | -------------------- | ------------------ | -------------------- | +| Amin Vakil | aminvakil | aminvakil | | | Andrew Klychkov | andersson007 | andersson007_ | | | Felix Fontein | felixfontein | felixfontein | | | John R Barker | gundalow | gundalow | | From 7c3f2ae4af4d3d16f6c9ef58d5eab499499ee7c9 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Mon, 7 Jun 2021 18:57:51 +0430 Subject: [PATCH 0359/3093] Remove aminvakil from supershipit section as it is not needed anymore (#2743) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 74b53db418..6727373e85 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,7 +1,7 @@ automerge: true files: plugins/: - supershipit: aminvakil russoz + supershipit: russoz changelogs/fragments/: support: community $actions: From 4c50f1add7e23b231afe6b80c8536ab4427b4005 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 7 Jun 2021 21:22:21 +0200 Subject: [PATCH 0360/3093] Re-enable flatpak_remote tests (#2747) * Automate test repo creation, re-enable flatpak_remote tests. * Linting. * Another try. 
--- .../targets/flatpak_remote/aliases | 1 - .../setup_flatpak_remote/create-repo.sh | 51 ++++++++++++++++++ .../setup_flatpak_remote/files/repo.tar.xz | Bin 15496 -> 5524 bytes 3 files changed, 51 insertions(+), 1 deletion(-) create mode 100755 tests/integration/targets/setup_flatpak_remote/create-repo.sh diff --git a/tests/integration/targets/flatpak_remote/aliases b/tests/integration/targets/flatpak_remote/aliases index 3623baa5c2..39291d435b 100644 --- a/tests/integration/targets/flatpak_remote/aliases +++ b/tests/integration/targets/flatpak_remote/aliases @@ -6,4 +6,3 @@ skip/osx skip/macos skip/rhel needs/root -disabled # FIXME diff --git a/tests/integration/targets/setup_flatpak_remote/create-repo.sh b/tests/integration/targets/setup_flatpak_remote/create-repo.sh new file mode 100755 index 0000000000..1b09bb7956 --- /dev/null +++ b/tests/integration/targets/setup_flatpak_remote/create-repo.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +set -eux + +flatpak install -y --system flathub org.freedesktop.Platform//1.6 org.freedesktop.Sdk//1.6 + +echo $'#!/bin/sh\necho hello world' > hello.sh + +export NUM=1 +flatpak build-init appdir$NUM com.dummy.App$NUM org.freedesktop.Sdk org.freedesktop.Platform 1.6; +flatpak build appdir$NUM mkdir /app/bin; +flatpak build appdir$NUM install --mode=750 hello.sh /app/bin; +flatpak build-finish --command=hello.sh appdir$NUM + +flatpak build-export repo appdir$NUM stable + +mkdir -p gpg +chmod 0700 gpg +gpg --homedir gpg --batch --passphrase '' --quick-gen-key test@dummy.com future-default default 10y + +KEY_ID=$(gpg --homedir=gpg --list-keys --with-colons test@dummy.com | grep fpr: | head -1 | cut -d ':' -f 10) + +gpg --homedir=gpg --export "${KEY_ID}" > dummy-repo.gpg + +BASE64_PUBLIC_KEY=$(base64 dummy-repo.gpg | tr -d '\n') + +cat > repo/com.dummy.App1.flatpakref < repo/dummy-repo.flatpakrepo <IAKH+ooF000E$*0e?f03iVu0001VFXf})@BbB4T>v>5N@un}Hi1L#y~)Z- z{84!W3f)Esb7jq5>fDNTXAtbJtFLh#1{z_@XdBbYO#I_6ejX}{k|qPV5*F#tV*(KZ 
z&8y&v@-_@%O$~^*VQ2&(ntw~=X4{`14ln0V8gAs7B)fP}6FAt~Kb*`}cin_F`PjKM z=gDE4I1`vmQzegt`_V-)uy0DRn8>u3#F{_S@Q?#j3Pf@d^@SuYQthQu*4bx}Wxgr^ zpzQnk7-!^ooWgD}Y<{Z)25({OuU|9(2RU`Q#t(Nms*UWhQUf?8R&lYuSf-byBX0K8 z^I}P z;jsPqKwt(94>NX^omDYN6iLjF8^%Fq74AggLtsy+E4;4W#LVx|OUJ_~rkBr=I9G`S z-`lF>zkaJh8c8uPIv7!1R#=VKn4$v5FVtOAT~e-|DG}P7pu?u zg2?4<^6J^x4!h7r^PhZef;GY0o&cgx`7rh$+7fCPFWMF3s{tZp9=q<`0M;3wj#_1g zy)AF_+45$2ZeN7&sq;gF7CAoR3TC{{A3kUkG`e@pmB_i|#|gx}R%AMZmCD*tQpM$A zmFmu|SZA0^w?;w2^J#7(!b+rZ$z`H1_w}(oKbr6?5#;6I*|C;sURHR-b#HK0GgHwl zWlO4wdEbX1D|5Dcj7=Qurtcf#d@UCH2{rv6(9o2ArZcO^C#+^h0?!I+spQj?8~88n z&E4e>R8^&N7+DMiqM6PD31Wv0giL;`XN??_@^TLV4hsB_WSOq_+c>H2$Dzqflv>9< zh9V#;-2)ZRNP$CY1HLx8Cx$ODMd$i>V~Lai-_=C*H-nB$WV7w4_|_RdZxY=@`)PU9 zGU-R&Fm5PR*l=QSUedJi+q~F%qp&wRE~3tnSu#;tV>1kSB#HCAD|;^tLzOsOObfGC zxl)v7r8Kf`JlpPMyfI81Mv@JKh+8vmq_9c@{#q=-N;#Y-+4)mJ77tr#mrvm(XFmSP zG`G5J9L|fhqonikigA&LzgIIzkD0JRwG!tEUQDZ}LZ|#5Ual6Xaun6M+(HDC&+jiB zxRa*KG2+)q*k=l9wu2EZW|NN;6w^HoI>w-V*1+BB&9amec?NGnkvXq>&?V<0S79xs zE);IWKN6$<=AHej%3g*(e+_xcmwPhy$}qM#E#@F{P)$5H9+(}YyEW0ZG;nQX00+iN zlwO}I{>mXc^=^Y1-~obpGtoH%Sg8VHbMnK%bcKXdHdNlomWIl)pzNw;A8lK?zqCL$ zNtth-_+Epfbrx87EX;Qr527&3oF#4}aE}OWga@2=Ygo`4h3Y^~>!X^knst8lFS%W5 z22|-1!ABSJyfe!9gP2T9@aCFHh_E-a`8#UE-nKN8K4Lw8PgF5GHLGhpl%DwU3GWAP zb)mCUGrJ1spZEsCe>(_RS+yylRnFv0&(HZPqRJiW{T9=KHLS>_%NoQ>AjypKmy>(K zStvuAI*Zm{SABkrITglicDPv7B=GyQ>NkIV*#NS+VqdSyXmh*i{Giej9qux%*V4!v z04}f5;K6g%G(3;;FrbnT{Vlj~6@tey6YDC&(V_B1$QFNs87L<@XGuRxb z-rW@&t@xrgkZjqx%OVI!>_M>rt9_fwcs-qXQM@YqO~2H@>&AFrl^o0=9KMF&+7hz8 zm9Z0~y=WvtQFC@A4HYGmx0;`V`j zTBirBrULceJufTw9S0oQrlQkQqL4U~0>5<_BHQxuNcybgxl+KdM;RrG>B_c1s)bSoOGy?7`dzpWg+m83c_Bfe}_G zs~{p8wWo+@hwfLuI(xH@T?B%SsJ~n@;DL)@45ldz@biq=qp>{HHsq0s`InRO9bUdOPHf1S(2GBdtU}U`lXj+^OaaJ0#?cIX zz`a_1oZoGHeOnV%cz?|}Ux!?f|BWlg`mh%HY&oR|SzSM*%ToXmZz^C-H2~|!G3&`d zCzie=S>qD)_I?rZJ#|kd6V&+zn{H`5&L{S%Bod&aajrBb!)rj#Xs~5!G zp(wBu%!MMS?AkiP;grvxR|8?)kmt33Ks3^UqR*k(HGaA1apho-VQ-xRYL`*9PsVQ* zgU=Tzp-Ab0@PL%xh?tv)VOvoR%C=YDQrO`Fh~-V^Gg1Am90nf<$hXh$Mj&A`=pj>y 
zBXH6u+B1j{O~Vxn)sMPc8YQ5@K?48f!TheWcJnpl#Kdl+zNEIal0C0Xxx*ay&BGTt zvJv?!ox`mJ2UfORJJr84*kW2)s;aizuM35WS7&vHCw{<)N>KpncOw%vC$e2I!T>HE z;hHcWk1BUPTsSh_eym-mFuSvZaS;o`j@#x|qz6gkPwEso`$pi&OqTtHJNMOyPYOY38xFB-B}fwBm;0_?4Ml@bxI6QfTp>DJr-^F%|W_ z$-Wlx_;e(f1)dO8QR&45;N*QG-c)lB-2)7qR9vsd*c(}Yp@Jf2nV38Y3W=}J^`9Xj z4^|H>om2DJ(AfpN96L<`CEV)e(`iQ>DemWjW z?#hsDws>z8($(!8fOwYzV|Q^~@ixxn#8TOMTj;{`7Ldq?z2;shLNYtuY?zsJ`8tJ>aXJ5j0S z95)Rb9qy4F`v;^O_Y)*9M+qOq=lX`2v?C+SZkuxNydCv@w$`OWJH43g%4!lk& zNEo2_0s;9TWv_KVzRWhB2Ntq-Xo$j9DB{AwI_{ac@FK$0Qjp|iDZC6+DAFbO-q&!W z9JWT6;p*bW#@Cmiceiq%R)QY|FOBs#3++h4b*o)4u*L^zDzolJbHB(k0dE3?6_-O% z7-;9MqjJs?r_{nm`Ve6AxTvebCYENAxyCW){#(NeH}OZa`;g1*+bVLLFzAHB|D-Bn z!&(K|XeU~^-&Y{a#(j+58(5j~ zLjy#sI4fG4xQ^X_kbH@F#sn}1!;X)2ykHi9Jc;`A34k%vE14LeTJF{G!4Ji4?=wR- z)zjB5K8bajhY%D+T|1S3fN#zt-?FPFOh}pCVrr6 zLTI^gc?vM+d%#YhHQbeZxy-NXZF;C!_oRTgHKSK$Q_i#ex>VZ#>|sc?EPYD?LI#1E zcN(_#74PgUZ)dY`{>3M{@8q@Kp?k1FwB^2l;-~(+A!BfIC4r|?y~l9}juyA2H!gXc zTrJt&h767*t{P?16$BU?g@U+X4XU622arWnyi9bWh1h~F4bjM%yWc*SR0g^_L0|Cx zry_hh3|iWe>UOV_PGS-w!U{Qou~SgKg3phkt06Pe&Z(lGvJ0%rrEzd8*_N{U-7w?m zGE%LF={|x#?(XU5_u{|zRAPuBYDdOfd9~rXcTN2OtliPE$`jdGDLDNHbzXBzGh
  • hitUvXK$sVWTuHQ4`ZdY6WW&L~ zYrr|UGRP5;Nk&K=A2@&o4^{3P!YbP$DR$8*fgz^$Sh1*c?`S(CY4dUUb9om`0^=(hr*Ce&t{Yz$ihlP(i21%ghcqYblmS!V)?*#g4GUc%yqbWw{q1EfU96 zdoUIg(;!t*YXS&P+ETGQ7(}uXNHx5qNdp#Hx)B2o6bgEIb#`ISwH203$fl`Qb1*w8`e`gB{_gynce{ znS8HH0?~yJro9En)?SFWZJC*b%Fvhh%OB7QjMOL9w*(-bFv?tLYogb}Jp6=hzY5rX z?7M`G$X;7JH)FIV&1&Brw(oot0Q^U?K2zJyz&+smc9-F6oS63gQp=36sY*D2xWZ>F zg7$w^{|SQy1o?Y9o%?D!Zix2U>s==f$i}meb-24lJQC*VpKu^3`Q#_GFkN(MW!Ybh z;=JF10DQ1ofCk~}{axu`@Q7TW6SP*i3jw6T*A{*zTwokFZi`oMbVj<_)}Xk36vi|x zKnsnf&pCxuvj;dc=59GwcagEJ9;;#uXs%TpKp+N?lF=RGED=EWet5#4$fiA|z_#p* zDDs0^=ISWtn=}DH7UbDn&`)$%x{t08%}FrW$b4G@9?{mEu~b~p4^r>ZL4+O<^Q zmWlBiQw+J1h|j%WUg;L-5wgiJ=nm)iEM>W06V*1yVI7rY^~0x~&!d=3nUl{gI4{MV zz{aoZ3`dP7q7{j+#B8hwedfY5r-n7`N8l4_12d1GeXQ!~GIK_)wTC0@6?g-b`B|lq2bG-zEloEM#*Y&L z>iQ{;vHn+)Q5IkmswrfxFf>0Hcf5c;#lq`U5DK5@QZ$xL_P)KksR+k449)S}RcT`3 zJ55QKuecH|_`LmTb$1nrdBjsKYJ>X?1=~Ldqaj@_k1A{!`qcnM9DxOzRjRZ$Gue$^ zKz)<LSF(d)Q0N`7h)2otJhEf+5ejV9Whz*;U%qDS zt!lPdMwT3{&pgHhh*}cnO$VoLYDLG;dkBFazn;HN^G=pPX|&& z`V(cM@h`Phy4B$m(G{apx$P@({_U4>E8Y>2+M)c?uO87t{wifMT1UloIoWeD)hB^= zFO2o1-z8`^O=PW1lyG$kt+EBo`AjbCs_N9QhRkW7K73AO-T6XVHPpVHeb~JhW0qKr zRkaP0XpQac_8d48VgHO4$-X_c9oM4h=WMmwCp|UYRHi0LK4F8HR3BA*>6ZsKZEFaZ zI7@6sqGB1x`6Wk^m6RgWy?l5DIu#0YJL&LVd*Ek_H_O;4Duo=^hH*OpHLstavsX5} za)|yUF{mUbR2do&rEK2-YWR4}6Mrn_q{3&RJ6g4PG_!pp#H3S$7x^y%nWeZw;Mwxy z2>Wa#Q((X*uxZKtlj?bxGlD>XMSo{^(n(q=)p#hSiN?stSFuvg!%px*vF-_oWE7_& z=^fo<7l_9=OvK$LGdGZO=<4tiGJ}tGYA4JSz{JLVGcm%62fajWhb+`0Ezbw+2uDZ_ z=`6RLzWT#dX3gKoYE$EXm_JK_!mwVwBN=!?H?4++G9*&csKyne*_VwXSq9#E1~s~F zMIi85_ukPN(T+_}kR2-9k#W*;r8n|SyDzP+b{?0njVMSb!wV%(ZkHTv2`vCv>5N@un}Hi1L#v;q3XE)^E@g)ZgXNF&=H-HakA{z?!OsX)+L=0K`5`#q1FO)| z@QO`e$oR3lw&+4zR2Fe{GDco>H`7iqBNOnV7teHb6Cx<1 z>&?~pmcoQ>3F1}g!1ago(bX6G(FrG%P8{pcIOF-xo_l?yTiKNM7as!Z8&PN*zkoZq zRfv_0BOT6xNqO-DT)?XgX3Hr_afav-8UCX=pDeZvpS|y&_CbTSHU3^Mqu{K#a#T6B zv6qsHt`nNV3v_vf;%pW}b!CM?r~+(D)v~F9oL?c;AfhZla*q}_51k=3T8I~`bTg%q zB8Kw8JEF&~hK1yEGLS)MA$U>hBkH(#$p@zHr(tIo4f4p(AUZr!-1T8dJ5)$~AOI4r 
zW9^M9Qbgeb_ObTL4>f^sT0QmORdL|eGO$f(AFS?hN}lrs!|di&@vdDFno_+LqVea_ zr24t2x`|&>OC*)UIpfkJ%B`zTXWW9(VXi_gpeU}!?f}HsU$nngar$hOm6pVc%&!J1 z9vRkQ8Reqem-8mN(8FcoeDU5o(r?#OxjYBWacRXpj@BCAm0`_+0xhih(lbAY0_>FrNoa!%Df448ZQtUTI1G zXP&^XUT(}}D4Bg&%o!w<3O;RX+K1vM*q>JNIkO#s>fbT0hOL>_+VvLdb@pRvfC~c0?)kd59 zQ73a#H&@Rp|1;IYrLP=M82GE~c3EK9#S)~Djz>cMMeKc_ti8U@N`4(bt+(`0K#$KfJ|F$RC*=F+UB9Zsl z0n2|(7&YcQrLmkoNj`IOMofpTqLLL4=%Az;>ZQ( zYe8(VMEZua0oT@ihvqxv9xyr1zV)G|ky;0D02OPp@;hNt(h2xA?o02Z7rP(F*SH>% zQ4AwTZU^hAa_i{ah31bJ#IXysyY zX@>R#q*el}uUGBnRDsxzPW?fn2*H}VhLGt6bVL!~I;<#U={?)k!3XVQJM2QTJ}>j_ zU%cL^u9hB{=(Nu{sfQ3Vk`+RE6-9m1AgK`Q&;#%X?R_k|IiFKkp2=h9J*e2GiBvbB8?K2D z1614QF6@zGK^exO1Tx?MKzw2)qZU{dbRUsctI>`t6_y;-k#zlzXs^VLmU9O^kCOT` z-djjy<~z1`sgr|vL*Q`}@RL3t6pJeLi1Et0Pt8?(v+gRg~a7DHw^sxW&V8sffy zM$03@D=5FP#{27Qs|dj39B1$2C!}G$BW^pqpqE<}xDax$wJ>%Wti4kahv0}u(G=Z5 z^AEH}i*F0OMCq7Q@M$xVocH%0m@ zlQA0~IcF16`Rz33DTpycuj+>*Z8!%dL&H207h ziRTl7xfSN?D!?6fGiDIJ`-}M{Z^=QF0hwtxuJbswYImvEb`^O>0`Ln(TJW#Szp>#T z3@RwV6K|SdpsdGYcJF&`E6<2$%UVqPmA<^&dF!$(@R=b-ZsH&VkBs^5Bn1_rmmf2Y zGE|JT>%^v~DwTc_v!MMm5r@b&8sc-fpE^nQssvZ$+NL_QK&d0XQ;^>Lc1$gki???{YOC z0+m_yqN54p5G3sQ=@5DU^K>FH7^x!+5m;DUzVyj`LDv*Me_97$P8k>! z_fI2iVgXe?5!Jt=c9*>zG0s~cU)BP+mc*D7@jX~u)tzlC3||n)E!*&Ez8rob2Te*+ zMUhZ2*}+Clxnwl`bj6z5q*=5^pIa;9dG{Yp%uLG)>~utH!dI5NFq#o5*|5n-%w38{<5cU$J;*F_3vz*s{x%%g zh{gF|46k^2Gu1!qdfT>lCrhhYk%Qn{rc>MQBp}SL*5iw0yeBwKKM1ffNYh~*l5Svl z#+4J76h@8_k|X75=IL31->W}0-v!th*Jb7wE;~B{r?N=7dwesU1p6|*^>Rr&0b7c^ zs8Dt}1edPcwzfkL>9*Q(mc)r_f39f#-xqn%F$iHaHJle3df?^cj!;4;!Z|(Z)PHN! 
zSXOMsvjI+t5|f?w99Xi{`N+4$t4ikr*2d5JvAmQB1+TNRhFXH|b_vdf233{!jb;sx zBD4x3>pWRQ&5LtkXMUDmk7EG@HDxJ42d1IB{In~H4l5}I&ssd`l7!Q9I4$dc znM}Q*I%DQ``Ipnj_Y^h!|7{|iAyy=5VI(4yL+_ldy)pnbZO5GXxlqdTeV-|!?mKBM z0f+AK*G$x!h4eipvk-r4gsl|F*vHsEiu&N0i6wtCJl@ik{Dh=N3FfNSA2 zGg4mmpM)X$DDQ>%gEY4gB+B&YG)sK#d4hS*av}5!ApJ;H7C6V|cuSs_rOgWCaV=t~ z#22B9k(a93vaA@QmD~A@#|MaPNWS30<=>Yn$(hzu=kF$qvNm?NO399rdp&anluXtO z0oSFyYz!ynhAWIFV{z+sFaB$6RGj)xJ0>$S6=A9}ND&-9--(;xbL~XD|I^G05Nssn z0QG)?#18%gRglqJL8E_3djb~Ky$3&*t_o#MXBu2`m^IQ_v7?bN14_Pe;@Lp+I-E;- z;~nMJIDSFn91WL4@-Gd6hAk}W2J&tZ>Qys-uN-U!%E8j&fN7>ynONqb>~4V4BdZ5* z%@5HDs*E~b(m0p zD#0A`ulh&`IY$E?eit<9_{-hKV-`gWEbs0@E33^#NTLugR9>mQxxFM_7^)J%$^R6o z?_M7A9D&i(c*+u-oH*as}OxEpu}Re90Jm~Ik4JF<*b`- zVlV*e;$-7?))>>%=0Q@5h7peKoa^z2rz!5h=D2K2X2%@*EypQByNl*b6o}4hFo9N9 z1(hR5dqsE^*oO<8Yt^iwucNLIVL=m_$Y>1{r-XM9lt%iE^hi4OTa(@`&8+BXnA|D~ zhB6J%@*=84I+t3&jh7o!|`Q&T0@`EfAdckB_L(B8u-yB}BsoHlL5GKDH z&It#hf9aAai%sp`w<#P zINaytsen+?N^`hHyw7d3giPZ#%%9<^_J{ho7PO zM?(FZ+_embuWRqUpcFzkYpf?CI2g~pmoWhBUNH)f8~+5&KUKM0t9fhh!ng{yRpnKS z)Nz)bH6VnJnaF&#BZ9@_8CK?dcgM~#54aTIV<%C!D7{}JbERouYhYR3iX-oj9j@Z%y+HBZ3$Y;*~Df0Yw8Aa6YQKkWmFgLv^1C|SD z9*CR^4RDxiL)u1&Q9p0kfgbzwJmZjbnRfzowtMOUzR6ry?KbJSYf2NbVL!GWtD5>x~R<VX`x9O(VS|DT;|;T-J-4VbSzW$d4%2+tblhJV>%q&@7ndFS%9kf7l^8L;Hxt3iiv}O^%^Mb}N8svo19=&%%hy*Scb+mZhrZNPRR|1Z!%=)a8TZo5M$l+$OIk>DNg5@T8(>jL-a& zRWsg<&V6B1arj+9$M|xX4Ac>ThMWVT>NAqjcs7QTBoP{5`A{TCEh2$+F4_?_@`NjxeEoWj36_{ z7)7U%pqrDd!)7cTTSbY9+z!WW=Lp-Gk3XnlIH6odYS0H5ynLZIlW*>DID?TkavR)x zfomB9Qk^my_G1bZ_eND*R1Jy}OOL-?m^1y&c1{bB%eE=9(>B1f6j~lrhTN119bAQc z*PS!l@AHTsM|;Gq^t9if5}2$cms|_KQCJNtMHHx=@smY0R$cs!rhT3bt8$b+Vz`4M zSmvX2cKl|j2a(@t{?+FHW?HU^OVO1H-Z$FM+ZX%>=XeX4h!t8@7czO&DQ$4sr`dy1bGh!c4Mh4?OcHbf?syRI4Ppw~&EH|{ z)L)gyiI{8OuGd0RRFBo{z7=w$9R;hjY)H;Mk^Y!!>Z=Ze{DZGNmy+_dLv4MUm5OTe z7)Md_54xnp?e%GEll{izT4EM#op(^O%njvp?=?M(Bi4sjpt&w3ZBxy=28GU!y;GKlUZIIwz zJ;6CSrhzKA2CfEd*Ft;jDM#%GIwgEoZXI=c*xo@^{^WtmKwg985;Bm7#kTw=ZG4r*?9S6n>^PHuU{?<1+pHq=jq#O&TcSVmdJ5vyG(7V5 
zi$?tNiVn`M8lF2mn~pl+Elyl}>z=0fc7T|I@5{l64n_(W2B~~Irt%n`#F?|7{*y*+ z^Tgh8j-bcK4Z8@hcdnC?mF0r_w92T6KR zokNjEwz=)NVfqg%unEx(%Tc;Yl=Ofi*5QW9D9qU_6q_P^&Ilh06hhtLZKTKSYw@SP zu`K>&se;@?$nnG_z#HA8~>H(n`s47H;26MS^_Yw6@e!gt&VP!FJQe9K7gS zmKP6Z^(jpotq!kI;#wXHgpRdtVF< zaix0OMBhHsrg9cxwSy~KdAlSpJ^^~8>c`(X?cg;1L+;2Yk`aT07RnytdaOO(S7)mdxKAu(L9H_)Ma|#|5Rsf8mdt)z` z=txb^Oo{BPs-Vv+>MS1e$}%0M zoYF3N;~EWaak@txO$PqrnXD1frwP821m+Z*faKMqK>rI#^pd z=2RVDC(iVqPJq;(xF=jm!T@6T$(n&`yw@t zQwA-skbI56J;@Ec0wrle`ChI$e34)QcDPXW4tp#3eJ~uZ|JqkWK3{|Wxe*NRkIJ|i z-K!BceOoix%Hcn1kUTOL+v*c{INnNhkEg`XKJoCR&PWS?UeU2(pU-oq!6Fck+LN8)x;2} zK*g@NE1D)$SH^uj_}8%%@}{`FCIMmCA0h1%CF6e_??=UxhO+&pY6_0msRfF}QV(xA z)X2mzy!j^%h=+Gy8SrKJt&k9#GLhEYd$f)_p%3|RQ^83~*9!Jyo7?T+;*a4k;_^P9 z_rdQcu&M~2tD$uM>(P6EGlA zYQDF}xaw4@zdjXR6n~4>lGxXr=jgFm9q0jpsrGSlOK}p{G4q5QLm2JMnf0=K<>~tc zbTY_%<+HNA!_43b)aI^T^Fg9X1krde3Lf(oW#Kyq+v{$r%tZr;X>f}URnUE( zOm9G~9>KBkQG@JVV0fClr%wiBNX6-L2a#s1kh4mz(T^e%rOuXTi))yBHhFOPF~eb@ zs%kt}OTwxnQdHeTK75RrvC&mMU7>7d4!GZ)28ou(6)FbymfiSGEQoNO(Ff7qRPsAY z_v5b!UQDG$qLra!>+D=NMk@<+y=ceh+!VFMulvk04OY|RIl5`W`^Y0RgJ@VX0i>sRFR=pKe- zdg$v5PnXNux99@B34^w3V9Frhc%zv|2&6LksL*WeUN?Om2-G}St~9A4h%-$Ybr(On z6k3?jxF9W5JuOd;!dxArme&F&0kJJcLUR;jtVYhnl~Sm*9&J4Bpy<{C^d5!On^@BX zmLo7$#M$_3v!6phsm$`pe`1)J{=_0t`U3hF+9j@sHH|FD;YC-A83IarOh zPCz@FfjaP?&JdnfyX>%8o?;b4C7y-lE*u%!`y?4$Mg?nt7PRuy+9nRSs`pu=H6&FN z^^JT@J2ls$8*7k&td0oVtA-04v_PA*cRxL|uUVQ>A`5a+I2%j*d}5Za*+{OC9SPO`X_pJ>N8ZWct7vq}I>>CY`U|Uke2RcwkmhHUCQSsTdP# zvINmvtaxmP{8ONPiEkoDtvB`sQHY_HUNi_16)--TnKFG~t$|UABtM(0W4WRB69yv2 zrEg!zNfLG|tJH^S0A+|lg>UIG`=tKx`Hmg9X+y5d?(FbkC)=TUH z*si4x2w?f|1xzXqX^rg#hfu=%w37Nq@`wI?p~GxVYgk`}9JA6;TVFJSKPj2NANt>s zXQf4rSOqrdUsyq%U!R$CzQqN~`E<2Ruey-??(Dux1mw!a$^=dcSms7RFkr4~qK5q( zEELR+tUvbVxxL68=Q9h7yCCqYU63%UJJp8w8QBa0TM(vVkz{0SP0~45cY6uC8U|FU zm{(!!;8HT}6jd7d)JaQIjjD3L(0W!6hCT`G$3IO@^>-gC$nmh-WaPgxaSefP%v7(T zk(!L-YlONJ`R#7EwChKIWgQYJ+$wsqSWLU-%H5yViyGF;A zypPobv0+&($Wv=C;j$}2uQ`qK%a-EdsY7lWB3)FFo=_m~5>YYwxKh>!kTJH=6t<*y 
zQnlYTd7%*}>>WM3?LP>&x32pNteIP86QHv=2R>Ha^ZrdjgQa5{Jdqksgx+uez1bvCq~xV^#& zPnSuI`tfh4+kua!++u?5X-=mDUmL0 zY};2K$O&WeC5$G>A}D-QGv3f3oS++uXVlGGI9+Nu?d9eCXcfyy9BWytT*kdz3wfBIzEF>Ip4lho zlzjX?2)2zhephxrn82>esM-D25H)_h%%y(&W|YP~`>9S~gKt2$r<`$pgx)!`oUA~` zryMFD+Uu>ORffTW;Ah|#Db9}DZ>3t$JyKboSjUj}8~^2on!SEvj>rSIFd?$bt>zDP zeX1f%=HS9xvU%Tlu0<eVdz3{liS zn8le9xs!cRx|1EJX3bLGp}Y4?*!`9ErL= z#%D1|dtfuAx6E$nrvtM6)kIc#sXmnnN?Qd=1XX*r*aD#b2{04)4O$fmR4&3=$^k50 zS|_oK}~3Lhzb-^|_TG>;i3i zw#-w1Avf&Jd9GM1MtSy)`_!-aSGPE27hi@PX|_-(V`5L^`cglM{OsCGKTinEXRAv< z#ef98ReAd9RpwSUGpHXCdmz9a*rk?7$!GMCfK{esQ0^Dmd0*0YAhR+=EgsGT2`skT zbEeB6q>e=CFLK7WnhdDpIWhJq(tgDH{Nu!ZIQUa9~+8W1fNeWDl;pJEz zVRD3X(MUrp2^j6pfOss4@b&D&3IKAZCTB-XL z+n0N?ppOQ{uV+>n3am3;NA~B#RQkI&aMGT;s4b@+_-R@1F+(8iC(bl>LVY4+xho=u^AAxbxlUzNI za3dblAmnBdjh=`?)a4eHEM7YBK3tO41^6$mShn`UH(S#rj!>YyNl z4O&+Bgy~IBW`fbF4Py$|IfVxa}_kAvH#2kmk|ZBsI3Fn1LL|M zGr~9EA>eCJFq#3xtKV@#Dd%F-LDb)EzDrJOf>v2k<;DQxfZD8Xk;izi^91u5njzh! zM+C449$8v^eL-pDP+5Yau#fd>kD$na{WbC$b&Ksv`B0CA2qCS6wVCt3+}#@H_J7g_;X^8ggqQV?uwY z?!vx~v(DxE09vXAqOou_LuYf-cP6bH7-t1QUDJPrzii~jKJakfb|Iyj$T8k~!Mea^)G>Am*rK0$TH3`AZ4cY3`l*DP#^}0p&TG0FFj-QodOA^FRePlyHs#9 z6}jy8!~4eun16|-b7>(b1k);v=RU#o5_kUVn=u1S#F6Mr7jX*%tjmu7+*@gvbJYbgw+twD~?<@_Xu43rN0SH`dKZ7s&9$9GryQIK?REUdMD92B;h6JJl+Zv6Y`>iA?l6liH zn2Qj9ED0DfA+yO25FicFEu>c$An@LWCzLP2Gd;Bis`W;l$W``!_DQ>FVi=g`mK(($ zx@~ADf5hj6b!QcMf8}3mi9ac4MZ3;LlQfYd<~@M_Pt*Vw;oxJwXW62p%XTa)?)rFk zad77g5h|=nfWd$h9KBU?_>l%k%ebdKmJs3$?>LA>Mi8Z#{-kN*e4x^F{Kn2j3t$a< zevNkjt!}m;L#%BJsX8I+3y}nfLV`pxNRF-)h0lKs5GUvOA?lylO%V;$mb6 z1pdt-E#{j#RxLlmIG9Z#WiO^}^KkbLo3JB7(WQACux2y<(GP3JO0aaeK(7c0tk0Ey z!e;Gs^pt(Fgiiv~4euikh%K2K4vTdXKvAr#a!kLpcxVr8I^e*k>wg4wSFYEb5)!g|1QSt30y-}AFB+-AW12f@S0oU4Q=N>BI3Wp+REKWNxaWC#@|>5(OiRM$YJ z+5X+bE8cS@(24P*-U<_6T;?usBxZ;WrFkVk2 zUR^=W21!OszOd^GR51(nFL@9|vLJgJ-pk%|40Rn`H(LX7GSV|IQrqrN_xbAsg-@Zl z2+-tUiHCNG)w#*vtF#Qs?E)hxOi?xAovpCR|USuysIS1>igYxGG zQIz7u(Xt9y^0KhiiL6g1%#$BvuweNgKV?yS5{njxYYgfk57ZdEkLPFjJDL}T4EH+& za{Hl`+6P=F7ZFawI|spOZj9T=WD*_Md-9Q# 
z_BZ{1#aZ-R4qvwb)VXMPRT#1{!(C~HzgI+WiS)u|{kq40RB2UZ%ZnDQ4Kl<<0ks}o3>B1?pr=sju zg8~b8anYIol87Rh8G_V&zPHpM3|rhE zbevddhxmi{uF8IO`bflW)lMT!ZzvW@8-lAzh)bMU_w+JYEE6jUQH20znRs?2A;N?) zz~Y>9Q)_@6KtYT>GDdn2QfD`rA!VLKr463EXjB!D-}^b6y&t}8{!knko~Y7icCwt3 zSdsnk@v`56$D|eZzOx|5i6Uy89a2MSf4eJFC9wV#rGJkI2iG6Vub zRg)>$Gx)3#0iMSu`AaB|RP%E2+#o{oyU@R}EnjpVaNmEN$>c^3u4X>jGLk20m)WNa zoB(g;C2O3jlEy*vqU6>3GINj4&8xPIrt8V<$v=IVp0tv+?Q}-t{iF46>mD5EA6z8P8-$(Vf&DwWers=`4Ly<0xEDUnV?cD45E#4Yt*V5kpxL) zW+k|J!PZ99Ww;01(R;n{MqbiTpb-1a5d^DFo)lbqI?lEm-H#Mt?L|^sy<5RvUFnCn zI^9yU~`A z!C{&IYk@oj2Z?_b`yc_5a3kQT--(l-EH~sqNM9NiriyCIBPxn3$=MZIV!P6LBT)J7 zuE!c0VAb9_iquO-w!lwoW3e)kUg1IjIWeaLxm{pm!a&Qq!gDuKg}%VQS}oS&^s$_+ zWhJDx>PThuE$LyLx8mQ*{TsL1o4569O;+H15w9xdAhsOa6q{plu}MY4U#;+e%^ef9 za7NCH%G*h_1Jo^d$ri7tq%7#+ZnZEl0f2q3kPs}9e0u2=iT`Sq11++KDuh6`QtLav)MTv-s;ADAkIa;;Jv0MPJOy@CAjS z;j5-TMqbgaojyu4l!!r5nL_^aE3C?Ow2 zgF8s6zs$}bm(u?b!iuwj_#PHFuuDuU6fJbTRHZJ);)~ivr=Z==xax$cWr?MWjjMC2 z=p$D5BvJbg6|5xX`G$UsT5NQs1Z@Lhwuno_SMDZtAAX6; z462%$#v=)j*u~^Ne~*>CwwR~Sh?rt(HVk6z)BM?QKBSuP<%`oc3y{y(T&$eRB>`qW z7QFD>GTW7okA8D)Fw}uD*{IV=eWvCK1efE zlP5cFN7y$WGV$pj)lzI{a@V~ZT<`}Ni9KD?dF}SzB;*O=iJt+`D_vO{=w$aRHtGk$ zoeeYiEC}XMAV$8uh)EmlB)#V9RypW0uJdGEwlPLC3B9Xj0=$B)dM6l4ebmFD^X-lM zBkvA0!=kB{rLe2b3@!Lwy74Dn(?aip(i)?w0U~j9QSe2yg;{PT1{nkmv|BmWi%)U`QamteRv=9(IaNbei~uZ*R$MZ1B}r z^BGWkduxU}k(|7*#4^QT8Pn@;o{Av;=dwK5O_=g9#9My6z9UD41@*`X#l8ITw%Soe z$lZgSV9ZDFsZ}kSqaelMnI< z@F}HoP{69v(?(Yyc#hT(di154uTj#~k_)8vgN@pxR@2Qh77mylz4>vkvDdIHNbijc zgZxL~R=JTv2|lzdEWp@a53Ys^#c&bfb`}}02S$?ndv7q{>hZnYgaMS z$NxERzV^&-d*kMYH=vo=BqUeO9E{|JgR>1TteV9V#e>(WnCmf~Mz}Ff_^#N4dZn0l zkAoBul)*~nQAns7IinCyBaSkEV%RIeF(`MMe<>TcN_7%=!it|D;8IIy38_nH?#L;M ztG^w$9YqeP(;1Q&L$ucXg2$s|!I=@DhV}P9JxuBhpy2_Np`@2bD5GEX)VX%F%G=k&@V(#e0^FEtpd7gx3}6CQS%6^SSFeB}bK#&< zxhffwFQ9=}f2#6|sl|*LBv;>Ly{jMzZeeln@s5L>&s?o=@>|^zGL-NmxR#mDW^S>w z2@lx5$RZ9agL|9FoA0a$3!Fu&x!w+1SOC?=*pbFAhBppZsytTW#WPy6NJ|c6`bfcE zURH*>4>jvKNXl^x3^HDCoXySzAY*ucZ=+D<@vN`n)Y 
zA;6{4p?+=h05#Qq+RgzX)vH2ZZnFPV5!-=71{l4vlkr~_!a&FQu_KE(Y$?Z#jMw^9Yx}xoDoJ#IjtJt}6Kt$m2_t>9 zejNxZs2Q;dIrJ-y+NVrHkY+5PrN(Bnl!{mZ2DPsI)$_mpc(i%U`0GfB0iUaungn($ z0*Sly7hY`AwMN5W27}}&cRGjVdSp!7^^wl5auMIyDpr00^F8IVcCH(Q$Gb0rA$=(@ z|KXAXS8C!_;%*;Fq0J*2#hldLPyA&V0NRH}cH@O;>3(Bz@yJyp-?{X6m~^q$VYoH? zsuD+J`p(&PA{(+7Gk3bRvKp~UYh#$deJcL4Pt_=^NXE0j;1w~D5iG1tkISO*(s(0I zj|?zX1oOjNFp2oN@Vi#^6R#)s_s3Wx1$@eadQ&BI=4|Es25_~MNB~{!{<0|5|GYaV zDZ|NYlcj9xdfD`|L;<7c?CKYbNxbHB3!q`%)+q!*el&NeK#WEn(P0C;HXcTfEF}as zwhFBji7z4rgSfKPmf>INuf+whMu%o~W{DSejj36&2cuJ>-;n_wwtTG!=tu$$TiL;> zv^V-Bj~1;@t72AFj(eb;_CD-PvKmTN2rRD}G_W)A;TbK9v&U97=a8SGlz#%Ka1j5V z!y@dktdDSxxQW$%ld(A&fiPP`33gMXsovDZLBo=#aQrZqf_t@N)_GJ=Ys^Khh-K7R z&+luI=q58mn)j!w#Mq4$Fr9scxVT7yeRWn!AISNSAq~i z4g4T|oFK)Dv@?!nx1bcmUU<{#7>G?}bX0=ag*1|XyzmW$kC?W25c|V{>OtI~l+vVo zgRt>6t8$krCp_pXve_W4!aomrr{4b7svzfH5ec#>Vgu&RC`}GhaVdKbo|edVmTi}* zWBjdcw?nBE=4VvD)LPWYDRK+@O=~N0A$@1poib0-U#mQtnG6D>!YlD)CNS|Ar5<>S z8g7y)y(PR7cyM@rlYtp4R)Og-Kpt0qia1eR?(%v|cw}eP)b=GOn+c*G!kR8FZHZY~ zVF?!)PrybPU*4rWb^ahS)4;XJz2Hhf?sKc?`W?Z{DwN|_4-o;nfE_Vdz&zgj_CFVP zeI2~^kgioiws9!p$s!UGB*Jka0J_%jhte!q{Qx{wk`GB@-;(Kn`Js+~2#RI&#hBJ4 zX-Ex%y1Ghx?I!$sWtmu29zh2AL{tq$O;OpNQ8)aKgr-0`(jpvtx&tw*x*#F_wv9jQ z`Bz%D;A-c{IS??GaTR20j7J{VKXE!#0000}Y7lFp&~bbK0pfUos0aYD^^l{n#Ao{g K000001X)^a`4u++ From 94a53adff1da71b253536e843bbe20b6d7a16c76 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Tue, 8 Jun 2021 10:53:32 +0430 Subject: [PATCH 0361/3093] zypper_repository: fix idempotency on adding repo with releasever and basearch variables (#2722) * zypper_repository: Check idempotency on adding repo with releasever * Name required when adding non-repo files. 
* Initial try to fix releasever * Replace re.sub with .replace * name releaseverrepo releaseverrepo * Change to ansible_distribution_version for removing repo * improve asserts format * add changelog * Fix changelog formatting Co-authored-by: Felix Fontein * improve command used for retrieving releasever variable Co-authored-by: Felix Fontein * add basearch replace * Add basearch to changelog fragment * Check for releasever and basearch only when they are there Co-authored-by: Felix Fontein --- ...potency_on_adding_repo_with_releasever.yml | 5 ++ .../modules/packaging/os/zypper_repository.py | 14 ++++- .../tasks/zypper_repository.yml | 58 +++++++++++++++++++ 3 files changed, 75 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml diff --git a/changelogs/fragments/2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml b/changelogs/fragments/2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml new file mode 100644 index 0000000000..faada2e9bf --- /dev/null +++ b/changelogs/fragments/2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml @@ -0,0 +1,5 @@ +--- +bugfixes: + - zypper_repository - fix idempotency on adding repository with + ``$releasever`` and ``$basearch`` variables + (https://github.com/ansible-collections/community.general/issues/1985). diff --git a/plugins/modules/packaging/os/zypper_repository.py b/plugins/modules/packaging/os/zypper_repository.py index f1d85376f5..608675528d 100644 --- a/plugins/modules/packaging/os/zypper_repository.py +++ b/plugins/modules/packaging/os/zypper_repository.py @@ -175,7 +175,7 @@ def _parse_repos(module): module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr) -def _repo_changes(realrepo, repocmp): +def _repo_changes(module, realrepo, repocmp): "Check whether the 2 given repos have different settings." 
for k in repocmp: if repocmp[k] and k not in realrepo: @@ -186,6 +186,16 @@ def _repo_changes(realrepo, repocmp): valold = str(repocmp[k] or "") valnew = v or "" if k == "url": + if '$releasever' in valold or '$releasever' in valnew: + cmd = ['rpm', '-q', '--qf', '%{version}', '-f', '/etc/os-release'] + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + valnew = valnew.replace('$releasever', stdout) + valold = valold.replace('$releasever', stdout) + if '$basearch' in valold or '$basearch' in valnew: + cmd = ['rpm', '-q', '--qf', '%{arch}', '-f', '/etc/os-release'] + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + valnew = valnew.replace('$basearch', stdout) + valold = valold.replace('$basearch', stdout) valold, valnew = valold.rstrip("/"), valnew.rstrip("/") if valold != valnew: return True @@ -215,7 +225,7 @@ def repo_exists(module, repodata, overwrite_multiple): return (False, False, None) elif len(repos) == 1: # Found an existing repo, look for changes - has_changes = _repo_changes(repos[0], repodata) + has_changes = _repo_changes(module, repos[0], repodata) return (True, has_changes, repos) elif len(repos) >= 2: if overwrite_multiple: diff --git a/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml b/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml index 0290fa4da2..4490ddca7d 100644 --- a/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml +++ b/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml @@ -125,3 +125,61 @@ priority: 100 auto_import_keys: true state: "present" + +- name: add a repo by releasever + community.general.zypper_repository: + name: releaseverrepo + repo: http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_$releasever/ + state: present + register: add_repo + +- name: add a repo by releasever again + community.general.zypper_repository: + name: releaseverrepo + repo: 
http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_$releasever/ + state: present + register: add_repo_again + +- assert: + that: + - add_repo is changed + - add_repo_again is not changed + +- name: remove added repo + community.general.zypper_repository: + repo: http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_{{ ansible_distribution_version }}/ + state: absent + register: remove_repo + +- assert: + that: + - remove_repo is changed + +- name: add a repo by basearch + community.general.zypper_repository: + name: basearchrepo + repo: https://packagecloud.io/netdata/netdata/opensuse/13.2/$basearch + state: present + register: add_repo + +- name: add a repo by basearch again + community.general.zypper_repository: + name: basearchrepo + repo: https://packagecloud.io/netdata/netdata/opensuse/13.2/$basearch + state: present + register: add_repo_again + +- assert: + that: + - add_repo is changed + - add_repo_again is not changed + +- name: remove added repo + community.general.zypper_repository: + repo: https://packagecloud.io/netdata/netdata/opensuse/13.2/x86_64 + state: absent + register: remove_repo + +- assert: + that: + - remove_repo is changed From bb37b67166a8c80efca92e608f397e4cd820eb5e Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 8 Jun 2021 08:46:20 +0200 Subject: [PATCH 0362/3093] flatpak: add tests in CI, add no_dependencies parameter (#2751) * Similar version restrictions than flatpak_remote tests. * ... * Try to work around missing dependencies. * Revert "Try to work around missing dependencies." This reverts commit 66a4e385668d0212e1150dcfb743478cf5aa042e. * Add changelog. * App8 -> App2; make sure that there are two apps App1 and App2. * Fix forgotten variabe. * Remove test notices. * Seems like flatpak no longer supports file:// URLs. The tests would need to be rewritten to offer the URL via http:// instead. * Try local HTTP server for URL tests. * ... * Lint, add status check. 
* Add boilerplate. * Add 'ps aux'. * Surrender to -f. * Work around apparent flatpak bug. * Fix YAML. * Improve condition. * Make sure test reruns behave better. --- .../2751-flatpak-no_dependencies.yml | 2 + plugins/modules/packaging/os/flatpak.py | 55 ++++++------- .../modules/packaging/os/flatpak_remote.py | 21 ----- tests/integration/targets/flatpak/aliases | 3 +- .../targets/flatpak/files/serve.py | 65 +++++++++++++++ .../integration/targets/flatpak/meta/main.yml | 1 + .../targets/flatpak/tasks/check_mode.yml | 39 +++++---- .../targets/flatpak/tasks/main.yml | 21 ++++- .../targets/flatpak/tasks/setup.yml | 44 +++++++--- .../targets/flatpak/tasks/test.yml | 76 ++++++++++++------ .../setup_flatpak_remote/create-repo.sh | 68 +++++++++------- .../setup_flatpak_remote/files/repo.tar.xz | Bin 5524 -> 6436 bytes 12 files changed, 255 insertions(+), 140 deletions(-) create mode 100644 changelogs/fragments/2751-flatpak-no_dependencies.yml create mode 100644 tests/integration/targets/flatpak/files/serve.py diff --git a/changelogs/fragments/2751-flatpak-no_dependencies.yml b/changelogs/fragments/2751-flatpak-no_dependencies.yml new file mode 100644 index 0000000000..a07ead96da --- /dev/null +++ b/changelogs/fragments/2751-flatpak-no_dependencies.yml @@ -0,0 +1,2 @@ +minor_changes: +- "flatpak - add ``no_dependencies`` parameter (https://github.com/ansible/ansible/pull/55452, https://github.com/ansible-collections/community.general/pull/2751)." diff --git a/plugins/modules/packaging/os/flatpak.py b/plugins/modules/packaging/os/flatpak.py index 1be1a72243..4a9e214fde 100644 --- a/plugins/modules/packaging/os/flatpak.py +++ b/plugins/modules/packaging/os/flatpak.py @@ -6,27 +6,6 @@ # Copyright: (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# ATTENTION CONTRIBUTORS! 
-# -# TL;DR: Run this module's integration tests manually before opening a pull request -# -# Long explanation: -# The integration tests for this module are currently NOT run on the Ansible project's continuous -# delivery pipeline. So please: When you make changes to this module, make sure that you run the -# included integration tests manually for both Python 2 and Python 3: -# -# Python 2: -# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak -# Python 3: -# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak -# -# Because of external dependencies, the current integration tests are somewhat too slow and brittle -# to be included right now. I have plans to rewrite the integration tests based on a local flatpak -# repository so that they can be included into the normal CI pipeline. -# //oolongbrothers - - from __future__ import (absolute_import, division, print_function) __metaclass__ = type @@ -60,18 +39,28 @@ options: name: description: - The name of the flatpak to manage. - - When used with I(state=present), I(name) can be specified as an C(http(s)) URL to a + - When used with I(state=present), I(name) can be specified as a URL to a C(flatpakref) file or the unique reverse DNS name that identifies a flatpak. + - Both C(https://) and C(http://) URLs are supported. - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit). - When used with I(state=absent), it is recommended to specify the name in the reverse DNS format. - - When supplying an C(http(s)) URL with I(state=absent), the module will try to match the + - When supplying a URL with I(state=absent), the module will try to match the installed flatpak based on the name of the flatpakref to remove it. 
However, there is no guarantee that the names of the flatpakref file and the reverse DNS name of the installed flatpak do match. type: str required: true + no_dependencies: + description: + - If installing runtime dependencies should be omitted or not + - This parameter is primarily implemented for integration testing this module. + There might however be some use cases where you would want to have this, like when you are + packaging your own flatpaks. + type: bool + default: false + version_added: 3.2.0 remote: description: - The flatpak remote (repository) to install the flatpak from. @@ -94,10 +83,11 @@ EXAMPLES = r''' name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref state: present -- name: Install the gedit flatpak package +- name: Install the gedit flatpak package without dependencies (not recommended) community.general.flatpak: name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref state: present + no_dependencies: true - name: Install the gedit package from flathub for current user community.general.flatpak: @@ -153,18 +143,21 @@ from ansible.module_utils.basic import AnsibleModule OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application" -def install_flat(module, binary, remote, name, method): +def install_flat(module, binary, remote, name, method, no_dependencies): """Add a new flatpak.""" global result flatpak_version = _flatpak_version(module, binary) + command = [binary, "install", "--{0}".format(method)] if StrictVersion(flatpak_version) < StrictVersion('1.1.3'): - noninteractive_arg = "-y" + command += ["-y"] else: - noninteractive_arg = "--noninteractive" + command += ["--noninteractive"] + if no_dependencies: + command += ["--no-deps"] if name.startswith('http://') or name.startswith('https://'): - command = [binary, "install", "--{0}".format(method), noninteractive_arg, name] + command += [name] else: - command = [binary, "install", "--{0}".format(method), noninteractive_arg, 
remote, name] + command += [remote, name] _flatpak_command(module, module.check_mode, command) result['changed'] = True @@ -279,6 +272,7 @@ def main(): choices=['user', 'system']), state=dict(type='str', default='present', choices=['absent', 'present']), + no_dependencies=dict(type='bool', default=False), executable=dict(type='path', default='flatpak') ), supports_check_mode=True, @@ -287,6 +281,7 @@ def main(): name = module.params['name'] state = module.params['state'] remote = module.params['remote'] + no_dependencies = module.params['no_dependencies'] method = module.params['method'] executable = module.params['executable'] binary = module.get_bin_path(executable, None) @@ -301,7 +296,7 @@ def main(): module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result) if state == 'present' and not flatpak_exists(module, binary, name, method): - install_flat(module, binary, remote, name, method) + install_flat(module, binary, remote, name, method, no_dependencies) elif state == 'absent' and flatpak_exists(module, binary, name, method): uninstall_flat(module, binary, name, method) diff --git a/plugins/modules/packaging/os/flatpak_remote.py b/plugins/modules/packaging/os/flatpak_remote.py index dbb211c2fb..a7767621d7 100644 --- a/plugins/modules/packaging/os/flatpak_remote.py +++ b/plugins/modules/packaging/os/flatpak_remote.py @@ -6,27 +6,6 @@ # Copyright: (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -# ATTENTION CONTRIBUTORS! -# -# TL;DR: Run this module's integration tests manually before opening a pull request -# -# Long explanation: -# The integration tests for this module are currently NOT run on the Ansible project's continuous -# delivery pipeline. 
So please: When you make changes to this module, make sure that you run the -# included integration tests manually for both Python 2 and Python 3: -# -# Python 2: -# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak_remote -# Python 3: -# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak_remote -# -# Because of external dependencies, the current integration tests are somewhat too slow and brittle -# to be included right now. I have plans to rewrite the integration tests based on a local flatpak -# repository so that they can be included into the normal CI pipeline. -# //oolongbrothers - - from __future__ import (absolute_import, division, print_function) __metaclass__ = type diff --git a/tests/integration/targets/flatpak/aliases b/tests/integration/targets/flatpak/aliases index 59e306f8b4..39291d435b 100644 --- a/tests/integration/targets/flatpak/aliases +++ b/tests/integration/targets/flatpak/aliases @@ -1,4 +1,4 @@ -unsupported +shippable/posix/group3 destructive skip/aix skip/freebsd @@ -6,4 +6,3 @@ skip/osx skip/macos skip/rhel needs/root -needs/privileged diff --git a/tests/integration/targets/flatpak/files/serve.py b/tests/integration/targets/flatpak/files/serve.py new file mode 100644 index 0000000000..d9ca2d17a5 --- /dev/null +++ b/tests/integration/targets/flatpak/files/serve.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import posixpath +import sys + +try: + from http.server import SimpleHTTPRequestHandler, HTTPServer + from urllib.parse import unquote +except ImportError: + from SimpleHTTPServer import SimpleHTTPRequestHandler + from BaseHTTPServer import HTTPServer + from urllib import unquote + + +# Argument parsing +if len(sys.argv) != 4: + print('Syntax: {0} '.format(sys.argv[0])) + sys.exit(-1) + +HOST, PORT, PATH 
= sys.argv[1:4] +PORT = int(PORT) + + +# The HTTP request handler +class Handler(SimpleHTTPRequestHandler): + def translate_path(self, path): + # Modified from Python 3.6's version of SimpleHTTPRequestHandler + # to support using another base directory than CWD. + + # abandon query parameters + path = path.split('?', 1)[0] + path = path.split('#', 1)[0] + # Don't forget explicit trailing slash when normalizing. Issue17324 + trailing_slash = path.rstrip().endswith('/') + try: + path = unquote(path, errors='surrogatepass') + except (UnicodeDecodeError, TypeError) as exc: + path = unquote(path) + path = posixpath.normpath(path) + words = path.split('/') + words = filter(None, words) + path = PATH + for word in words: + if os.path.dirname(word) or word in (os.curdir, os.pardir): + # Ignore components that are not a simple file/directory name + continue + path = os.path.join(path, word) + if trailing_slash: + path += '/' + return path + + +# Run simple HTTP server +httpd = HTTPServer((HOST, PORT), Handler) + +try: + httpd.serve_forever() +except KeyboardInterrupt: + pass + +httpd.server_close() diff --git a/tests/integration/targets/flatpak/meta/main.yml b/tests/integration/targets/flatpak/meta/main.yml index 07faa21776..314f77eba9 100644 --- a/tests/integration/targets/flatpak/meta/main.yml +++ b/tests/integration/targets/flatpak/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - prepare_tests + - setup_flatpak_remote diff --git a/tests/integration/targets/flatpak/tasks/check_mode.yml b/tests/integration/targets/flatpak/tasks/check_mode.yml index 3186fd2830..2270e0a9be 100644 --- a/tests/integration/targets/flatpak/tasks/check_mode.yml +++ b/tests/integration/targets/flatpak/tasks/check_mode.yml @@ -4,8 +4,8 @@ - name: Test addition of absent flatpak (check mode) flatpak: - name: org.gnome.Characters - remote: flathub + name: com.dummy.App1 + remote: dummy-remote state: present register: addition_result check_mode: true @@ -18,8 +18,8 @@ - name: Test non-existent 
idempotency of addition of absent flatpak (check mode) flatpak: - name: org.gnome.Characters - remote: flathub + name: com.dummy.App1 + remote: dummy-remote state: present register: double_addition_result check_mode: true @@ -36,7 +36,7 @@ - name: Test removal of absent flatpak check mode flatpak: - name: org.gnome.Characters + name: com.dummy.App1 state: absent register: removal_result check_mode: true @@ -51,8 +51,8 @@ - name: Test addition of absent flatpak with url (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote state: present register: url_addition_result check_mode: true @@ -65,8 +65,8 @@ - name: Test non-existent idempotency of addition of absent flatpak with url (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote state: present register: double_url_addition_result check_mode: true @@ -85,7 +85,7 @@ - name: Test removal of absent flatpak with url not doing anything (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref state: absent register: url_removal_result check_mode: true @@ -96,15 +96,14 @@ - url_removal_result is not changed msg: "Removing an absent flatpak shall mark module execution as not changed" - # - Tests with present flatpak ------------------------------------------------- # state=present on present flatpak - name: Test addition of present flatpak (check mode) flatpak: - name: org.gnome.Calculator - remote: flathub + name: com.dummy.App2 + remote: dummy-remote state: present register: addition_present_result check_mode: true @@ -119,7 +118,7 @@ - name: Test removal of present flatpak (check mode) flatpak: - name: org.gnome.Calculator + name: 
com.dummy.App2 state: absent register: removal_present_result check_mode: true @@ -132,7 +131,7 @@ - name: Test non-existent idempotency of removal (check mode) flatpak: - name: org.gnome.Calculator + name: com.dummy.App2 state: absent register: double_removal_present_result check_mode: true @@ -149,8 +148,8 @@ - name: Test addition with url of present flatpak (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Calculator.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + remote: dummy-remote state: present register: url_addition_present_result check_mode: true @@ -165,7 +164,7 @@ - name: Test removal with url of present flatpak (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Calculator.flatpakref + name: http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref state: absent register: url_removal_present_result check_mode: true @@ -178,8 +177,8 @@ - name: Test non-existent idempotency of removal with url of present flatpak (check mode) flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Calculator.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + remote: dummy-remote state: absent register: double_url_removal_present_result check_mode: true diff --git a/tests/integration/targets/flatpak/tasks/main.yml b/tests/integration/targets/flatpak/tasks/main.yml index a1d1bda8a4..68d41d2efe 100644 --- a/tests/integration/targets/flatpak/tasks/main.yml +++ b/tests/integration/targets/flatpak/tasks/main.yml @@ -30,8 +30,8 @@ - name: Test executable override flatpak: - name: org.gnome.Characters - remote: flathub + name: com.dummy.App1 + remote: dummy-remote state: present executable: nothing-that-exists ignore_errors: true @@ -57,5 +57,20 @@ vars: method: system + always: + + - name: Check HTTP server status + async_status: + jid: "{{ webserver_status.ansible_job_id }}" + ignore_errors: true + + - name: List processes + command: ps aux + + - 
name: Stop HTTP server + command: >- + pkill -f -- '{{ remote_tmp_dir }}/serve.py' + when: | - ansible_distribution in ('Fedora', 'Ubuntu') + ansible_distribution == 'Fedora' or + ansible_distribution == 'Ubuntu' and not ansible_distribution_major_version | int < 16 diff --git a/tests/integration/targets/flatpak/tasks/setup.yml b/tests/integration/targets/flatpak/tasks/setup.yml index 2dfa33a0b1..98b07cd480 100644 --- a/tests/integration/targets/flatpak/tasks/setup.yml +++ b/tests/integration/targets/flatpak/tasks/setup.yml @@ -4,32 +4,58 @@ state: present become: true when: ansible_distribution == 'Fedora' + - block: - name: Activate flatpak ppa on Ubuntu apt_repository: repo: ppa:alexlarsson/flatpak state: present mode: '0644' + when: ansible_lsb.major_release | int < 18 + - name: Install flatpak package on Ubuntu apt: name: flatpak state: present - become: true + when: ansible_distribution == 'Ubuntu' -- name: Enable flathub for user + +- name: Install dummy remote for user flatpak_remote: - name: flathub + name: dummy-remote state: present - flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo + flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo method: user -- name: Enable flathub for system + +- name: Install dummy remote for system flatpak_remote: - name: flathub + name: dummy-remote state: present - flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo + flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo method: system + +- name: Remove (if necessary) flatpak for testing check mode on absent flatpak + flatpak: + name: com.dummy.App1 + remote: dummy-remote + state: absent + no_dependencies: true + - name: Add flatpak for testing check mode on present flatpak flatpak: - name: org.gnome.Calculator - remote: flathub + name: com.dummy.App2 + remote: dummy-remote state: present + no_dependencies: true + +- name: Copy HTTP server + copy: + src: serve.py + dest: '{{ remote_tmp_dir }}/serve.py' + mode: '0755' + +- name: Start 
HTTP server + command: '{{ remote_tmp_dir }}/serve.py 127.0.0.1 8000 /tmp/flatpak/' + async: 120 + poll: 0 + register: webserver_status diff --git a/tests/integration/targets/flatpak/tasks/test.yml b/tests/integration/targets/flatpak/tasks/test.yml index 1e7d888bb5..7442e4b468 100644 --- a/tests/integration/targets/flatpak/tasks/test.yml +++ b/tests/integration/targets/flatpak/tasks/test.yml @@ -2,10 +2,11 @@ - name: Test addition - {{ method }} flatpak: - name: org.gnome.Characters - remote: flathub + name: com.dummy.App1 + remote: dummy-remote state: present method: "{{ method }}" + no_dependencies: true register: addition_result - name: Verify addition test result - {{ method }} @@ -16,10 +17,11 @@ - name: Test idempotency of addition - {{ method }} flatpak: - name: org.gnome.Characters - remote: flathub + name: com.dummy.App1 + remote: dummy-remote state: present method: "{{ method }}" + no_dependencies: true register: double_addition_result - name: Verify idempotency of addition test result - {{ method }} @@ -32,9 +34,10 @@ - name: Test removal - {{ method }} flatpak: - name: org.gnome.Characters + name: com.dummy.App1 state: absent method: "{{ method }}" + no_dependencies: true register: removal_result - name: Verify removal test result - {{ method }} @@ -45,9 +48,10 @@ - name: Test idempotency of removal - {{ method }} flatpak: - name: org.gnome.Characters + name: com.dummy.App1 state: absent method: "{{ method }}" + no_dependencies: true register: double_removal_result - name: Verify idempotency of removal test result - {{ method }} @@ -60,10 +64,11 @@ - name: Test addition with url - {{ method }} flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote state: present method: "{{ method }}" + no_dependencies: true register: url_addition_result - name: Verify addition test result - {{ method }} @@ -74,10 +79,11 @@ - name: Test 
idempotency of addition with url - {{ method }} flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref - remote: flathub + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + remote: dummy-remote state: present method: "{{ method }}" + no_dependencies: true register: double_url_addition_result - name: Verify idempotency of addition with url test result - {{ method }} @@ -90,26 +96,46 @@ - name: Test removal with url - {{ method }} flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref state: absent method: "{{ method }}" + no_dependencies: true register: url_removal_result + ignore_errors: true -- name: Verify removal test result - {{ method }} +- name: Verify removal test result failed - {{ method }} + # It looks like flatpak has a bug when the hostname contains a port. If this is the case, it emits + # the following message, which we check for. If another error happens, we fail. + # Upstream issue: https://github.com/flatpak/flatpak/issues/4307 + # (The second message happens with Ubuntu 18.04.) 
assert: that: - - url_removal_result is changed - msg: "state=absent with url as name shall remove flatpak when present" + - >- + url_removal_result.msg in [ + "error: Invalid branch 127.0.0.1:8000: Branch can't contain :", + "error: Invalid id http:: Name can't contain :", + ] + when: url_removal_result is failed -- name: Test idempotency of removal with url - {{ method }} - flatpak: - name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref - state: absent - method: "{{ method }}" - register: double_url_removal_result +- when: url_removal_result is not failed + block: -- name: Verify idempotency of removal with url test result - {{ method }} - assert: - that: - - double_url_removal_result is not changed - msg: "state=absent with url as name shall not do anything when flatpak is not present" + - name: Verify removal test result - {{ method }} + assert: + that: + - url_removal_result is changed + msg: "state=absent with url as name shall remove flatpak when present" + + - name: Test idempotency of removal with url - {{ method }} + flatpak: + name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref + state: absent + method: "{{ method }}" + no_dependencies: true + register: double_url_removal_result + + - name: Verify idempotency of removal with url test result - {{ method }} + assert: + that: + - double_url_removal_result is not changed + msg: "state=absent with url as name shall not do anything when flatpak is not present" diff --git a/tests/integration/targets/setup_flatpak_remote/create-repo.sh b/tests/integration/targets/setup_flatpak_remote/create-repo.sh index 1b09bb7956..4ece76ccfc 100755 --- a/tests/integration/targets/setup_flatpak_remote/create-repo.sh +++ b/tests/integration/targets/setup_flatpak_remote/create-repo.sh @@ -1,51 +1,59 @@ #!/usr/bin/env bash set -eux -flatpak install -y --system flathub org.freedesktop.Platform//1.6 org.freedesktop.Sdk//1.6 - -echo $'#!/bin/sh\necho hello world' > hello.sh - -export NUM=1 -flatpak 
build-init appdir$NUM com.dummy.App$NUM org.freedesktop.Sdk org.freedesktop.Platform 1.6; -flatpak build appdir$NUM mkdir /app/bin; -flatpak build appdir$NUM install --mode=750 hello.sh /app/bin; -flatpak build-finish --command=hello.sh appdir$NUM - -flatpak build-export repo appdir$NUM stable +# Delete traces from last run +rm -rf appdir* dummy-repo.gpg gpg hello.sh repo +# Create GPG key mkdir -p gpg chmod 0700 gpg gpg --homedir gpg --batch --passphrase '' --quick-gen-key test@dummy.com future-default default 10y - KEY_ID=$(gpg --homedir=gpg --list-keys --with-colons test@dummy.com | grep fpr: | head -1 | cut -d ':' -f 10) - gpg --homedir=gpg --export "${KEY_ID}" > dummy-repo.gpg - BASE64_PUBLIC_KEY=$(base64 dummy-repo.gpg | tr -d '\n') -cat > repo/com.dummy.App1.flatpakref < hello.sh + +for NUM in 1 2; do + flatpak build-init appdir${NUM} com.dummy.App${NUM} org.freedesktop.Sdk org.freedesktop.Platform 1.6; + flatpak build appdir${NUM} mkdir /app/bin; + flatpak build appdir${NUM} install --mode=750 hello.sh /app/bin; + flatpak build-finish --command=hello.sh appdir${NUM} + + flatpak build-export repo appdir${NUM} stable + + cat > repo/com.dummy.App${NUM}.flatpakref < repo/dummy-repo.flatpakrepo <v>5N@un}Hi1L#y~)Z- z{84!W3f)Esb7jq8xj(H~uV-ILw_hEK-tS2ml%=3W(9ZbK%*zGuyzw% zx|CRvE$)Z8hP(q}r5_|!pL6c&F%PC`(i85LvmX4tby$`<){6<0vLZ&gXzNHVZ&Xf~SL^u8sOHdo})6*qX{X~g@Rb%r$B zquEDB>&GQIE73m2JY`xc#+vdGzyT>afBQP{JpJd%k^#05fTyu|mQGF%YV6zMjsMHtkdFZgCC3Hu1hT#!3sP zlX%{M1-qoEJrl#7R+_{(He}QOx3|*uAuy=7BSbaUQ8?+uC3n_im4s73NG;MPAh=aV zsr!5fZ9@geCLM}Jn>G|r+q1urn&NgkY4kCkgSRk&Nj)-6mJ}r({(ADFv?Rj1Qi%V= z#?W3PY}yyhh)TAJTmea{WQz!$!kLBs#hrb!Oe&|_EO+32=p~I7Sqw@dyF6|0q@M8hmxVyd6gGOW|A*fgO zCjkQ-W<>L7srkC{ZDbazsy?xXWtl6Tn^-^s5r^=XB~c~PZoyai;ogn=Y?otN+g6?B zWT2F(AKl^h>@zuadxsdm1EXN(hxfSJYH8;e zz;C?|(!_t_gVJQO-YhxZ#DIbf^JItaMJEvG3=GNsV|Pda>WptsL5Ak0>x>89MY&QR zvxJw5)nrELLCY}g=4zkb23M+VcZJ$kRf>*B)7NH^5*~YC1&?h)!NDqJ#4h)y^7lSF zi0y|>Mb(4Bi+hVEtJNPzDGi5ePhs41$l@VJ6rX;uYD1>|^@C@*E|^CE22`*L6)9O( 
z?oddJ%D4`7)w*+JE5dme9)yRz6QUBv7seZjaaPl+=5^@LXrU2)SW!puqXN*k~< zxJ=^x0|xbva8VGz2sP|0OQ49+NDlq!uPRo^kf`w}9bO}}YYnnZfrnFS6OoqH0DR0Z z)13cHH?2TM5eIPv98c!~DoZUl=tf4~Ez+DZpFz9!hD8{t@ygq-u-`2F@8>dIx~3Pp z_~>w^QGN4mH_|yzkiIl29x4}JQ`?SIiDy8OIc$Rthg#B&sK;_kF(Hm(+xpjo$9{3} zf@XWNwLx*Vd_dj*OANsrRiQ-b!<{tKhioy&{7kMaBAL9ZYqlDm`$jG_&t~4py9Ya*Ge+y~Pv>HtJcFY8 zihplQ2s3yH>rf+Gfp*T`P}GI@BX&K=2Culc>vvXu$qsx^^i!;(p07mgthO5Vt3XwW z-S*i?CrwF}cE4bC?{sxVTM0iKA}878XK!_j}n zlK6Acn`k-ISyDh<>V?YujFd0=*QcORor3$IxiL^c^H>cZN#Ti~LF>Z*guR2b@FiEP zi%z<6o<4R@1mf4(89Y@fr2_iLU2H;6^ScvM6>{p5qy#U3#`+2b5+>I~Z1{Qcp68UB>zR!!-prXQw5y4( zgDKV1lMkGqQl!qMQYo**XhY34KJT4ex%fejcv zZtoQ=*~j`gYdvg;tQHVS^TBB^88ATLx!w{}b>CjXr6-Gk!0=mAp!C5o2jJHp%{A6Q z);jVS6UGe2k_eN$vMlN9BEzj$P+lss#h9gtB0|Slj-IbZAjy0vp+wvw`|l=$>$Db{ z-zX7JIZcLr+t_(b6|;jMi!T zolTrC91#&H^S!VZL5FS!4n!;Ov1TVn?=$9+44O#ga`2q`7k`0GT9BdFrCf(5QkIcBh1g_v`9#z=X}^u zEX3sA`gUnR{hP2ok2#U)b{&^yKRK|m35I1K2YzRc@F<7ISA)5&68BU@T`2SaIh0{Q zR+4P&P?>nXe-5Tu4ADqcQ8IXzze|bQdAZbeumC-v_asSQn; zcsa{!jf~)!@ApoXJ|eoTn#@sI%n_!eZYFJ?wwYK}&b4lJ3xGVH*ed!&CYPC&HO*{2 zx!&?sSwm+%<^c6=(?~CB>I7PY3&9n2}!JRmQS?&N4(`>5bCe#G>_!l-U zV=&BmtaM!@pCI6<7{y&03<(BUBPfg=}#=V1QjNh9r^D3ZQ~$ z-+d4(XSuVNC#N0n>`4rUY=>51W00YMceM5OJr_HgTTfSLEz{8iti&SNn>(EV$o9JW z!m(K@?NUx=&_SExn*qX~{m)clg3E3uI!yy-DPLdxN1DU*tpvKk!8glk*Hf;dQar=R z7on>L@+5L?e(v{&A8(jHcO7kAloZWS&3%0pW#C-Auw_GzDtBS?LYD*}^H6l(nT4DYJ9ve?9;u&7=<;D*FhwV`{oy33F!QH)y+h(Q_{^S__4O*`fbWV(A7 zWjUxL4a#hZr!puK*C~5l45>P9E67TlKz$7iUb0YsC|Yb zCnipl&)YZ90KN)Bxo#d<(iou(K{&J$GWS^<<=I+@__=Xt(A)>w+Yi}dKp8?N2KRwN z^gr}Vwq&VE4@VV%9@O>z>ERVLH36<>+mQ?%=aXkN*odx#Kq2}t^_A!}w#ln1zKPps zedEUYxuo@>=iu>96C9}yB zCbMidGlvUKENqLxrW2cg``-*3sZf3a$US)1zpgLentlC+dxuXcb#e0pH6eYbJF<(` z_(Kj?UXp$c-l`|EQjCV;n|z?PW2`l{DIOv3&U`!(tFpw!5fx^g(=@fBZq$tCk;a}L z?6uR0bKlS#MZXT{8{XUTd)j)acoP7%fg`4QNBm-F=w;VM(4%AF&^DR|Guo7@3417* z1~o&vueD=BWCKjHpR(`EZpGc4HGP_xU-J_pCsD>lqJCrA#QFIhAjjF}&De*Ukji|& zwq=!tF1#YjM2Dln$(9PGgQxvK<_3#XaRKjv3FymIvD{+O|B~>3*5T`aNVtD;A^}9v 
zPqTh15!MoN?6?KmUg5QY4u|}W6V^A$t!@H*8YtK*JFgf5r2_F4+dd-;oDv7lRl?)A zjlAgqldfV%)8HzC&Ph+uhd^-yLWe^l=?8a&12-Ume96LIJV&0ayQ zEn?2vlqGF3$U=z(-(fZ$tm|>X99kReTLQ8t)u5ltKiMhYq8cL7?Ik@{Sl;Br$a^m& zFD{ZgAv<5POi51>jj>s4sY#-jfDD-Va9FS>^It5_)V8d1)gZs&3zY4)KNF@3d2tn_ zbmXJ*`qQy!S2LEe(bB>G+Y1`&U>rh%dhH3#{MJds9$SwyNYE0n8;s&C=F3DAF85K` zyz~>{k4>dfFy$ks6~q}?sE$8PqQ%BnWg}@qS&AEVvnB5oYxlSxFU|)8D}Re8y3`ur zUlS`Jr|{LMxcP;BwzbI=?!2m`=czdR!Gm*bZEG<;C*@u4)R_SOdzt}!#;0QU?GrWx z(Lv@(8um}*&n6Nas;|`H<~RFk{-`kFbxRuUy$aZ&=aIgMt*6Kx-_=&v6KPfrN&YcP z(ONj_*Q`9$^OcNv!)XRPW5)@JGiz>U2@9tp%aOVi5tfG;a(IT#T5aPIfsmM*BGw{3 z&>-cf;$BH$4La$#zC^ue#8eHZR}=l#%%=4|27{Roc&molVxkR1#f3crno(y59e7uY z;wGqG6(iv~UZT5Ck`J5l8bJv_bG{!(#C5rQyMgXIyVxxf)|HK zoh#K!_$D}6sygpkNZEj5Yy29c$dn03ZPXxSymbD0d@bNhq$4rvQ7=E@=g*}lpY3Ta z6DUhSvmpHVX~4A(|18}-FcAbQq=Et>e7pA(_V9tnXWQ#I4j{VFdF+cVXmQ#RE2bhV z;BUI6f#|8(?6$dxAB!|kOk}CvDP9uw{7OPr79RE6sfkBIqxl$>HUYW|X%H*0n>Z+x z$JeHb2AYZcPy9l7m|1V1i+YKu=rfEwX#rzJfs6&|WJZ!{1m&it#Y%K{3HW3b^2BAu zDxJD3_N=RGd-#Bcw9<(A4TluQX}beZ&4r?ZQ&f~b&ajHscRaFabd#BM5-vR$G{Tnm zakJ|iprJoO7nSuf*Jz4G7Ajx?kstaIqc^RXq=3k!8`QM>WD~YZUs_(<4iGAK0Gcp1 z*FsNFE#=b)cy+7F1D$*S57pxj4-|)vDN*O@8{UaYm9oJZO+;)GzF-U_wmGNGM`)4x zVC{Cq2QVh6mp}>&c>KRZym}qc{$QmdbC7MmVj~b34u%DbOycuRMi~Sz{I$=rNQv$y zDo5s_sUl8K2jV6fnXewZ(7Yeui~8!5peW@sCh5WYXG(eA@g)mpJK1vin?qY($3{n& zdyv10S$%^5I7L8{v6v@p&bt7X@7D#zHePiI_Hp(dIRfD^cR}4cY~IRq>>^{lVl|E zUt+k`p7s9=R+ji-(kp16*(WHvE>e0Oi{E5VRphpC?mL3_t*M5M1eiCBJ%Nk%cvO@7 z-UgmF$$xoxz94TH_m)K- zW>Nq`uil4P5J{Ir2+cL1V}w&1@BzO7B4L<@9JnPDz|u@4bb1C`^-ydjrW>}Kc8trq-Suw90+K&UIaW>GLwj@XbzXWmCp zn;kGaR8DThc(5iNBB^tN$`mHytB#qUu)ktV(B}87t6C%zrvid%=;)B_+f#Bkhed0k zb@OPvh^2Yp!h@4+C9s2zGobnbcGKYp5?!OH805IoPKw>nwhg;x4NJJ0e~`qumk|irf*8I#Ao{g000001X)@i7;SF= literal 5524 zcmV;F6>IAKH+ooF000E$*0e?f03iVu0001VFXf})@BbB4T>v>5N@un}Hi1L#y~)Z- z{84!W3f)Esb7jq5>fDNTXAtbJtFLh#1{z_@XdBbYO#I_6ejX}{k|qPV5*F#tV*(KZ z&8y&v@-_@%O$~^*VQ2&(ntw~=X4{`14ln0V8gAs7B)fP}6FAt~Kb*`}cin_F`PjKM z=gDE4I1`vmQzegt`_V-)uy0DRn8>u3#F{_S@Q?#j3Pf@d^@SuYQthQu*4bx}Wxgr^ 
zpzQnk7-!^ooWgD}Y<{Z)25({OuU|9(2RU`Q#t(Nms*UWhQUf?8R&lYuSf-byBX0K8 z^I}P z;jsPqKwt(94>NX^omDYN6iLjF8^%Fq74AggLtsy+E4;4W#LVx|OUJ_~rkBr=I9G`S z-`lF>zkaJh8c8uPIv7!1R#=VKn4$v5FVtOAT~e-|DG}P7pu?u zg2?4<^6J^x4!h7r^PhZef;GY0o&cgx`7rh$+7fCPFWMF3s{tZp9=q<`0M;3wj#_1g zy)AF_+45$2ZeN7&sq;gF7CAoR3TC{{A3kUkG`e@pmB_i|#|gx}R%AMZmCD*tQpM$A zmFmu|SZA0^w?;w2^J#7(!b+rZ$z`H1_w}(oKbr6?5#;6I*|C;sURHR-b#HK0GgHwl zWlO4wdEbX1D|5Dcj7=Qurtcf#d@UCH2{rv6(9o2ArZcO^C#+^h0?!I+spQj?8~88n z&E4e>R8^&N7+DMiqM6PD31Wv0giL;`XN??_@^TLV4hsB_WSOq_+c>H2$Dzqflv>9< zh9V#;-2)ZRNP$CY1HLx8Cx$ODMd$i>V~Lai-_=C*H-nB$WV7w4_|_RdZxY=@`)PU9 zGU-R&Fm5PR*l=QSUedJi+q~F%qp&wRE~3tnSu#;tV>1kSB#HCAD|;^tLzOsOObfGC zxl)v7r8Kf`JlpPMyfI81Mv@JKh+8vmq_9c@{#q=-N;#Y-+4)mJ77tr#mrvm(XFmSP zG`G5J9L|fhqonikigA&LzgIIzkD0JRwG!tEUQDZ}LZ|#5Ual6Xaun6M+(HDC&+jiB zxRa*KG2+)q*k=l9wu2EZW|NN;6w^HoI>w-V*1+BB&9amec?NGnkvXq>&?V<0S79xs zE);IWKN6$<=AHej%3g*(e+_xcmwPhy$}qM#E#@F{P)$5H9+(}YyEW0ZG;nQX00+iN zlwO}I{>mXc^=^Y1-~obpGtoH%Sg8VHbMnK%bcKXdHdNlomWIl)pzNw;A8lK?zqCL$ zNtth-_+Epfbrx87EX;Qr527&3oF#4}aE}OWga@2=Ygo`4h3Y^~>!X^knst8lFS%W5 z22|-1!ABSJyfe!9gP2T9@aCFHh_E-a`8#UE-nKN8K4Lw8PgF5GHLGhpl%DwU3GWAP zb)mCUGrJ1spZEsCe>(_RS+yylRnFv0&(HZPqRJiW{T9=KHLS>_%NoQ>AjypKmy>(K zStvuAI*Zm{SABkrITglicDPv7B=GyQ>NkIV*#NS+VqdSyXmh*i{Giej9qux%*V4!v z04}f5;K6g%G(3;;FrbnT{Vlj~6@tey6YDC&(V_B1$QFNs87L<@XGuRxb z-rW@&t@xrgkZjqx%OVI!>_M>rt9_fwcs-qXQM@YqO~2H@>&AFrl^o0=9KMF&+7hz8 zm9Z0~y=WvtQFC@A4HYGmx0;`V`j zTBirBrULceJufTw9S0oQrlQkQqL4U~0>5<_BHQxuNcybgxl+KdM;RrG>B_c1s)bSoOGy?7`dzpWg+m83c_Bfe}_G zs~{p8wWo+@hwfLuI(xH@T?B%SsJ~n@;DL)@45ldz@biq=qp>{HHsq0s`InRO9bUdOPHf1S(2GBdtU}U`lXj+^OaaJ0#?cIX zz`a_1oZoGHeOnV%cz?|}Ux!?f|BWlg`mh%HY&oR|SzSM*%ToXmZz^C-H2~|!G3&`d zCzie=S>qD)_I?rZJ#|kd6V&+zn{H`5&L{S%Bod&aajrBb!)rj#Xs~5!G zp(wBu%!MMS?AkiP;grvxR|8?)kmt33Ks3^UqR*k(HGaA1apho-VQ-xRYL`*9PsVQ* zgU=Tzp-Ab0@PL%xh?tv)VOvoR%C=YDQrO`Fh~-V^Gg1Am90nf<$hXh$Mj&A`=pj>y zBXH6u+B1j{O~Vxn)sMPc8YQ5@K?48f!TheWcJnpl#Kdl+zNEIal0C0Xxx*ay&BGTt zvJv?!ox`mJ2UfORJJr84*kW2)s;aizuM35WS7&vHCw{<)N>KpncOw%vC$e2I!T>HE 
z;hHcWk1BUPTsSh_eym-mFuSvZaS;o`j@#x|qz6gkPwEso`$pi&OqTtHJNMOyPYOY38xFB-B}fwBm;0_?4Ml@bxI6QfTp>DJr-^F%|W_ z$-Wlx_;e(f1)dO8QR&45;N*QG-c)lB-2)7qR9vsd*c(}Yp@Jf2nV38Y3W=}J^`9Xj z4^|H>om2DJ(AfpN96L<`CEV)e(`iQ>DemWjW z?#hsDws>z8($(!8fOwYzV|Q^~@ixxn#8TOMTj;{`7Ldq?z2;shLNYtuY?zsJ`8tJ>aXJ5j0S z95)Rb9qy4F`v;^O_Y)*9M+qOq=lX`2v?C+SZkuxNydCv@w$`OWJH43g%4!lk& zNEo2_0s;9TWv_KVzRWhB2Ntq-Xo$j9DB{AwI_{ac@FK$0Qjp|iDZC6+DAFbO-q&!W z9JWT6;p*bW#@Cmiceiq%R)QY|FOBs#3++h4b*o)4u*L^zDzolJbHB(k0dE3?6_-O% z7-;9MqjJs?r_{nm`Ve6AxTvebCYENAxyCW){#(NeH}OZa`;g1*+bVLLFzAHB|D-Bn z!&(K|XeU~^-&Y{a#(j+58(5j~ zLjy#sI4fG4xQ^X_kbH@F#sn}1!;X)2ykHi9Jc;`A34k%vE14LeTJF{G!4Ji4?=wR- z)zjB5K8bajhY%D+T|1S3fN#zt-?FPFOh}pCVrr6 zLTI^gc?vM+d%#YhHQbeZxy-NXZF;C!_oRTgHKSK$Q_i#ex>VZ#>|sc?EPYD?LI#1E zcN(_#74PgUZ)dY`{>3M{@8q@Kp?k1FwB^2l;-~(+A!BfIC4r|?y~l9}juyA2H!gXc zTrJt&h767*t{P?16$BU?g@U+X4XU622arWnyi9bWh1h~F4bjM%yWc*SR0g^_L0|Cx zry_hh3|iWe>UOV_PGS-w!U{Qou~SgKg3phkt06Pe&Z(lGvJ0%rrEzd8*_N{U-7w?m zGE%LF={|x#?(XU5_u{|zRAPuBYDdOfd9~rXcTN2OtliPE$`jdGDLDNHbzXBzGh
  • hitUvXK$sVWTuHQ4`ZdY6WW&L~ zYrr|UGRP5;Nk&K=A2@&o4^{3P!YbP$DR$8*fgz^$Sh1*c?`S(CY4dUUb9om`0^=(hr*Ce&t{Yz$ihlP(i21%ghcqYblmS!V)?*#g4GUc%yqbWw{q1EfU96 zdoUIg(;!t*YXS&P+ETGQ7(}uXNHx5qNdp#Hx)B2o6bgEIb#`ISwH203$fl`Qb1*w8`e`gB{_gynce{ znS8HH0?~yJro9En)?SFWZJC*b%Fvhh%OB7QjMOL9w*(-bFv?tLYogb}Jp6=hzY5rX z?7M`G$X;7JH)FIV&1&Brw(oot0Q^U?K2zJyz&+smc9-F6oS63gQp=36sY*D2xWZ>F zg7$w^{|SQy1o?Y9o%?D!Zix2U>s==f$i}meb-24lJQC*VpKu^3`Q#_GFkN(MW!Ybh z;=JF10DQ1ofCk~}{axu`@Q7TW6SP*i3jw6T*A{*zTwokFZi`oMbVj<_)}Xk36vi|x zKnsnf&pCxuvj;dc=59GwcagEJ9;;#uXs%TpKp+N?lF=RGED=EWet5#4$fiA|z_#p* zDDs0^=ISWtn=}DH7UbDn&`)$%x{t08%}FrW$b4G@9?{mEu~b~p4^r>ZL4+O<^Q zmWlBiQw+J1h|j%WUg;L-5wgiJ=nm)iEM>W06V*1yVI7rY^~0x~&!d=3nUl{gI4{MV zz{aoZ3`dP7q7{j+#B8hwedfY5r-n7`N8l4_12d1GeXQ!~GIK_)wTC0@6?g-b`B|lq2bG-zEloEM#*Y&L z>iQ{;vHn+)Q5IkmswrfxFf>0Hcf5c;#lq`U5DK5@QZ$xL_P)KksR+k449)S}RcT`3 zJ55QKuecH|_`LmTb$1nrdBjsKYJ>X?1=~Ldqaj@_k1A{!`qcnM9DxOzRjRZ$Gue$^ zKz)<LSF(d)Q0N`7h)2otJhEf+5ejV9Whz*;U%qDS zt!lPdMwT3{&pgHhh*}cnO$VoLYDLG;dkBFazn;HN^G=pPX|&& z`V(cM@h`Phy4B$m(G{apx$P@({_U4>E8Y>2+M)c?uO87t{wifMT1UloIoWeD)hB^= zFO2o1-z8`^O=PW1lyG$kt+EBo`AjbCs_N9QhRkW7K73AO-T6XVHPpVHeb~JhW0qKr zRkaP0XpQac_8d48VgHO4$-X_c9oM4h=WMmwCp|UYRHi0LK4F8HR3BA*>6ZsKZEFaZ zI7@6sqGB1x`6Wk^m6RgWy?l5DIu#0YJL&LVd*Ek_H_O;4Duo=^hH*OpHLstavsX5} za)|yUF{mUbR2do&rEK2-YWR4}6Mrn_q{3&RJ6g4PG_!pp#H3S$7x^y%nWeZw;Mwxy z2>Wa#Q((X*uxZKtlj?bxGlD>XMSo{^(n(q=)p#hSiN?stSFuvg!%px*vF-_oWE7_& z=^fo<7l_9=OvK$LGdGZO=<4tiGJ}tGYA4JSz{JLVGcm%62fajWhb+`0Ezbw+2uDZ_ z=`6RLzWT#dX3gKoYE$EXm_JK_!mwVwBN=!?H?4++G9*&csKyne*_VwXSq9#E1~s~F zMIi85_ukPN(T+_}kR2-9k#W*;r8n|SyDzP+b{?0njVMSb!wV%(ZkHTv2`vC Date: Tue, 8 Jun 2021 20:33:57 +1200 Subject: [PATCH 0363/3093] Fixed sanity checks for cloud/scaleway/ modules (#2678) * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_image_info.py * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_ip_info.py * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_security_group_info.py * fixed 
validation-modules for plugins/modules/cloud/scaleway/scaleway_server_info.py * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_snapshot_info.py * fixed validation-modules for plugins/modules/cloud/scaleway/scaleway_volume_info.py * sanity fix --- plugins/modules/cloud/scaleway/scaleway_image_info.py | 9 +++++---- plugins/modules/cloud/scaleway/scaleway_ip_info.py | 7 +++++-- .../cloud/scaleway/scaleway_security_group_info.py | 7 +++++-- plugins/modules/cloud/scaleway/scaleway_server_info.py | 7 +++++-- plugins/modules/cloud/scaleway/scaleway_snapshot_info.py | 7 +++++-- plugins/modules/cloud/scaleway/scaleway_volume_info.py | 7 +++++-- tests/sanity/ignore-2.10.txt | 6 ------ tests/sanity/ignore-2.11.txt | 6 ------ tests/sanity/ignore-2.12.txt | 6 ------ tests/sanity/ignore-2.9.txt | 6 ------ 10 files changed, 30 insertions(+), 38 deletions(-) diff --git a/plugins/modules/cloud/scaleway/scaleway_image_info.py b/plugins/modules/cloud/scaleway/scaleway_image_info.py index 3fad216ee5..609ba3d1e8 100644 --- a/plugins/modules/cloud/scaleway/scaleway_image_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_image_info.py @@ -19,9 +19,7 @@ author: extends_documentation_fragment: - community.general.scaleway - options: - region: type: str description: @@ -51,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_image_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
returned: success - type: complex + type: list + elements: dict sample: "scaleway_image_info": [ { diff --git a/plugins/modules/cloud/scaleway/scaleway_ip_info.py b/plugins/modules/cloud/scaleway/scaleway_ip_info.py index 145fb20338..e2e49557cc 100644 --- a/plugins/modules/cloud/scaleway/scaleway_ip_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_ip_info.py @@ -49,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_ip_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." returned: success - type: complex + type: list + elements: dict sample: "scaleway_ip_info": [ { diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_info.py b/plugins/modules/cloud/scaleway/scaleway_security_group_info.py index d3488f0c8b..1f5af7da53 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_security_group_info.py @@ -49,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_security_group_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." returned: success - type: complex + type: list + elements: dict sample: "scaleway_security_group_info": [ { diff --git a/plugins/modules/cloud/scaleway/scaleway_server_info.py b/plugins/modules/cloud/scaleway/scaleway_server_info.py index 43b0badc14..61bd9de41b 100644 --- a/plugins/modules/cloud/scaleway/scaleway_server_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_server_info.py @@ -49,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_server_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
returned: success - type: complex + type: list + elements: dict sample: "scaleway_server_info": [ { diff --git a/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py b/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py index f31b74b00e..95ec04d16f 100644 --- a/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py @@ -49,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_snapshot_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." returned: success - type: complex + type: list + elements: dict sample: "scaleway_snapshot_info": [ { diff --git a/plugins/modules/cloud/scaleway/scaleway_volume_info.py b/plugins/modules/cloud/scaleway/scaleway_volume_info.py index ff6093e830..0042146795 100644 --- a/plugins/modules/cloud/scaleway/scaleway_volume_info.py +++ b/plugins/modules/cloud/scaleway/scaleway_volume_info.py @@ -49,9 +49,12 @@ EXAMPLES = r''' RETURN = r''' --- scaleway_volume_info: - description: Response from Scaleway API + description: + - Response from Scaleway API. + - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)." 
returned: success - type: complex + type: list + elements: dict sample: "scaleway_volume_info": [ { diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 1855fc963f..7a9c723337 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -9,13 +9,7 @@ plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-in plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 4727b8d6df..28fae579c9 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -8,13 +8,7 @@ plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-in plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path 
plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 74b1ea16f6..708845dae9 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -8,13 +8,7 @@ plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-in plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py 
validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 2dac082311..f7c8945c56 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -6,13 +6,7 @@ plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not- plugins/modules/cloud/rackspace/rax.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values -plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error -plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc plugins/modules/cloud/smartos/vmadm.py 
validate-modules:undocumented-parameter plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc From eef645c3f7c94d5086532feb29184f71e72ab994 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 8 Jun 2021 20:36:14 +1200 Subject: [PATCH 0364/3093] with great powers come great responsibility (#2755) --- .github/BOTMETA.yml | 2 -- commit-rights.md | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 6727373e85..d9f99c60dc 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,7 +1,5 @@ automerge: true files: - plugins/: - supershipit: russoz changelogs/fragments/: support: community $actions: diff --git a/commit-rights.md b/commit-rights.md index 7aae8617fb..9b39d47b2c 100644 --- a/commit-rights.md +++ b/commit-rights.md @@ -67,6 +67,7 @@ Individuals who have been asked to become a part of this group have generally be | Name | GitHub ID | IRC Nick | Other | | ------------------- | -------------------- | ------------------ | -------------------- | +| Alexei Znamensky | russoz | russoz | | | Amin Vakil | aminvakil | aminvakil | | | Andrew Klychkov | andersson007 | andersson007_ | | | Felix Fontein | felixfontein | felixfontein | | From dab5d941e6ecc6401f97075eb9a5dabef984178e Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Tue, 8 Jun 2021 14:11:21 +0430 Subject: [PATCH 0365/3093] Add domain option to onepassword lookup (#2735) * Add domain to onepassword lookup * Add changelog * Add default to domain documentation * Improve format * Fix sanity issue * Add option type to documentation Co-authored-by: Felix Fontein * Add domain to init Co-authored-by: Felix Fontein --- .../fragments/2735-onepassword-add_domain_option.yml | 3 +++ plugins/lookup/onepassword.py | 9 ++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2735-onepassword-add_domain_option.yml diff --git 
a/changelogs/fragments/2735-onepassword-add_domain_option.yml b/changelogs/fragments/2735-onepassword-add_domain_option.yml new file mode 100644 index 0000000000..eef74439ce --- /dev/null +++ b/changelogs/fragments/2735-onepassword-add_domain_option.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - onepassword lookup plugin - add ``domain`` option (https://github.com/ansible-collections/community.general/issues/2734). diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index a2346ed072..715c337ffd 100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -30,6 +30,11 @@ DOCUMENTATION = ''' aliases: ['vault_password'] section: description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. + domain: + description: Domain of 1Password. Default is U(1password.com). + version_added: 3.2.0 + default: '1password.com' + type: str subdomain: description: The 1Password subdomain to authenticate against. username: @@ -109,6 +114,7 @@ class OnePass(object): self.logged_in = False self.token = None self.subdomain = None + self.domain = None self.username = None self.secret_key = None self.master_password = None @@ -168,7 +174,7 @@ class OnePass(object): args = [ 'signin', - '{0}.1password.com'.format(self.subdomain), + '{0}.{1}'.format(self.subdomain, self.domain), to_bytes(self.username), to_bytes(self.secret_key), '--output=raw', @@ -265,6 +271,7 @@ class LookupModule(LookupBase): section = kwargs.get('section') vault = kwargs.get('vault') op.subdomain = kwargs.get('subdomain') + op.domain = kwargs.get('domain', '1password.com') op.username = kwargs.get('username') op.secret_key = kwargs.get('secret_key') op.master_password = kwargs.get('master_password', kwargs.get('vault_password')) From 1e968bce27f8cab0b6decd89c53a2475f63ca6e3 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 8 Jun 2021 14:47:51 +0200 Subject: [PATCH 0366/3093] Next expected release is 3.3.0. 
--- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index ba1969d712..c559415eb2 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.2.0 +version: 3.3.0 readme: README.md authors: - Ansible (https://github.com/ansible) From f44300cec5fc002903139e8d5ee2f88f9d540262 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20Keil?= Date: Thu, 10 Jun 2021 22:05:04 +0200 Subject: [PATCH 0367/3093] add inventory plugin unit test `test_verify_file` (#2773) * add inventory plugin unit test `test_verify_file` * fix typos in `test_verify_file_bad_config` unit test --- tests/unit/plugins/inventory/test_cobbler.py | 8 +++++++- tests/unit/plugins/inventory/test_linode.py | 8 +++++++- tests/unit/plugins/inventory/test_lxd.py | 6 ++++++ tests/unit/plugins/inventory/test_proxmox.py | 6 ++++++ tests/unit/plugins/inventory/test_stackpath_compute.py | 6 ++++++ 5 files changed, 32 insertions(+), 2 deletions(-) diff --git a/tests/unit/plugins/inventory/test_cobbler.py b/tests/unit/plugins/inventory/test_cobbler.py index 477a3039f7..e184d166dc 100644 --- a/tests/unit/plugins/inventory/test_cobbler.py +++ b/tests/unit/plugins/inventory/test_cobbler.py @@ -37,5 +37,11 @@ def test_init_cache(inventory): assert inventory._cache[inventory.cache_key] == {} +def test_verify_file(tmp_path, inventory): + file = tmp_path / "foobar.cobbler.yml" + file.touch() + assert inventory.verify_file(str(file)) is True + + def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.cobber.yml') is False + assert inventory.verify_file('foobar.cobbler.yml') is False diff --git a/tests/unit/plugins/inventory/test_linode.py b/tests/unit/plugins/inventory/test_linode.py index 427a7c69b3..ab75c6c9fc 100644 --- a/tests/unit/plugins/inventory/test_linode.py +++ b/tests/unit/plugins/inventory/test_linode.py @@ -74,5 +74,11 @@ def test_conig_query_options(inventory): assert tags == ['web-server'] 
+def test_verify_file(tmp_path, inventory): + file = tmp_path / "foobar.linode.yml" + file.touch() + assert inventory.verify_file(str(file)) is True + + def test_verify_file_bad_config(inventory): - assert inventory.verify_file('foobar.linde.yml') is False + assert inventory.verify_file('foobar.linode.yml') is False diff --git a/tests/unit/plugins/inventory/test_lxd.py b/tests/unit/plugins/inventory/test_lxd.py index 8a98af6e71..04cea0af71 100644 --- a/tests/unit/plugins/inventory/test_lxd.py +++ b/tests/unit/plugins/inventory/test_lxd.py @@ -51,6 +51,12 @@ def inventory(): return inv +def test_verify_file(tmp_path, inventory): + file = tmp_path / "foobar.lxd.yml" + file.touch() + assert inventory.verify_file(str(file)) is True + + def test_verify_file_bad_config(inventory): assert inventory.verify_file('foobar.lxd.yml') is False diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index e248fb05e3..c2b0408138 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ b/tests/unit/plugins/inventory/test_proxmox.py @@ -21,6 +21,12 @@ def inventory(): return r +def test_verify_file(tmp_path, inventory): + file = tmp_path / "foobar.proxmox.yml" + file.touch() + assert inventory.verify_file(str(file)) is True + + def test_verify_file_bad_config(inventory): assert inventory.verify_file('foobar.proxmox.yml') is False diff --git a/tests/unit/plugins/inventory/test_stackpath_compute.py b/tests/unit/plugins/inventory/test_stackpath_compute.py index 9359cd680f..8a409becd6 100644 --- a/tests/unit/plugins/inventory/test_stackpath_compute.py +++ b/tests/unit/plugins/inventory/test_stackpath_compute.py @@ -66,6 +66,12 @@ def test_get_stack_slugs(inventory): ] +def test_verify_file(tmp_path, inventory): + file = tmp_path / "foobar.stackpath_compute.yml" + file.touch() + assert inventory.verify_file(str(file)) is True + + def test_verify_file_bad_config(inventory): assert inventory.verify_file('foobar.stackpath_compute.yml') 
is False From c2ce7a0752f84d7e4ca5b373ec7a6103e3ddd6a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc?= Date: Fri, 11 Jun 2021 13:05:29 +0200 Subject: [PATCH 0368/3093] [scaleway inventory] Fix JSON object must be str, not 'bytes' (#2771) * Fix JSON object decoding * Code improvement : python 3.5 fix * Add changelog fragment * Update changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2771-scaleway_inventory_json_accept_byte_array.yml | 3 +++ plugins/inventory/scaleway.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml diff --git a/changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml b/changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml new file mode 100644 index 0000000000..8a6bfd1603 --- /dev/null +++ b/changelogs/fragments/2771-scaleway_inventory_json_accept_byte_array.yml @@ -0,0 +1,3 @@ +bugfixes: + - scaleway plugin inventory - fix ``JSON object must be str, not 'bytes'`` with Python 3.5 + (https://github.com/ansible-collections/community.general/issues/2769). 
diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index ae557e2239..ad0a2321ae 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -89,7 +89,7 @@ from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link from ansible.module_utils.urls import open_url -from ansible.module_utils._text import to_native +from ansible.module_utils._text import to_native, to_text import ansible.module_utils.six.moves.urllib.parse as urllib_parse @@ -105,7 +105,7 @@ def _fetch_information(token, url): except Exception as e: raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e))) try: - raw_json = json.loads(response.read()) + raw_json = json.loads(to_text(response.read())) except ValueError: raise AnsibleError("Incorrect JSON payload") From 19549058ce9695b264e36701f2297e9d92b17bb5 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Fri, 11 Jun 2021 15:42:01 +0430 Subject: [PATCH 0369/3093] yum_versionlock: enable fedora34 integration test (#2543) * Re-enable Fedora 34 * Update procps-ng before anything in yum_versionlock integration test * Move procps-ng installation to block * Revert "Move procps-ng installation to block" This reverts commit 3aa873a110f629d83d393bac648917f3302d8c93. 
* Update procps-ng only on Fedora 34 --- tests/integration/targets/yum_versionlock/tasks/main.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/integration/targets/yum_versionlock/tasks/main.yml b/tests/integration/targets/yum_versionlock/tasks/main.yml index 4084bdcb91..d1a1522087 100644 --- a/tests/integration/targets/yum_versionlock/tasks/main.yml +++ b/tests/integration/targets/yum_versionlock/tasks/main.yml @@ -4,6 +4,12 @@ # and should not be used as examples of how to write Ansible roles # #################################################################### +- name: Update procps-ng temporary until issue (#2539) is fixed + yum: + name: procps-ng + state: latest + when: ansible_distribution == 'Fedora' and ansible_distribution_major_version == '34' + - block: - name: Install necessary packages to test yum_versionlock yum: @@ -60,4 +66,4 @@ state: absent when: yum_versionlock_install is changed when: (ansible_distribution in ['CentOS', 'RedHat'] and ansible_distribution_major_version is version('7', '>=')) or - (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('33', '<=')) + (ansible_distribution == 'Fedora') From 4b37b1bca630d240a9d0a1b7b5344100b1041a6f Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Fri, 11 Jun 2021 16:54:11 +0530 Subject: [PATCH 0370/3093] scaleway: Misc doc changes (#2776) * Updated example section for ``variables`` * Added link about token generation * Misc changes in doc Fixes: #467 Signed-off-by: Abhijeet Kasurde --- plugins/inventory/scaleway.py | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index ad0a2321ae..843a006738 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -1,24 +1,24 @@ -# Copyright (c) 2017 Ansible Project +# Copyright: (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' name: scaleway author: - Remy Leone (@sieben) short_description: Scaleway inventory source description: - - Get inventory hosts from Scaleway + - Get inventory hosts from Scaleway. options: plugin: - description: token that ensures this is a source file for the 'scaleway' plugin. + description: Token that ensures this is a source file for the 'scaleway' plugin. required: True choices: ['scaleway', 'community.general.scaleway'] regions: - description: Filter results on a specific Scaleway region + description: Filter results on a specific Scaleway region. type: list default: - ams1 @@ -26,11 +26,13 @@ DOCUMENTATION = ''' - par2 - waw1 tags: - description: Filter results on a specific tag + description: Filter results on a specific tag. type: list oauth_token: required: True - description: Scaleway OAuth token. + description: + - Scaleway OAuth token. + - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). env: # in order of precedence - name: SCW_TOKEN @@ -48,14 +50,14 @@ DOCUMENTATION = ''' - hostname - id variables: - description: 'set individual variables: keys are variable names and + description: 'Set individual variables: keys are variable names and values are templates. Any value returned by the L(Scaleway API, https://developer.scaleway.com/#servers-server-get) can be used.' 
type: dict ''' -EXAMPLES = ''' +EXAMPLES = r''' # scaleway_inventory.yml file in YAML format # Example command line: ansible-inventory --list -i scaleway_inventory.yml @@ -81,6 +83,15 @@ regions: - par1 variables: ansible_host: public_ip.address + +# Using static strings as variables +plugin: community.general.scaleway +hostnames: + - hostname +variables: + ansible_host: public_ip.address + ansible_connection: "'ssh'" + ansible_user: "'admin'" ''' import json @@ -230,8 +241,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): if not matching_tags: return set() - else: - return matching_tags.union((server_zone,)) + return matching_tags.union((server_zone,)) def _filter_host(self, host_infos, hostname_preferences): From 343339655ded19ec35c113643f1d777ceadf84f7 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Sun, 13 Jun 2021 08:25:50 +0200 Subject: [PATCH 0371/3093] Documentation fix for access_level parameter of gitlab_runner (#2788) * * Documentation fix for access_level parameter of gitlab_runner Signed-off-by: Alina Buzachis * Address reviewer's comments Signed-off-by: Alina Buzachis --- plugins/modules/source_control/gitlab/gitlab_runner.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py index 8803990f22..d38b4819a6 100644 --- a/plugins/modules/source_control/gitlab/gitlab_runner.py +++ b/plugins/modules/source_control/gitlab/gitlab_runner.py @@ -77,7 +77,9 @@ options: type: bool access_level: description: - - Determines if a runner can pick up jobs from protected branches. + - Determines if a runner can pick up jobs only from protected branches. + - If set to C(ref_protected), runner can pick up jobs only from protected branches. + - If set to C(not_protected), runner can pick up jobs from both protected and unprotected branches. 
required: False default: ref_protected choices: ["ref_protected", "not_protected"] From d4c4d00ad1f08a538bbb3f0868745b721e65c59c Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 13 Jun 2021 23:01:46 +0200 Subject: [PATCH 0372/3093] CI: Remove scripts that are no longer needed (#2793) * Remove scripts that are no longer needed. ci_complete * Remove sanity ignores. --- tests/sanity/ignore-2.10.txt | 2 - tests/sanity/ignore-2.11.txt | 2 - tests/sanity/ignore-2.12.txt | 2 - tests/sanity/ignore-2.9.txt | 2 - tests/utils/shippable/check_matrix.py | 120 -------------------------- tests/utils/shippable/timing.py | 16 ---- tests/utils/shippable/timing.sh | 5 -- 7 files changed, 149 deletions(-) delete mode 100755 tests/utils/shippable/check_matrix.py delete mode 100755 tests/utils/shippable/timing.py delete mode 100755 tests/utils/shippable/timing.sh diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 7a9c723337..c9d750f417 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -56,5 +56,3 @@ plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code -tests/utils/shippable/check_matrix.py replace-urlopen -tests/utils/shippable/timing.py shebang diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 28fae579c9..1311638dbc 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -55,5 +55,3 @@ plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated 
code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code -tests/utils/shippable/check_matrix.py replace-urlopen -tests/utils/shippable/timing.py shebang diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 708845dae9..f5b7d772fc 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -53,5 +53,3 @@ plugins/modules/system/ssh_config.py use-argspec-type-path # Required since modu plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path -tests/utils/shippable/check_matrix.py replace-urlopen -tests/utils/shippable/timing.py shebang diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index f7c8945c56..c8c5ff0d25 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -73,5 +73,3 @@ plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code -tests/utils/shippable/check_matrix.py replace-urlopen -tests/utils/shippable/timing.py shebang diff --git a/tests/utils/shippable/check_matrix.py b/tests/utils/shippable/check_matrix.py deleted file mode 100755 index ca56c4db3d..0000000000 --- a/tests/utils/shippable/check_matrix.py +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env python -"""Verify the currently executing Shippable test matrix matches the one defined in the "shippable.yml" file.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import datetime -import json -import os -import re 
-import sys -import time - -try: - from typing import NoReturn -except ImportError: - NoReturn = None - -try: - # noinspection PyCompatibility - from urllib2 import urlopen # pylint: disable=ansible-bad-import-from -except ImportError: - # noinspection PyCompatibility - from urllib.request import urlopen - - -def main(): # type: () -> None - """Main entry point.""" - repo_full_name = os.environ['REPO_FULL_NAME'] - required_repo_full_name = 'ansible-collections/community.general' - - if repo_full_name != required_repo_full_name: - sys.stderr.write('Skipping matrix check on repo "%s" which is not "%s".\n' % (repo_full_name, required_repo_full_name)) - return - - with open('shippable.yml', 'rb') as yaml_file: - yaml = yaml_file.read().decode('utf-8').splitlines() - - defined_matrix = [match.group(1) for match in [re.search(r'^ *- env: T=(.*)$', line) for line in yaml] if match and match.group(1) != 'none'] - - if not defined_matrix: - fail('No matrix entries found in the "shippable.yml" file.', - 'Did you modify the "shippable.yml" file?') - - run_id = os.environ['SHIPPABLE_BUILD_ID'] - sleep = 1 - jobs = [] - - for attempts_remaining in range(4, -1, -1): - try: - jobs = json.loads(urlopen('https://api.shippable.com/jobs?runIds=%s' % run_id).read()) - - if not isinstance(jobs, list): - raise Exception('Shippable run %s data is not a list.' % run_id) - - break - except Exception as ex: - if not attempts_remaining: - fail('Unable to retrieve Shippable run %s matrix.' % run_id, - str(ex)) - - sys.stderr.write('Unable to retrieve Shippable run %s matrix: %s\n' % (run_id, ex)) - sys.stderr.write('Trying again in %d seconds...\n' % sleep) - time.sleep(sleep) - sleep *= 2 - - if len(jobs) != len(defined_matrix): - if len(jobs) == 1: - hint = '\n\nMake sure you do not use the "Rebuild with SSH" option.' - else: - hint = '' - - fail('Shippable run %s has %d jobs instead of the expected %d jobs.' 
% (run_id, len(jobs), len(defined_matrix)), - 'Try re-running the entire matrix.%s' % hint) - - actual_matrix = dict((job.get('jobNumber'), dict(tuple(line.split('=', 1)) for line in job.get('env', [])).get('T', '')) for job in jobs) - errors = [(job_number, test, actual_matrix.get(job_number)) for job_number, test in enumerate(defined_matrix, 1) if actual_matrix.get(job_number) != test] - - if len(errors): - error_summary = '\n'.join('Job %s expected "%s" but found "%s" instead.' % (job_number, expected, actual) for job_number, expected, actual in errors) - - fail('Shippable run %s has a job matrix mismatch.' % run_id, - 'Try re-running the entire matrix.\n\n%s' % error_summary) - - -def fail(message, output): # type: (str, str) -> NoReturn - # Include a leading newline to improve readability on Shippable "Tests" tab. - # Without this, the first line becomes indented. - output = '\n' + output.strip() - - timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat() - - # hack to avoid requiring junit-xml, which isn't pre-installed on Shippable outside our test containers - xml = ''' - - -\t -\t\t -\t\t\t%s -\t\t -\t - -''' % (timestamp, message, output) - - path = 'shippable/testresults/check-matrix.xml' - dir_path = os.path.dirname(path) - - if not os.path.exists(dir_path): - os.makedirs(dir_path) - - with open(path, 'w') as junit_fd: - junit_fd.write(xml.lstrip()) - - sys.stderr.write(message + '\n') - sys.stderr.write(output + '\n') - - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/tests/utils/shippable/timing.py b/tests/utils/shippable/timing.py deleted file mode 100755 index fb538271b8..0000000000 --- a/tests/utils/shippable/timing.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python3.7 -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import time - -start = time.time() - -sys.stdin.reconfigure(errors='surrogateescape') -sys.stdout.reconfigure(errors='surrogateescape') 
- -for line in sys.stdin: - seconds = time.time() - start - sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line)) - sys.stdout.flush() diff --git a/tests/utils/shippable/timing.sh b/tests/utils/shippable/timing.sh deleted file mode 100755 index 77e2578304..0000000000 --- a/tests/utils/shippable/timing.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail -eu - -"$@" 2>&1 | "$(dirname "$0")/timing.py" From a55c96d5c14661be3257cf3b465cbf8b301435eb Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 14 Jun 2021 07:25:46 +0200 Subject: [PATCH 0373/3093] Make extra sanity test runner produce ansibullbot and JUnit output. (#2794) --- tests/utils/shippable/sanity.sh | 2 +- tests/utils/shippable/shippable.sh | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/utils/shippable/sanity.sh b/tests/utils/shippable/sanity.sh index 187105409c..eacbd81609 100755 --- a/tests/utils/shippable/sanity.sh +++ b/tests/utils/shippable/sanity.sh @@ -14,7 +14,7 @@ else fi if [ "${group}" == "extra" ]; then - ../internal_test_tools/tools/run.py --color + ../internal_test_tools/tools/run.py --color --bot --junit exit fi diff --git a/tests/utils/shippable/shippable.sh b/tests/utils/shippable/shippable.sh index f70aa11380..472bfca1ca 100755 --- a/tests/utils/shippable/shippable.sh +++ b/tests/utils/shippable/shippable.sh @@ -73,6 +73,10 @@ else export ANSIBLE_COLLECTIONS_PATHS="${PWD}/../../../" fi +if [ "${test}" == "sanity/extra" ]; then + retry pip install junit-xml --disable-pip-version-check +fi + # START: HACK install dependencies if [ "${script}" != "sanity" ] || [ "${test}" == "sanity/extra" ]; then # Nothing further should be added to this list. 
From 0bd345bfb04d0a98e5f53e979234502d2c4c495b Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Mon, 14 Jun 2021 21:52:01 +0530 Subject: [PATCH 0374/3093] timezone: change warning to debug (#2789) * timezone: change warning to debug Convert warning message to debug when timedatectl found but not usable. Fixes: #1942 Signed-off-by: Abhijeet Kasurde * add changelog entry Signed-off-by: Abhijeet Kasurde --- changelogs/fragments/1942_timezone.yml | 3 +++ plugins/modules/system/timezone.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1942_timezone.yml diff --git a/changelogs/fragments/1942_timezone.yml b/changelogs/fragments/1942_timezone.yml new file mode 100644 index 0000000000..349c263298 --- /dev/null +++ b/changelogs/fragments/1942_timezone.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- timezone - print error message to debug instead of warning when timedatectl fails (https://github.com/ansible-collections/community.general/issues/1942). diff --git a/plugins/modules/system/timezone.py b/plugins/modules/system/timezone.py index 3cb7601441..27dfc9a98d 100644 --- a/plugins/modules/system/timezone.py +++ b/plugins/modules/system/timezone.py @@ -107,7 +107,7 @@ class Timezone(object): if rc == 0: return super(Timezone, SystemdTimezone).__new__(SystemdTimezone) else: - module.warn('timedatectl command was found but not usable: %s. using other method.' % stderr) + module.debug('timedatectl command was found but not usable: %s. using other method.' 
% stderr) return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) else: return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) From bccf317814f959c23d5b0a61b4c65afb3ab55310 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Mon, 14 Jun 2021 21:38:28 +0300 Subject: [PATCH 0375/3093] BOTMETA.yml: supershipit to quidame (#2801) --- .github/BOTMETA.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index d9f99c60dc..199a2f2c3c 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,5 +1,7 @@ automerge: true files: + plugins/: + supershipit: quidame changelogs/fragments/: support: community $actions: From b1b34ee12ea2d404232578a63336e02d38fa6e11 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Tue, 15 Jun 2021 16:11:48 +0300 Subject: [PATCH 0376/3093] BOTMETA.yml: grant supershipit (#2807) --- .github/BOTMETA.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 199a2f2c3c..8df7297720 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,7 +1,7 @@ automerge: true files: plugins/: - supershipit: quidame + supershipit: quidame Ajpantuso changelogs/fragments/: support: community $actions: From adf50b106aa956ed1e0a9481965c9dc22a46993a Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Tue, 15 Jun 2021 21:49:18 +0430 Subject: [PATCH 0377/3093] Add Test PRs locally section to CONTRIBUTING.md (#2738) * Add Test PRs locally section to CONTRIBUTING.md * fix formatting Co-authored-by: Felix Fontein * Adjust PR now that ansible-collections/community-docs#16 has been merged * improve sentence Co-authored-by: Andrew Klychkov Co-authored-by: Felix Fontein Co-authored-by: Andrew Klychkov --- CONTRIBUTING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 959d363236..5a068f9414 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -29,4 +29,8 @@ Also, consider taking up a valuable, 
reviewed, but abandoned pull request which You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst). +## Test pull requests + +If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_quick_start_guide.rst) for instructions on how do it quickly. + If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it. From 2f2f384b4e7f9455631f9143dcb2c5f76817ed67 Mon Sep 17 00:00:00 2001 From: Tong He <68936428+unnecessary-username@users.noreply.github.com> Date: Wed, 16 Jun 2021 03:01:54 -0400 Subject: [PATCH 0378/3093] redhat_subscription: Add server_prefix and server_port as supported arguments (#2779) * Add server_prefix and server_port as supported arguments for the redhat_subscription module. * Adjust the argument sequence in the test case to be consistent with the original code in line 364 in redhat_subscription.py and add the changelog fragment. * Grammatical changes such as adding full stops and using 'an HTTP' instead of 'a HTTP'. * Commit the suggested changelog update. Co-authored-by: Amin Vakil * Fix typo. 
Co-authored-by: Amin Vakil Co-authored-by: Amin Vakil --- ...tion-add_server_prefix_and_server_port.yml | 2 + .../packaging/os/redhat_subscription.py | 20 +++++++-- .../packaging/os/test_redhat_subscription.py | 41 +++++++++++++++++++ 3 files changed, 60 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2779_redhat_subscription-add_server_prefix_and_server_port.yml diff --git a/changelogs/fragments/2779_redhat_subscription-add_server_prefix_and_server_port.yml b/changelogs/fragments/2779_redhat_subscription-add_server_prefix_and_server_port.yml new file mode 100644 index 0000000000..d484874ee9 --- /dev/null +++ b/changelogs/fragments/2779_redhat_subscription-add_server_prefix_and_server_port.yml @@ -0,0 +1,2 @@ +minor_changes: + - redhat_subscription - add ``server_prefix`` and ``server_port`` parameters (https://github.com/ansible-collections/community.general/pull/2779). diff --git a/plugins/modules/packaging/os/redhat_subscription.py b/plugins/modules/packaging/os/redhat_subscription.py index b62a7f391c..c8b5e991a0 100644 --- a/plugins/modules/packaging/os/redhat_subscription.py +++ b/plugins/modules/packaging/os/redhat_subscription.py @@ -32,7 +32,7 @@ options: type: str username: description: - - access.redhat.com or Sat6 username + - access.redhat.com or Sat6 username type: str password: description: @@ -46,6 +46,16 @@ options: description: - Enable or disable https server certificate verification when connecting to C(server_hostname) type: str + server_prefix: + description: + - Specify the prefix when registering to the Red Hat Subscription Management or Sat6 server. + type: str + version_added: 3.3.0 + server_port: + description: + - Specify the port when registering to the Red Hat Subscription Management or Sat6 server. 
+ type: str + version_added: 3.3.0 rhsm_baseurl: description: - Specify CDN baseurl @@ -56,11 +66,11 @@ options: type: str server_proxy_hostname: description: - - Specify a HTTP proxy hostname + - Specify an HTTP proxy hostname. type: str server_proxy_port: description: - - Specify a HTTP proxy port + - Specify an HTTP proxy port. type: str server_proxy_user: description: @@ -782,6 +792,8 @@ def main(): 'password': {'no_log': True}, 'server_hostname': {}, 'server_insecure': {}, + 'server_prefix': {}, + 'server_port': {}, 'rhsm_baseurl': {}, 'rhsm_repo_ca_cert': {}, 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'}, @@ -827,6 +839,8 @@ def main(): password = module.params['password'] server_hostname = module.params['server_hostname'] server_insecure = module.params['server_insecure'] + server_prefix = module.params['server_prefix'] + server_port = module.params['server_port'] rhsm_baseurl = module.params['rhsm_baseurl'] rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert'] auto_attach = module.params['auto_attach'] diff --git a/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py b/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py index ef6f28b812..7f430ee72c 100644 --- a/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py +++ b/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py @@ -258,6 +258,47 @@ TEST_CASES = [ 'msg': "System successfully registered to 'None'." 
} ], + # Test of registration with arguments that are not part of register options but needs to be configured + [ + { + 'state': 'present', + 'username': 'admin', + 'password': 'admin', + 'org_id': 'admin', + 'force_register': 'true', + 'server_prefix': '/rhsm', + 'server_port': '443' + }, + { + 'id': 'test_arguments_not_in_register_options', + 'run_command.calls': [ + ( + ['/testbin/subscription-manager', 'identity'], + {'check_rc': False}, + (0, 'This system already registered.', '') + ), + ( + ['/testbin/subscription-manager', 'config', + '--server.port=443', + '--server.prefix=/rhsm' + ], + {'check_rc': True}, + (0, '', '') + ), + ( + ['/testbin/subscription-manager', 'register', + '--force', + '--org', 'admin', + '--username', 'admin', + '--password', 'admin'], + {'check_rc': True, 'expand_user_and_vars': False}, + (0, '', '') + ) + ], + 'changed': True, + 'msg': "System successfully registered to 'None'." + } + ], # Test of registration using username, password and proxy options [ { From 3ca98c2edd2cd878b635b347c44b7de30b3522b7 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Wed, 16 Jun 2021 13:58:09 -0400 Subject: [PATCH 0379/3093] callback_splunk - Add user-configurable event correlation id (#2790) * Initial commit * Adding changelog fragment * Updating batch description * Update plugins/callback/splunk.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2790-callback_splunk-batch-option.yml | 3 +++ plugins/callback/splunk.py | 22 ++++++++++++++++++- tests/unit/plugins/callback/test_splunk.py | 10 +++++++-- 3 files changed, 32 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2790-callback_splunk-batch-option.yml diff --git a/changelogs/fragments/2790-callback_splunk-batch-option.yml b/changelogs/fragments/2790-callback_splunk-batch-option.yml new file mode 100644 index 0000000000..70ee61ed64 --- /dev/null +++ b/changelogs/fragments/2790-callback_splunk-batch-option.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - splunk 
callback plugin - add ``batch`` option for user-configurable correlation ID's (https://github.com/ansible-collections/community.general/issues/2790). diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py index f782161765..cb63d3b23f 100644 --- a/plugins/callback/splunk.py +++ b/plugins/callback/splunk.py @@ -68,6 +68,16 @@ DOCUMENTATION = ''' type: bool default: false version_added: 2.0.0 + batch: + description: + - Correlation ID which can be set across multiple playbook executions. + env: + - name: SPLUNK_BATCH + ini: + - section: callback_splunk + key: batch + type: str + version_added: 3.3.0 ''' EXAMPLES = ''' @@ -107,7 +117,7 @@ class SplunkHTTPCollectorSource(object): self.ip_address = socket.gethostbyname(socket.gethostname()) self.user = getpass.getuser() - def send_event(self, url, authtoken, validate_certs, include_milliseconds, state, result, runtime): + def send_event(self, url, authtoken, validate_certs, include_milliseconds, batch, state, result, runtime): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True @@ -126,6 +136,8 @@ class SplunkHTTPCollectorSource(object): data = {} data['uuid'] = result._task._uuid data['session'] = self.session + if batch is not None: + data['batch'] = batch data['status'] = state if include_milliseconds: @@ -175,6 +187,7 @@ class CallbackModule(CallbackBase): self.authtoken = None self.validate_certs = None self.include_milliseconds = None + self.batch = None self.splunk = SplunkHTTPCollectorSource() def _runtime(self, result): @@ -212,6 +225,8 @@ class CallbackModule(CallbackBase): self.include_milliseconds = self.get_option('include_milliseconds') + self.batch = self.get_option('batch') + def v2_playbook_on_start(self, playbook): self.splunk.ansible_playbook = basename(playbook._file_name) @@ -227,6 +242,7 @@ class CallbackModule(CallbackBase): self.authtoken, self.validate_certs, self.include_milliseconds, + self.batch, 'OK', result, self._runtime(result) @@ 
-238,6 +254,7 @@ class CallbackModule(CallbackBase): self.authtoken, self.validate_certs, self.include_milliseconds, + self.batch, 'SKIPPED', result, self._runtime(result) @@ -249,6 +266,7 @@ class CallbackModule(CallbackBase): self.authtoken, self.validate_certs, self.include_milliseconds, + self.batch, 'FAILED', result, self._runtime(result) @@ -260,6 +278,7 @@ class CallbackModule(CallbackBase): self.authtoken, self.validate_certs, self.include_milliseconds, + self.batch, 'FAILED', result, self._runtime(result) @@ -271,6 +290,7 @@ class CallbackModule(CallbackBase): self.authtoken, self.validate_certs, self.include_milliseconds, + self.batch, 'UNREACHABLE', result, self._runtime(result) diff --git a/tests/unit/plugins/callback/test_splunk.py b/tests/unit/plugins/callback/test_splunk.py index df4db38d56..3230228da1 100644 --- a/tests/unit/plugins/callback/test_splunk.py +++ b/tests/unit/plugins/callback/test_splunk.py @@ -43,7 +43,10 @@ class TestSplunkClient(unittest.TestCase): mock_datetime.utcnow.return_value = datetime(2020, 12, 1) result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) - self.splunk.send_event(url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=True, state='OK', result=result, runtime=100) + self.splunk.send_event( + url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=True, + batch="abcefghi-1234-5678-9012-abcdefghijkl", state='OK', result=result, runtime=100 + ) args, kwargs = open_url_mock.call_args sent_data = json.loads(args[1]) @@ -58,7 +61,10 @@ class TestSplunkClient(unittest.TestCase): mock_datetime.utcnow.return_value = datetime(2020, 12, 1) result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) - self.splunk.send_event(url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=False, state='OK', result=result, runtime=100) + self.splunk.send_event( + 
url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=False, + batch="abcefghi-1234-5678-9012-abcdefghijkl", state='OK', result=result, runtime=100 + ) args, kwargs = open_url_mock.call_args sent_data = json.loads(args[1]) From 4a47d121aa135f246b0ec6c0072f5eaaa532cad5 Mon Sep 17 00:00:00 2001 From: Abhijeet Kasurde Date: Thu, 17 Jun 2021 00:53:54 +0530 Subject: [PATCH 0380/3093] pamd: Add a note in docs about authselect profiles (#2815) pamd module does not handle or modify authselect profiles which are basically template files for authselect. The autheselect generates pam.d files from these profiles. Fixes: #1954 Signed-off-by: Abhijeet Kasurde --- plugins/modules/system/pamd.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/modules/system/pamd.py b/plugins/modules/system/pamd.py index 45f0082693..39b3f32e44 100644 --- a/plugins/modules/system/pamd.py +++ b/plugins/modules/system/pamd.py @@ -16,7 +16,9 @@ short_description: Manage PAM Modules description: - Edit PAM service's type, control, module path and module arguments. - In order for a PAM rule to be modified, the type, control and - module_path must match an existing rule. See man(5) pam.d for details. + module_path must match an existing rule. See man(5) pam.d for details. +notes: + - This module does not handle authselect profiles. options: name: description: From 3997d5fcc8a548e96ca1d6f2a6bddf7f8b8fd655 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 17 Jun 2021 18:00:49 +1200 Subject: [PATCH 0381/3093] flatpak - allow to add/remove multiple flatpaks at once (#2521) * reviving flatpack PR * added changelog fragment * adjusted integration tests per PR * adjusted examples to use the full name of the module * Use new local artifacts. * Re-add StrictVersion import. * Try to clean up PR. * ... * Use original name in installed/not installed list. * More fixes. * Work around flatpak bug. 
* Fix bug I introduced. Co-authored-by: Felix Fontein --- changelogs/fragments/2521-flatpak-list.yml | 2 + plugins/modules/packaging/os/flatpak.py | 107 ++++++++----- .../targets/flatpak/tasks/setup.yml | 4 +- .../targets/flatpak/tasks/test.yml | 143 ++++++++++++++++++ .../setup_flatpak_remote/create-repo.sh | 2 +- .../setup_flatpak_remote/files/repo.tar.xz | Bin 6436 -> 7352 bytes 6 files changed, 220 insertions(+), 38 deletions(-) create mode 100644 changelogs/fragments/2521-flatpak-list.yml diff --git a/changelogs/fragments/2521-flatpak-list.yml b/changelogs/fragments/2521-flatpak-list.yml new file mode 100644 index 0000000000..e30607b306 --- /dev/null +++ b/changelogs/fragments/2521-flatpak-list.yml @@ -0,0 +1,2 @@ +minor_changes: +- flatpak - allows installing or uninstalling a list of packages (https://github.com/ansible-collections/community.general/pull/2521). diff --git a/plugins/modules/packaging/os/flatpak.py b/plugins/modules/packaging/os/flatpak.py index 4a9e214fde..7f3963ad3e 100644 --- a/plugins/modules/packaging/os/flatpak.py +++ b/plugins/modules/packaging/os/flatpak.py @@ -38,7 +38,8 @@ options: default: system name: description: - - The name of the flatpak to manage. + - The name of the flatpak to manage. To operate on several packages this + can accept a list of packages. - When used with I(state=present), I(name) can be specified as a URL to a C(flatpakref) file or the unique reverse DNS name that identifies a flatpak. - Both C(https://) and C(http://) URLs are supported. @@ -50,7 +51,8 @@ options: installed flatpak based on the name of the flatpakref to remove it. However, there is no guarantee that the names of the flatpakref file and the reverse DNS name of the installed flatpak do match. 
- type: str + type: list + elements: str required: true no_dependencies: description: @@ -101,10 +103,25 @@ EXAMPLES = r''' state: present remote: gnome +- name: Install multiple packages + community.general.flatpak: + name: + - org.gimp.GIMP + - org.inkscape.Inkscape + - org.mozilla.firefox + - name: Remove the gedit flatpak community.general.flatpak: name: org.gnome.gedit state: absent + +- name: Remove multiple packages + community.general.flatpak: + name: + - org.gimp.GIMP + - org.inkscape.Inkscape + - org.mozilla.firefox + state: absent ''' RETURN = r''' @@ -143,47 +160,64 @@ from ansible.module_utils.basic import AnsibleModule OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application" -def install_flat(module, binary, remote, name, method, no_dependencies): - """Add a new flatpak.""" +def install_flat(module, binary, remote, names, method, no_dependencies): + """Add new flatpaks.""" global result + uri_names = [] + id_names = [] + for name in names: + if name.startswith('http://') or name.startswith('https://'): + uri_names.append(name) + else: + id_names.append(name) + base_command = [binary, "install", "--{0}".format(method)] + flatpak_version = _flatpak_version(module, binary) + if StrictVersion(flatpak_version) < StrictVersion('1.1.3'): + base_command += ["-y"] + else: + base_command += ["--noninteractive"] + if no_dependencies: + base_command += ["--no-deps"] + if uri_names: + command = base_command + uri_names + _flatpak_command(module, module.check_mode, command) + if id_names: + command = base_command + [remote] + id_names + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def uninstall_flat(module, binary, names, method): + """Remove existing flatpaks.""" + global result + installed_flat_names = [ + _match_installed_flat_name(module, binary, name, method) + for name in names + ] + command = [binary, "uninstall"] flatpak_version = _flatpak_version(module, binary) - command = [binary, "install", 
"--{0}".format(method)] if StrictVersion(flatpak_version) < StrictVersion('1.1.3'): command += ["-y"] else: command += ["--noninteractive"] - if no_dependencies: - command += ["--no-deps"] - if name.startswith('http://') or name.startswith('https://'): - command += [name] - else: - command += [remote, name] + command += ["--{0}".format(method)] + installed_flat_names _flatpak_command(module, module.check_mode, command) result['changed'] = True -def uninstall_flat(module, binary, name, method): - """Remove an existing flatpak.""" - global result - flatpak_version = _flatpak_version(module, binary) - if StrictVersion(flatpak_version) < StrictVersion('1.1.3'): - noninteractive_arg = "-y" - else: - noninteractive_arg = "--noninteractive" - installed_flat_name = _match_installed_flat_name(module, binary, name, method) - command = [binary, "uninstall", "--{0}".format(method), noninteractive_arg, name] - _flatpak_command(module, module.check_mode, command) - result['changed'] = True - - -def flatpak_exists(module, binary, name, method): - """Check if the flatpak is installed.""" +def flatpak_exists(module, binary, names, method): + """Check if the flatpaks are installed.""" command = [binary, "list", "--{0}".format(method), "--app"] output = _flatpak_command(module, False, command) - name = _parse_flatpak_name(name).lower() - if name in output.lower(): - return True - return False + installed = [] + not_installed = [] + for name in names: + parsed_name = _parse_flatpak_name(name).lower() + if parsed_name in output.lower(): + installed.append(name) + else: + not_installed.append(name) + return installed, not_installed def _match_installed_flat_name(module, binary, name, method): @@ -266,7 +300,7 @@ def main(): # This module supports check mode module = AnsibleModule( argument_spec=dict( - name=dict(type='str', required=True), + name=dict(type='list', elements='str', required=True), remote=dict(type='str', default='flathub'), method=dict(type='str', default='system', 
choices=['user', 'system']), @@ -295,10 +329,11 @@ def main(): if not binary: module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result) - if state == 'present' and not flatpak_exists(module, binary, name, method): - install_flat(module, binary, remote, name, method, no_dependencies) - elif state == 'absent' and flatpak_exists(module, binary, name, method): - uninstall_flat(module, binary, name, method) + installed, not_installed = flatpak_exists(module, binary, name, method) + if state == 'present' and not_installed: + install_flat(module, binary, remote, not_installed, method, no_dependencies) + elif state == 'absent' and installed: + uninstall_flat(module, binary, installed, method) module.exit_json(**result) diff --git a/tests/integration/targets/flatpak/tasks/setup.yml b/tests/integration/targets/flatpak/tasks/setup.yml index 98b07cd480..8fc0a23566 100644 --- a/tests/integration/targets/flatpak/tasks/setup.yml +++ b/tests/integration/targets/flatpak/tasks/setup.yml @@ -36,7 +36,9 @@ - name: Remove (if necessary) flatpak for testing check mode on absent flatpak flatpak: - name: com.dummy.App1 + name: + - com.dummy.App1 + - com.dummy.App3 remote: dummy-remote state: absent no_dependencies: true diff --git a/tests/integration/targets/flatpak/tasks/test.yml b/tests/integration/targets/flatpak/tasks/test.yml index 7442e4b468..e1bfdbee09 100644 --- a/tests/integration/targets/flatpak/tasks/test.yml +++ b/tests/integration/targets/flatpak/tasks/test.yml @@ -139,3 +139,146 @@ that: - double_url_removal_result is not changed msg: "state=absent with url as name shall not do anything when flatpak is not present" + +- name: Make sure flatpak is really gone - {{ method }} + flatpak: + name: com.dummy.App1 + state: absent + method: "{{ method }}" + no_dependencies: true + +# state=present with list of packages + +- name: Test addition with list - {{ method }} + flatpak: + name: + - com.dummy.App1 + - 
http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + remote: dummy-remote + state: present + method: "{{ method }}" + no_dependencies: true + register: addition_result + +- name: Verify addition with list test result - {{ method }} + assert: + that: + - addition_result is changed + msg: "state=present shall add flatpak when absent" + +- name: Test idempotency of addition with list - {{ method }} + flatpak: + name: + - com.dummy.App1 + - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + remote: dummy-remote + state: present + method: "{{ method }}" + no_dependencies: true + register: double_addition_result + +- name: Verify idempotency of addition with list test result - {{ method }} + assert: + that: + - double_addition_result is not changed + msg: "state=present shall not do anything when flatpak is already present" + +- name: Test addition with list partially installed - {{ method }} + flatpak: + name: + - com.dummy.App1 + - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + - com.dummy.App3 + remote: dummy-remote + state: present + method: "{{ method }}" + no_dependencies: true + register: addition_result + +- name: Verify addition with list partially installed test result - {{ method }} + assert: + that: + - addition_result is changed + msg: "state=present shall add flatpak when absent" + +- name: Test idempotency of addition with list partially installed - {{ method }} + flatpak: + name: + - com.dummy.App1 + - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref + - com.dummy.App3 + remote: dummy-remote + state: present + method: "{{ method }}" + no_dependencies: true + register: double_addition_result + +- name: Verify idempotency of addition with list partially installed test result - {{ method }} + assert: + that: + - double_addition_result is not changed + msg: "state=present shall not do anything when flatpak is already present" + +# state=absent with list of packages + +- name: Test removal with list - {{ method }} + flatpak: + name: + - 
com.dummy.App1 + - com.dummy.App2 + state: absent + method: "{{ method }}" + register: removal_result + +- name: Verify removal with list test result - {{ method }} + assert: + that: + - removal_result is changed + msg: "state=absent shall remove flatpak when present" + +- name: Test idempotency of removal with list - {{ method }} + flatpak: + name: + - com.dummy.App1 + - com.dummy.App2 + state: absent + method: "{{ method }}" + register: double_removal_result + +- name: Verify idempotency of removal with list test result - {{ method }} + assert: + that: + - double_removal_result is not changed + msg: "state=absent shall not do anything when flatpak is not present" + +- name: Test removal with list partially removed - {{ method }} + flatpak: + name: + - com.dummy.App1 + - com.dummy.App2 + - com.dummy.App3 + state: absent + method: "{{ method }}" + register: removal_result + +- name: Verify removal with list partially removed test result - {{ method }} + assert: + that: + - removal_result is changed + msg: "state=absent shall remove flatpak when present" + +- name: Test idempotency of removal with list partially removed - {{ method }} + flatpak: + name: + - com.dummy.App1 + - com.dummy.App2 + - com.dummy.App3 + state: absent + method: "{{ method }}" + register: double_removal_result + +- name: Verify idempotency of removal with list partially removed test result - {{ method }} + assert: + that: + - double_removal_result is not changed + msg: "state=absent shall not do anything when flatpak is not present" diff --git a/tests/integration/targets/setup_flatpak_remote/create-repo.sh b/tests/integration/targets/setup_flatpak_remote/create-repo.sh index 4ece76ccfc..3f44fe96f2 100755 --- a/tests/integration/targets/setup_flatpak_remote/create-repo.sh +++ b/tests/integration/targets/setup_flatpak_remote/create-repo.sh @@ -18,7 +18,7 @@ flatpak install -y --system flathub org.freedesktop.Platform//1.6 org.freedeskto # Add individual flatpaks echo $'#!/bin/sh\necho hello 
world' > hello.sh -for NUM in 1 2; do +for NUM in 1 2 3; do flatpak build-init appdir${NUM} com.dummy.App${NUM} org.freedesktop.Sdk org.freedesktop.Platform 1.6; flatpak build appdir${NUM} mkdir /app/bin; flatpak build appdir${NUM} install --mode=750 hello.sh /app/bin; diff --git a/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz b/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz index bed20ff713f57730bc667b75d72a8560df22b636..609acaad7ba44e1b9b836ce80e528b9c5cab9a2c 100644 GIT binary patch literal 7352 zcmV;p97p5*H+ooF000E$*0e?f03iVu0001VFXf}*Xa5{_T>v>5N@un}Hi1L#y~)Z- z{84!W3f)Esb7jq5>g{oDum66E2a4MLZkl)>lqx^in)Ih^T8CUfhB1~dt?%8H8XP^b z9h}A(U~MNWX1tu~C*!)MKp3W*&!WAT31)mB@{V*6JF>PF)YRv6O>LqTQv3!Z!el2{ zaQkzYAXd|8_KjmqEG}{O=#c`t67-10;#q~d!y&%ubUW3y|2=?y&56W_MAv|yDz;Rd z^%k|Lh4klnN^;MO=Po#xICgfHurk}B)Lf6}XPjtoXwS>Va1iDxkoR>a6dFJ#zBWRZ(R8 zP;Aq?st;i6a8HF5&}{*B52sEo1Eu=TKeOGJwO$u*ICnD7y>I<|%6NLMp+{FTvMW$D zwWISRfH54$H3pm6!B$OSVYDigbV3bWcbe&>-y~3+K^RQl(D0H@N-z&czE;DX$*cS@ z^{IqzR8itz-ipygilAjij!S2`joYHBL$@YZv~YNqHGiYehh0`Rhi1kWeGs#Sjo_Sm z&JM#^1fh}nmtI%TRc`fL;$HOFFo^^XaLf_tG6>-0?9_V?M3quPrV;wd<@TmBsS>w3 ztO=oa&B{1E-0V^OXG?y{&=M%sz8kxmJL`*<1mA;dCrCY6mvkKy1P^udVdYMI_p~GI zFNkn-SZh7USmfN~LvytE)l~u|%ZrX#!F-iUcb8WD&HD`)6C`0=XMlo6t%~az0)X+Jn_l zR+7HyiTq#;OEHk!$oCX{Gyu;-2_e|j0`0OKHvc!$USU^}Dzp$!Qu=jC1I(<$TLG5m zr``M+6Y`1Y^`NrND-%wxp|Ll*?m3#zh&~IfKelv}oNm-Dq6F2Wkz0(z=j;tLhLffL z!%bD-JOZz8;KKbH(ylcvA@Vg}XR$xLK)L155Vvn|gW{5E@KT{qW5|G?iOLvmz8-?( zRSV{0BF$1fAf_amI2uL;hw$yT#z(>1*Q4}$tnv}YwonwogkEK+`hIQFEC7W5+IC4D zj#oD4+bW)ZAY>fGHzMf-MR_FReC8CRJ5*u!rEG6b&3UXqbq#|ujRi21P1X7W$M zt#|5Y0?&Uhj;=fRYb7Q{EPVt|r#Hgc_xat1nYavN!tUiYJB&XjzlY#mku=y;Ns}Y% z8H`zm3cHW)E{P<0gTW+(h+c$79;i9Kl`7%AZPpeL8Vu;V?X9_;5l7kxRikoBoKIc1 zyT%CI^BLHFFEp?!giDr zWP!ua64y_HS02||S{&Y28hV$Ugm>P3rw0;tvT28JdIpON8YlNhSPFS5muY<>%t(O3 z&@3Ku`!_gna+Tp|xG%zW9HWTz@YVMi-IfI4MSk-U%Fl=Q8iS0C#q@ot_w)TZp3B)L z?bGB@Y%?n_Ert!~$<7#dAxY=VAVhEzQjnQjJmM%PnN2xgwJdnmgt)h*qvLPaYlSb| 
z$M89;(bo~;@^duZ9^h(eZ0>XL=+R3O)7FzxcD`k?Q1&YQc>bB*yI^ zm2=|p$h@I^LRDM zG}Y`F(MBjtbuvLz?E?^ZzVo_52!@K`pz-qPc~37H^c;BoF23%>AnqJuK<6 zs2i(<=?QkvLPb|?Y2i%KSbD~}45DwwSfpVtrrV3vfJ6?b+qOklP}`yJ3=}RiwLOa% z>G$sfhLqkCg*2cbV1hTux0p52CKuY5hYB+pDzTrOM2qws(-DyDMIISPt1bV6s5lEQ z;rZUFjm9M+#mtuir0*6Sf4-?xH*8y`vP@r2dtZT=<*;qdZDtFu)3diJSkrv!L<^uqdi7|_6o%p4tG zXJh+2oqVjLQ$5T%3KzzGo37vQoUD4wL>K;QmXDSTrpn3Y=-y?-pOwU$tEbF{gV05I zR1Wj@*^O@V!^O_=XD8ac#O%u^t66|v{4-VY_I*Tlx{J~zeDq;ku~GP%M+q;JozdiJ zN^oFM{^#Sg1Cc`}*A&u;_CasTqlvpdyKLyI5DziPsHlm=o5b4u_;)Vn01!U@wj8p5 zW0iL8<|_xDPb{dc&C-^NP$Fz^mQe$qmd^&ALohcR@!@)sv@ii*FJx{s$Lk8qhA0n= zD}X3&{Sf-^Lnp`|S)urYM0dB6dQqN`JmaKxf2Z2e^@06}?Sp^2Iz$&gcMxfu(1;B* zz%`7yYcbZoQ7UndwM++qqM}8Z18C{NtayqFhpC4+(99?&XAGkuuXgk4z)5L+4T$UQU`4GNZT7Cm)1#hn_AH(_>z zR1A7gV2kgO!u#=U!xmeXSwke8qBK|!1R%Oz@utXGg^$^kbTE1EQ`id2OTX|M-#=t& z54ZB2fnpN&*~~^aU()sUefbr29Yar2F`8}z<5}6^VGNu;nI9;t zyG#QCL09o%6&}<0M@Rj9YU1y2ftoFyC)Z-jUsqM~+(dBt-yXA5JOnxp>3*bvEp%xl zfnv4?)?64Wb7S>hD{MXU@^5dDjfoEN;5V-n%6cio@P(q-PDxdbT_uzmGGe-(Dn5dz zHp-t@54vP=l&fv4CuJw}1E!0(wtfSpUKdWJ1@DPK3TedNk>p3*HuO=Y9RBTXX}_#X z3y{-CQL+eueNusB%niu{aN@yhH;k*1BQpxJ+$1}LfoN5&oWI{GHK4R(+!6@};p{M> zRk1Lg*fusH4$e%Eud?U78%zNn#|HxGvihEY9ZL%I2R#xy52=V$61B%knsM%jX@|XP z&`oLe$}5=Fwsqre)Cbu=+b9mW6H^ob_9j_#l)bE3%_Ca_IT5y@Gasf#HZ(2D*QbDr zL@q2NM~_YL(uq>QSd%i{Ive^$^{&+;CePN2)|(8M)ar136t7d^5Qh#-?0}Q*=RpB{ zm8w{{6wa@u|LjJooulS{$1a`;QYhY!fVmJNq&|ajGv||qSF4-F+%#XSvVn*P{8tX3 zZdxD3I98d#+1|zMe1TJ=1q>GN=ahFOyJLvZLwk7t!6i{ohTu!io)pFb+>$a;f%aS$ z2Xgp+=yK7=K56D|j8LDWj#OPS@i-oEm}IqFd6^tCumMDRoQ0co=pTG@BmXh{-K11} zjFtyrVNh)e;ip{HBTmb@y1My4g+=p3l{q`&|H>wrc#mM>f~%Y>oaDP4P(M8NmAPwI z0#3@TgP63QQ|tI~E5$q7XN22qkAYec!tVmA#*XR)HTYV>y?Y)?Mtk+_S^ZU%$8}GX z=@{b*lPRj#*p!v6gDPze2v2d)KKztATB1sjOr1R_ z_2OHQ^=SgojOGQunM3P1uuK{9lK{i$q=O_y{c8_F3Yq0~Gx^hR@?A-k_Hh1&b8Cmy zx6zqp zV+q&<&tIV&;;6CMOl*+Nj34MJj5F-;CO~B--HiD43tSUm`?P-=MFNDZMM+Uq;CgPP zkn6}R@Cs$huSnNOhC<;0-DexjSz4Cv`Ub0kOkkNs_e;ui{Wuh4u^Yv 
z2ui{Y6wT@s7)D{Cw?M1GgIK3%U_nqWdSLFZn49q+FlSl*1%zY(OE3s`UdA;s0ai;oW(%q6o zb(VE#CuPo^JQuJB4N9sxTjgpgT}kmukm*XX{;rHSH4^*+?ob_ianb88N{;8$idHql z4D#|3=$E>zVY-W3UFde4&g|a+(oKgG&5_sE&zlIT-${^k-J4CXTE$Pd9IJ+H@~Ffr ztYhPv@n=3gNjFjdR`@~X8IZyym z>sd_Q?4~47xja`V7)l}m#XGQ<^CE_1I95bp53}e&pucRM?RgQ)?oqjwk zYPKMT8j}~4lS;ML4n;OcGtt`ivImCTfg3E1m>8nL)M(DzU_*w^!S|L2wsA2R_3|;e zrh+x&v-AdEnx9MekDIX4=uoUO*Uw3hkd=G7QI{`SOx3k#0Z43(oAbpi_`AR}+`F`Z zd?mJhBQH^UBc@yk5)7uOXB)3v-UFZQTZUTcEDD-oss`|BJs*N~w=%+gz0xiD=L@;q zRwjLSw`4rs5>Ua(n@<=%2^OcM`0BD)2^tNVm__+jv%zNlg6;IaQi2yN_ZQmL6saLn z8G}q;)Ut8q?eB~U*X}otar0`PUpDN#T@g<@OWs>>$Kd-N^!Q)EQ&c85ST8~JC2}M= zWkltj0dbI3gbFim4Pe$-rx_M+bC8M+F~IbaFHfIK*hi7BxbS2YWEqPhnb365u> zC!><30uTdnDXFK{cYu}>)3n3GB<)BV&l;ML1PHqpvepQU14Sdy-Y?XP>g0--lUFt# z1=l(9Sql3CfO@Brr~VV<9oGl=@wGuRE1XTT7M>%>LVfot66Tgi%RFU|5t;BNA2@_ zeiTUGAd1IC?{59S)mjeKV@0VFU1}%z6N@7j z5l_5%ZRuhQDccN51Ithv*Nb`$SbVecdDj;)Y)7?wnV(q(&Bixjz_ewV385F`&T8Of z_q}A}y_Gzth~k6d{Cf=w&2NFW>D~ahPgffNS5{<8sSVyGwLtiArx6`8_j7%h-A4xt zd{bTZGROi|@dC+0mm7d_CVeH^(Qg5zV1}bPEF3aR?P2klvk}19JRJmXH@-+d>)%H-xG%HSL{xK5GI{D$=UR`=J z<}VDd_B7Qyt@=|cR9i1*sMyfp!^z)cU}5qdc^{C~^{r6`C-<&pN_<#AN_LfaC5WJ{ zcS&D0yymz|cr$3f&rp4U1XZVYW-VrR3&fWpfnI9T(z~NY?oJRzX^FjOEKEfa$z7%> zq?T*cD}*yT$XIneqnTN$r z@bX^_c51Lb;FVM|XeBOnlL;jgecM0?_T*_#s%)TAtMm#LG|Tvr}-2(KS<6RC{7d@wJfoq>Iq zM)hAg%_|WzO{76llRz;uUU=dTuyXUA;mW?(1@n(5V;Q|T9bB}lkUz^7zslE**gFYL z{ehFt6(%z)#T=wyKbJ%kdu9hIko@S|1%G2W@`f}Q`)Me2OzI%Mg`b9U>Z3BdpA%7q?8XA%3pNc4r`X$yxEV zb85bx@UyR89#lqtRC_ zx5&)wpP$r z%XWSpJy85W{hg*`b|KL$Vt3ru)ibv;Yz3TUImNp~-;C1B2Tjuf)32lgbUt*yvjIM( zv$?;Fo-;!jCT;zd8-!e?;ajO;;wfCuD5gyu@PA0vl%G90JDye%@hfv_k4P2yFJEQL zd#0Xr6Qbb4#h%2t93n&5&U{kFq+Nm~>r%l=YuL!#WK zPY;xyAKVjO)~&dU0x~4jR>k3y1@2(jlD`gA#Mvn`Upr#SWpAj7w#iK>fkWsBq)Zk# zRW?WCc;i>XU>TBzO>AGA;8KXHX=Eeme(Zcp#D2<1a#7n(DD>KKt>Q^R zaXPYUH->=xo?{bckp&}#hm_q^W-V#Ofbl}V(Cj3DlzXOk-*tVf3nhljF}A|DPm?%6 zb3VXmXDd2r(z&Oa=t&HzRO|)sk4Cl;a{Q&^0dmk{6E&D@sKG}K3yW2&IUEKd$3*jG 
z{*Y?(x_kaCcu|u$zRbQxtc&VJmp|&IL{2$R_@wSr28Q6--awtv>5N@un}Hi1L#y~)Z- z{84!W3f)Esb7jq8xj(H~uV-ILw_hEK-tS2ml%=3W(9ZbK%*zGuyzw% zx|CRvE$)Z8hP(q}r5_|!pL6c&F%PC`(i85LvmX4tby$`<){6<0vLZ&gXzNHVZ&Xf~SL^u8sOHdo})6*qX{X~g@Rb%r$B zquEDB>&GQIE73m2JY`xc#+vdGzyT>afBQP{JpJd%k^#05fTyu|mQGF%YV6zMjsMHtkdFZgCC3Hu1hT#!3sP zlX%{M1-qoEJrl#7R+_{(He}QOx3|*uAuy=7BSbaUQ8?+uC3n_im4s73NG;MPAh=aV zsr!5fZ9@geCLM}Jn>G|r+q1urn&NgkY4kCkgSRk&Nj)-6mJ}r({(ADFv?Rj1Qi%V= z#?W3PY}yyhh)TAJTmea{WQz!$!kLBs#hrb!Oe&|_EO+32=p~I7Sqw@dyF6|0q@M8hmxVyd6gGOW|A*fgO zCjkQ-W<>L7srkC{ZDbazsy?xXWtl6Tn^-^s5r^=XB~c~PZoyai;ogn=Y?otN+g6?B zWT2F(AKl^h>@zuadxsdm1EXN(hxfSJYH8;e zz;C?|(!_t_gVJQO-YhxZ#DIbf^JItaMJEvG3=GNsV|Pda>WptsL5Ak0>x>89MY&QR zvxJw5)nrELLCY}g=4zkb23M+VcZJ$kRf>*B)7NH^5*~YC1&?h)!NDqJ#4h)y^7lSF zi0y|>Mb(4Bi+hVEtJNPzDGi5ePhs41$l@VJ6rX;uYD1>|^@C@*E|^CE22`*L6)9O( z?oddJ%D4`7)w*+JE5dme9)yRz6QUBv7seZjaaPl+=5^@LXrU2)SW!puqXN*k~< zxJ=^x0|xbva8VGz2sP|0OQ49+NDlq!uPRo^kf`w}9bO}}YYnnZfrnFS6OoqH0DR0Z z)13cHH?2TM5eIPv98c!~DoZUl=tf4~Ez+DZpFz9!hD8{t@ygq-u-`2F@8>dIx~3Pp z_~>w^QGN4mH_|yzkiIl29x4}JQ`?SIiDy8OIc$Rthg#B&sK;_kF(Hm(+xpjo$9{3} zf@XWNwLx*Vd_dj*OANsrRiQ-b!<{tKhioy&{7kMaBAL9ZYqlDm`$jG_&t~4py9Ya*Ge+y~Pv>HtJcFY8 zihplQ2s3yH>rf+Gfp*T`P}GI@BX&K=2Culc>vvXu$qsx^^i!;(p07mgthO5Vt3XwW z-S*i?CrwF}cE4bC?{sxVTM0iKA}878XK!_j}n zlK6Acn`k-ISyDh<>V?YujFd0=*QcORor3$IxiL^c^H>cZN#Ti~LF>Z*guR2b@FiEP zi%z<6o<4R@1mf4(89Y@fr2_iLU2H;6^ScvM6>{p5qy#U3#`+2b5+>I~Z1{Qcp68UB>zR!!-prXQw5y4( zgDKV1lMkGqQl!qMQYo**XhY34KJT4ex%fejcv zZtoQ=*~j`gYdvg;tQHVS^TBB^88ATLx!w{}b>CjXr6-Gk!0=mAp!C5o2jJHp%{A6Q z);jVS6UGe2k_eN$vMlN9BEzj$P+lss#h9gtB0|Slj-IbZAjy0vp+wvw`|l=$>$Db{ z-zX7JIZcLr+t_(b6|;jMi!T zolTrC91#&H^S!VZL5FS!4n!;Ov1TVn?=$9+44O#ga`2q`7k`0GT9BdFrCf(5QkIcBh1g_v`9#z=X}^u zEX3sA`gUnR{hP2ok2#U)b{&^yKRK|m35I1K2YzRc@F<7ISA)5&68BU@T`2SaIh0{Q zR+4P&P?>nXe-5Tu4ADqcQ8IXzze|bQdAZbeumC-v_asSQn; zcsa{!jf~)!@ApoXJ|eoTn#@sI%n_!eZYFJ?wwYK}&b4lJ3xGVH*ed!&CYPC&HO*{2 zx!&?sSwm+%<^c6=(?~CB>I7PY3&9n2}!JRmQS?&N4(`>5bCe#G>_!l-U zV=&BmtaM!@pCI6<7{y&03<(BUBPfg=}#=V1QjNh9r^D3ZQ~$ 
z-+d4(XSuVNC#N0n>`4rUY=>51W00YMceM5OJr_HgTTfSLEz{8iti&SNn>(EV$o9JW z!m(K@?NUx=&_SExn*qX~{m)clg3E3uI!yy-DPLdxN1DU*tpvKk!8glk*Hf;dQar=R z7on>L@+5L?e(v{&A8(jHcO7kAloZWS&3%0pW#C-Auw_GzDtBS?LYD*}^H6l(nT4DYJ9ve?9;u&7=<;D*FhwV`{oy33F!QH)y+h(Q_{^S__4O*`fbWV(A7 zWjUxL4a#hZr!puK*C~5l45>P9E67TlKz$7iUb0YsC|Yb zCnipl&)YZ90KN)Bxo#d<(iou(K{&J$GWS^<<=I+@__=Xt(A)>w+Yi}dKp8?N2KRwN z^gr}Vwq&VE4@VV%9@O>z>ERVLH36<>+mQ?%=aXkN*odx#Kq2}t^_A!}w#ln1zKPps zedEUYxuo@>=iu>96C9}yB zCbMidGlvUKENqLxrW2cg``-*3sZf3a$US)1zpgLentlC+dxuXcb#e0pH6eYbJF<(` z_(Kj?UXp$c-l`|EQjCV;n|z?PW2`l{DIOv3&U`!(tFpw!5fx^g(=@fBZq$tCk;a}L z?6uR0bKlS#MZXT{8{XUTd)j)acoP7%fg`4QNBm-F=w;VM(4%AF&^DR|Guo7@3417* z1~o&vueD=BWCKjHpR(`EZpGc4HGP_xU-J_pCsD>lqJCrA#QFIhAjjF}&De*Ukji|& zwq=!tF1#YjM2Dln$(9PGgQxvK<_3#XaRKjv3FymIvD{+O|B~>3*5T`aNVtD;A^}9v zPqTh15!MoN?6?KmUg5QY4u|}W6V^A$t!@H*8YtK*JFgf5r2_F4+dd-;oDv7lRl?)A zjlAgqldfV%)8HzC&Ph+uhd^-yLWe^l=?8a&12-Ume96LIJV&0ayQ zEn?2vlqGF3$U=z(-(fZ$tm|>X99kReTLQ8t)u5ltKiMhYq8cL7?Ik@{Sl;Br$a^m& zFD{ZgAv<5POi51>jj>s4sY#-jfDD-Va9FS>^It5_)V8d1)gZs&3zY4)KNF@3d2tn_ zbmXJ*`qQy!S2LEe(bB>G+Y1`&U>rh%dhH3#{MJds9$SwyNYE0n8;s&C=F3DAF85K` zyz~>{k4>dfFy$ks6~q}?sE$8PqQ%BnWg}@qS&AEVvnB5oYxlSxFU|)8D}Re8y3`ur zUlS`Jr|{LMxcP;BwzbI=?!2m`=czdR!Gm*bZEG<;C*@u4)R_SOdzt}!#;0QU?GrWx z(Lv@(8um}*&n6Nas;|`H<~RFk{-`kFbxRuUy$aZ&=aIgMt*6Kx-_=&v6KPfrN&YcP z(ONj_*Q`9$^OcNv!)XRPW5)@JGiz>U2@9tp%aOVi5tfG;a(IT#T5aPIfsmM*BGw{3 z&>-cf;$BH$4La$#zC^ue#8eHZR}=l#%%=4|27{Roc&molVxkR1#f3crno(y59e7uY z;wGqG6(iv~UZT5Ck`J5l8bJv_bG{!(#C5rQyMgXIyVxxf)|HK zoh#K!_$D}6sygpkNZEj5Yy29c$dn03ZPXxSymbD0d@bNhq$4rvQ7=E@=g*}lpY3Ta z6DUhSvmpHVX~4A(|18}-FcAbQq=Et>e7pA(_V9tnXWQ#I4j{VFdF+cVXmQ#RE2bhV z;BUI6f#|8(?6$dxAB!|kOk}CvDP9uw{7OPr79RE6sfkBIqxl$>HUYW|X%H*0n>Z+x z$JeHb2AYZcPy9l7m|1V1i+YKu=rfEwX#rzJfs6&|WJZ!{1m&it#Y%K{3HW3b^2BAu zDxJD3_N=RGd-#Bcw9<(A4TluQX}beZ&4r?ZQ&f~b&ajHscRaFabd#BM5-vR$G{Tnm zakJ|iprJoO7nSuf*Jz4G7Ajx?kstaIqc^RXq=3k!8`QM>WD~YZUs_(<4iGAK0Gcp1 z*FsNFE#=b)cy+7F1D$*S57pxj4-|)vDN*O@8{UaYm9oJZO+;)GzF-U_wmGNGM`)4x 
zVC{Cq2QVh6mp}>&c>KRZym}qc{$QmdbC7MmVj~b34u%DbOycuRMi~Sz{I$=rNQv$y zDo5s_sUl8K2jV6fnXewZ(7Yeui~8!5peW@sCh5WYXG(eA@g)mpJK1vin?qY($3{n& zdyv10S$%^5I7L8{v6v@p&bt7X@7D#zHePiI_Hp(dIRfD^cR}4cY~IRq>>^{lVl|E zUt+k`p7s9=R+ji-(kp16*(WHvE>e0Oi{E5VRphpC?mL3_t*M5M1eiCBJ%Nk%cvO@7 z-UgmF$$xoxz94TH_m)K- zW>Nq`uil4P5J{Ir2+cL1V}w&1@BzO7B4L<@9JnPDz|u@4bb1C`^-ydjrW>}Kc8trq-Suw90+K&UIaW>GLwj@XbzXWmCp zn;kGaR8DThc(5iNBB^tN$`mHytB#qUu)ktV(B}87t6C%zrvid%=;)B_+f#Bkhed0k zb@OPvh^2Yp!h@4+C9s2zGobnbcGKYp5?!OH805IoPKw>nwhg;x4NJJ0e~`qumk|irf*8I#Ao{g000001X)@i7;SF= From 13ab8f412df668f9163f2c7c1d3a41e788b9ce33 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Thu, 17 Jun 2021 14:17:28 +0430 Subject: [PATCH 0382/3093] Rename test_pr_quick_start_guide.rst to test_pr_locally_guide.rst (#2823) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5a068f9414..4dfde91fca 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -31,6 +31,6 @@ You can also read [our Quick-start development guide](https://github.com/ansible ## Test pull requests -If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_quick_start_guide.rst) for instructions on how do it quickly. +If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how do it quickly. If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it. 
From e9f3455b623cac390ae56c31fe0c623bb0178f7c Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Thu, 17 Jun 2021 12:48:39 +0300 Subject: [PATCH 0383/3093] Update README (#2802) * Update README * Update README.md Co-authored-by: Amin Vakil * Change * Fix * Update README.md Co-authored-by: Felix Fontein * Update README.md Co-authored-by: Felix Fontein * Fix * Fix * Fix * Fix * Fix * Fix * Fix * Update README.md Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil Co-authored-by: Felix Fontein --- README.md | 47 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index a874a3e929..6f13fe150c 100644 --- a/README.md +++ b/README.md @@ -3,12 +3,18 @@ [![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general) -This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by Ansible community which are not part of more specialized community collections. +This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections. You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so. 
+## Code of Conduct + +We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project. + +If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint. + ## Tested with Ansible Tested with the current Ansible 2.9, ansible-base 2.10 and ansible-core 2.11 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported. @@ -23,7 +29,9 @@ Please check the included content on the [Ansible Galaxy page for this collectio ## Using this collection -Before using the General community collection, you need to install the collection with the `ansible-galaxy` CLI: +This collection is shipped with the Ansible package. So if you have it installed, no more action is required. + +If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool: ansible-galaxy collection install community.general @@ -34,19 +42,29 @@ collections: - name: community.general ``` +Note that if you install the collection manually, it will not be upgraded automatically when you upgrade the Ansible package. 
To upgrade the collection to the latest available version, run the following command: + +```bash +ansible-galaxy collection install community.general --upgrade +``` + +You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general): + +```bash +ansible-galaxy collection install community.general:==X.Y.Z +``` + See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details. ## Contributing to this collection -If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there. +The content of this collection is made by good people like you, a community of individuals collaborating on making the world better through developing automation software. -For example, if you are working in the `~/dev` directory: +All types of contributions are very welcome. -``` -cd ~/dev -git clone git@github.com:ansible-collections/community.general.git collections/ansible_collections/community/general -export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH -``` +You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md)! + +The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals. 
You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html). @@ -58,16 +76,15 @@ See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collectio ### Communication -We have a dedicated Working Group for Ansible development. +We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed. -You can find other people interested on the following [Libera.chat](https://libera.chat/) IRC channels - -- `#ansible` - For general use questions and support. -- `#ansible-devel` - For discussions on developer topics and code related to features or bugs in ansible-core. -- `#ansible-community` - For discussions on community topics and community meetings, and for general development questions for community collections. +Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat). + +We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us. For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community). -For more information about [communication](https://docs.ansible.com/ansible/latest/community/communication.html) +For more information about communication, refer to the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). 
### Publishing New Version From c9cf641188bad51cc214598e1816da880ee90d8b Mon Sep 17 00:00:00 2001 From: Anas Date: Thu, 17 Jun 2021 19:05:35 +0200 Subject: [PATCH 0384/3093] datadog_event : Adding api_host as an optional parameter (#2775) * 2774 Module datadog_event _ Adding api_host as an optional parameter * Update changelogs/fragments/2774-datadog_event_api_parameter.yml Co-authored-by: Felix Fontein * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Felix Fontein * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update datadog_event.py * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Felix Fontein * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Felix Fontein * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Felix Fontein * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Felix Fontein * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Amin Vakil * Update plugins/modules/monitoring/datadog/datadog_event.py Co-authored-by: Amin Vakil Co-authored-by: Anas Hamadeh Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- .../2774-datadog_event_api_parameter.yml | 2 ++ .../monitoring/datadog/datadog_event.py | 23 ++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2774-datadog_event_api_parameter.yml diff --git a/changelogs/fragments/2774-datadog_event_api_parameter.yml b/changelogs/fragments/2774-datadog_event_api_parameter.yml new file mode 100644 index 0000000000..6144b89400 --- /dev/null +++ b/changelogs/fragments/2774-datadog_event_api_parameter.yml @@ -0,0 +1,2 @@ +minor_changes: +- "datadog_event - adding parameter ``api_host`` to allow selecting a datadog API endpoint instead of using the default one 
(https://github.com/ansible-collections/community.general/issues/2774, https://github.com/ansible-collections/community.general/pull/2775)." diff --git a/plugins/modules/monitoring/datadog/datadog_event.py b/plugins/modules/monitoring/datadog/datadog_event.py index c3a3920aee..3f6500f11f 100644 --- a/plugins/modules/monitoring/datadog/datadog_event.py +++ b/plugins/modules/monitoring/datadog/datadog_event.py @@ -54,6 +54,11 @@ options: description: - Host name to associate with the event. - If not specified, it defaults to the remote system's hostname. + api_host: + type: str + description: + - DataDog API endpoint URL. + version_added: '3.3.0' tags: type: list elements: str @@ -90,6 +95,19 @@ EXAMPLES = ''' api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN tags: 'aa,bb,#host:{{ inventory_hostname }}' + +- name: Post an event with several tags to another endpoint + community.general.datadog_event: + title: Testing from ansible + text: Test + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN + api_host: 'https://example.datadoghq.eu' + tags: + - aa + - b + - '#host:{{ inventory_hostname }}' + ''' import platform @@ -113,6 +131,7 @@ def main(): argument_spec=dict( api_key=dict(required=True, no_log=True), app_key=dict(required=True, no_log=True), + api_host=dict(type='str'), title=dict(required=True), text=dict(required=True), date_happened=dict(type='int'), @@ -131,8 +150,10 @@ def main(): options = { 'api_key': module.params['api_key'], - 'app_key': module.params['app_key'] + 'app_key': module.params['app_key'], } + if module.params['api_host'] is not None: + options['api_host'] = module.params['api_host'] initialize(**options) From ee23c26150d1215ea315d689d9e4e5624fa6c8b9 Mon Sep 17 00:00:00 2001 From: TizeN85 Date: Thu, 17 Jun 2021 19:08:42 +0200 Subject: [PATCH 0385/3093] fix sudorule_add_allow_command_group (#2821) * fix sudorule_add_allow_command_group fix 
sudorule_add_allow_command_group is not working on freeIPA 4.8.7 at least, sudorule_add_allow_command should be used instead with item sudocmdgroup * Added changelog fragment --- changelogs/fragments/2821-ipa_sudorule.yml | 4 ++++ plugins/modules/identity/ipa/ipa_sudorule.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2821-ipa_sudorule.yml diff --git a/changelogs/fragments/2821-ipa_sudorule.yml b/changelogs/fragments/2821-ipa_sudorule.yml new file mode 100644 index 0000000000..5e1197da95 --- /dev/null +++ b/changelogs/fragments/2821-ipa_sudorule.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - "ipa_sudorule - call ``sudorule_add_allow_command`` method instead of ``sudorule_add_allow_command_group`` + (https://github.com/ansible-collections/community.general/issues/2442)." diff --git a/plugins/modules/identity/ipa/ipa_sudorule.py b/plugins/modules/identity/ipa/ipa_sudorule.py index 15abef8f17..4494122e8d 100644 --- a/plugins/modules/identity/ipa/ipa_sudorule.py +++ b/plugins/modules/identity/ipa/ipa_sudorule.py @@ -237,7 +237,7 @@ class SudoRuleIPAClient(IPAClient): return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item}) def sudorule_add_allow_command_group(self, name, item): - return self._post_json(method='sudorule_add_allow_command_group', name=name, item={'sudocmdgroup': item}) + return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmdgroup': item}) def sudorule_remove_allow_command(self, name, item): return self._post_json(method='sudorule_remove_allow_command', name=name, item=item) From 1ed4394c5ed4e3e9e31165b7979e5b38e16def05 Mon Sep 17 00:00:00 2001 From: Shahar Mor Date: Fri, 18 Jun 2021 23:08:46 +0300 Subject: [PATCH 0386/3093] npm - fix updating version specific modules (#2830) * npm - fix updating version specific modules if a version specific module is used, the comparison will be used with the version and not only by name * Update 
plugins/modules/packaging/language/npm.py Co-authored-by: Ajpantuso * Update changelogs/fragments/2830-npm-version-update.yml Co-authored-by: Ajpantuso * Update changelogs/fragments/2830-npm-version-update.yml Co-authored-by: Amin Vakil * Update changelogs/fragments/2830-npm-version-update.yml Co-authored-by: Amin Vakil Co-authored-by: Ajpantuso Co-authored-by: Amin Vakil --- .../fragments/2830-npm-version-update.yml | 4 + plugins/modules/packaging/language/npm.py | 13 ++- .../modules/packaging/language/test_npm.py | 103 +++++++++++++++++- 3 files changed, 114 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/2830-npm-version-update.yml diff --git a/changelogs/fragments/2830-npm-version-update.yml b/changelogs/fragments/2830-npm-version-update.yml new file mode 100644 index 0000000000..ab05258e2c --- /dev/null +++ b/changelogs/fragments/2830-npm-version-update.yml @@ -0,0 +1,4 @@ +bugfixes: + - "npm - when the ``version`` option is used the comparison of installed vs missing will + use name@version instead of just name, allowing version specific updates + (https://github.com/ansible-collections/community.general/issues/2021)." 
diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/packaging/language/npm.py index 62121297d7..5a48468970 100644 --- a/plugins/modules/packaging/language/npm.py +++ b/plugins/modules/packaging/language/npm.py @@ -181,7 +181,7 @@ class Npm(object): cmd.append('--ignore-scripts') if self.unsafe_perm: cmd.append('--unsafe-perm') - if self.name and add_package_name: + if self.name_version and add_package_name: cmd.append(self.name_version) if self.registry: cmd.append('--registry') @@ -215,14 +215,17 @@ class Npm(object): except (getattr(json, 'JSONDecodeError', ValueError)) as e: self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e)) if 'dependencies' in data: - for dep in data['dependencies']: - if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']: + for dep, props in data['dependencies'].items(): + dep_version = dep + '@' + str(props['version']) + + if 'missing' in props and props['missing']: missing.append(dep) - elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']: + elif 'invalid' in props and props['invalid']: missing.append(dep) else: installed.append(dep) - if self.name and self.name not in installed: + installed.append(dep_version) + if self.name_version and self.name_version not in installed: missing.append(self.name) # Named dependency not installed else: diff --git a/tests/unit/plugins/modules/packaging/language/test_npm.py b/tests/unit/plugins/modules/packaging/language/test_npm.py index 849bfac1a6..abdacc6aef 100644 --- a/tests/unit/plugins/modules/packaging/language/test_npm.py +++ b/tests/unit/plugins/modules/packaging/language/test_npm.py @@ -47,6 +47,66 @@ class NPMModuleTestCase(ModuleTestCase): result = self.module_main(AnsibleExitJson) self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 
'install', '--global', 'coffee-script'], check_rc=True, cwd=None), + ]) + + def test_present_version(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'present', + 'version': '2.5.1' + }) + self.module_main_command.side_effect = [ + (0, '{}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 'install', '--global', 'coffee-script@2.5.1'], check_rc=True, cwd=None), + ]) + + def test_present_version_update(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'present', + 'version': '2.5.1' + }) + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 'install', '--global', 'coffee-script@2.5.1'], check_rc=True, cwd=None), + ]) + + def test_present_version_exists(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'present', + 'version': '2.5.1' + }) + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertFalse(result['changed']) self.module_main_command.assert_has_calls([ call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), ]) @@ -58,7 +118,7 @@ class NPMModuleTestCase(ModuleTestCase): 'state': 'absent' }) self.module_main_command.side_effect = [ - (0, '{"dependencies": {"coffee-script": {}}}', ''), + (0, '{"dependencies": {"coffee-script": {"version" 
: "2.5.1"}}}', ''), (0, '{}', ''), ] @@ -66,5 +126,46 @@ class NPMModuleTestCase(ModuleTestCase): self.assertTrue(result['changed']) self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None), + ]) + + def test_absent_version(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'absent', + 'version': '2.5.1' + }) + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None), + ]) + + def test_absent_version_different(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'absent', + 'version': '2.5.1' + }) + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None), ]) From 67cabcb2aa858b5c1195b605468374748c509895 Mon Sep 17 00:00:00 2001 From: omula Date: Sat, 19 Jun 2021 14:42:05 +0200 Subject: [PATCH 0387/3093] Nmcli add options (#2732) * [nmcli] add new network configuration options * [nmcli_add_options] add documentation for new parameters nad add disabled method for IPv6 * [nmcli] fix and version adding. 
Add changelog fragment * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update changelogs/fragments/2732-nmcli_add_options.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/2732-nmcli_add_options.yml Co-authored-by: Felix Fontein * [nmcli_add_options] fix testing * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update changelogs/fragments/2732-nmcli_add_options.yml Co-authored-by: Amin Vakil * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Amin Vakil * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Amin Vakil * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Amin Vakil Co-authored-by: Oriol MULA VALLS Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- .../fragments/2732-nmcli_add_options.yml | 3 +++ plugins/modules/net_tools/nmcli.py | 23 +++++++++++++++++-- .../plugins/modules/net_tools/test_nmcli.py | 8 +++++++ 3 files changed, 32 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2732-nmcli_add_options.yml diff --git a/changelogs/fragments/2732-nmcli_add_options.yml b/changelogs/fragments/2732-nmcli_add_options.yml new file mode 100644 index 0000000000..58ed2d2ee4 --- /dev/null +++ b/changelogs/fragments/2732-nmcli_add_options.yml @@ -0,0 +1,3 @@ +minor_changes: + - nmcli - add ``routing_rules4`` and ``may_fail4`` options (https://github.com/ansible-collections/community.general/issues/2730). + - nmcli - add ``disabled`` value to ``method6`` option (https://github.com/ansible-collections/community.general/issues/2730). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 399d15267a..30f0537e70 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -95,6 +95,11 @@ options: - Set metric level of ipv4 routes configured on interface. 
type: int version_added: 2.0.0 + routing_rules4: + description: + - Is the same as in an C(ip route add) command, except always requires specifying a priority. + type: str + version_added: 3.3.0 never_default4: description: - Set as default route. @@ -126,6 +131,12 @@ options: type: str choices: [auto, link-local, manual, shared, disabled] version_added: 2.2.0 + may_fail4: + description: + - If you need I(ip4) configured before C(network-online.target) is reached, set this option to C(false). + type: bool + default: true + version_added: 3.3.0 ip6: description: - The IPv6 address to this interface. @@ -164,8 +175,9 @@ options: description: - Configuration method to be used for IPv6 - If I(ip6) is set, C(ipv6.method) is automatically set to C(manual) and this parameter is not needed. + - C(disabled) was added in community.general 3.3.0. type: str - choices: [ignore, auto, dhcp, link-local, manual, shared] + choices: [ignore, auto, dhcp, link-local, manual, shared, disabled] version_added: 2.2.0 mtu: description: @@ -675,11 +687,13 @@ class Nmcli(object): self.gw4_ignore_auto = module.params['gw4_ignore_auto'] self.routes4 = module.params['routes4'] self.route_metric4 = module.params['route_metric4'] + self.routing_rules4 = module.params['routing_rules4'] self.never_default4 = module.params['never_default4'] self.dns4 = module.params['dns4'] self.dns4_search = module.params['dns4_search'] self.dns4_ignore_auto = module.params['dns4_ignore_auto'] self.method4 = module.params['method4'] + self.may_fail4 = module.params['may_fail4'] self.ip6 = module.params['ip6'] self.gw6 = module.params['gw6'] self.gw6_ignore_auto = module.params['gw6_ignore_auto'] @@ -762,8 +776,10 @@ class Nmcli(object): 'ipv4.ignore-auto-routes': self.gw4_ignore_auto, 'ipv4.routes': self.routes4, 'ipv4.route-metric': self.route_metric4, + 'ipv4.routing-rules': self.routing_rules4, 'ipv4.never-default': self.never_default4, 'ipv4.method': self.ipv4_method, + 'ipv4.may-fail': self.may_fail4, 
'ipv6.addresses': self.ip6, 'ipv6.dns': self.dns6, 'ipv6.dns-search': self.dns6_search, @@ -935,6 +951,7 @@ class Nmcli(object): 'ipv4.never-default', 'ipv4.ignore-auto-dns', 'ipv4.ignore-auto-routes', + 'ipv4.may-fail', 'ipv6.ignore-auto-dns', 'ipv6.ignore-auto-routes'): return bool @@ -1155,11 +1172,13 @@ def main(): gw4_ignore_auto=dict(type='bool', default=False), routes4=dict(type='list', elements='str'), route_metric4=dict(type='int'), + routing_rules4=dict(type='str'), never_default4=dict(type='bool', default=False), dns4=dict(type='list', elements='str'), dns4_search=dict(type='list', elements='str'), dns4_ignore_auto=dict(type='bool', default=False), method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']), + may_fail4=dict(type='bool', default=True), dhcp_client_id=dict(type='str'), ip6=dict(type='str'), gw6=dict(type='str'), @@ -1167,7 +1186,7 @@ def main(): dns6=dict(type='list', elements='str'), dns6_search=dict(type='list', elements='str'), dns6_ignore_auto=dict(type='bool', default=False), - method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared']), + method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']), # Bond Specific vars mode=dict(type='str', default='balance-rr', choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']), diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 5b3f96937b..8724bd4f60 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -98,6 +98,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -128,6 +129,7 @@ ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no 
ipv4.never-default: no ipv4.dns-search: search.redhat.com +ipv4.may-fail: yes ipv6.dns-search: search6.redhat.com ipv6.method: auto ipv6.ignore-auto-dns: no @@ -158,6 +160,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -187,6 +190,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -218,6 +222,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -275,6 +280,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -370,6 +376,7 @@ ipv4.dhcp-client-id: 00:11:22:AA:BB:CC:DD ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv6.method: auto ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no @@ -399,6 +406,7 @@ ipv4.gateway: 10.10.10.1 ipv4.ignore-auto-dns: no ipv4.ignore-auto-routes: no ipv4.never-default: no +ipv4.may-fail: yes ipv4.dns: 1.1.1.1,8.8.8.8 ipv6.method: auto ipv6.ignore-auto-dns: no From 08f7ad06bea1fc71c675550db0f0dc46b1f45224 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 19 Jun 2021 15:06:58 +0200 Subject: [PATCH 0388/3093] Remove inventory and vault scripts (#2696) * Remove inventory and vault scripts. * Remove foreman inventory script tests. 
--- changelogs/fragments/remove-scripts.yml | 2 + scripts/inventory/__init__.py | 0 scripts/inventory/abiquo.ini | 48 - scripts/inventory/abiquo.py | 224 ---- scripts/inventory/apache-libcloud.py | 336 ------ scripts/inventory/apstra_aos.ini | 20 - scripts/inventory/apstra_aos.py | 580 ----------- scripts/inventory/azure_rm.ini | 23 - scripts/inventory/azure_rm.py | 962 ------------------ scripts/inventory/brook.ini | 39 - scripts/inventory/brook.py | 248 ----- scripts/inventory/cloudforms.ini | 40 - scripts/inventory/cloudforms.py | 499 --------- scripts/inventory/cobbler.ini | 24 - scripts/inventory/cobbler.py | 305 ------ scripts/inventory/collins.ini | 57 -- scripts/inventory/collins.py | 429 -------- scripts/inventory/consul_io.ini | 54 - scripts/inventory/consul_io.py | 553 ---------- scripts/inventory/docker.py | 892 ---------------- scripts/inventory/docker.yml | 74 -- scripts/inventory/fleet.py | 99 -- scripts/inventory/foreman.ini | 200 ---- scripts/inventory/foreman.py | 651 ------------ scripts/inventory/freeipa.py | 126 --- scripts/inventory/infoblox.py | 129 --- scripts/inventory/infoblox.yaml | 24 - scripts/inventory/jail.py | 27 - scripts/inventory/landscape.py | 117 --- scripts/inventory/libcloud.ini | 15 - scripts/inventory/linode.ini | 18 - scripts/inventory/linode.py | 338 ------ scripts/inventory/lxc_inventory.py | 60 -- scripts/inventory/lxd.ini | 13 - scripts/inventory/lxd.py | 93 -- scripts/inventory/mdt.ini | 17 - scripts/inventory/mdt_dynamic_inventory.py | 122 --- scripts/inventory/nagios_livestatus.ini | 41 - scripts/inventory/nagios_livestatus.py | 163 --- scripts/inventory/nagios_ndo.ini | 10 - scripts/inventory/nagios_ndo.py | 95 -- scripts/inventory/nsot.py | 346 ------- scripts/inventory/nsot.yaml | 22 - scripts/inventory/openshift.py | 89 -- scripts/inventory/openvz.py | 74 -- scripts/inventory/ovirt.ini | 35 - scripts/inventory/ovirt.py | 279 ----- scripts/inventory/ovirt4.py | 258 ----- scripts/inventory/packet_net.ini | 53 - 
scripts/inventory/packet_net.py | 496 --------- scripts/inventory/proxmox.py | 240 ----- scripts/inventory/rackhd.py | 86 -- scripts/inventory/rax.ini | 66 -- scripts/inventory/rax.py | 460 --------- scripts/inventory/rhv.py | 1 - scripts/inventory/rudder.ini | 35 - scripts/inventory/rudder.py | 286 ------ scripts/inventory/scaleway.ini | 37 - scripts/inventory/scaleway.py | 220 ---- scripts/inventory/serf.py | 101 -- scripts/inventory/softlayer.py | 196 ---- scripts/inventory/spacewalk.ini | 16 - scripts/inventory/spacewalk.py | 226 ---- scripts/inventory/ssh_config.py | 121 --- scripts/inventory/stacki.py | 180 ---- scripts/inventory/stacki.yml | 7 - scripts/inventory/vagrant.py | 123 --- scripts/inventory/vbox.py | 107 -- scripts/inventory/zone.py | 33 - scripts/vault/__init__.py | 0 scripts/vault/azure_vault.ini | 10 - scripts/vault/azure_vault.py | 595 ----------- scripts/vault/vault-keyring-client.py | 134 --- scripts/vault/vault-keyring.py | 87 -- .../targets/script_inventory_foreman/aliases | 3 - .../script_inventory_foreman/foreman.sh | 10 - .../targets/script_inventory_foreman/runme.sh | 50 - .../test_foreman_inventory.yml | 7 - 78 files changed, 2 insertions(+), 12854 deletions(-) create mode 100644 changelogs/fragments/remove-scripts.yml delete mode 100644 scripts/inventory/__init__.py delete mode 100644 scripts/inventory/abiquo.ini delete mode 100755 scripts/inventory/abiquo.py delete mode 100755 scripts/inventory/apache-libcloud.py delete mode 100644 scripts/inventory/apstra_aos.ini delete mode 100755 scripts/inventory/apstra_aos.py delete mode 100644 scripts/inventory/azure_rm.ini delete mode 100755 scripts/inventory/azure_rm.py delete mode 100644 scripts/inventory/brook.ini delete mode 100755 scripts/inventory/brook.py delete mode 100644 scripts/inventory/cloudforms.ini delete mode 100755 scripts/inventory/cloudforms.py delete mode 100644 scripts/inventory/cobbler.ini delete mode 100755 scripts/inventory/cobbler.py delete mode 100644 
scripts/inventory/collins.ini delete mode 100755 scripts/inventory/collins.py delete mode 100644 scripts/inventory/consul_io.ini delete mode 100755 scripts/inventory/consul_io.py delete mode 100755 scripts/inventory/docker.py delete mode 100644 scripts/inventory/docker.yml delete mode 100755 scripts/inventory/fleet.py delete mode 100644 scripts/inventory/foreman.ini delete mode 100755 scripts/inventory/foreman.py delete mode 100755 scripts/inventory/freeipa.py delete mode 100755 scripts/inventory/infoblox.py delete mode 100644 scripts/inventory/infoblox.yaml delete mode 100755 scripts/inventory/jail.py delete mode 100755 scripts/inventory/landscape.py delete mode 100644 scripts/inventory/libcloud.ini delete mode 100644 scripts/inventory/linode.ini delete mode 100755 scripts/inventory/linode.py delete mode 100755 scripts/inventory/lxc_inventory.py delete mode 100644 scripts/inventory/lxd.ini delete mode 100755 scripts/inventory/lxd.py delete mode 100644 scripts/inventory/mdt.ini delete mode 100755 scripts/inventory/mdt_dynamic_inventory.py delete mode 100644 scripts/inventory/nagios_livestatus.ini delete mode 100755 scripts/inventory/nagios_livestatus.py delete mode 100644 scripts/inventory/nagios_ndo.ini delete mode 100755 scripts/inventory/nagios_ndo.py delete mode 100755 scripts/inventory/nsot.py delete mode 100644 scripts/inventory/nsot.yaml delete mode 100755 scripts/inventory/openshift.py delete mode 100755 scripts/inventory/openvz.py delete mode 100644 scripts/inventory/ovirt.ini delete mode 100755 scripts/inventory/ovirt.py delete mode 100755 scripts/inventory/ovirt4.py delete mode 100644 scripts/inventory/packet_net.ini delete mode 100755 scripts/inventory/packet_net.py delete mode 100755 scripts/inventory/proxmox.py delete mode 100755 scripts/inventory/rackhd.py delete mode 100644 scripts/inventory/rax.ini delete mode 100755 scripts/inventory/rax.py delete mode 120000 scripts/inventory/rhv.py delete mode 100644 scripts/inventory/rudder.ini delete mode 
100755 scripts/inventory/rudder.py delete mode 100644 scripts/inventory/scaleway.ini delete mode 100755 scripts/inventory/scaleway.py delete mode 100755 scripts/inventory/serf.py delete mode 100755 scripts/inventory/softlayer.py delete mode 100644 scripts/inventory/spacewalk.ini delete mode 100755 scripts/inventory/spacewalk.py delete mode 100755 scripts/inventory/ssh_config.py delete mode 100755 scripts/inventory/stacki.py delete mode 100644 scripts/inventory/stacki.yml delete mode 100755 scripts/inventory/vagrant.py delete mode 100755 scripts/inventory/vbox.py delete mode 100755 scripts/inventory/zone.py delete mode 100644 scripts/vault/__init__.py delete mode 100644 scripts/vault/azure_vault.ini delete mode 100755 scripts/vault/azure_vault.py delete mode 100755 scripts/vault/vault-keyring-client.py delete mode 100755 scripts/vault/vault-keyring.py delete mode 100644 tests/integration/targets/script_inventory_foreman/aliases delete mode 100755 tests/integration/targets/script_inventory_foreman/foreman.sh delete mode 100755 tests/integration/targets/script_inventory_foreman/runme.sh delete mode 100644 tests/integration/targets/script_inventory_foreman/test_foreman_inventory.yml diff --git a/changelogs/fragments/remove-scripts.yml b/changelogs/fragments/remove-scripts.yml new file mode 100644 index 0000000000..72cee7dee5 --- /dev/null +++ b/changelogs/fragments/remove-scripts.yml @@ -0,0 +1,2 @@ +removed_features: +- "All inventory and vault scripts contained in community.general were moved to the `contrib-scripts GitHub repository `_ (https://github.com/ansible-collections/community.general/pull/2696)." 
diff --git a/scripts/inventory/__init__.py b/scripts/inventory/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/scripts/inventory/abiquo.ini b/scripts/inventory/abiquo.ini deleted file mode 100644 index 991a2ed803..0000000000 --- a/scripts/inventory/abiquo.ini +++ /dev/null @@ -1,48 +0,0 @@ -# Ansible external inventory script settings for Abiquo -# - -# Define an Abiquo user with access to Abiquo API which will be used to -# perform required queries to obtain information to generate the Ansible -# inventory output. -# -[auth] -apiuser = admin -apipass = xabiquo - - -# Specify Abiquo API version in major.minor format and the access URI to -# API endpoint. Tested versions are: 2.6 , 3.0 and 3.1 -# To confirm that your box haves access to Abiquo API you can perform a -# curl command, replacing with suitable values, similar to this: -# curl -X GET https://192.168.2.100/api/login -u admin:xabiquo -# -[api] -version = 3.0 -uri = https://192.168.2.100/api -# You probably won't need to modify login preferences, but just in case -login_path = /login -login_type = application/vnd.abiquo.user+json - - -# To avoid performing excessive calls to Abiquo API you can define a -# cache for the plugin output. Within the time defined in seconds, latest -# output will be reused. After that time, the cache will be refreshed. -# -[cache] -cache_max_age = 30 -cache_dir = /tmp - - -[defaults] -# Depending in your Abiquo environment, you may want to use only public IP -# addresses (if using public cloud providers) or also private IP addresses. -# You can set this with public_ip_only configuration. -public_ip_only = false -# default_net_interface only is used if public_ip_only = false -# If public_ip_only is set to false, you can choose default nic to obtain -# IP address to define the host. -default_net_interface = nic0 -# Only deployed VM are displayed in the plugin output. -deployed_only = true -# Define if VM metadata is obtained from Abiquo API. 
-get_metadata = false diff --git a/scripts/inventory/abiquo.py b/scripts/inventory/abiquo.py deleted file mode 100755 index 7602a1d2cb..0000000000 --- a/scripts/inventory/abiquo.py +++ /dev/null @@ -1,224 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -''' -External inventory script for Abiquo -==================================== - -Shamelessly copied from an existing inventory script. - -This script generates an inventory that Ansible can understand by making API requests to Abiquo API -Requires some python libraries, ensure to have them installed when using this script. - -This script has been tested in Abiquo 3.0 but it may work also for Abiquo 2.6. - -Before using this script you may want to modify abiquo.ini config file. - -This script generates an Ansible hosts file with these host groups: - -ABQ_xxx: Defines a hosts itself by Abiquo VM name label -all: Contains all hosts defined in Abiquo user's enterprise -virtualdatecenter: Creates a host group for each virtualdatacenter containing all hosts defined on it -virtualappliance: Creates a host group for each virtualappliance containing all hosts defined on it -imagetemplate: Creates a host group for each image template containing all hosts using it - -''' - -# (c) 2014, Daniel Beneyto -# -# This file is part of Ansible, -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -import sys -import time - -import json - -from ansible.module_utils.six.moves import configparser as ConfigParser -from ansible.module_utils.urls import open_url - - -def api_get(link, config): - try: - if link is None: - url = config.get('api', 'uri') + config.get('api', 'login_path') - headers = {"Accept": config.get('api', 'login_type')} - else: - url = link['href'] + '?limit=0' - headers = {"Accept": link['type']} - result = open_url(url, headers=headers, 
url_username=config.get('auth', 'apiuser').replace('\n', ''), - url_password=config.get('auth', 'apipass').replace('\n', '')) - return json.loads(result.read()) - except Exception: - return None - - -def save_cache(data, config): - ''' saves item to cache ''' - dpath = config.get('cache', 'cache_dir') - try: - cache = open('/'.join([dpath, 'inventory']), 'w') - cache.write(json.dumps(data)) - cache.close() - except IOError as e: - pass # not really sure what to do here - - -def get_cache(cache_item, config): - ''' returns cached item ''' - dpath = config.get('cache', 'cache_dir') - inv = {} - try: - cache = open('/'.join([dpath, 'inventory']), 'r') - inv = cache.read() - cache.close() - except IOError as e: - pass # not really sure what to do here - - return inv - - -def cache_available(config): - ''' checks if we have a 'fresh' cache available for item requested ''' - - if config.has_option('cache', 'cache_dir'): - dpath = config.get('cache', 'cache_dir') - - try: - existing = os.stat('/'.join([dpath, 'inventory'])) - except Exception: - # cache doesn't exist or isn't accessible - return False - - if config.has_option('cache', 'cache_max_age'): - maxage = config.get('cache', 'cache_max_age') - if (int(time.time()) - int(existing.st_mtime)) <= int(maxage): - return True - - return False - - -def generate_inv_from_api(enterprise_entity, config): - try: - inventory['all'] = {} - inventory['all']['children'] = [] - inventory['all']['hosts'] = [] - inventory['_meta'] = {} - inventory['_meta']['hostvars'] = {} - - enterprise = api_get(enterprise_entity, config) - vms_entity = next(link for link in enterprise['links'] if link['rel'] == 'virtualmachines') - vms = api_get(vms_entity, config) - for vmcollection in vms['collection']: - for link in vmcollection['links']: - if link['rel'] == 'virtualappliance': - vm_vapp = link['title'].replace('[', '').replace(']', '').replace(' ', '_') - elif link['rel'] == 'virtualdatacenter': - vm_vdc = link['title'].replace('[', 
'').replace(']', '').replace(' ', '_') - elif link['rel'] == 'virtualmachinetemplate': - vm_template = link['title'].replace('[', '').replace(']', '').replace(' ', '_') - - # From abiquo.ini: Only adding to inventory VMs with public IP - if config.getboolean('defaults', 'public_ip_only') is True: - for link in vmcollection['links']: - if link['type'] == 'application/vnd.abiquo.publicip+json' and link['rel'] == 'ip': - vm_nic = link['title'] - break - else: - vm_nic = None - # Otherwise, assigning defined network interface IP address - else: - for link in vmcollection['links']: - if link['rel'] == config.get('defaults', 'default_net_interface'): - vm_nic = link['title'] - break - else: - vm_nic = None - - vm_state = True - # From abiquo.ini: Only adding to inventory VMs deployed - if config.getboolean('defaults', 'deployed_only') is True and vmcollection['state'] == 'NOT_ALLOCATED': - vm_state = False - - if vm_nic is not None and vm_state: - if vm_vapp not in inventory: - inventory[vm_vapp] = {} - inventory[vm_vapp]['children'] = [] - inventory[vm_vapp]['hosts'] = [] - if vm_vdc not in inventory: - inventory[vm_vdc] = {} - inventory[vm_vdc]['hosts'] = [] - inventory[vm_vdc]['children'] = [] - if vm_template not in inventory: - inventory[vm_template] = {} - inventory[vm_template]['children'] = [] - inventory[vm_template]['hosts'] = [] - if config.getboolean('defaults', 'get_metadata') is True: - meta_entity = next(link for link in vmcollection['links'] if link['rel'] == 'metadata') - try: - metadata = api_get(meta_entity, config) - if (config.getfloat("api", "version") >= 3.0): - vm_metadata = metadata['metadata'] - else: - vm_metadata = metadata['metadata']['metadata'] - inventory['_meta']['hostvars'][vm_nic] = vm_metadata - except Exception as e: - pass - - inventory[vm_vapp]['children'].append(vmcollection['name']) - inventory[vm_vdc]['children'].append(vmcollection['name']) - inventory[vm_template]['children'].append(vmcollection['name']) - 
inventory['all']['children'].append(vmcollection['name']) - inventory[vmcollection['name']] = [] - inventory[vmcollection['name']].append(vm_nic) - - return inventory - except Exception as e: - # Return empty hosts output - return {'all': {'hosts': []}, '_meta': {'hostvars': {}}} - - -def get_inventory(enterprise, config): - ''' Reads the inventory from cache or Abiquo api ''' - - if cache_available(config): - inv = get_cache('inventory', config) - else: - default_group = os.path.basename(sys.argv[0]).rstrip('.py') - # MAKE ABIQUO API CALLS # - inv = generate_inv_from_api(enterprise, config) - - save_cache(inv, config) - return json.dumps(inv) - - -if __name__ == '__main__': - inventory = {} - enterprise = {} - - # Read config - config = ConfigParser.SafeConfigParser() - for configfilename in [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'abiquo.ini']: - if os.path.exists(configfilename): - config.read(configfilename) - break - - try: - login = api_get(None, config) - enterprise = next(link for link in login['links'] if link['rel'] == 'enterprise') - except Exception as e: - enterprise = None - - if cache_available(config): - inventory = get_cache('inventory', config) - else: - inventory = get_inventory(enterprise, config) - - # return to ansible - sys.stdout.write(str(inventory)) - sys.stdout.flush() diff --git a/scripts/inventory/apache-libcloud.py b/scripts/inventory/apache-libcloud.py deleted file mode 100755 index b05752352f..0000000000 --- a/scripts/inventory/apache-libcloud.py +++ /dev/null @@ -1,336 +0,0 @@ -#!/usr/bin/env python - -# (c) 2013, Sebastien Goasguen -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -''' -Apache Libcloud generic external inventory script -================================= - -Generates inventory that Ansible 
can understand by making API request to -Cloud providers using the Apache libcloud library. - -This script also assumes there is a libcloud.ini file alongside it - -''' - -import sys -import os -import argparse -import re -from time import time - -from ansible.module_utils.six import iteritems, string_types -from ansible.module_utils.six.moves import configparser as ConfigParser -from libcloud.compute.types import Provider -from libcloud.compute.providers import get_driver -import libcloud.security as sec - -import json - - -class LibcloudInventory(object): - def __init__(self): - ''' Main execution path ''' - - # Inventory grouped by instance IDs, tags, security groups, regions, - # and availability zones - self.inventory = {} - - # Index of hostname (address) to instance ID - self.index = {} - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - # Cache - if self.args.refresh_cache: - self.do_api_calls_update_cache() - elif not self.is_cache_valid(): - self.do_api_calls_update_cache() - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of instances for inventory - if len(self.inventory) == 0: - data_to_print = self.get_inventory_from_cache() - else: - data_to_print = self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_index): - return True - - return False - - def read_settings(self): - ''' Reads the settings from the libcloud.ini file ''' - - config = ConfigParser.SafeConfigParser() - libcloud_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libcloud.ini') - libcloud_ini_path = 
os.environ.get('LIBCLOUD_INI_PATH', libcloud_default_ini_path) - config.read(libcloud_ini_path) - - if not config.has_section('driver'): - raise ValueError('libcloud.ini file must contain a [driver] section') - - if config.has_option('driver', 'provider'): - self.provider = config.get('driver', 'provider') - else: - raise ValueError('libcloud.ini does not have a provider defined') - - if config.has_option('driver', 'key'): - self.key = config.get('driver', 'key') - else: - raise ValueError('libcloud.ini does not have a key defined') - - if config.has_option('driver', 'secret'): - self.secret = config.get('driver', 'secret') - else: - raise ValueError('libcloud.ini does not have a secret defined') - - if config.has_option('driver', 'host'): - self.host = config.get('driver', 'host') - if config.has_option('driver', 'secure'): - self.secure = config.get('driver', 'secure') - if config.has_option('driver', 'verify_ssl_cert'): - self.verify_ssl_cert = config.get('driver', 'verify_ssl_cert') - if config.has_option('driver', 'port'): - self.port = config.get('driver', 'port') - if config.has_option('driver', 'path'): - self.path = config.get('driver', 'path') - if config.has_option('driver', 'api_version'): - self.api_version = config.get('driver', 'api_version') - - Driver = get_driver(getattr(Provider, self.provider)) - - self.conn = Driver(key=self.key, secret=self.secret, secure=self.secure, - host=self.host, path=self.path) - - # Cache related - cache_path = config.get('cache', 'cache_path') - self.cache_path_cache = cache_path + "/ansible-libcloud.cache" - self.cache_path_index = cache_path + "/ansible-libcloud.index" - self.cache_max_age = config.getint('cache', 'cache_max_age') - - def parse_cli_args(self): - ''' - Command line argument processing - ''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers') - parser.add_argument('--list', action='store_true', default=True, - help='List instances 
(default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)') - self.args = parser.parse_args() - - def do_api_calls_update_cache(self): - ''' - Do API calls to a location, and save data in cache files - ''' - - self.get_nodes() - - self.write_to_cache(self.inventory, self.cache_path_cache) - self.write_to_cache(self.index, self.cache_path_index) - - def get_nodes(self): - ''' - Gets the list of all nodes - ''' - - for node in self.conn.list_nodes(): - self.add_node(node) - - def get_node(self, node_id): - ''' - Gets details about a specific node - ''' - - return [node for node in self.conn.list_nodes() if node.id == node_id][0] - - def add_node(self, node): - ''' - Adds a node to the inventory and index, as long as it is - addressable - ''' - - # Only want running instances - if node.state != 0: - return - - # Select the best destination address - if not node.public_ips == []: - dest = node.public_ips[0] - if not dest: - # Skip instances we cannot address (e.g. 
private VPC subnet) - return - - # Add to index - self.index[dest] = node.name - - # Inventory: Group by instance ID (always a group of 1) - self.inventory[node.name] = [dest] - ''' - # Inventory: Group by region - self.push(self.inventory, region, dest) - - # Inventory: Group by availability zone - self.push(self.inventory, node.placement, dest) - - # Inventory: Group by instance type - self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest) - ''' - # Inventory: Group by key pair - if node.extra['key_name']: - self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest) - - # Inventory: Group by security group, quick thing to handle single sg - if node.extra['security_group']: - self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest) - - # Inventory: Group by tag - if node.extra['tags']: - for tagkey in node.extra['tags'].keys(): - self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest) - - def get_host_info(self): - ''' - Get variables about a specific host - ''' - - if len(self.index) == 0: - # Need to load index from cache - self.load_index_from_cache() - - if self.args.host not in self.index: - # try updating the cache - self.do_api_calls_update_cache() - if self.args.host not in self.index: - # host migh not exist anymore - return self.json_format_dict({}, True) - - node_id = self.index[self.args.host] - - node = self.get_node(node_id) - instance_vars = {} - for key, value in vars(node).items(): - key = self.to_safe('ec2_' + key) - - # Handle complex types - if isinstance(value, (int, bool)): - instance_vars[key] = value - elif isinstance(value, string_types): - instance_vars[key] = value.strip() - elif value is None: - instance_vars[key] = '' - elif key == 'ec2_region': - instance_vars[key] = value.name - elif key == 'ec2_tags': - for k, v in iteritems(value): - key = self.to_safe('ec2_tag_' + k) - instance_vars[key] = v - elif key == 'ec2_groups': 
- group_ids = [] - group_names = [] - for group in value: - group_ids.append(group.id) - group_names.append(group.name) - instance_vars["ec2_security_group_ids"] = ','.join(group_ids) - instance_vars["ec2_security_group_names"] = ','.join(group_names) - else: - pass - # TODO Product codes if someone finds them useful - # print(key) - # print(type(value)) - # print(value) - - return self.json_format_dict(instance_vars, True) - - def push(self, my_dict, key, element): - ''' - Pushed an element onto an array that may not have been defined in - the dict - ''' - - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def get_inventory_from_cache(self): - ''' - Reads the inventory from the cache file and returns it as a JSON - object - ''' - - cache = open(self.cache_path_cache, 'r') - json_inventory = cache.read() - return json_inventory - - def load_index_from_cache(self): - ''' - Reads the index from the cache file sets self.index - ''' - - cache = open(self.cache_path_index, 'r') - json_index = cache.read() - self.index = json.loads(json_index) - - def write_to_cache(self, data, filename): - ''' - Writes data in JSON format to a file - ''' - - json_data = self.json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def to_safe(self, word): - ''' - Converts 'bad' characters in a string to underscores so they can be - used as Ansible groups - ''' - - return re.sub(r"[^A-Za-z0-9\-]", "_", word) - - def json_format_dict(self, data, pretty=False): - ''' - Converts a dict to a JSON object and dumps it as a formatted - string - ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -def main(): - LibcloudInventory() - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/apstra_aos.ini b/scripts/inventory/apstra_aos.ini deleted file mode 100644 index 1ec1255c9c..0000000000 --- a/scripts/inventory/apstra_aos.ini +++ 
/dev/null @@ -1,20 +0,0 @@ -# Ansible Apstra AOS external inventory script settings -# Dynamic Inventory script parameter can be provided using this file -# Or by using Environment Variables: -# - AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT -# -# This file takes precedence over the Environment Variables -# - -[aos] - -# aos_server = 172.20.62.3 -# port = 8888 -# username = admin -# password = admin - -## Blueprint Mode -# to use the inventory in mode Blueprint, you need to define the blueprint name you want to use - -# blueprint = my-blueprint-l2 -# blueprint_interface = true diff --git a/scripts/inventory/apstra_aos.py b/scripts/inventory/apstra_aos.py deleted file mode 100755 index ce2eb3def7..0000000000 --- a/scripts/inventory/apstra_aos.py +++ /dev/null @@ -1,580 +0,0 @@ -#!/usr/bin/env python -# -# (c) 2017 Apstra Inc, -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -Apstra AOS external inventory script -==================================== - -Ansible has a feature where instead of reading from /etc/ansible/hosts -as a text file, it can query external programs to obtain the list -of hosts, groups the hosts are in, and even variables to assign to each host. - -To use this: - - copy this file over /etc/ansible/hosts and chmod +x the file. 
- - Copy both files (.py and .ini) in your preferred directory - -More information about Ansible Dynamic Inventory here -http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname - -2 modes are currently, supported: **device based** or **blueprint based**: - - For **Device based**, the list of device is taken from the global device list - the serial ID will be used as the inventory_hostname - - For **Blueprint based**, the list of device is taken from the given blueprint - the Node name will be used as the inventory_hostname - -Input parameters parameter can be provided using either with the ini file or by using Environment Variables: -The following list of Environment Variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT -The config file takes precedence over the Environment Variables - -Tested with Apstra AOS 1.1 - -This script has been inspired by the cobbler.py inventory. thanks - -Author: Damien Garros (@dgarros) -Version: 0.2.0 -""" -import json -import os -import re -import sys - -try: - import argparse - HAS_ARGPARSE = True -except ImportError: - HAS_ARGPARSE = False - -try: - from apstra.aosom.session import Session - HAS_AOS_PYEZ = True -except ImportError: - HAS_AOS_PYEZ = False - -from ansible.module_utils.six.moves import configparser - - -""" -## -Expected output format in Device mode -{ - "Cumulus": { - "hosts": [ - "52540073956E", - "52540022211A" - ], - "vars": {} - }, - "EOS": { - "hosts": [ - "5254001CAFD8", - "525400DDDF72" - ], - "vars": {} - }, - "Generic Model": { - "hosts": [ - "525400E5486D" - ], - "vars": {} - }, - "Ubuntu GNU/Linux": { - "hosts": [ - "525400E5486D" - ], - "vars": {} - }, - "VX": { - "hosts": [ - "52540073956E", - "52540022211A" - ], - "vars": {} - }, - "_meta": { - "hostvars": { - "5254001CAFD8": { - "agent_start_time": "2017-02-03T00:49:16.000000Z", - "ansible_ssh_host": "172.20.52.6", - "aos_hcl_model": "Arista_vEOS", - 
"aos_server": "", - "aos_version": "AOS_1.1.1_OB.5", - "comm_state": "on", - "device_start_time": "2017-02-03T00:47:58.454480Z", - "domain_name": "", - "error_message": "", - "fqdn": "localhost", - "hostname": "localhost", - "hw_model": "vEOS", - "hw_version": "", - "is_acknowledged": false, - "mgmt_ifname": "Management1", - "mgmt_ipaddr": "172.20.52.6", - "mgmt_macaddr": "52:54:00:1C:AF:D8", - "os_arch": "x86_64", - "os_family": "EOS", - "os_version": "4.16.6M", - "os_version_info": { - "build": "6M", - "major": "4", - "minor": "16" - }, - "serial_number": "5254001CAFD8", - "state": "OOS-QUARANTINED", - "vendor": "Arista" - }, - "52540022211A": { - "agent_start_time": "2017-02-03T00:45:22.000000Z", - "ansible_ssh_host": "172.20.52.7", - "aos_hcl_model": "Cumulus_VX", - "aos_server": "172.20.52.3", - "aos_version": "AOS_1.1.1_OB.5", - "comm_state": "on", - "device_start_time": "2017-02-03T00:45:11.019189Z", - "domain_name": "", - "error_message": "", - "fqdn": "cumulus", - "hostname": "cumulus", - "hw_model": "VX", - "hw_version": "", - "is_acknowledged": false, - "mgmt_ifname": "eth0", - "mgmt_ipaddr": "172.20.52.7", - "mgmt_macaddr": "52:54:00:22:21:1a", - "os_arch": "x86_64", - "os_family": "Cumulus", - "os_version": "3.1.1", - "os_version_info": { - "build": "1", - "major": "3", - "minor": "1" - }, - "serial_number": "52540022211A", - "state": "OOS-QUARANTINED", - "vendor": "Cumulus" - }, - "52540073956E": { - "agent_start_time": "2017-02-03T00:45:19.000000Z", - "ansible_ssh_host": "172.20.52.8", - "aos_hcl_model": "Cumulus_VX", - "aos_server": "172.20.52.3", - "aos_version": "AOS_1.1.1_OB.5", - "comm_state": "on", - "device_start_time": "2017-02-03T00:45:11.030113Z", - "domain_name": "", - "error_message": "", - "fqdn": "cumulus", - "hostname": "cumulus", - "hw_model": "VX", - "hw_version": "", - "is_acknowledged": false, - "mgmt_ifname": "eth0", - "mgmt_ipaddr": "172.20.52.8", - "mgmt_macaddr": "52:54:00:73:95:6e", - "os_arch": "x86_64", - "os_family": 
"Cumulus", - "os_version": "3.1.1", - "os_version_info": { - "build": "1", - "major": "3", - "minor": "1" - }, - "serial_number": "52540073956E", - "state": "OOS-QUARANTINED", - "vendor": "Cumulus" - }, - "525400DDDF72": { - "agent_start_time": "2017-02-03T00:49:07.000000Z", - "ansible_ssh_host": "172.20.52.5", - "aos_hcl_model": "Arista_vEOS", - "aos_server": "", - "aos_version": "AOS_1.1.1_OB.5", - "comm_state": "on", - "device_start_time": "2017-02-03T00:47:46.929921Z", - "domain_name": "", - "error_message": "", - "fqdn": "localhost", - "hostname": "localhost", - "hw_model": "vEOS", - "hw_version": "", - "is_acknowledged": false, - "mgmt_ifname": "Management1", - "mgmt_ipaddr": "172.20.52.5", - "mgmt_macaddr": "52:54:00:DD:DF:72", - "os_arch": "x86_64", - "os_family": "EOS", - "os_version": "4.16.6M", - "os_version_info": { - "build": "6M", - "major": "4", - "minor": "16" - }, - "serial_number": "525400DDDF72", - "state": "OOS-QUARANTINED", - "vendor": "Arista" - }, - "525400E5486D": { - "agent_start_time": "2017-02-02T18:44:42.000000Z", - "ansible_ssh_host": "172.20.52.4", - "aos_hcl_model": "Generic_Server_1RU_1x10G", - "aos_server": "172.20.52.3", - "aos_version": "AOS_1.1.1_OB.5", - "comm_state": "on", - "device_start_time": "2017-02-02T21:11:25.188734Z", - "domain_name": "", - "error_message": "", - "fqdn": "localhost", - "hostname": "localhost", - "hw_model": "Generic Model", - "hw_version": "pc-i440fx-trusty", - "is_acknowledged": false, - "mgmt_ifname": "eth0", - "mgmt_ipaddr": "172.20.52.4", - "mgmt_macaddr": "52:54:00:e5:48:6d", - "os_arch": "x86_64", - "os_family": "Ubuntu GNU/Linux", - "os_version": "14.04 LTS", - "os_version_info": { - "build": "", - "major": "14", - "minor": "04" - }, - "serial_number": "525400E5486D", - "state": "OOS-QUARANTINED", - "vendor": "Generic Manufacturer" - } - } - }, - "all": { - "hosts": [ - "5254001CAFD8", - "52540073956E", - "525400DDDF72", - "525400E5486D", - "52540022211A" - ], - "vars": {} - }, - "vEOS": { - 
"hosts": [ - "5254001CAFD8", - "525400DDDF72" - ], - "vars": {} - } -} -""" - - -def fail(msg): - sys.stderr.write("%s\n" % msg) - sys.exit(1) - - -class AosInventory(object): - - def __init__(self): - - """ Main execution path """ - - if not HAS_AOS_PYEZ: - raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez') - if not HAS_ARGPARSE: - raise Exception('argparse is not installed. Please install the argparse library or upgrade to python-2.7') - - # Initialize inventory - self.inventory = dict() # A list of groups and the hosts in that group - self.inventory['_meta'] = dict() - self.inventory['_meta']['hostvars'] = dict() - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - # ---------------------------------------------------- - # Open session to AOS - # ---------------------------------------------------- - aos = Session(server=self.aos_server, - port=self.aos_server_port, - user=self.aos_username, - passwd=self.aos_password) - - aos.login() - - # Save session information in variables of group all - self.add_var_to_group('all', 'aos_session', aos.session) - - # Add the AOS server itself in the inventory - self.add_host_to_group("all", 'aos') - self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server) - self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password) - self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username) - - # ---------------------------------------------------- - # Build the inventory - # 2 modes are supported: device based or blueprint based - # - For device based, the list of device is taken from the global device list - # the serial ID will be used as the inventory_hostname - # - For Blueprint based, the list of device is taken from the given blueprint - # the Node name will be used as the inventory_hostname - # ---------------------------------------------------- - if self.aos_blueprint: - - bp = aos.Blueprints[self.aos_blueprint] 
- if bp.exists is False: - fail("Unable to find the Blueprint: %s" % self.aos_blueprint) - - for dev_name, dev_id in bp.params['devices'].value.items(): - - self.add_host_to_group('all', dev_name) - device = aos.Devices.find(uid=dev_id) - - if 'facts' in device.value.keys(): - self.add_device_facts_to_var(dev_name, device) - - # Define admin State and Status - if 'user_config' in device.value.keys(): - if 'admin_state' in device.value['user_config'].keys(): - self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state']) - - self.add_device_status_to_var(dev_name, device) - - # Go over the contents data structure - for node in bp.contents['system']['nodes']: - if node['display_name'] == dev_name: - self.add_host_to_group(node['role'], dev_name) - - # Check for additional attribute to import - attributes_to_import = [ - 'loopback_ip', - 'asn', - 'role', - 'position', - ] - for attr in attributes_to_import: - if attr in node.keys(): - self.add_var_to_host(dev_name, attr, node[attr]) - - # if blueprint_interface is enabled in the configuration - # Collect links information - if self.aos_blueprint_int: - interfaces = dict() - - for link in bp.contents['system']['links']: - # each link has 2 sides [0,1], and it's unknown which one match this device - # at first we assume, first side match(0) and peer is (1) - peer_id = 1 - - for side in link['endpoints']: - if side['display_name'] == dev_name: - - # import local information first - int_name = side['interface'] - - # init dict - interfaces[int_name] = dict() - if 'ip' in side.keys(): - interfaces[int_name]['ip'] = side['ip'] - - if 'interface' in side.keys(): - interfaces[int_name]['name'] = side['interface'] - - if 'display_name' in link['endpoints'][peer_id].keys(): - interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name'] - - if 'ip' in link['endpoints'][peer_id].keys(): - interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip'] - - if 'type' in 
link['endpoints'][peer_id].keys(): - interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type'] - - else: - # if we haven't match the first time, prepare the peer_id - # for the second loop iteration - peer_id = 0 - - self.add_var_to_host(dev_name, 'interfaces', interfaces) - - else: - for device in aos.Devices: - # If not reacheable, create by key and - # If reacheable, create by hostname - - self.add_host_to_group('all', device.name) - - # populate information for this host - self.add_device_status_to_var(device.name, device) - - if 'user_config' in device.value.keys(): - for key, value in device.value['user_config'].items(): - self.add_var_to_host(device.name, key, value) - - # Based on device status online|offline, collect facts as well - if device.value['status']['comm_state'] == 'on': - - if 'facts' in device.value.keys(): - self.add_device_facts_to_var(device.name, device) - - # Check if device is associated with a blueprint - # if it's create a new group - if 'blueprint_active' in device.value['status'].keys(): - if 'blueprint_id' in device.value['status'].keys(): - bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id']) - - if bp: - self.add_host_to_group(bp.name, device.name) - - # ---------------------------------------------------- - # Convert the inventory and return a JSON String - # ---------------------------------------------------- - data_to_print = "" - data_to_print += self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def read_settings(self): - """ Reads the settings from the apstra_aos.ini file """ - - config = configparser.ConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini') - - # Default Values - self.aos_blueprint = False - self.aos_blueprint_int = True - self.aos_username = 'admin' - self.aos_password = 'admin' - self.aos_server_port = 8888 - - # Try to reach all parameters from File, if not available try from ENV - try: - self.aos_server = 
config.get('aos', 'aos_server') - except Exception: - if 'AOS_SERVER' in os.environ.keys(): - self.aos_server = os.environ['AOS_SERVER'] - - try: - self.aos_server_port = config.get('aos', 'port') - except Exception: - if 'AOS_PORT' in os.environ.keys(): - self.aos_server_port = os.environ['AOS_PORT'] - - try: - self.aos_username = config.get('aos', 'username') - except Exception: - if 'AOS_USERNAME' in os.environ.keys(): - self.aos_username = os.environ['AOS_USERNAME'] - - try: - self.aos_password = config.get('aos', 'password') - except Exception: - if 'AOS_PASSWORD' in os.environ.keys(): - self.aos_password = os.environ['AOS_PASSWORD'] - - try: - self.aos_blueprint = config.get('aos', 'blueprint') - except Exception: - if 'AOS_BLUEPRINT' in os.environ.keys(): - self.aos_blueprint = os.environ['AOS_BLUEPRINT'] - - try: - if config.get('aos', 'blueprint_interface') in ['false', 'no']: - self.aos_blueprint_int = False - except Exception: - pass - - def parse_cli_args(self): - """ Command line argument processing """ - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - self.args = parser.parse_args() - - def json_format_dict(self, data, pretty=False): - """ Converts a dict to a JSON object and dumps it as a formatted string """ - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - def add_host_to_group(self, group, host): - - # Cleanup group name first - clean_group = self.cleanup_group_name(group) - - # Check if the group exist, if not initialize it - if clean_group not in self.inventory.keys(): - self.inventory[clean_group] = {} - self.inventory[clean_group]['hosts'] = [] - self.inventory[clean_group]['vars'] = {} - - 
self.inventory[clean_group]['hosts'].append(host) - - def add_var_to_host(self, host, var, value): - - # Check if the host exist, if not initialize it - if host not in self.inventory['_meta']['hostvars'].keys(): - self.inventory['_meta']['hostvars'][host] = {} - - self.inventory['_meta']['hostvars'][host][var] = value - - def add_var_to_group(self, group, var, value): - - # Cleanup group name first - clean_group = self.cleanup_group_name(group) - - # Check if the group exist, if not initialize it - if clean_group not in self.inventory.keys(): - self.inventory[clean_group] = {} - self.inventory[clean_group]['hosts'] = [] - self.inventory[clean_group]['vars'] = {} - - self.inventory[clean_group]['vars'][var] = value - - def add_device_facts_to_var(self, device_name, device): - - # Populate variables for this host - self.add_var_to_host(device_name, - 'ansible_ssh_host', - device.value['facts']['mgmt_ipaddr']) - - self.add_var_to_host(device_name, 'id', device.id) - - # self.add_host_to_group('all', device.name) - for key, value in device.value['facts'].items(): - self.add_var_to_host(device_name, key, value) - - if key == 'os_family': - self.add_host_to_group(value, device_name) - elif key == 'hw_model': - self.add_host_to_group(value, device_name) - - def cleanup_group_name(self, group_name): - """ - Clean up group name by : - - Replacing all non-alphanumeric caracter by underscore - - Converting to lowercase - """ - - rx = re.compile(r'\W+') - clean_group = rx.sub('_', group_name).lower() - - return clean_group - - def add_device_status_to_var(self, device_name, device): - - if 'status' in device.value.keys(): - for key, value in device.value['status'].items(): - self.add_var_to_host(device.name, key, value) - - -# Run the script -if __name__ == '__main__': - AosInventory() diff --git a/scripts/inventory/azure_rm.ini b/scripts/inventory/azure_rm.ini deleted file mode 100644 index 6edd9b981b..0000000000 --- a/scripts/inventory/azure_rm.ini +++ /dev/null @@ -1,23 
+0,0 @@ -# -# Configuration file for azure_rm.py -# -[azure] -# Control which resource groups are included. By default all resources groups are included. -# Set resource_groups to a comma separated list of resource groups names. -#resource_groups= - -# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs -#tags= - -# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus) -#locations= - -# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance. -include_powerstate=yes - -# Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1. -group_by_resource_group=yes -group_by_location=yes -group_by_security_group=yes -group_by_os_family=yes -group_by_tag=yes diff --git a/scripts/inventory/azure_rm.py b/scripts/inventory/azure_rm.py deleted file mode 100755 index ef9e7b1da4..0000000000 --- a/scripts/inventory/azure_rm.py +++ /dev/null @@ -1,962 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2016 Matt Davis, -# Chris Houseknecht, -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Important note (2018/10) -======================== -This inventory script is in maintenance mode: only critical bug fixes but no new features. -There's new Azure external inventory script at https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/inventory/azure_rm.py, -with better performance and latest new features. Please go to the link to get latest Azure inventory. - -Azure External Inventory Script -=============================== -Generates dynamic inventory by making API requests to the Azure Resource -Manager using the Azure Python SDK. 
For instruction on installing the -Azure Python SDK see https://azure-sdk-for-python.readthedocs.io/ - -Authentication --------------- -The order of precedence is command line arguments, environment variables, -and finally the [default] profile found in ~/.azure/credentials. - -If using a credentials file, it should be an ini formatted file with one or -more sections, which we refer to as profiles. The script looks for a -[default] section, if a profile is not specified either on the command line -or with an environment variable. The keys in a profile will match the -list of command line arguments below. - -For command line arguments and environment variables specify a profile found -in your ~/.azure/credentials file, or a service principal or Active Directory -user. - -Command line arguments: - - profile - - client_id - - secret - - subscription_id - - tenant - - ad_user - - password - - cloud_environment - - adfs_authority_url - -Environment variables: - - AZURE_PROFILE - - AZURE_CLIENT_ID - - AZURE_SECRET - - AZURE_SUBSCRIPTION_ID - - AZURE_TENANT - - AZURE_AD_USER - - AZURE_PASSWORD - - AZURE_CLOUD_ENVIRONMENT - - AZURE_ADFS_AUTHORITY_URL - -Run for Specific Host ------------------------ -When run for a specific host using the --host option, a resource group is -required. 
For a specific host, this script returns the following variables: - -{ - "ansible_host": "XXX.XXX.XXX.XXX", - "computer_name": "computer_name2", - "fqdn": null, - "id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name", - "image": { - "offer": "CentOS", - "publisher": "OpenLogic", - "sku": "7.1", - "version": "latest" - }, - "location": "westus", - "mac_address": "00-00-5E-00-53-FE", - "name": "object-name", - "network_interface": "interface-name", - "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1", - "network_security_group": null, - "network_security_group_id": null, - "os_disk": { - "name": "object-name", - "operating_system_type": "Linux" - }, - "plan": null, - "powerstate": "running", - "private_ip": "172.26.3.6", - "private_ip_alloc_method": "Static", - "provisioning_state": "Succeeded", - "public_ip": "XXX.XXX.XXX.XXX", - "public_ip_alloc_method": "Static", - "public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name", - "public_ip_name": "object-name", - "resource_group": "galaxy-production", - "security_group": "object-name", - "security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name", - "tags": { - "db": "database" - }, - "type": "Microsoft.Compute/virtualMachines", - "virtual_machine_size": "Standard_DS4" -} - -Groups ------- -When run in --list mode, instances are grouped by the following categories: - - azure - - location - - resource_group - - security_group - - tag key - - tag key_value - -Control groups using azure_rm.ini or set environment variables: - -AZURE_GROUP_BY_RESOURCE_GROUP=yes -AZURE_GROUP_BY_LOCATION=yes -AZURE_GROUP_BY_SECURITY_GROUP=yes -AZURE_GROUP_BY_TAG=yes - -Select hosts within specific 
resource groups by assigning a comma separated list to: - -AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b - -Select hosts for specific tag key by assigning a comma separated list of tag keys to: - -AZURE_TAGS=key1,key2,key3 - -Select hosts for specific locations: - -AZURE_LOCATIONS=eastus,westus,eastus2 - -Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to: - -AZURE_TAGS=key1:value1,key2:value2 - -If you don't need the powerstate, you can improve performance by turning off powerstate fetching: -AZURE_INCLUDE_POWERSTATE=no - -azure_rm.ini ------------- -As mentioned above, you can control execution using environment variables or a .ini file. A sample -azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case -'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify -a different path for the .ini file, define the AZURE_INI_PATH environment variable: - - export AZURE_INI_PATH=/path/to/custom.ini - -Powerstate: ------------ -The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is -up. If the value is anything other than 'running', the machine is down, and will be unreachable. 
- -Examples: ---------- - Execute /bin/uname on all instances in the galaxy-qa resource group - $ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a" - - Use the inventory script to print instance specific information - $ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty - - Use with a playbook - $ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa - - -Insecure Platform Warning -------------------------- -If you receive InsecurePlatformWarning from urllib3, install the -requests security packages: - - pip install requests[security] - - -author: - - Chris Houseknecht (@chouseknecht) - - Matt Davis (@nitzmahone) - -Company: Ansible by Red Hat - -Version: 1.0.0 -''' - -import argparse -import json -import os -import re -import sys -import inspect - -from os.path import expanduser -from ansible.module_utils.six.moves import configparser as cp -import ansible.module_utils.six.moves.urllib.parse as urlparse - -HAS_AZURE = True -HAS_AZURE_EXC = None -HAS_AZURE_CLI_CORE = True -CLIError = None - -try: - from msrestazure.azure_active_directory import AADTokenCredentials - from msrestazure.azure_exceptions import CloudError - from msrestazure.azure_active_directory import MSIAuthentication - from msrestazure import azure_cloud - from azure.mgmt.compute import __version__ as azure_compute_version - from azure.common import AzureMissingResourceHttpError, AzureHttpError - from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials - from azure.mgmt.network import NetworkManagementClient - from azure.mgmt.resource.resources import ResourceManagementClient - from azure.mgmt.resource.subscriptions import SubscriptionClient - from azure.mgmt.compute import ComputeManagementClient - from adal.authentication_context import AuthenticationContext -except ImportError as exc: - HAS_AZURE_EXC = exc - HAS_AZURE = False - -try: - from azure.cli.core.util import CLIError - from azure.common.credentials 
import get_azure_cli_credentials, get_cli_profile - from azure.common.cloud import get_cli_active_cloud -except ImportError: - HAS_AZURE_CLI_CORE = False - CLIError = Exception - -try: - from ansible.release import __version__ as ansible_version -except ImportError: - ansible_version = 'unknown' - -AZURE_CREDENTIAL_ENV_MAPPING = dict( - profile='AZURE_PROFILE', - subscription_id='AZURE_SUBSCRIPTION_ID', - client_id='AZURE_CLIENT_ID', - secret='AZURE_SECRET', - tenant='AZURE_TENANT', - ad_user='AZURE_AD_USER', - password='AZURE_PASSWORD', - cloud_environment='AZURE_CLOUD_ENVIRONMENT', - adfs_authority_url='AZURE_ADFS_AUTHORITY_URL' -) - -AZURE_CONFIG_SETTINGS = dict( - resource_groups='AZURE_RESOURCE_GROUPS', - tags='AZURE_TAGS', - locations='AZURE_LOCATIONS', - include_powerstate='AZURE_INCLUDE_POWERSTATE', - group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP', - group_by_location='AZURE_GROUP_BY_LOCATION', - group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP', - group_by_tag='AZURE_GROUP_BY_TAG', - group_by_os_family='AZURE_GROUP_BY_OS_FAMILY', - use_private_ip='AZURE_USE_PRIVATE_IP' -) - -AZURE_MIN_VERSION = "2.0.0" -ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version) - - -def azure_id_to_dict(id): - pieces = re.sub(r'^\/', '', id).split('/') - result = {} - index = 0 - while index < len(pieces) - 1: - result[pieces[index]] = pieces[index + 1] - index += 1 - return result - - -class AzureRM(object): - - def __init__(self, args): - self._args = args - self._cloud_environment = None - self._compute_client = None - self._resource_client = None - self._network_client = None - self._adfs_authority_url = None - self._resource = None - - self.debug = False - if args.debug: - self.debug = True - - self.credentials = self._get_credentials(args) - if not self.credentials: - self.fail("Failed to get credentials. 
Either pass as parameters, set environment variables, " - "or define a profile in ~/.azure/credentials.") - - # if cloud_environment specified, look up/build Cloud object - raw_cloud_env = self.credentials.get('cloud_environment') - if not raw_cloud_env: - self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default - else: - # try to look up "well-known" values via the name attribute on azure_cloud members - all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)] - matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env] - if len(matched_clouds) == 1: - self._cloud_environment = matched_clouds[0] - elif len(matched_clouds) > 1: - self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env)) - else: - if not urlparse.urlparse(raw_cloud_env).scheme: - self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds])) - try: - self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env) - except Exception as e: - self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message)) - - if self.credentials.get('subscription_id', None) is None: - self.fail("Credentials did not include a subscription_id value.") - self.log("setting subscription_id") - self.subscription_id = self.credentials['subscription_id'] - - # get authentication authority - # for adfs, user could pass in authority or not. 
- # for others, use default authority from cloud environment - if self.credentials.get('adfs_authority_url'): - self._adfs_authority_url = self.credentials.get('adfs_authority_url') - else: - self._adfs_authority_url = self._cloud_environment.endpoints.active_directory - - # get resource from cloud environment - self._resource = self._cloud_environment.endpoints.active_directory_resource_id - - if self.credentials.get('credentials'): - self.azure_credentials = self.credentials.get('credentials') - elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'): - self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'], - secret=self.credentials['secret'], - tenant=self.credentials['tenant'], - cloud_environment=self._cloud_environment) - - elif self.credentials.get('ad_user') is not None and \ - self.credentials.get('password') is not None and \ - self.credentials.get('client_id') is not None and \ - self.credentials.get('tenant') is not None: - - self.azure_credentials = self.acquire_token_with_username_password( - self._adfs_authority_url, - self._resource, - self.credentials['ad_user'], - self.credentials['password'], - self.credentials['client_id'], - self.credentials['tenant']) - - elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None: - tenant = self.credentials.get('tenant') - if not tenant: - tenant = 'common' - self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], - self.credentials['password'], - tenant=tenant, - cloud_environment=self._cloud_environment) - - else: - self.fail("Failed to authenticate with provided credentials. Some attributes were missing. 
" - "Credentials must include client_id, secret and tenant or ad_user and password, or " - "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or " - "be logged in using AzureCLI.") - - def log(self, msg): - if self.debug: - print(msg + u'\n') - - def fail(self, msg): - raise Exception(msg) - - def _get_profile(self, profile="default"): - path = expanduser("~") - path += "/.azure/credentials" - try: - config = cp.ConfigParser() - config.read(path) - except Exception as exc: - self.fail("Failed to access {0}. Check that the file exists and you have read " - "access. {1}".format(path, str(exc))) - credentials = dict() - for key in AZURE_CREDENTIAL_ENV_MAPPING: - try: - credentials[key] = config.get(profile, key, raw=True) - except Exception: - pass - - if credentials.get('client_id') is not None or credentials.get('ad_user') is not None: - return credentials - - return None - - def _get_env_credentials(self): - env_credentials = dict() - for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): - env_credentials[attribute] = os.environ.get(env_variable, None) - - if env_credentials['profile'] is not None: - credentials = self._get_profile(env_credentials['profile']) - return credentials - - if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None: - return env_credentials - - return None - - def _get_azure_cli_credentials(self): - credentials, subscription_id = get_azure_cli_credentials() - cloud_environment = get_cli_active_cloud() - - cli_credentials = { - 'credentials': credentials, - 'subscription_id': subscription_id, - 'cloud_environment': cloud_environment - } - return cli_credentials - - def _get_msi_credentials(self, subscription_id_param=None): - credentials = MSIAuthentication() - subscription_id_param = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None) - try: - # try to get the subscription in MSI to test whether MSI is enabled 
- subscription_client = SubscriptionClient(credentials) - subscription = next(subscription_client.subscriptions.list()) - subscription_id = str(subscription.subscription_id) - return { - 'credentials': credentials, - 'subscription_id': subscription_id_param or subscription_id - } - except Exception as exc: - return None - - def _get_credentials(self, params): - # Get authentication credentials. - # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials. - - self.log('Getting credentials') - - arg_credentials = dict() - for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): - arg_credentials[attribute] = getattr(params, attribute) - - # try module params - if arg_credentials['profile'] is not None: - self.log('Retrieving credentials with profile parameter.') - credentials = self._get_profile(arg_credentials['profile']) - return credentials - - if arg_credentials['client_id'] is not None: - self.log('Received credentials from parameters.') - return arg_credentials - - if arg_credentials['ad_user'] is not None: - self.log('Received credentials from parameters.') - return arg_credentials - - # try environment - env_credentials = self._get_env_credentials() - if env_credentials: - self.log('Received credentials from env.') - return env_credentials - - # try default profile from ~./azure/credentials - default_credentials = self._get_profile() - if default_credentials: - self.log('Retrieved default profile credentials from ~/.azure/credentials.') - return default_credentials - - msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id')) - if msi_credentials: - self.log('Retrieved credentials from MSI.') - return msi_credentials - - try: - if HAS_AZURE_CLI_CORE: - self.log('Retrieving credentials from AzureCLI profile') - cli_credentials = self._get_azure_cli_credentials() - return cli_credentials - except CLIError as ce: - self.log('Error getting AzureCLI profile credentials - 
{0}'.format(ce)) - - return None - - def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant): - authority_uri = authority - - if tenant is not None: - authority_uri = authority + '/' + tenant - - context = AuthenticationContext(authority_uri) - token_response = context.acquire_token_with_username_password(resource, username, password, client_id) - return AADTokenCredentials(token_response) - - def _register(self, key): - try: - # We have to perform the one-time registration here. Otherwise, we receive an error the first - # time we attempt to use the requested client. - resource_client = self.rm_client - resource_client.providers.register(key) - except Exception as exc: - self.log("One-time registration of {0} failed - {1}".format(key, str(exc))) - self.log("You might need to register {0} using an admin account".format(key)) - self.log(("To register a provider using the Python CLI: " - "https://docs.microsoft.com/azure/azure-resource-manager/" - "resource-manager-common-deployment-errors#noregisteredproviderfound")) - - def get_mgmt_svc_client(self, client_type, base_url, api_version): - client = client_type(self.azure_credentials, - self.subscription_id, - base_url=base_url, - api_version=api_version) - client.config.add_user_agent(ANSIBLE_USER_AGENT) - return client - - @property - def network_client(self): - self.log('Getting network client') - if not self._network_client: - self._network_client = self.get_mgmt_svc_client(NetworkManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-06-01') - self._register('Microsoft.Network') - return self._network_client - - @property - def rm_client(self): - self.log('Getting resource manager client') - if not self._resource_client: - self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-05-10') - return self._resource_client - - @property - def compute_client(self): - 
self.log('Getting compute client') - if not self._compute_client: - self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-03-30') - self._register('Microsoft.Compute') - return self._compute_client - - -class AzureInventory(object): - - def __init__(self): - - self._args = self._parse_cli_args() - - try: - rm = AzureRM(self._args) - except Exception as e: - sys.exit("{0}".format(str(e))) - - self._compute_client = rm.compute_client - self._network_client = rm.network_client - self._resource_client = rm.rm_client - self._security_groups = None - - self.resource_groups = [] - self.tags = None - self.locations = None - self.replace_dash_in_groups = False - self.group_by_resource_group = True - self.group_by_location = True - self.group_by_os_family = True - self.group_by_security_group = True - self.group_by_tag = True - self.include_powerstate = True - self.use_private_ip = False - - self._inventory = dict( - _meta=dict( - hostvars=dict() - ), - azure=[] - ) - - self._get_settings() - - if self._args.resource_groups: - self.resource_groups = self._args.resource_groups.split(',') - - if self._args.tags: - self.tags = self._args.tags.split(',') - - if self._args.locations: - self.locations = self._args.locations.split(',') - - if self._args.no_powerstate: - self.include_powerstate = False - - self.get_inventory() - print(self._json_format_dict(pretty=self._args.pretty)) - sys.exit(0) - - def _parse_cli_args(self): - # Parse command line arguments - parser = argparse.ArgumentParser( - description='Produce an Ansible Inventory file for an Azure subscription') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--debug', action='store_true', default=False, - help='Send debug messages to STDOUT') - parser.add_argument('--host', action='store', - help='Get all information about an instance') - 
parser.add_argument('--pretty', action='store_true', default=False, - help='Pretty print JSON output(default: False)') - parser.add_argument('--profile', action='store', - help='Azure profile contained in ~/.azure/credentials') - parser.add_argument('--subscription_id', action='store', - help='Azure Subscription Id') - parser.add_argument('--client_id', action='store', - help='Azure Client Id ') - parser.add_argument('--secret', action='store', - help='Azure Client Secret') - parser.add_argument('--tenant', action='store', - help='Azure Tenant Id') - parser.add_argument('--ad_user', action='store', - help='Active Directory User') - parser.add_argument('--password', action='store', - help='password') - parser.add_argument('--adfs_authority_url', action='store', - help='Azure ADFS authority url') - parser.add_argument('--cloud_environment', action='store', - help='Azure Cloud Environment name or metadata discovery URL') - parser.add_argument('--resource-groups', action='store', - help='Return inventory for comma separated list of resource group names') - parser.add_argument('--tags', action='store', - help='Return inventory for comma separated list of tag key:value pairs') - parser.add_argument('--locations', action='store', - help='Return inventory for comma separated list of locations') - parser.add_argument('--no-powerstate', action='store_true', default=False, - help='Do not include the power state of each virtual host') - return parser.parse_args() - - def get_inventory(self): - if len(self.resource_groups) > 0: - # get VMs for requested resource groups - for resource_group in self.resource_groups: - try: - virtual_machines = self._compute_client.virtual_machines.list(resource_group.lower()) - except Exception as exc: - sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc))) - if self._args.host or self.tags: - selected_machines = self._selected_machines(virtual_machines) - self._load_machines(selected_machines) 
- else: - self._load_machines(virtual_machines) - else: - # get all VMs within the subscription - try: - virtual_machines = self._compute_client.virtual_machines.list_all() - except Exception as exc: - sys.exit("Error: fetching virtual machines - {0}".format(str(exc))) - - if self._args.host or self.tags or self.locations: - selected_machines = self._selected_machines(virtual_machines) - self._load_machines(selected_machines) - else: - self._load_machines(virtual_machines) - - def _load_machines(self, machines): - for machine in machines: - id_dict = azure_id_to_dict(machine.id) - - # TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets - # fixed, we should remove the .lower(). Opened Issue - # #574: https://github.com/Azure/azure-sdk-for-python/issues/574 - resource_group = id_dict['resourceGroups'].lower() - - if self.group_by_security_group: - self._get_security_groups(resource_group) - - host_vars = dict( - ansible_host=None, - private_ip=None, - private_ip_alloc_method=None, - public_ip=None, - public_ip_name=None, - public_ip_id=None, - public_ip_alloc_method=None, - fqdn=None, - location=machine.location, - name=machine.name, - type=machine.type, - id=machine.id, - tags=machine.tags, - network_interface_id=None, - network_interface=None, - resource_group=resource_group, - mac_address=None, - plan=(machine.plan.name if machine.plan else None), - virtual_machine_size=machine.hardware_profile.vm_size, - computer_name=(machine.os_profile.computer_name if machine.os_profile else None), - provisioning_state=machine.provisioning_state, - ) - - host_vars['os_disk'] = dict( - name=machine.storage_profile.os_disk.name, - operating_system_type=machine.storage_profile.os_disk.os_type.value.lower() - ) - - if self.include_powerstate: - host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name) - - if machine.storage_profile.image_reference: - host_vars['image'] = dict( - 
offer=machine.storage_profile.image_reference.offer, - publisher=machine.storage_profile.image_reference.publisher, - sku=machine.storage_profile.image_reference.sku, - version=machine.storage_profile.image_reference.version - ) - - # Add windows details - if machine.os_profile is not None and machine.os_profile.windows_configuration is not None: - host_vars['ansible_connection'] = 'winrm' - host_vars['windows_auto_updates_enabled'] = \ - machine.os_profile.windows_configuration.enable_automatic_updates - host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone - host_vars['windows_rm'] = None - if machine.os_profile.windows_configuration.win_rm is not None: - host_vars['windows_rm'] = dict(listeners=None) - if machine.os_profile.windows_configuration.win_rm.listeners is not None: - host_vars['windows_rm']['listeners'] = [] - for listener in machine.os_profile.windows_configuration.win_rm.listeners: - host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol.name, - certificate_url=listener.certificate_url)) - - for interface in machine.network_profile.network_interfaces: - interface_reference = self._parse_ref_id(interface.id) - network_interface = self._network_client.network_interfaces.get( - interface_reference['resourceGroups'], - interface_reference['networkInterfaces']) - if network_interface.primary: - if self.group_by_security_group and \ - self._security_groups[resource_group].get(network_interface.id, None): - host_vars['security_group'] = \ - self._security_groups[resource_group][network_interface.id]['name'] - host_vars['security_group_id'] = \ - self._security_groups[resource_group][network_interface.id]['id'] - host_vars['network_interface'] = network_interface.name - host_vars['network_interface_id'] = network_interface.id - host_vars['mac_address'] = network_interface.mac_address - for ip_config in network_interface.ip_configurations: - host_vars['private_ip'] = ip_config.private_ip_address - 
host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method - if self.use_private_ip: - host_vars['ansible_host'] = ip_config.private_ip_address - if ip_config.public_ip_address: - public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id) - public_ip_address = self._network_client.public_ip_addresses.get( - public_ip_reference['resourceGroups'], - public_ip_reference['publicIPAddresses']) - if not self.use_private_ip: - host_vars['ansible_host'] = public_ip_address.ip_address - host_vars['public_ip'] = public_ip_address.ip_address - host_vars['public_ip_name'] = public_ip_address.name - host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method - host_vars['public_ip_id'] = public_ip_address.id - if public_ip_address.dns_settings: - host_vars['fqdn'] = public_ip_address.dns_settings.fqdn - - self._add_host(host_vars) - - def _selected_machines(self, virtual_machines): - selected_machines = [] - for machine in virtual_machines: - if self._args.host and self._args.host == machine.name: - selected_machines.append(machine) - if self.tags and self._tags_match(machine.tags, self.tags): - selected_machines.append(machine) - if self.locations and machine.location in self.locations: - selected_machines.append(machine) - return selected_machines - - def _get_security_groups(self, resource_group): - ''' For a given resource_group build a mapping of network_interface.id to security_group name ''' - if not self._security_groups: - self._security_groups = dict() - if not self._security_groups.get(resource_group): - self._security_groups[resource_group] = dict() - for group in self._network_client.network_security_groups.list(resource_group): - if group.network_interfaces: - for interface in group.network_interfaces: - self._security_groups[resource_group][interface.id] = dict( - name=group.name, - id=group.id - ) - - def _get_powerstate(self, resource_group, name): - try: - vm = 
self._compute_client.virtual_machines.get(resource_group, - name, - expand='instanceview') - except Exception as exc: - sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc))) - - return next((s.code.replace('PowerState/', '') - for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None) - - def _add_host(self, vars): - - host_name = self._to_safe(vars['name']) - resource_group = self._to_safe(vars['resource_group']) - operating_system_type = self._to_safe(vars['os_disk']['operating_system_type'].lower()) - security_group = None - if vars.get('security_group'): - security_group = self._to_safe(vars['security_group']) - - if self.group_by_os_family: - if not self._inventory.get(operating_system_type): - self._inventory[operating_system_type] = [] - self._inventory[operating_system_type].append(host_name) - - if self.group_by_resource_group: - if not self._inventory.get(resource_group): - self._inventory[resource_group] = [] - self._inventory[resource_group].append(host_name) - - if self.group_by_location: - if not self._inventory.get(vars['location']): - self._inventory[vars['location']] = [] - self._inventory[vars['location']].append(host_name) - - if self.group_by_security_group and security_group: - if not self._inventory.get(security_group): - self._inventory[security_group] = [] - self._inventory[security_group].append(host_name) - - self._inventory['_meta']['hostvars'][host_name] = vars - self._inventory['azure'].append(host_name) - - if self.group_by_tag and vars.get('tags'): - for key, value in vars['tags'].items(): - safe_key = self._to_safe(key) - safe_value = safe_key + '_' + self._to_safe(value) - if not self._inventory.get(safe_key): - self._inventory[safe_key] = [] - if not self._inventory.get(safe_value): - self._inventory[safe_value] = [] - self._inventory[safe_key].append(host_name) - self._inventory[safe_value].append(host_name) - - def _json_format_dict(self, pretty=False): - # convert inventory to json 
- if pretty: - return json.dumps(self._inventory, sort_keys=True, indent=2) - else: - return json.dumps(self._inventory) - - def _get_settings(self): - # Load settings from the .ini, if it exists. Otherwise, - # look for environment values. - file_settings = self._load_settings() - if file_settings: - for key in AZURE_CONFIG_SETTINGS: - if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key): - values = file_settings.get(key).split(',') - if len(values) > 0: - setattr(self, key, values) - elif file_settings.get(key): - val = self._to_boolean(file_settings[key]) - setattr(self, key, val) - else: - env_settings = self._get_env_settings() - for key in AZURE_CONFIG_SETTINGS: - if key in ('resource_groups', 'tags', 'locations') and env_settings.get(key): - values = env_settings.get(key).split(',') - if len(values) > 0: - setattr(self, key, values) - elif env_settings.get(key, None) is not None: - val = self._to_boolean(env_settings[key]) - setattr(self, key, val) - - def _parse_ref_id(self, reference): - response = {} - keys = reference.strip('/').split('/') - for index in range(len(keys)): - if index < len(keys) - 1 and index % 2 == 0: - response[keys[index]] = keys[index + 1] - return response - - def _to_boolean(self, value): - if value in ['Yes', 'yes', 1, 'True', 'true', True]: - result = True - elif value in ['No', 'no', 0, 'False', 'false', False]: - result = False - else: - result = True - return result - - def _get_env_settings(self): - env_settings = dict() - for attribute, env_variable in AZURE_CONFIG_SETTINGS.items(): - env_settings[attribute] = os.environ.get(env_variable, None) - return env_settings - - def _load_settings(self): - basename = os.path.splitext(os.path.basename(__file__))[0] - default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini')) - path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path))) - config = None - settings = None - try: - config = cp.ConfigParser() - 
config.read(path) - except Exception: - pass - - if config is not None: - settings = dict() - for key in AZURE_CONFIG_SETTINGS: - try: - settings[key] = config.get('azure', key, raw=True) - except Exception: - pass - - return settings - - def _tags_match(self, tag_obj, tag_args): - ''' - Return True if the tags object from a VM contains the requested tag values. - - :param tag_obj: Dictionary of string:string pairs - :param tag_args: List of strings in the form key=value - :return: boolean - ''' - - if not tag_obj: - return False - - matches = 0 - for arg in tag_args: - arg_key = arg - arg_value = None - if re.search(r':', arg): - arg_key, arg_value = arg.split(':') - if arg_value and tag_obj.get(arg_key, None) == arg_value: - matches += 1 - elif not arg_value and tag_obj.get(arg_key, None) is not None: - matches += 1 - if matches == len(tag_args): - return True - return False - - def _to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - regex = r"[^A-Za-z0-9\_" - if not self.replace_dash_in_groups: - regex += r"\-" - return re.sub(regex + "]", "_", word) - - -def main(): - if not HAS_AZURE: - sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC)) - - AzureInventory() - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/brook.ini b/scripts/inventory/brook.ini deleted file mode 100644 index e88c363150..0000000000 --- a/scripts/inventory/brook.ini +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2016 Doalitic. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# The Brook.io inventory script has the following dependencies: -# 1. A working Brook.io account -# See https://brook.io -# 2. A valid token generated through the 'API token' panel of Brook.io -# 3. The libbrook python libray. -# See https://github.com/doalitic/libbrook -# -# Author: Francisco Ros - -[brook] -# Valid API token (required). -# E.g. 'Aed342a12A60433697281FeEe1a4037C' -# -api_token = - -# Project id within Brook.io, as obtained from the project settings (optional). If provided, the -# generated inventory will just include the hosts that belong to such project. Otherwise, it will -# include all hosts in projects the requesting user has access to. The response includes groups -# 'project_x', being 'x' the project name. -# E.g. '2e8e099e1bc34cc0979d97ac34e9577b' -# -project_id = diff --git a/scripts/inventory/brook.py b/scripts/inventory/brook.py deleted file mode 100755 index 1acd370ec3..0000000000 --- a/scripts/inventory/brook.py +++ /dev/null @@ -1,248 +0,0 @@ -#!/usr/bin/env python -# Copyright 2016 Doalitic. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -Brook.io external inventory script -================================== - -Generates inventory that Ansible can understand by making API requests to Brook.io via the libbrook -library. Hence, such dependency must be installed in the system to run this script. - -The default configuration file is named 'brook.ini' and is located alongside this script. 
You can -choose any other file by setting the BROOK_INI_PATH environment variable. - -If param 'project_id' is left blank in 'brook.ini', the inventory includes all the instances in -projects where the requesting user belongs. Otherwise, only instances from the given project are -included, provided the requesting user belongs to it. - -The following variables are established for every host. They can be retrieved from the hostvars -dictionary. - - brook_pid: str - - brook_name: str - - brook_description: str - - brook_project: str - - brook_template: str - - brook_region: str - - brook_zone: str - - brook_status: str - - brook_tags: list(str) - - brook_internal_ips: list(str) - - brook_external_ips: list(str) - - brook_created_at - - brook_updated_at - - ansible_ssh_host - -Instances are grouped by the following categories: - - tag: - A group is created for each tag. E.g. groups 'tag_foo' and 'tag_bar' are created if there exist - instances with tags 'foo' and/or 'bar'. - - project: - A group is created for each project. E.g. group 'project_test' is created if a project named - 'test' exist. - - status: - A group is created for each instance state. E.g. groups 'status_RUNNING' and 'status_PENDING' - are created if there are instances in running and pending state. - -Examples: - Execute uname on all instances in project 'test' - $ ansible -i brook.py project_test -m shell -a "/bin/uname -a" - - Install nginx on all debian web servers tagged with 'www' - $ ansible -i brook.py tag_www -m apt -a "name=nginx state=present" - - Run site.yml playbook on web servers - $ ansible-playbook -i brook.py site.yml -l tag_www - -Support: - This script is tested on Python 2.7 and 3.4. It may work on other versions though. 
- -Author: Francisco Ros -Version: 0.2 -""" - - -import sys -import os - -from ansible.module_utils.six.moves.configparser import SafeConfigParser as ConfigParser - -import json - -try: - import libbrook -except Exception: - sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook') - - -class BrookInventory: - - _API_ENDPOINT = 'https://api.brook.io' - - def __init__(self): - self._configure_from_file() - self.client = self.get_api_client() - self.inventory = self.get_inventory() - - def _configure_from_file(self): - """Initialize from .ini file. - - Configuration file is assumed to be named 'brook.ini' and to be located on the same - directory than this file, unless the environment variable BROOK_INI_PATH says otherwise. - """ - - brook_ini_default_path = \ - os.path.join(os.path.dirname(os.path.realpath(__file__)), 'brook.ini') - brook_ini_path = os.environ.get('BROOK_INI_PATH', brook_ini_default_path) - - config = ConfigParser(defaults={ - 'api_token': '', - 'project_id': '' - }) - config.read(brook_ini_path) - self.api_token = config.get('brook', 'api_token') - self.project_id = config.get('brook', 'project_id') - - if not self.api_token: - sys.exit('You must provide (at least) your Brook.io API token to generate the dynamic ' - 'inventory.') - - def get_api_client(self): - """Authenticate user via the provided credentials and return the corresponding API client. - """ - - # Get JWT token from API token - # - unauthenticated_client = libbrook.ApiClient(host=self._API_ENDPOINT) - auth_api = libbrook.AuthApi(unauthenticated_client) - api_token = libbrook.AuthTokenRequest() - api_token.token = self.api_token - jwt = auth_api.auth_token(token=api_token) - - # Create authenticated API client - # - return libbrook.ApiClient(host=self._API_ENDPOINT, - header_name='Authorization', - header_value='Bearer %s' % jwt.token) - - def get_inventory(self): - """Generate Ansible inventory. 
- """ - - groups = dict() - meta = dict() - meta['hostvars'] = dict() - - instances_api = libbrook.InstancesApi(self.client) - projects_api = libbrook.ProjectsApi(self.client) - templates_api = libbrook.TemplatesApi(self.client) - - # If no project is given, get all projects the requesting user has access to - # - if not self.project_id: - projects = [project.id for project in projects_api.index_projects()] - else: - projects = [self.project_id] - - # Build inventory from instances in all projects - # - for project_id in projects: - project = projects_api.show_project(project_id=project_id) - for instance in instances_api.index_instances(project_id=project_id): - # Get template used for this instance if known - template = templates_api.show_template(template_id=instance.template) if instance.template else None - - # Update hostvars - try: - meta['hostvars'][instance.name] = \ - self.hostvars(project, instance, template, instances_api) - except libbrook.rest.ApiException: - continue - - # Group by project - project_group = 'project_%s' % project.name - if project_group in groups: - groups[project_group].append(instance.name) - else: - groups[project_group] = [instance.name] - - # Group by status - status_group = 'status_%s' % meta['hostvars'][instance.name]['brook_status'] - if status_group in groups: - groups[status_group].append(instance.name) - else: - groups[status_group] = [instance.name] - - # Group by tags - tags = meta['hostvars'][instance.name]['brook_tags'] - for tag in tags: - tag_group = 'tag_%s' % tag - if tag_group in groups: - groups[tag_group].append(instance.name) - else: - groups[tag_group] = [instance.name] - - groups['_meta'] = meta - return groups - - def hostvars(self, project, instance, template, api): - """Return the hostvars dictionary for the given instance. - - Raise libbrook.rest.ApiException if it cannot retrieve all required information from the - Brook.io API. 
- """ - - hostvars = instance.to_dict() - hostvars['brook_pid'] = hostvars.pop('pid') - hostvars['brook_name'] = hostvars.pop('name') - hostvars['brook_description'] = hostvars.pop('description') - hostvars['brook_project'] = hostvars.pop('project') - hostvars['brook_template'] = hostvars.pop('template') - hostvars['brook_region'] = hostvars.pop('region') - hostvars['brook_zone'] = hostvars.pop('zone') - hostvars['brook_created_at'] = hostvars.pop('created_at') - hostvars['brook_updated_at'] = hostvars.pop('updated_at') - del hostvars['id'] - del hostvars['key'] - del hostvars['provider'] - del hostvars['image'] - - # Substitute identifiers for names - # - hostvars['brook_project'] = project.name - hostvars['brook_template'] = template.name if template else None - - # Retrieve instance state - # - status = api.status_instance(project_id=project.id, instance_id=instance.id) - hostvars.update({'brook_status': status.state}) - - # Retrieve instance tags - # - tags = api.instance_tags(project_id=project.id, instance_id=instance.id) - hostvars.update({'brook_tags': tags}) - - # Retrieve instance addresses - # - addresses = api.instance_addresses(project_id=project.id, instance_id=instance.id) - internal_ips = [address.address for address in addresses if address.scope == 'internal'] - external_ips = [address.address for address in addresses - if address.address and address.scope == 'external'] - hostvars.update({'brook_internal_ips': internal_ips}) - hostvars.update({'brook_external_ips': external_ips}) - try: - hostvars.update({'ansible_ssh_host': external_ips[0]}) - except IndexError: - raise libbrook.rest.ApiException(status='502', reason='Instance without public IP') - - return hostvars - - -# Run the script -# -brook = BrookInventory() -print(json.dumps(brook.inventory)) diff --git a/scripts/inventory/cloudforms.ini b/scripts/inventory/cloudforms.ini deleted file mode 100644 index 30b9aa609e..0000000000 --- a/scripts/inventory/cloudforms.ini +++ /dev/null @@ -1,40 
+0,0 @@ -[cloudforms] - -# the version of CloudForms ; currently not used, but tested with -version = 4.1 - -# This should be the hostname of the CloudForms server -url = https://cfme.example.com - -# This will more than likely need to be a local CloudForms username -username = - -# The password for said username -password = - -# True = verify SSL certificate / False = trust anything -ssl_verify = True - -# limit the number of vms returned per request -limit = 100 - -# purge the CloudForms actions from hosts -purge_actions = True - -# Clean up group names (from tags and other groupings so Ansible doesn't complain) -clean_group_keys = True - -# Explode tags into nested groups / subgroups -nest_tags = False - -# If set, ensure host name are suffixed with this value -# Note: This suffix *must* include the leading '.' as it is appended to the hostname as is -# suffix = .example.org - -# If true, will try and use an IPv4 address for the ansible_ssh_host rather than just the first IP address in the list -prefer_ipv4 = False - -[cache] - -# Maximum time to trust the cache in seconds -max_age = 600 diff --git a/scripts/inventory/cloudforms.py b/scripts/inventory/cloudforms.py deleted file mode 100755 index 3514698d59..0000000000 --- a/scripts/inventory/cloudforms.py +++ /dev/null @@ -1,499 +0,0 @@ -#!/usr/bin/env python -# vim: set fileencoding=utf-8 : -# -# Copyright (C) 2016 Guido Günther -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import argparse -from ansible.module_utils.six.moves import configparser as ConfigParser -import os -import re -from time import time -import requests -from requests.auth import HTTPBasicAuth -import warnings -from ansible.errors import AnsibleError - -import json - - -class CloudFormsInventory(object): - def __init__(self): - """ - Main execution path - """ - self.inventory = dict() # A list of 
groups and the hosts in that group - self.hosts = dict() # Details about hosts in the inventory - - # Parse CLI arguments - self.parse_cli_args() - - # Read settings - self.read_settings() - - # Cache - if self.args.refresh_cache or not self.is_cache_valid(): - self.update_cache() - else: - self.load_inventory_from_cache() - self.load_hosts_from_cache() - - data_to_print = "" - - # Data to print - if self.args.host: - if self.args.debug: - print("Fetching host [%s]" % self.args.host) - data_to_print += self.get_host_info(self.args.host) - else: - self.inventory['_meta'] = {'hostvars': {}} - for hostname in self.hosts: - self.inventory['_meta']['hostvars'][hostname] = { - 'cloudforms': self.hosts[hostname], - } - # include the ansible_ssh_host in the top level - if 'ansible_ssh_host' in self.hosts[hostname]: - self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.hosts[hostname]['ansible_ssh_host'] - - data_to_print += self.json_format_dict(self.inventory, self.args.pretty) - - print(data_to_print) - - def is_cache_valid(self): - """ - Determines if the cache files have expired, or if it is still valid - """ - if self.args.debug: - print("Determining if cache [%s] is still valid (< %s seconds old)" % (self.cache_path_hosts, self.cache_max_age)) - - if os.path.isfile(self.cache_path_hosts): - mod_time = os.path.getmtime(self.cache_path_hosts) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_inventory): - if self.args.debug: - print("Cache is still valid!") - return True - - if self.args.debug: - print("Cache is stale or does not exist.") - - return False - - def read_settings(self): - """ - Reads the settings from the cloudforms.ini file - """ - config = ConfigParser.SafeConfigParser() - config_paths = [ - os.path.dirname(os.path.realpath(__file__)) + '/cloudforms.ini', - "/etc/ansible/cloudforms.ini", - ] - - env_value = os.environ.get('CLOUDFORMS_INI_PATH') - if env_value is not 
None: - config_paths.append(os.path.expanduser(os.path.expandvars(env_value))) - - if self.args.debug: - for config_path in config_paths: - print("Reading from configuration file [%s]" % config_path) - - config.read(config_paths) - - # CloudForms API related - if config.has_option('cloudforms', 'url'): - self.cloudforms_url = config.get('cloudforms', 'url') - else: - self.cloudforms_url = None - - if not self.cloudforms_url: - warnings.warn("No url specified, expected something like 'https://cfme.example.com'") - - if config.has_option('cloudforms', 'username'): - self.cloudforms_username = config.get('cloudforms', 'username') - else: - self.cloudforms_username = None - - if not self.cloudforms_username: - warnings.warn("No username specified, you need to specify a CloudForms username.") - - if config.has_option('cloudforms', 'password'): - self.cloudforms_pw = config.get('cloudforms', 'password', raw=True) - else: - self.cloudforms_pw = None - - if not self.cloudforms_pw: - warnings.warn("No password specified, you need to specify a password for the CloudForms user.") - - if config.has_option('cloudforms', 'ssl_verify'): - self.cloudforms_ssl_verify = config.getboolean('cloudforms', 'ssl_verify') - else: - self.cloudforms_ssl_verify = True - - if config.has_option('cloudforms', 'version'): - self.cloudforms_version = config.get('cloudforms', 'version') - else: - self.cloudforms_version = None - - if config.has_option('cloudforms', 'limit'): - self.cloudforms_limit = config.getint('cloudforms', 'limit') - else: - self.cloudforms_limit = 100 - - if config.has_option('cloudforms', 'purge_actions'): - self.cloudforms_purge_actions = config.getboolean('cloudforms', 'purge_actions') - else: - self.cloudforms_purge_actions = True - - if config.has_option('cloudforms', 'clean_group_keys'): - self.cloudforms_clean_group_keys = config.getboolean('cloudforms', 'clean_group_keys') - else: - self.cloudforms_clean_group_keys = True - - if config.has_option('cloudforms', 
'nest_tags'): - self.cloudforms_nest_tags = config.getboolean('cloudforms', 'nest_tags') - else: - self.cloudforms_nest_tags = False - - if config.has_option('cloudforms', 'suffix'): - self.cloudforms_suffix = config.get('cloudforms', 'suffix') - if self.cloudforms_suffix[0] != '.': - raise AnsibleError('Leading fullstop is required for Cloudforms suffix') - else: - self.cloudforms_suffix = None - - if config.has_option('cloudforms', 'prefer_ipv4'): - self.cloudforms_prefer_ipv4 = config.getboolean('cloudforms', 'prefer_ipv4') - else: - self.cloudforms_prefer_ipv4 = False - - # Ansible related - try: - group_patterns = config.get('ansible', 'group_patterns') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - group_patterns = "[]" - - self.group_patterns = eval(group_patterns) - - # Cache related - try: - cache_path = os.path.expanduser(config.get('cache', 'path')) - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - cache_path = '.' - (script, ext) = os.path.splitext(os.path.basename(__file__)) - self.cache_path_hosts = cache_path + "/%s.hosts" % script - self.cache_path_inventory = cache_path + "/%s.inventory" % script - self.cache_max_age = config.getint('cache', 'max_age') - - if self.args.debug: - print("CloudForms settings:") - print("cloudforms_url = %s" % self.cloudforms_url) - print("cloudforms_username = %s" % self.cloudforms_username) - print("cloudforms_pw = %s" % self.cloudforms_pw) - print("cloudforms_ssl_verify = %s" % self.cloudforms_ssl_verify) - print("cloudforms_version = %s" % self.cloudforms_version) - print("cloudforms_limit = %s" % self.cloudforms_limit) - print("cloudforms_purge_actions = %s" % self.cloudforms_purge_actions) - print("Cache settings:") - print("cache_max_age = %s" % self.cache_max_age) - print("cache_path_hosts = %s" % self.cache_path_hosts) - print("cache_path_inventory = %s" % self.cache_path_inventory) - - def parse_cli_args(self): - """ - Command line argument processing - """ - parser = 
argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms managed VMs') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to CloudForms (default: False - use cache files)') - parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)') - self.args = parser.parse_args() - - def _http_request(self, url): - """ - Make a request and return the result converted from JSON - """ - results = [] - - ret = requests.get(url, - auth=HTTPBasicAuth(self.cloudforms_username, self.cloudforms_pw), - verify=self.cloudforms_ssl_verify) - - ret.raise_for_status() - - try: - results = json.loads(ret.text) - except ValueError: - warnings.warn( - "Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason)) - results = {} - - if self.args.debug: - print("=======================================================================") - print("=======================================================================") - print("=======================================================================") - print(ret.text) - print("=======================================================================") - print("=======================================================================") - print("=======================================================================") - - return results - - def _get_json(self, endpoint, url_suffix): - """ - Make a request by given url, split request by configured limit, - go through all sub-requests and return the aggregated data 
received - by cloudforms - - :param endpoint: api endpoint to access - :param url_suffix: additional api parameters - - """ - - limit = int(self.cloudforms_limit) - - page = 0 - last_page = False - - results = [] - - while not last_page: - offset = page * limit - url = "%s%s?offset=%s&limit=%s%s" % ( - self.cloudforms_url, endpoint, offset, limit, url_suffix) - - if self.args.debug: - print("Connecting to url '%s'" % url) - - ret = self._http_request(url) - results += [ret] - - if 'subcount' in ret: - if ret['subcount'] < limit: - last_page = True - page += 1 - else: - last_page = True - - return results - - def _get_hosts(self): - """ - Get all hosts - """ - endpoint = "/api/vms" - url_suffix = "&expand=resources,tags,hosts,&attributes=active,ipaddresses&filter[]=active=true" - results = self._get_json(endpoint, url_suffix) - resources = [item for sublist in results for item in sublist['resources']] - - return resources - - def update_cache(self): - """ - Make calls to cloudforms and save the output in a cache - """ - self.groups = dict() - self.hosts = dict() - - if self.args.debug: - print("Updating cache...") - - for host in self._get_hosts(): - if self.cloudforms_suffix is not None and not host['name'].endswith(self.cloudforms_suffix): - host['name'] = host['name'] + self.cloudforms_suffix - - # Ignore VMs that are not powered on - if host['power_state'] != 'on': - if self.args.debug: - print("Skipping %s because power_state = %s" % (host['name'], host['power_state'])) - continue - - # purge actions - if self.cloudforms_purge_actions and 'actions' in host: - del host['actions'] - - # Create ansible groups for tags - if 'tags' in host: - - # Create top-level group - if 'tags' not in self.inventory: - self.inventory['tags'] = dict(children=[], vars={}, hosts=[]) - - if not self.cloudforms_nest_tags: - # don't expand tags, just use them in a safe way - for group in host['tags']: - # Add sub-group, as a child of top-level - safe_key = self.to_safe(group['name']) - 
if safe_key: - if self.args.debug: - print("Adding sub-group '%s' to parent 'tags'" % safe_key) - - if safe_key not in self.inventory['tags']['children']: - self.push(self.inventory['tags'], 'children', safe_key) - - self.push(self.inventory, safe_key, host['name']) - - if self.args.debug: - print("Found tag [%s] for host which will be mapped to [%s]" % (group['name'], safe_key)) - else: - # expand the tags into nested groups / sub-groups - # Create nested groups for tags - safe_parent_tag_name = 'tags' - for tag in host['tags']: - tag_hierarchy = tag['name'][1:].split('/') - - if self.args.debug: - print("Working on list %s" % tag_hierarchy) - - for tag_name in tag_hierarchy: - if self.args.debug: - print("Working on tag_name = %s" % tag_name) - - safe_tag_name = self.to_safe(tag_name) - if self.args.debug: - print("Using sanitized name %s" % safe_tag_name) - - # Create sub-group - if safe_tag_name not in self.inventory: - self.inventory[safe_tag_name] = dict(children=[], vars={}, hosts=[]) - - # Add sub-group, as a child of top-level - if safe_parent_tag_name: - if self.args.debug: - print("Adding sub-group '%s' to parent '%s'" % (safe_tag_name, safe_parent_tag_name)) - - if safe_tag_name not in self.inventory[safe_parent_tag_name]['children']: - self.push(self.inventory[safe_parent_tag_name], 'children', safe_tag_name) - - # Make sure the next one uses this one as it's parent - safe_parent_tag_name = safe_tag_name - - # Add the host to the last tag - self.push(self.inventory[safe_parent_tag_name], 'hosts', host['name']) - - # Set ansible_ssh_host to the first available ip address - if 'ipaddresses' in host and host['ipaddresses'] and isinstance(host['ipaddresses'], list): - # If no preference for IPv4, just use the first entry - if not self.cloudforms_prefer_ipv4: - host['ansible_ssh_host'] = host['ipaddresses'][0] - else: - # Before we search for an IPv4 address, set using the first entry in case we don't find any - host['ansible_ssh_host'] = 
host['ipaddresses'][0] - for currenthost in host['ipaddresses']: - if '.' in currenthost: - host['ansible_ssh_host'] = currenthost - - # Create additional groups - for key in ('location', 'type', 'vendor'): - safe_key = self.to_safe(host[key]) - - # Create top-level group - if key not in self.inventory: - self.inventory[key] = dict(children=[], vars={}, hosts=[]) - - # Create sub-group - if safe_key not in self.inventory: - self.inventory[safe_key] = dict(children=[], vars={}, hosts=[]) - - # Add sub-group, as a child of top-level - if safe_key not in self.inventory[key]['children']: - self.push(self.inventory[key], 'children', safe_key) - - if key in host: - # Add host to sub-group - self.push(self.inventory[safe_key], 'hosts', host['name']) - - self.hosts[host['name']] = host - self.push(self.inventory, 'all', host['name']) - - if self.args.debug: - print("Saving cached data") - - self.write_to_cache(self.hosts, self.cache_path_hosts) - self.write_to_cache(self.inventory, self.cache_path_inventory) - - def get_host_info(self, host): - """ - Get variables about a specific host - """ - if not self.hosts or len(self.hosts) == 0: - # Need to load cache from cache - self.load_hosts_from_cache() - - if host not in self.hosts: - if self.args.debug: - print("[%s] not found in cache." % host) - - # try updating the cache - self.update_cache() - - if host not in self.hosts: - if self.args.debug: - print("[%s] does not exist after cache update." % host) - # host might not exist anymore - return self.json_format_dict({}, self.args.pretty) - - return self.json_format_dict(self.hosts[host], self.args.pretty) - - def push(self, d, k, v): - """ - Safely puts a new entry onto an array. 
- """ - if k in d: - d[k].append(v) - else: - d[k] = [v] - - def load_inventory_from_cache(self): - """ - Reads the inventory from the cache file sets self.inventory - """ - cache = open(self.cache_path_inventory, 'r') - json_inventory = cache.read() - self.inventory = json.loads(json_inventory) - - def load_hosts_from_cache(self): - """ - Reads the cache from the cache file sets self.hosts - """ - cache = open(self.cache_path_hosts, 'r') - json_cache = cache.read() - self.hosts = json.loads(json_cache) - - def write_to_cache(self, data, filename): - """ - Writes data in JSON format to a file - """ - json_data = self.json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def to_safe(self, word): - """ - Converts 'bad' characters in a string to underscores so they can be used as Ansible groups - """ - if self.cloudforms_clean_group_keys: - regex = r"[^A-Za-z0-9\_]" - return re.sub(regex, "_", word.replace(" ", "")) - else: - return word - - def json_format_dict(self, data, pretty=False): - """ - Converts a dict to a JSON object and dumps it as a formatted string - """ - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -CloudFormsInventory() diff --git a/scripts/inventory/cobbler.ini b/scripts/inventory/cobbler.ini deleted file mode 100644 index 2dc8cd3379..0000000000 --- a/scripts/inventory/cobbler.ini +++ /dev/null @@ -1,24 +0,0 @@ -# Ansible Cobbler external inventory script settings -# - -[cobbler] - -host = http://PATH_TO_COBBLER_SERVER/cobbler_api - -# If API needs authentication add 'username' and 'password' options here. -#username = foo -#password = bar - -# API calls to Cobbler can be slow. For this reason, we cache the results of an API -# call. Set this to the path you want cache files to be written to. 
Two files -# will be written to this directory: -# - ansible-cobbler.cache -# - ansible-cobbler.index -cache_path = /tmp - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. -cache_max_age = 900 - - - diff --git a/scripts/inventory/cobbler.py b/scripts/inventory/cobbler.py deleted file mode 100755 index eeb8f58286..0000000000 --- a/scripts/inventory/cobbler.py +++ /dev/null @@ -1,305 +0,0 @@ -#!/usr/bin/env python - -""" -Cobbler external inventory script -================================= - -Ansible has a feature where instead of reading from /etc/ansible/hosts -as a text file, it can query external programs to obtain the list -of hosts, groups the hosts are in, and even variables to assign to each host. - -To use this, copy this file over /etc/ansible/hosts and chmod +x the file. -This, more or less, allows you to keep one central database containing -info about all of your managed instances. - -This script is an example of sourcing that data from Cobbler -(https://cobbler.github.io). With cobbler each --mgmt-class in cobbler -will correspond to a group in Ansible, and --ks-meta variables will be -passed down for use in templates or even in argument lines. - -NOTE: The cobbler system names will not be used. Make sure a -cobbler --dns-name is set for each cobbler system. If a system -appears with two DNS names we do not add it twice because we don't want -ansible talking to it twice. The first one found will be used. If no ---dns-name is set the system will NOT be visible to ansible. We do -not add cobbler system names because there is no requirement in cobbler -that those correspond to addresses. - -Tested with Cobbler 2.0.11. - -Changelog: - - 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in - higher performance at ansible startup. Groups are determined by owner rather than - default mgmt_classes. DNS name determined from hostname. 
cobbler values are written - to a 'cobbler' fact namespace - - - 2013-09-01 pgehres: Refactored implementation to make use of caching and to - limit the number of connections to external cobbler server for performance. - Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0 - -""" - -# (c) 2012, Michael DeHaan -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -import argparse -import os -import re -from time import time -try: # Python 3 - from xmlrpc.client import Server -except ImportError: # Python 2 - from xmlrpclib import Server - -import json - -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import configparser as ConfigParser - -# NOTE -- this file assumes Ansible is being accessed FROM the cobbler -# server, so it does not attempt to login with a username and password. -# this will be addressed in a future version of this script. - -orderby_keyname = 'owners' # alternatively 'mgmt_classes' - - -class CobblerInventory(object): - - def __init__(self): - - """ Main execution path """ - self.conn = None - - self.inventory = dict() # A list of groups and the hosts in that group - self.cache = dict() # Details about hosts in the inventory - self.ignore_settings = False # used to only look at env vars for settings. 
- - # Read env vars, read settings, and parse CLI arguments - self.parse_env_vars() - self.read_settings() - self.parse_cli_args() - - # Cache - if self.args.refresh_cache: - self.update_cache() - elif not self.is_cache_valid(): - self.update_cache() - else: - self.load_inventory_from_cache() - self.load_cache_from_cache() - - data_to_print = "" - - # Data to print - if self.args.host: - data_to_print += self.get_host_info() - else: - self.inventory['_meta'] = {'hostvars': {}} - for hostname in self.cache: - self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname]} - data_to_print += self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def _connect(self): - if not self.conn: - self.conn = Server(self.cobbler_host, allow_none=True) - self.token = None - if self.cobbler_username is not None: - self.token = self.conn.login(self.cobbler_username, self.cobbler_password) - - def is_cache_valid(self): - """ Determines if the cache files have expired, or if it is still valid """ - - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_inventory): - return True - - return False - - def read_settings(self): - """ Reads the settings from the cobbler.ini file """ - - if(self.ignore_settings): - return - - config = ConfigParser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini') - - self.cobbler_host = config.get('cobbler', 'host') - self.cobbler_username = None - self.cobbler_password = None - if config.has_option('cobbler', 'username'): - self.cobbler_username = config.get('cobbler', 'username') - if config.has_option('cobbler', 'password'): - self.cobbler_password = config.get('cobbler', 'password') - - # Cache related - cache_path = config.get('cobbler', 'cache_path') - self.cache_path_cache = cache_path + "/ansible-cobbler.cache" - 
self.cache_path_inventory = cache_path + "/ansible-cobbler.index" - self.cache_max_age = config.getint('cobbler', 'cache_max_age') - - def parse_env_vars(self): - """ Reads the settings from the environment """ - - # Env. Vars: - # COBBLER_host - # COBBLER_username - # COBBLER_password - # COBBLER_cache_path - # COBBLER_cache_max_age - # COBBLER_ignore_settings - - self.cobbler_host = os.getenv('COBBLER_host', None) - self.cobbler_username = os.getenv('COBBLER_username', None) - self.cobbler_password = os.getenv('COBBLER_password', None) - - # Cache related - cache_path = os.getenv('COBBLER_cache_path', None) - if(cache_path is not None): - self.cache_path_cache = cache_path + "/ansible-cobbler.cache" - self.cache_path_inventory = cache_path + "/ansible-cobbler.index" - - self.cache_max_age = int(os.getenv('COBBLER_cache_max_age', "30")) - - # ignore_settings is used to ignore the settings file, for use in Ansible - # Tower (or AWX inventory scripts and not throw python exceptions.) - if(os.getenv('COBBLER_ignore_settings', False) == "True"): - self.ignore_settings = True - - def parse_cli_args(self): - """ Command line argument processing """ - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)') - self.args = parser.parse_args() - - def update_cache(self): - """ Make calls to cobbler and save the output in a cache """ - - self._connect() - self.groups = dict() - self.hosts = dict() - if self.token is not None: - data = self.conn.get_systems(self.token) - else: - data = self.conn.get_systems() - - for host in data: - # Get the FQDN for 
the host and add it to the right groups - dns_name = host['hostname'] # None - ksmeta = None - interfaces = host['interfaces'] - # hostname is often empty for non-static IP hosts - if dns_name == '': - for (iname, ivalue) in iteritems(interfaces): - if ivalue['management'] or not ivalue['static']: - this_dns_name = ivalue.get('dns_name', None) - dns_name = this_dns_name if this_dns_name else '' - - if dns_name == '' or dns_name is None: - continue - - status = host['status'] - profile = host['profile'] - classes = host[orderby_keyname] - - if status not in self.inventory: - self.inventory[status] = [] - self.inventory[status].append(dns_name) - - if profile not in self.inventory: - self.inventory[profile] = [] - self.inventory[profile].append(dns_name) - - for cls in classes: - if cls not in self.inventory: - self.inventory[cls] = [] - self.inventory[cls].append(dns_name) - - # Since we already have all of the data for the host, update the host details as well - - # The old way was ksmeta only -- provide backwards compatibility - - self.cache[dns_name] = host - if "ks_meta" in host: - for key, value in iteritems(host["ks_meta"]): - self.cache[dns_name][key] = value - - self.write_to_cache(self.cache, self.cache_path_cache) - self.write_to_cache(self.inventory, self.cache_path_inventory) - - def get_host_info(self): - """ Get variables about a specific host """ - - if not self.cache or len(self.cache) == 0: - # Need to load index from cache - self.load_cache_from_cache() - - if self.args.host not in self.cache: - # try updating the cache - self.update_cache() - - if self.args.host not in self.cache: - # host might not exist anymore - return self.json_format_dict({}, True) - - return self.json_format_dict(self.cache[self.args.host], True) - - def push(self, my_dict, key, element): - """ Pushed an element onto an array that may not have been defined in the dict """ - - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def 
load_inventory_from_cache(self): - """ Reads the index from the cache file sets self.index """ - - cache = open(self.cache_path_inventory, 'r') - json_inventory = cache.read() - self.inventory = json.loads(json_inventory) - - def load_cache_from_cache(self): - """ Reads the cache from the cache file sets self.cache """ - - cache = open(self.cache_path_cache, 'r') - json_cache = cache.read() - self.cache = json.loads(json_cache) - - def write_to_cache(self, data, filename): - """ Writes data in JSON format to a file """ - json_data = self.json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def to_safe(self, word): - """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """ - - return re.sub(r"[^A-Za-z0-9\-]", "_", word) - - def json_format_dict(self, data, pretty=False): - """ Converts a dict to a JSON object and dumps it as a formatted string """ - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -CobblerInventory() diff --git a/scripts/inventory/collins.ini b/scripts/inventory/collins.ini deleted file mode 100644 index 0ce0c2acbd..0000000000 --- a/scripts/inventory/collins.ini +++ /dev/null @@ -1,57 +0,0 @@ -# Ansible Collins external inventory script settings -# - -[collins] - -# You should not have a trailing slash or collins -# will not properly match the URI -host = http://localhost:9000 - -username = blake -password = admin:first - -# Specifies a timeout for all HTTP requests to Collins. -timeout_secs = 120 - -# Specifies a maximum number of retries per Collins request. 
-max_retries = 5 - -# Specifies the number of results to return per paginated query as specified in -# the Pagination section of the Collins API docs: -# http://tumblr.github.io/collins/api.html -results_per_query = 100 - -# Specifies the Collins asset type which will be queried for; most typically -# you'll want to leave this at the default of SERVER_NODE. -asset_type = SERVER_NODE - -# Collins assets can optionally be assigned hostnames; this option will preference -# the selection of an asset's hostname over an IP address as the primary identifier -# in the Ansible inventory. Typically, this value should be set to true if assets -# are assigned hostnames. -prefer_hostnames = true - -# Within Collins, assets can be granted multiple IP addresses; this configuration -# value specifies the index within the 'ADDRESSES' array as returned by the -# following API endpoint: -# http://tumblr.github.io/collins/api.html#api-ipam-asset-addresses-section -ip_address_index = 0 - -# Sets whether Collins instances in multiple datacenters will be queried. -query_remote_dcs = false - -# API calls to Collins can involve large, substantial queries. For this reason, -# we cache the results of an API call. Set this to the path you want cache files -# to be written to. Two files will be written to this directory: -# - ansible-collins.cache -# - ansible-collins.index -cache_path = /tmp - -# If errors occur while querying inventory, logging messages will be written -# to a logfile in the specified directory: -# - ansible-collins.log -log_path = /tmp - -# The number of seconds that a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. 
-cache_max_age = 600 diff --git a/scripts/inventory/collins.py b/scripts/inventory/collins.py deleted file mode 100755 index f481649eeb..0000000000 --- a/scripts/inventory/collins.py +++ /dev/null @@ -1,429 +0,0 @@ -#!/usr/bin/env python - -""" -Collins external inventory script -================================= - -Ansible has a feature where instead of reading from /etc/ansible/hosts -as a text file, it can query external programs to obtain the list -of hosts, groups the hosts are in, and even variables to assign to each host. - -Collins is a hardware asset management system originally developed by -Tumblr for tracking new hardware as it built out its own datacenters. It -exposes a rich API for manipulating and querying one's hardware inventory, -which makes it an ideal 'single point of truth' for driving systems -automation like Ansible. Extensive documentation on Collins, including a quickstart, -API docs, and a full reference manual, can be found here: - -http://tumblr.github.io/collins - -This script adds support to Ansible for obtaining a dynamic inventory of -assets in your infrastructure, grouping them in Ansible by their useful attributes, -and binding all facts provided by Collins to each host so that they can be used to -drive automation. Some parts of this script were cribbed shamelessly from mdehaan's -Cobbler inventory script. - -To use it, copy it to your repo and pass -i to the ansible or -ansible-playbook command; if you'd like to use it by default, simply copy collins.ini -to /etc/ansible and this script to /etc/ansible/hosts. 
- -Alongside the options set in collins.ini, there are several environment variables -that will be used instead of the configured values if they are set: - - - COLLINS_USERNAME - specifies a username to use for Collins authentication - - COLLINS_PASSWORD - specifies a password to use for Collins authentication - - COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying; - this can be used to run Ansible automation against different asset classes than - server nodes, such as network switches and PDUs - - COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to - /collins.ini - -If errors are encountered during operation, this script will return an exit code of -255; otherwise, it will return an exit code of 0. - -Collins attributes are accessible as variables in ansible via the COLLINS['attribute_name']. - -Tested against Ansible 1.8.2 and Collins 1.3.0. -""" - -# (c) 2014, Steve Salevan -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - - -import argparse -import logging -import os -import re -import sys -from time import time -import traceback - -import json - -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import configparser as ConfigParser -from ansible.module_utils.six.moves.urllib.parse import urlencode - -from ansible.module_utils.urls import open_url - - -class CollinsDefaults(object): - ASSETS_API_ENDPOINT = '%s/api/assets' - SPECIAL_ATTRIBUTES = set([ - 'CREATED', - 'DELETED', - 'UPDATED', - 'STATE', - ]) - LOG_FORMAT = '%(asctime)-15s %(message)s' - - -class Error(Exception): - pass - - -class MaxRetriesError(Error): - pass - - -class CollinsInventory(object): - - def __init__(self): - """ Constructs CollinsInventory object and reads all configuration. 
""" - - self.inventory = dict() # A list of groups and the hosts in that group - self.cache = dict() # Details about hosts in the inventory - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - logging.basicConfig(format=CollinsDefaults.LOG_FORMAT, - filename=self.log_location) - self.log = logging.getLogger('CollinsInventory') - - def _asset_get_attribute(self, asset, attrib): - """ Returns a user-defined attribute from an asset if it exists; otherwise, - returns None. """ - - if 'ATTRIBS' in asset: - for attrib_block in asset['ATTRIBS'].keys(): - if attrib in asset['ATTRIBS'][attrib_block]: - return asset['ATTRIBS'][attrib_block][attrib] - return None - - def _asset_has_attribute(self, asset, attrib): - """ Returns whether a user-defined attribute is present on an asset. """ - - if 'ATTRIBS' in asset: - for attrib_block in asset['ATTRIBS'].keys(): - if attrib in asset['ATTRIBS'][attrib_block]: - return True - return False - - def run(self): - """ Main execution path """ - - # Updates cache if cache is not present or has expired. - successful = True - if self.args.refresh_cache: - successful = self.update_cache() - elif not self.is_cache_valid(): - successful = self.update_cache() - else: - successful = self.load_inventory_from_cache() - successful &= self.load_cache_from_cache() - - data_to_print = "" - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of instances for inventory - data_to_print = self.json_format_dict(self.inventory, self.args.pretty) - - else: # default action with no options - data_to_print = self.json_format_dict(self.inventory, self.args.pretty) - - print(data_to_print) - return successful - - def find_assets(self, attributes=None, operation='AND'): - """ Obtains Collins assets matching the provided attributes. 
""" - attributes = {} if attributes is None else attributes - - # Formats asset search query to locate assets matching attributes, using - # the CQL search feature as described here: - # http://tumblr.github.io/collins/recipes.html - attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)] - query_parameters = { - 'details': ['True'], - 'operation': [operation], - 'query': attributes_query, - 'remoteLookup': [str(self.query_remote_dcs)], - 'size': [self.results_per_query], - 'type': [self.collins_asset_type], - } - assets = [] - cur_page = 0 - num_retries = 0 - # Locates all assets matching the provided query, exhausting pagination. - while True: - if num_retries == self.collins_max_retries: - raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries) - query_parameters['page'] = cur_page - query_url = "%s?%s" % ( - (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host), - urlencode(query_parameters, doseq=True) - ) - try: - response = open_url(query_url, - timeout=self.collins_timeout_secs, - url_username=self.collins_username, - url_password=self.collins_password, - force_basic_auth=True) - json_response = json.loads(response.read()) - # Adds any assets found to the array of assets. - assets += json_response['data']['Data'] - # If we've retrieved all of our assets, breaks out of the loop. 
- if len(json_response['data']['Data']) == 0: - break - cur_page += 1 - num_retries = 0 - except Exception: - self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc()) - num_retries += 1 - return assets - - def is_cache_valid(self): - """ Determines if the cache files have expired, or if it is still valid """ - - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_inventory): - return True - - return False - - def read_settings(self): - """ Reads the settings from the collins.ini file """ - - config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini') - - config = ConfigParser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/collins.ini') - - self.collins_host = config.get('collins', 'host') - self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username')) - self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password')) - self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type')) - self.collins_timeout_secs = config.getint('collins', 'timeout_secs') - self.collins_max_retries = config.getint('collins', 'max_retries') - - self.results_per_query = config.getint('collins', 'results_per_query') - self.ip_address_index = config.getint('collins', 'ip_address_index') - self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs') - self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames') - - cache_path = config.get('collins', 'cache_path') - self.cache_path_cache = cache_path + \ - '/ansible-collins-%s.cache' % self.collins_asset_type - self.cache_path_inventory = cache_path + \ - '/ansible-collins-%s.index' % self.collins_asset_type - self.cache_max_age = config.getint('collins', 
'cache_max_age') - - log_path = config.get('collins', 'log_path') - self.log_location = log_path + '/ansible-collins.log' - - def parse_cli_args(self): - """ Command line argument processing """ - - parser = argparse.ArgumentParser( - description='Produces an Ansible Inventory file based on Collins') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to Collins ' - '(default: False - use cache files)') - parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output') - self.args = parser.parse_args() - - def update_cache(self): - """ Make calls to Collins and saves the output in a cache """ - - self.cache = dict() - self.inventory = dict() - - # Locates all server assets from Collins. - try: - server_assets = self.find_assets() - except Exception: - self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc()) - return False - - for asset in server_assets: - # Determines the index to retrieve the asset's IP address either by an - # attribute set on the Collins asset or the pre-configured value. - if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'): - ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX') - try: - ip_index = int(ip_index) - except Exception: - self.log.error( - "ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset, - ip_index) - else: - ip_index = self.ip_address_index - - asset['COLLINS'] = {} - - # Attempts to locate the asset's primary identifier (hostname or IP address), - # which will be used to index the asset throughout the Ansible inventory. 
- if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'): - asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME') - elif 'ADDRESSES' not in asset: - self.log.warning("No IP addresses found for asset '%s', skipping", asset) - continue - elif len(asset['ADDRESSES']) < ip_index + 1: - self.log.warning( - "No IP address found at index %s for asset '%s', skipping", - ip_index, asset) - continue - else: - asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS'] - - # Adds an asset index to the Ansible inventory based upon unpacking - # the name of the asset's current STATE from its dictionary. - if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']: - state_inventory_key = self.to_safe( - 'STATE-%s' % asset['ASSET']['STATE']['NAME']) - self.push(self.inventory, state_inventory_key, asset_identifier) - - # Indexes asset by all user-defined Collins attributes. - if 'ATTRIBS' in asset: - for attrib_block in asset['ATTRIBS'].keys(): - for attrib in asset['ATTRIBS'][attrib_block].keys(): - asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib] - attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib])) - self.push(self.inventory, attrib_key, asset_identifier) - - # Indexes asset by all built-in Collins attributes. - for attribute in asset['ASSET'].keys(): - if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES: - attribute_val = asset['ASSET'][attribute] - if attribute_val is not None: - attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val)) - self.push(self.inventory, attrib_key, asset_identifier) - - # Indexes asset by hardware product information. 
- if 'HARDWARE' in asset: - if 'PRODUCT' in asset['HARDWARE']['BASE']: - product = asset['HARDWARE']['BASE']['PRODUCT'] - if product: - product_key = self.to_safe( - 'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT']) - self.push(self.inventory, product_key, asset_identifier) - - # Indexing now complete, adds the host details to the asset cache. - self.cache[asset_identifier] = asset - - try: - self.write_to_cache(self.cache, self.cache_path_cache) - self.write_to_cache(self.inventory, self.cache_path_inventory) - except Exception: - self.log.error("Error while writing to cache:\n%s", traceback.format_exc()) - return False - return True - - def push(self, dictionary, key, value): - """ Adds a value to a list at a dictionary key, creating the list if it doesn't - exist. """ - - if key not in dictionary: - dictionary[key] = [] - dictionary[key].append(value) - - def get_host_info(self): - """ Get variables about a specific host. """ - - if not self.cache or len(self.cache) == 0: - # Need to load index from cache - self.load_cache_from_cache() - - if self.args.host not in self.cache: - # try updating the cache - self.update_cache() - - if self.args.host not in self.cache: - # host might not exist anymore - return self.json_format_dict({}, self.args.pretty) - - return self.json_format_dict(self.cache[self.args.host], self.args.pretty) - - def load_inventory_from_cache(self): - """ Reads the index from the cache file sets self.index """ - - try: - cache = open(self.cache_path_inventory, 'r') - json_inventory = cache.read() - self.inventory = json.loads(json_inventory) - return True - except Exception: - self.log.error("Error while loading inventory:\n%s", - traceback.format_exc()) - self.inventory = {} - return False - - def load_cache_from_cache(self): - """ Reads the cache from the cache file sets self.cache """ - - try: - cache = open(self.cache_path_cache, 'r') - json_cache = cache.read() - self.cache = json.loads(json_cache) - return True - except 
Exception: - self.log.error("Error while loading host cache:\n%s", - traceback.format_exc()) - self.cache = {} - return False - - def write_to_cache(self, data, filename): - """ Writes data in JSON format to a specified file. """ - - json_data = self.json_format_dict(data, self.args.pretty) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def to_safe(self, word): - """ Converts 'bad' characters in a string to underscores so they - can be used as Ansible groups """ - - return re.sub(r"[^A-Za-z0-9\-]", "_", word) - - def json_format_dict(self, data, pretty=False): - """ Converts a dict to a JSON object and dumps it as a formatted string """ - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -if __name__ in '__main__': - inventory = CollinsInventory() - if inventory.run(): - sys.exit(0) - else: - sys.exit(-1) diff --git a/scripts/inventory/consul_io.ini b/scripts/inventory/consul_io.ini deleted file mode 100644 index d18a1494dd..0000000000 --- a/scripts/inventory/consul_io.ini +++ /dev/null @@ -1,54 +0,0 @@ -# Ansible Consul external inventory script settings. - -[consul] - -# -# Bulk load. Load all possible data before building inventory JSON -# If true, script processes in-memory data. JSON generation reduces drastically -# -bulk_load = false - -# restrict included nodes to those from this datacenter -#datacenter = nyc1 - -# url of the consul cluster to query -#url = http://demo.consul.io -url = http://localhost:8500 - -# suffix added to each service to create a group name e.g Service of 'redis' and -# a suffix of '_servers' will add each address to the group name 'redis_servers' -servers_suffix = _servers - -# -# By default, final JSON is built based on all available info in consul. -# Suffixes means that services groups will be added in addition to basic information. 
See servers_suffix for additional info -# There are cases when speed is preferable than having services groups -# False value will reduce script execution time drastically. -# -suffixes = true - -# if specified then the inventory will generate domain names that will resolve -# via Consul's inbuilt DNS. -#domain=consul - -# make groups from service tags. the name of the group is derived from the -# service name and the tag name e.g. a service named nginx with tags ['master', 'v1'] -# will create groups nginx_master and nginx_v1 -tags = true - -# looks up the node name at the given path for a list of groups to which the -# node should be added. -kv_groups=ansible/groups - -# looks up the node name at the given path for a json dictionary of metadata that -# should be attached as metadata for the node -kv_metadata=ansible/metadata - -# looks up the health of each service and adds the node to 'up' and 'down' groups -# based on the service availability -# -# !!!! if availability is true, suffixes also must be true. !!!! -# -availability = true -available_suffix = _up -unavailable_suffix = _down diff --git a/scripts/inventory/consul_io.py b/scripts/inventory/consul_io.py deleted file mode 100755 index 6af0675707..0000000000 --- a/scripts/inventory/consul_io.py +++ /dev/null @@ -1,553 +0,0 @@ -#!/usr/bin/env python - -# -# (c) 2015, Steve Gargan -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -''' -Consul.io inventory script (http://consul.io) -====================================== - -Generates Ansible inventory from nodes in a Consul cluster. 
This script will -group nodes by: - - datacenter, - - registered service - - service tags - - service status - - values from the k/v store - -This script can be run with the switches ---list as expected groups all the nodes in all datacenters ---datacenter, to restrict the nodes to a single datacenter ---host to restrict the inventory to a single named node. (requires datacenter config) - -The configuration for this plugin is read from a consul_io.ini file located in the -same directory as this inventory script or via environment variables. All config options in the config file -are optional except the host and port, which must point to a valid agent or -server running the http api. For more information on enabling the endpoint see. - -http://www.consul.io/docs/agent/options.html - -Other options include: - -'bulk_load' - -boolean flag. Load all possible data before building inventory JSON -If true, script processes in-memory data. JSON generation reduces drastically -This can also be set with the environmental variable CONSUL_BULK_LOAD. - -'datacenter': - -which restricts the included nodes to those from the given datacenter -This can also be set with the environmental variable CONSUL_DATACENTER. - -'url': - -the URL of the Consul cluster. host, port and scheme are derived from the -URL. If not specified, connection configuration defaults to http requests -to localhost on port 8500. -This can also be set with the environmental variable CONSUL_URL. - -'domain': - -if specified then the inventory will generate domain names that will resolve -via Consul's inbuilt DNS. The name is derived from the node name, datacenter -and domain .node... Note that you will need to -have consul hooked into your DNS server for these to resolve. See the consul -DNS docs for more info. - -which restricts the included nodes to those from the given datacenter -This can also be set with the environmental variable CONSUL_DOMAIN. - -'suffixes': - -boolean flag. 
By default, final JSON is built based on all available info in consul. -Suffixes means that services groups will be added in addition to basic information. See servers_suffix for additional info -There are cases when speed is preferable than having services groups -False value will reduce script execution time drastically. -This can also be set with the environmental variable CONSUL_SUFFIXES. - -'servers_suffix': - -defining the a suffix to add to the service name when creating the service -group. e.g Service name of 'redis' and a suffix of '_servers' will add -each nodes address to the group name 'redis_servers'. No suffix is added -if this is not set -This can also be set with the environmental variable CONSUL_SERVERS_SUFFIX. - -'tags': - -boolean flag defining if service tags should be used to create Inventory -groups e.g. an nginx service with the tags ['master', 'v1'] will create -groups nginx_master and nginx_v1 to which the node running the service -will be added. No tag groups are created if this is missing. -This can also be set with the environmental variable CONSUL_TAGS. - -'token': - -ACL token to use to authorize access to the key value store. May be required -to retrieve the kv_groups and kv_metadata based on your consul configuration. -This can also be set with the environmental variable CONSUL_TOKEN. - -'kv_groups': - -This is used to lookup groups for a node in the key value store. It specifies a -path to which each discovered node's name will be added to create a key to query -the key/value store. There it expects to find a comma separated list of group -names to which the node should be added e.g. if the inventory contains node -'nyc-web-1' in datacenter 'nyc-dc1' and kv_groups = 'ansible/groups' then the key -'ansible/groups/nyc-dc1/nyc-web-1' will be queried for a group list. If this query - returned 'test,honeypot' then the node address to both groups. -This can also be set with the environmental variable CONSUL_KV_GROUPS. 
- -'kv_metadata': - -kv_metadata is used to lookup metadata for each discovered node. Like kv_groups -above it is used to build a path to lookup in the kv store where it expects to -find a json dictionary of metadata entries. If found, each key/value pair in the -dictionary is added to the metadata for the node. eg node 'nyc-web-1' in datacenter -'nyc-dc1' and kv_metadata = 'ansible/metadata', then the key -'ansible/metadata/nyc-dc1/nyc-web-1' should contain '{"databse": "postgres"}' -This can also be set with the environmental variable CONSUL_KV_METADATA. - -'availability': - -if true then availability groups will be created for each service. The node will -be added to one of the groups based on the health status of the service. The -group name is derived from the service name and the configurable availability -suffixes -This can also be set with the environmental variable CONSUL_AVAILABILITY. - -'available_suffix': - -suffix that should be appended to the service availability groups for available -services e.g. if the suffix is '_up' and the service is nginx, then nodes with -healthy nginx services will be added to the nginix_up group. Defaults to -'_available' -This can also be set with the environmental variable CONSUL_AVAILABLE_SUFFIX. - -'unavailable_suffix': - -as above but for unhealthy services, defaults to '_unavailable' -This can also be set with the environmental variable CONSUL_UNAVAILABLE_SUFFIX. - -Note that if the inventory discovers an 'ssh' service running on a node it will -register the port as ansible_ssh_port in the node's metadata and this port will -be used to access the machine. 
-``` - -''' - -import os -import re -import argparse -import sys - -from ansible.module_utils.six.moves import configparser - - -def get_log_filename(): - tty_filename = '/dev/tty' - stdout_filename = '/dev/stdout' - - if not os.path.exists(tty_filename): - return stdout_filename - if not os.access(tty_filename, os.W_OK): - return stdout_filename - if os.getenv('TEAMCITY_VERSION'): - return stdout_filename - - return tty_filename - - -def setup_logging(): - filename = get_log_filename() - - import logging.config - logging.config.dictConfig({ - 'version': 1, - 'formatters': { - 'simple': { - 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s', - }, - }, - 'root': { - 'level': os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_LEVEL', 'WARN'), - 'handlers': ['console'], - }, - 'handlers': { - 'console': { - 'class': 'logging.FileHandler', - 'filename': filename, - 'formatter': 'simple', - }, - }, - 'loggers': { - 'iso8601': { - 'qualname': 'iso8601', - 'level': 'INFO', - }, - }, - }) - logger = logging.getLogger('consul_io.py') - logger.debug('Invoked with %r', sys.argv) - - -if os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_ENABLED'): - setup_logging() - - -import json - -try: - import consul -except ImportError as e: - sys.exit("""failed=True msg='python-consul required for this module. 
-See https://python-consul.readthedocs.io/en/latest/#installation'""") - -from ansible.module_utils.six import iteritems - - -class ConsulInventory(object): - - def __init__(self): - ''' Create an inventory based on the catalog of nodes and services - registered in a consul cluster''' - self.node_metadata = {} - self.nodes = {} - self.nodes_by_service = {} - self.nodes_by_tag = {} - self.nodes_by_datacenter = {} - self.nodes_by_kv = {} - self.nodes_by_availability = {} - self.current_dc = None - self.inmemory_kv = [] - self.inmemory_nodes = [] - - config = ConsulConfig() - self.config = config - - self.consul_api = config.get_consul_api() - - if config.has_config('datacenter'): - if config.has_config('host'): - self.load_data_for_node(config.host, config.datacenter) - else: - self.load_data_for_datacenter(config.datacenter) - else: - self.load_all_data_consul() - - self.combine_all_results() - print(json.dumps(self.inventory, sort_keys=True, indent=2)) - - def bulk_load(self, datacenter): - index, groups_list = self.consul_api.kv.get(self.config.kv_groups, recurse=True, dc=datacenter) - index, metadata_list = self.consul_api.kv.get(self.config.kv_metadata, recurse=True, dc=datacenter) - index, nodes = self.consul_api.catalog.nodes(dc=datacenter) - self.inmemory_kv += groups_list - self.inmemory_kv += metadata_list - self.inmemory_nodes += nodes - - def load_all_data_consul(self): - ''' cycle through each of the datacenters in the consul catalog and process - the nodes in each ''' - self.datacenters = self.consul_api.catalog.datacenters() - for datacenter in self.datacenters: - self.current_dc = datacenter - self.bulk_load(datacenter) - self.load_data_for_datacenter(datacenter) - - def load_availability_groups(self, node, datacenter): - '''check the health of each service on a node and add the node to either - an 'available' or 'unavailable' grouping. 
The suffix for each group can be - controlled from the config''' - if self.config.has_config('availability'): - for service_name, service in iteritems(node['Services']): - for node in self.consul_api.health.service(service_name)[1]: - if self.is_service_available(node, service_name): - suffix = self.config.get_availability_suffix( - 'available_suffix', '_available') - else: - suffix = self.config.get_availability_suffix( - 'unavailable_suffix', '_unavailable') - self.add_node_to_map(self.nodes_by_availability, - service_name + suffix, node['Node']) - - def is_service_available(self, node, service_name): - '''check the availability of the service on the node beside ensuring the - availability of the node itself''' - consul_ok = service_ok = False - for check in node['Checks']: - if check['CheckID'] == 'serfHealth': - consul_ok = check['Status'] == 'passing' - elif check['ServiceName'] == service_name: - service_ok = check['Status'] == 'passing' - return consul_ok and service_ok - - def consul_get_kv_inmemory(self, key): - result = filter(lambda x: x['Key'] == key, self.inmemory_kv) - return result.pop() if result else None - - def consul_get_node_inmemory(self, node): - result = filter(lambda x: x['Node'] == node, self.inmemory_nodes) - return {"Node": result.pop(), "Services": {}} if result else None - - def load_data_for_datacenter(self, datacenter): - '''processes all the nodes in a particular datacenter''' - if self.config.bulk_load == 'true': - nodes = self.inmemory_nodes - else: - index, nodes = self.consul_api.catalog.nodes(dc=datacenter) - for node in nodes: - self.add_node_to_map(self.nodes_by_datacenter, datacenter, node) - self.load_data_for_node(node['Node'], datacenter) - - def load_data_for_node(self, node, datacenter): - '''loads the data for a single node adding it to various groups based on - metadata retrieved from the kv store and service availability''' - - if self.config.suffixes == 'true': - index, node_data = self.consul_api.catalog.node(node, 
dc=datacenter) - else: - node_data = self.consul_get_node_inmemory(node) - node = node_data['Node'] - - self.add_node_to_map(self.nodes, 'all', node) - self.add_metadata(node_data, "consul_datacenter", datacenter) - self.add_metadata(node_data, "consul_nodename", node['Node']) - - self.load_groups_from_kv(node_data) - self.load_node_metadata_from_kv(node_data) - if self.config.suffixes == 'true': - self.load_availability_groups(node_data, datacenter) - for name, service in node_data['Services'].items(): - self.load_data_from_service(name, service, node_data) - - def load_node_metadata_from_kv(self, node_data): - ''' load the json dict at the metadata path defined by the kv_metadata value - and the node name add each entry in the dictionary to the node's - metadata ''' - node = node_data['Node'] - if self.config.has_config('kv_metadata'): - key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node']) - if self.config.bulk_load == 'true': - metadata = self.consul_get_kv_inmemory(key) - else: - index, metadata = self.consul_api.kv.get(key) - if metadata and metadata['Value']: - try: - metadata = json.loads(metadata['Value']) - for k, v in metadata.items(): - self.add_metadata(node_data, k, v) - except Exception: - pass - - def load_groups_from_kv(self, node_data): - ''' load the comma separated list of groups at the path defined by the - kv_groups config value and the node name add the node address to each - group found ''' - node = node_data['Node'] - if self.config.has_config('kv_groups'): - key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node']) - if self.config.bulk_load == 'true': - groups = self.consul_get_kv_inmemory(key) - else: - index, groups = self.consul_api.kv.get(key) - if groups and groups['Value']: - for group in groups['Value'].decode().split(','): - self.add_node_to_map(self.nodes_by_kv, group.strip(), node) - - def load_data_from_service(self, service_name, service, node_data): - '''process a service registered on a 
node, adding the node to a group with - the service name. Each service tag is extracted and the node is added to a - tag grouping also''' - self.add_metadata(node_data, "consul_services", service_name, True) - - if self.is_service("ssh", service_name): - self.add_metadata(node_data, "ansible_ssh_port", service['Port']) - - if self.config.has_config('servers_suffix'): - service_name = service_name + self.config.servers_suffix - - self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node']) - self.extract_groups_from_tags(service_name, service, node_data) - - def is_service(self, target, name): - return name and (name.lower() == target.lower()) - - def extract_groups_from_tags(self, service_name, service, node_data): - '''iterates each service tag and adds the node to groups derived from the - service and tag names e.g. nginx_master''' - if self.config.has_config('tags') and service['Tags']: - tags = service['Tags'] - self.add_metadata(node_data, "consul_%s_tags" % service_name, tags) - for tag in service['Tags']: - tagname = service_name + '_' + tag - self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node']) - - def combine_all_results(self): - '''prunes and sorts all groupings for combination into the final map''' - self.inventory = {"_meta": {"hostvars": self.node_metadata}} - groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service, - self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability] - for grouping in groupings: - for name, addresses in grouping.items(): - self.inventory[name] = sorted(list(set(addresses))) - - def add_metadata(self, node_data, key, value, is_list=False): - ''' Pushed an element onto a metadata dict for the node, creating - the dict if it doesn't exist ''' - key = self.to_safe(key) - node = self.get_inventory_name(node_data['Node']) - - if node in self.node_metadata: - metadata = self.node_metadata[node] - else: - metadata = {} - self.node_metadata[node] = metadata - if is_list: - 
self.push(metadata, key, value) - else: - metadata[key] = value - - def get_inventory_name(self, node_data): - '''return the ip or a node name that can be looked up in consul's dns''' - domain = self.config.domain - if domain: - node_name = node_data['Node'] - if self.current_dc: - return '%s.node.%s.%s' % (node_name, self.current_dc, domain) - else: - return '%s.node.%s' % (node_name, domain) - else: - return node_data['Address'] - - def add_node_to_map(self, map, name, node): - self.push(map, name, self.get_inventory_name(node)) - - def push(self, my_dict, key, element): - ''' Pushed an element onto an array that may not have been defined in the - dict ''' - key = self.to_safe(key) - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used - as Ansible groups ''' - return re.sub(r'[^A-Za-z0-9\-\.]', '_', word) - - def sanitize_dict(self, d): - - new_dict = {} - for k, v in d.items(): - if v is not None: - new_dict[self.to_safe(str(k))] = self.to_safe(str(v)) - return new_dict - - def sanitize_list(self, seq): - new_seq = [] - for d in seq: - new_seq.append(self.sanitize_dict(d)) - return new_seq - - -class ConsulConfig(dict): - - def __init__(self): - self.read_settings() - self.read_cli_args() - self.read_env_vars() - - def has_config(self, name): - if hasattr(self, name): - return getattr(self, name) - else: - return False - - def read_settings(self): - ''' Reads the settings from the consul_io.ini file (or consul.ini for backwards compatibility)''' - config = configparser.SafeConfigParser() - if os.path.isfile(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini'): - config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini') - else: - config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini') - - config_options = ['host', 'token', 'datacenter', 'servers_suffix', - 'tags', 'kv_metadata', 
'kv_groups', 'availability', - 'unavailable_suffix', 'available_suffix', 'url', - 'domain', 'suffixes', 'bulk_load'] - for option in config_options: - value = None - if config.has_option('consul', option): - value = config.get('consul', option).lower() - setattr(self, option, value) - - def read_cli_args(self): - ''' Command line argument processing ''' - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based nodes in a Consul cluster') - - parser.add_argument('--list', action='store_true', - help='Get all inventory variables from all nodes in the consul cluster') - parser.add_argument('--host', action='store', - help='Get all inventory variables about a specific consul node,' - 'requires datacenter set in consul.ini.') - parser.add_argument('--datacenter', action='store', - help='Get all inventory about a specific consul datacenter') - - args = parser.parse_args() - arg_names = ['host', 'datacenter'] - - for arg in arg_names: - if getattr(args, arg): - setattr(self, arg, getattr(args, arg)) - - def read_env_vars(self): - env_var_options = ['host', 'token', 'datacenter', 'servers_suffix', - 'tags', 'kv_metadata', 'kv_groups', 'availability', - 'unavailable_suffix', 'available_suffix', 'url', - 'domain', 'suffixes', 'bulk_load'] - for option in env_var_options: - value = None - env_var = 'CONSUL_' + option.upper() - if os.environ.get(env_var): - setattr(self, option, os.environ.get(env_var)) - - def get_availability_suffix(self, suffix, default): - if self.has_config(suffix): - return self.has_config(suffix) - return default - - def get_consul_api(self): - '''get an instance of the api based on the supplied configuration''' - host = 'localhost' - port = 8500 - token = None - scheme = 'http' - - if hasattr(self, 'url'): - from ansible.module_utils.six.moves.urllib.parse import urlparse - o = urlparse(self.url) - if o.hostname: - host = o.hostname - if o.port: - port = o.port - if o.scheme: - scheme = o.scheme - - if hasattr(self, 
'token'): - token = self.token - if not token: - token = 'anonymous' - return consul.Consul(host=host, port=port, token=token, scheme=scheme) - - -ConsulInventory() diff --git a/scripts/inventory/docker.py b/scripts/inventory/docker.py deleted file mode 100755 index b029d1f51b..0000000000 --- a/scripts/inventory/docker.py +++ /dev/null @@ -1,892 +0,0 @@ -#!/usr/bin/env python -# -# (c) 2016 Paul Durivage -# Chris Houseknecht -# James Tanner -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' - -Docker Inventory Script -======================= -The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic -because the inventory is generated at run-time rather than being read from a static file. The script generates the -inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the -script contacts can be defined using environment variables or a configuration file. - -Requirements ------------- - -Using the docker modules requires having docker-py -installed on the host running Ansible. 
To install docker-py: - - pip install docker-py - - -Run for Specific Host ---------------------- -When run for a specific container using the --host option this script returns the following hostvars: - -{ - "ansible_ssh_host": "", - "ansible_ssh_port": 0, - "docker_apparmorprofile": "", - "docker_args": [], - "docker_config": { - "AttachStderr": false, - "AttachStdin": false, - "AttachStdout": false, - "Cmd": [ - "/hello" - ], - "Domainname": "", - "Entrypoint": null, - "Env": null, - "Hostname": "9f2f80b0a702", - "Image": "hello-world", - "Labels": {}, - "OnBuild": null, - "OpenStdin": false, - "StdinOnce": false, - "Tty": false, - "User": "", - "Volumes": null, - "WorkingDir": "" - }, - "docker_created": "2016-04-18T02:05:59.659599249Z", - "docker_driver": "aufs", - "docker_execdriver": "native-0.2", - "docker_execids": null, - "docker_graphdriver": { - "Data": null, - "Name": "aufs" - }, - "docker_hostconfig": { - "Binds": null, - "BlkioWeight": 0, - "CapAdd": null, - "CapDrop": null, - "CgroupParent": "", - "ConsoleSize": [ - 0, - 0 - ], - "ContainerIDFile": "", - "CpuPeriod": 0, - "CpuQuota": 0, - "CpuShares": 0, - "CpusetCpus": "", - "CpusetMems": "", - "Devices": null, - "Dns": null, - "DnsOptions": null, - "DnsSearch": null, - "ExtraHosts": null, - "GroupAdd": null, - "IpcMode": "", - "KernelMemory": 0, - "Links": null, - "LogConfig": { - "Config": {}, - "Type": "json-file" - }, - "LxcConf": null, - "Memory": 0, - "MemoryReservation": 0, - "MemorySwap": 0, - "MemorySwappiness": null, - "NetworkMode": "default", - "OomKillDisable": false, - "PidMode": "host", - "PortBindings": null, - "Privileged": false, - "PublishAllPorts": false, - "ReadonlyRootfs": false, - "RestartPolicy": { - "MaximumRetryCount": 0, - "Name": "" - }, - "SecurityOpt": [ - "label:disable" - ], - "UTSMode": "", - "Ulimits": null, - "VolumeDriver": "", - "VolumesFrom": null - }, - "docker_hostnamepath": 
"/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname", - "docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts", - "docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14", - "docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7", - "docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6a-json.log", - "docker_mountlabel": "", - "docker_mounts": [], - "docker_name": "/hello-world", - "docker_networksettings": { - "Bridge": "", - "EndpointID": "", - "Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "HairpinMode": false, - "IPAddress": "", - "IPPrefixLen": 0, - "IPv6Gateway": "", - "LinkLocalIPv6Address": "", - "LinkLocalIPv6PrefixLen": 0, - "MacAddress": "", - "Networks": { - "bridge": { - "EndpointID": "", - "Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "IPAddress": "", - "IPPrefixLen": 0, - "IPv6Gateway": "", - "MacAddress": "" - } - }, - "Ports": null, - "SandboxID": "", - "SandboxKey": "", - "SecondaryIPAddresses": null, - "SecondaryIPv6Addresses": null - }, - "docker_path": "/hello", - "docker_processlabel": "", - "docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf", - "docker_restartcount": 0, - "docker_short_id": "9f2f80b0a7023", - "docker_state": { - "Dead": false, - "Error": "", - "ExitCode": 0, - "FinishedAt": "2016-04-18T02:06:00.296619369Z", - "OOMKilled": false, - "Paused": false, - "Pid": 0, - "Restarting": false, - "Running": false, - "StartedAt": "2016-04-18T02:06:00.272065041Z", - "Status": "exited" - } -} - -Groups ------- -When run in --list mode (the default), container instances are grouped by: - - - container id - - container name - - container 
short id - - image_name (image_) - - stack_name (stack_) - - service_name (service_) - - docker_host - - running - - stopped - - -Configuration: --------------- -You can control the behavior of the inventory script by passing arguments, defining environment variables, or -creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence -is command line args, then the docker.yml file and finally environment variables. - -Environment variables: -...................... - -To connect to a single Docker API the following variables can be defined in the environment to control the connection -options. These are the same environment variables used by the Docker modules. - - DOCKER_HOST - The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock. - - DOCKER_API_VERSION: - The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported - by docker-py. - - DOCKER_TIMEOUT: - The maximum amount of time in seconds to wait on a response fromm the API. Defaults to 60 seconds. - - DOCKER_TLS: - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. - Defaults to False. - - DOCKER_TLS_VERIFY: - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. - Default is False - - DOCKER_TLS_HOSTNAME: - When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults - to localhost. - - DOCKER_CERT_PATH: - Path to the directory containing the client certificate, client key and CA certificate. - - DOCKER_SSL_VERSION: - Provide a valid SSL version number. 
Default value determined by docker-py, which at the time of this writing - was 1.0 - -In addition to the connection variables there are a couple variables used to control the execution and output of the -script: - - DOCKER_CONFIG_FILE - Path to the configuration file. Defaults to ./docker.yml. - - DOCKER_PRIVATE_SSH_PORT: - The private port (container port) on which SSH is listening for connections. Defaults to 22. - - DOCKER_DEFAULT_IP: - The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'. - - -Configuration File -.................. - -Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory. - -The default name of the file is derived from the name of the inventory script. By default the script will look for -basename of the script (i.e. docker) with an extension of '.yml'. - -You can also override the default name of the script by defining DOCKER_CONFIG_FILE in the environment. - -Here's what you can define in docker_inventory.yml: - - defaults - Defines a default connection. Defaults will be taken from this and applied to any values not provided - for a host defined in the hosts list. - - hosts - If you wish to get inventory from more than one Docker host, define a hosts list. - -For the default host and each host in the hosts list define the following attributes: - - host: - description: The URL or Unix socket path used to connect to the Docker API. - required: yes - - tls: - description: Connect using TLS without verifying the authenticity of the Docker host server. - default: false - required: false - - tls_verify: - description: Connect using TLS without verifying the authenticity of the Docker host server. - default: false - required: false - - cert_path: - description: Path to the client's TLS certificate file. 
- default: null - required: false - - cacert_path: - description: Use a CA certificate when performing server verification by providing the path to a CA certificate file. - default: null - required: false - - key_path: - description: Path to the client's TLS key file. - default: null - required: false - - version: - description: The Docker API version. - required: false - default: will be supplied by the docker-py module. - - timeout: - description: The amount of time in seconds to wait on an API response. - required: false - default: 60 - - default_ip: - description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface - '0.0.0.0'. - required: false - default: 127.0.0.1 - - private_ssh_port: - description: The port containers use for SSH - required: false - default: 22 - -Examples --------- - -# Connect to the Docker API on localhost port 4243 and format the JSON output -DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty - -# Any container's ssh port exposed on 0.0.0.0 will be mapped to -# another IP address (where Ansible will attempt to connect via SSH) -DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty - -# Run as input to a playbook: -ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml - -# Simple playbook to invoke with the above example: - - - name: Test docker_inventory - hosts: all - connection: local - gather_facts: no - tasks: - - debug: msg="Container - {{ inventory_hostname }}" - -''' - -import os -import sys -import json -import argparse -import re -import yaml - -from collections import defaultdict -# Manipulation of the path is needed because the docker-py -# module is imported by the name docker, and because this file -# is also named docker -for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]: - try: - del sys.path[sys.path.index(path)] - except Exception: - pass - -HAS_DOCKER_PY = True -HAS_DOCKER_ERROR = False - -try: - from docker.errors 
import APIError, TLSParameterError - from docker.tls import TLSConfig - from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION -except ImportError as exc: - HAS_DOCKER_ERROR = str(exc) - HAS_DOCKER_PY = False - -# Client has recently been split into DockerClient and APIClient -try: - from docker import Client -except ImportError as dummy: - try: - from docker import APIClient as Client - except ImportError as exc: - HAS_DOCKER_ERROR = str(exc) - HAS_DOCKER_PY = False - - class Client: - pass - -DEFAULT_DOCKER_CONFIG_FILE = os.path.splitext(os.path.basename(__file__))[0] + '.yml' -DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock' -DEFAULT_TLS = False -DEFAULT_TLS_VERIFY = False -DEFAULT_TLS_HOSTNAME = "localhost" -DEFAULT_IP = '127.0.0.1' -DEFAULT_SSH_PORT = '22' - -BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True] -BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False] - - -DOCKER_ENV_ARGS = dict( - config_file='DOCKER_CONFIG_FILE', - docker_host='DOCKER_HOST', - api_version='DOCKER_API_VERSION', - cert_path='DOCKER_CERT_PATH', - ssl_version='DOCKER_SSL_VERSION', - tls='DOCKER_TLS', - tls_verify='DOCKER_TLS_VERIFY', - tls_hostname='DOCKER_TLS_HOSTNAME', - timeout='DOCKER_TIMEOUT', - private_ssh_port='DOCKER_DEFAULT_SSH_PORT', - default_ip='DOCKER_DEFAULT_IP', -) - - -def fail(msg): - sys.stderr.write("%s\n" % msg) - sys.exit(1) - - -def log(msg, pretty_print=False): - if pretty_print: - print(json.dumps(msg, sort_keys=True, indent=2)) - else: - print(msg + u'\n') - - -class AnsibleDockerClient(Client): - def __init__(self, auth_params, debug): - - self.auth_params = auth_params - self.debug = debug - self._connect_params = self._get_connect_params() - - try: - super(AnsibleDockerClient, self).__init__(**self._connect_params) - except APIError as exc: - self.fail("Docker API error: %s" % exc) - except Exception as exc: - self.fail("Error connecting: %s" % exc) - - def fail(self, msg): - fail(msg) - - def log(self, msg, 
pretty_print=False): - if self.debug: - log(msg, pretty_print) - - def _get_tls_config(self, **kwargs): - self.log("get_tls_config:") - for key in kwargs: - self.log(" %s: %s" % (key, kwargs[key])) - try: - tls_config = TLSConfig(**kwargs) - return tls_config - except TLSParameterError as exc: - self.fail("TLS config error: %s" % exc) - - def _get_connect_params(self): - auth = self.auth_params - - self.log("auth params:") - for key in auth: - self.log(" %s: %s" % (key, auth[key])) - - if auth['tls'] or auth['tls_verify']: - auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://') - - if auth['tls'] and auth['cert_path'] and auth['key_path']: - # TLS with certs and no host verification - tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - verify=False, - ssl_version=auth['ssl_version']) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls']: - # TLS with no certs and not host verification - tls_config = self._get_tls_config(verify=False, - ssl_version=auth['ssl_version']) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls_verify'] and auth['cert_path'] and auth['key_path']: - # TLS with certs and host verification - if auth['cacert_path']: - tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - ca_cert=auth['cacert_path'], - verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version']) - else: - tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version']) - - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls_verify'] and auth['cacert_path']: - # TLS with cacert only - tls_config = 
self._get_tls_config(ca_cert=auth['cacert_path'], - assert_hostname=auth['tls_hostname'], - verify=True, - ssl_version=auth['ssl_version']) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls_verify']: - # TLS with verify and no certs - tls_config = self._get_tls_config(verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version']) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - # No TLS - return dict(base_url=auth['docker_host'], - version=auth['api_version'], - timeout=auth['timeout']) - - def _handle_ssl_error(self, error): - match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error)) - if match: - msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \ - "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \ - "You may also use TLS without verification by setting the tls parameter to true." 
\ - % (self.auth_params['tls_hostname'], match.group(1), match.group(1)) - self.fail(msg) - self.fail("SSL Exception: %s" % (error)) - - -class EnvArgs(object): - def __init__(self): - self.config_file = None - self.docker_host = None - self.api_version = None - self.cert_path = None - self.ssl_version = None - self.tls = None - self.tls_verify = None - self.tls_hostname = None - self.timeout = None - self.default_ssh_port = None - self.default_ip = None - - -class DockerInventory(object): - - def __init__(self): - self._args = self._parse_cli_args() - self._env_args = self._parse_env_args() - self.groups = defaultdict(list) - self.hostvars = defaultdict(dict) - - def run(self): - config_from_file = self._parse_config_file() - if not config_from_file: - config_from_file = dict() - docker_hosts = self.get_hosts(config_from_file) - - for host in docker_hosts: - client = AnsibleDockerClient(host, self._args.debug) - self.get_inventory(client, host) - - if not self._args.host: - self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts] - self.groups['_meta'] = dict( - hostvars=self.hostvars - ) - print(self._json_format_dict(self.groups, pretty_print=self._args.pretty)) - else: - print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty)) - - sys.exit(0) - - def get_inventory(self, client, host): - - ssh_port = host.get('default_ssh_port') - default_ip = host.get('default_ip') - hostname = host.get('docker_host') - - try: - containers = client.containers(all=True) - except Exception as exc: - self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc))) - - for container in containers: - id = container.get('Id') - short_id = id[:13] - - try: - name = container.get('Names', list()).pop(0).lstrip('/') - except IndexError: - name = short_id - - if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]): - try: - inspect = client.inspect_container(id) - except 
Exception as exc: - self.fail("Error inspecting container %s - %s" % (name, str(exc))) - - running = inspect.get('State', dict()).get('Running') - - # Add container to groups - image_name = inspect.get('Config', dict()).get('Image') - if image_name: - self.groups["image_%s" % (image_name)].append(name) - - stack_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.stack.namespace') - if stack_name: - self.groups["stack_%s" % stack_name].append(name) - - service_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.swarm.service.name') - if service_name: - self.groups["service_%s" % service_name].append(name) - - self.groups[id].append(name) - self.groups[name].append(name) - if short_id not in self.groups: - self.groups[short_id].append(name) - self.groups[hostname].append(name) - - if running is True: - self.groups['running'].append(name) - else: - self.groups['stopped'].append(name) - - # Figure ous ssh IP and Port - try: - # Lookup the public facing port Nat'ed to ssh port. - port = client.port(container, ssh_port)[0] - except (IndexError, AttributeError, TypeError): - port = dict() - - try: - ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp'] - except KeyError: - ip = '' - - facts = dict( - ansible_ssh_host=ip, - ansible_ssh_port=port.get('HostPort', int()), - docker_name=name, - docker_short_id=short_id - ) - - for key in inspect: - fact_key = self._slugify(key) - facts[fact_key] = inspect.get(key) - - self.hostvars[name].update(facts) - - def _slugify(self, value): - return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) - - def get_hosts(self, config): - ''' - Determine the list of docker hosts we need to talk to. - - :param config: dictionary read from config file. can be empty. 
- :return: list of connection dictionaries - ''' - hosts = list() - - hosts_list = config.get('hosts') - defaults = config.get('defaults', dict()) - self.log('defaults:') - self.log(defaults, pretty_print=True) - def_host = defaults.get('host') - def_tls = defaults.get('tls') - def_tls_verify = defaults.get('tls_verify') - def_tls_hostname = defaults.get('tls_hostname') - def_ssl_version = defaults.get('ssl_version') - def_cert_path = defaults.get('cert_path') - def_cacert_path = defaults.get('cacert_path') - def_key_path = defaults.get('key_path') - def_version = defaults.get('version') - def_timeout = defaults.get('timeout') - def_ip = defaults.get('default_ip') - def_ssh_port = defaults.get('private_ssh_port') - - if hosts_list: - # use hosts from config file - for host in hosts_list: - docker_host = host.get('host') or def_host or self._args.docker_host or \ - self._env_args.docker_host or DEFAULT_DOCKER_HOST - api_version = host.get('version') or def_version or self._args.api_version or \ - self._env_args.api_version or DEFAULT_DOCKER_API_VERSION - tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \ - self._env_args.tls_hostname or DEFAULT_TLS_HOSTNAME - tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \ - self._env_args.tls_verify or DEFAULT_TLS_VERIFY - tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS - ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \ - self._env_args.ssl_version - - cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \ - self._env_args.cert_path - if cert_path and cert_path == self._env_args.cert_path: - cert_path = os.path.join(cert_path, 'cert.pem') - - cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \ - self._env_args.cert_path - if cacert_path and cacert_path == self._env_args.cert_path: - cacert_path = 
os.path.join(cacert_path, 'ca.pem') - - key_path = host.get('key_path') or def_key_path or self._args.key_path or \ - self._env_args.cert_path - if key_path and key_path == self._env_args.cert_path: - key_path = os.path.join(key_path, 'key.pem') - - timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \ - DEFAULT_TIMEOUT_SECONDS - default_ip = host.get('default_ip') or def_ip or self._env_args.default_ip or \ - self._args.default_ip_address or DEFAULT_IP - default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \ - DEFAULT_SSH_PORT - host_dict = dict( - docker_host=docker_host, - api_version=api_version, - tls=tls, - tls_verify=tls_verify, - tls_hostname=tls_hostname, - cert_path=cert_path, - cacert_path=cacert_path, - key_path=key_path, - ssl_version=ssl_version, - timeout=timeout, - default_ip=default_ip, - default_ssh_port=default_ssh_port, - ) - hosts.append(host_dict) - else: - # use default definition - docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST - api_version = def_version or self._args.api_version or self._env_args.api_version or \ - DEFAULT_DOCKER_API_VERSION - tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname or \ - DEFAULT_TLS_HOSTNAME - tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY - tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS - ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version - - cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path - if cert_path and cert_path == self._env_args.cert_path: - cert_path = os.path.join(cert_path, 'cert.pem') - - cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path - if cacert_path and cacert_path == self._env_args.cert_path: - cacert_path = os.path.join(cacert_path, 
'ca.pem') - - key_path = def_key_path or self._args.key_path or self._env_args.cert_path - if key_path and key_path == self._env_args.cert_path: - key_path = os.path.join(key_path, 'key.pem') - - timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS - default_ip = def_ip or self._env_args.default_ip or self._args.default_ip_address or DEFAULT_IP - default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT - host_dict = dict( - docker_host=docker_host, - api_version=api_version, - tls=tls, - tls_verify=tls_verify, - tls_hostname=tls_hostname, - cert_path=cert_path, - cacert_path=cacert_path, - key_path=key_path, - ssl_version=ssl_version, - timeout=timeout, - default_ip=default_ip, - default_ssh_port=default_ssh_port, - ) - hosts.append(host_dict) - self.log("hosts: ") - self.log(hosts, pretty_print=True) - return hosts - - def _parse_config_file(self): - config = dict() - config_file = DEFAULT_DOCKER_CONFIG_FILE - - if self._args.config_file: - config_file = self._args.config_file - elif self._env_args.config_file: - config_file = self._env_args.config_file - - config_file = os.path.abspath(config_file) - - if os.path.isfile(config_file): - with open(config_file) as f: - try: - config = yaml.safe_load(f.read()) - except Exception as exc: - self.fail("Error: parsing %s - %s" % (config_file, str(exc))) - else: - msg = "Error: config file given by {} does not exist - " + config_file - if self._args.config_file: - self.fail(msg.format('command line argument')) - elif self._env_args.config_file: - self.fail(msg.format(DOCKER_ENV_ARGS.get('config_file'))) - else: - self.log(msg.format('DEFAULT_DOCKER_CONFIG_FILE')) - return config - - def log(self, msg, pretty_print=False): - if self._args.debug: - log(msg, pretty_print) - - def fail(self, msg): - fail(msg) - - def _parse_env_args(self): - args = EnvArgs() - for key, value in DOCKER_ENV_ARGS.items(): - if os.environ.get(value): - val = 
os.environ.get(value) - if val in BOOLEANS_TRUE: - val = True - if val in BOOLEANS_FALSE: - val = False - setattr(args, key, val) - return args - - def _parse_cli_args(self): - # Parse command line arguments - - parser = argparse.ArgumentParser( - description='Return Ansible inventory for one or more Docker hosts.') - parser.add_argument('--list', action='store_true', default=True, - help='List all containers (default: True)') - parser.add_argument('--debug', action='store_true', default=False, - help='Send debug messages to STDOUT') - parser.add_argument('--host', action='store', - help='Only get information for a specific container.') - parser.add_argument('--pretty', action='store_true', default=False, - help='Pretty print JSON output(default: False)') - parser.add_argument('--config-file', action='store', default=None, - help="Name of the config file to use. Default is %s" % (DEFAULT_DOCKER_CONFIG_FILE)) - parser.add_argument('--docker-host', action='store', default=None, - help="The base url or Unix sock path to connect to the docker daemon. Defaults to %s" - % (DEFAULT_DOCKER_HOST)) - parser.add_argument('--tls-hostname', action='store', default=None, - help="Host name to expect in TLS certs. Defaults to %s" % DEFAULT_TLS_HOSTNAME) - parser.add_argument('--api-version', action='store', default=None, - help="Docker daemon API version. Defaults to %s" % (DEFAULT_DOCKER_API_VERSION)) - parser.add_argument('--timeout', action='store', default=None, - help="Docker connection timeout in seconds. 
Defaults to %s" - % (DEFAULT_TIMEOUT_SECONDS)) - parser.add_argument('--cacert-path', action='store', default=None, - help="Path to the TLS certificate authority pem file.") - parser.add_argument('--cert-path', action='store', default=None, - help="Path to the TLS certificate pem file.") - parser.add_argument('--key-path', action='store', default=None, - help="Path to the TLS encryption key pem file.") - parser.add_argument('--ssl-version', action='store', default=None, - help="TLS version number") - parser.add_argument('--tls', action='store_true', default=None, - help="Use TLS. Defaults to %s" % (DEFAULT_TLS)) - parser.add_argument('--tls-verify', action='store_true', default=None, - help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY)) - parser.add_argument('--private-ssh-port', action='store', default=None, - help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT)) - parser.add_argument('--default-ip-address', action='store', default=None, - help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP)) - return parser.parse_args() - - def _json_format_dict(self, data, pretty_print=False): - # format inventory data for output - if pretty_print: - return json.dumps(data, sort_keys=True, indent=4) - else: - return json.dumps(data) - - -def main(): - - if not HAS_DOCKER_PY: - fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR)) - - DockerInventory().run() - - -main() diff --git a/scripts/inventory/docker.yml b/scripts/inventory/docker.yml deleted file mode 100644 index 97239392d1..0000000000 --- a/scripts/inventory/docker.yml +++ /dev/null @@ -1,74 +0,0 @@ -# This is the configuration file for the Docker inventory script: docker_inventory.py. -# -# You can define the following in this file: -# -# defaults -# Defines a default connection. Defaults will be taken from this and applied to any values not provided -# for a host defined in the hosts list. 
-# -# hosts -# If you wish to get inventory from more than one Docker host, define a hosts list. -# -# For the default host and each host in the hosts list define the following attributes: -# -# host: -# description: The URL or Unix socket path used to connect to the Docker API. -# required: yes -# -# tls: -# description: Connect using TLS without verifying the authenticity of the Docker host server. -# default: false -# required: false -# -# tls_verify: -# description: Connect using TLS without verifying the authenticity of the Docker host server. -# default: false -# required: false -# -# cert_path: -# description: Path to the client's TLS certificate file. -# default: null -# required: false -# -# cacert_path: -# description: Use a CA certificate when performing server verification by providing the path to a CA certificate file. -# default: null -# required: false -# -# key_path: -# description: Path to the client's TLS key file. -# default: null -# required: false -# -# version: -# description: The Docker API version. -# required: false -# default: will be supplied by the docker-py module. -# -# timeout: -# description: The amount of time in seconds to wait on an API response. -# required: false -# default: 60 -# -# default_ip: -# description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface -# '0.0.0.0'. 
-# required: false -# default: 127.0.0.1 -# -# private_ssh_port: -# description: The port containers use for SSH -# required: false -# default: 22 - -#defaults: -# host: unix:///var/run/docker.sock -# private_ssh_port: 22 -# default_ip: 127.0.0.1 - -#hosts: -# - host: tcp://10.45.5.16:4243 -# private_ssh_port: 2022 -# default_ip: 172.16.3.45 -# - host: tcp://localhost:4243 -# private_ssh_port: 2029 diff --git a/scripts/inventory/fleet.py b/scripts/inventory/fleet.py deleted file mode 100755 index cc9537e115..0000000000 --- a/scripts/inventory/fleet.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python -""" -fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and -returns it under the host group 'coreos' -""" - -# Copyright (C) 2014 Andrew Rothstein -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# -# Thanks to the vagrant.py inventory script for giving me the basic structure -# of this. 
-# - -import sys -import subprocess -import re -import string -from optparse import OptionParser -import json - -# Options -# ------------------------------ - -parser = OptionParser(usage="%prog [options] --list | --host ") -parser.add_option('--list', default=False, dest="list", action="store_true", - help="Produce a JSON consumable grouping of servers in your fleet") -parser.add_option('--host', default=None, dest="host", - help="Generate additional host specific details for given host for Ansible") -(options, args) = parser.parse_args() - -# -# helper functions -# - - -def get_ssh_config(): - configs = [] - for box in list_running_boxes(): - config = get_a_ssh_config(box) - configs.append(config) - return configs - - -# list all the running instances in the fleet -def list_running_boxes(): - boxes = [] - for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n'): - matcher = re.search(r"[^\s]+[\s]+([^\s]+).+", line) - if matcher and matcher.group(1) != "IP": - boxes.append(matcher.group(1)) - - return boxes - - -def get_a_ssh_config(box_name): - config = {} - config['Host'] = box_name - config['ansible_ssh_user'] = 'core' - config['ansible_python_interpreter'] = '/opt/bin/python' - return config - - -# List out servers that vagrant has running -# ------------------------------ -if options.list: - ssh_config = get_ssh_config() - hosts = {'coreos': []} - - for data in ssh_config: - hosts['coreos'].append(data['Host']) - - print(json.dumps(hosts)) - sys.exit(1) - -# Get out the host details -# ------------------------------ -elif options.host: - result = {} - ssh_config = get_ssh_config() - - details = filter(lambda x: (x['Host'] == options.host), ssh_config) - if len(details) > 0: - # pass through the port, in case it's non standard. 
- result = details[0] - - print(json.dumps(result)) - sys.exit(1) - - -# Print out help -# ------------------------------ -else: - parser.print_help() - sys.exit(1) diff --git a/scripts/inventory/foreman.ini b/scripts/inventory/foreman.ini deleted file mode 100644 index d157963848..0000000000 --- a/scripts/inventory/foreman.ini +++ /dev/null @@ -1,200 +0,0 @@ -# Foreman inventory (https://github.com/theforeman/foreman_ansible_inventory) -# -# This script can be used as an Ansible dynamic inventory. -# The connection parameters are set up via *foreman.ini* -# This is how the script founds the configuration file in -# order of discovery. -# -# * `/etc/ansible/foreman.ini` -# * Current directory of your inventory script. -# * `FOREMAN_INI_PATH` environment variable. -# -# ## Variables and Parameters -# -# The data returned from Foreman for each host is stored in a foreman -# hash so they're available as *host_vars* along with the parameters -# of the host and it's hostgroups: -# -# "foo.example.com": { -# "foreman": { -# "architecture_id": 1, -# "architecture_name": "x86_64", -# "build": false, -# "build_status": 0, -# "build_status_label": "Installed", -# "capabilities": [ -# "build", -# "image" -# ], -# "compute_profile_id": 4, -# "hostgroup_name": "webtier/myapp", -# "id": 70, -# "image_name": "debian8.1", -# ... -# "uuid": "50197c10-5ebb-b5cf-b384-a1e203e19e77" -# }, -# "foreman_params": { -# "testparam1": "foobar", -# "testparam2": "small", -# ... -# } -# -# and could therefore be used in Ansible like: -# -# - debug: msg="From Foreman host {{ foreman['uuid'] }}" -# -# Which yields -# -# TASK [test_foreman : debug] **************************************************** -# ok: [foo.example.com] => { -# "msg": "From Foreman host 50190bd1-052a-a34a-3c9c-df37a39550bf" -# } -# -# ## Automatic Ansible groups -# -# The inventory will provide a set of groups, by default prefixed by -# 'foreman_'. 
If you want to customize this prefix, change the -# group_prefix option in /etc/ansible/foreman.ini. The rest of this -# guide will assume the default prefix of 'foreman' -# -# The hostgroup, location, organization, content view, and lifecycle -# environment of each host are created as Ansible groups with a -# foreman_ prefix, all lowercase and problematic parameters -# removed. So e.g. the foreman hostgroup -# -# myapp / webtier / datacenter1 -# -# would turn into the Ansible group: -# -# foreman_hostgroup_myapp_webtier_datacenter1 -# -# If the parameter want_hostcollections is set to true, the -# collections each host is in are created as Ansible groups with a -# foreman_hostcollection prefix, all lowercase and problematic -# parameters removed. So e.g. the Foreman host collection -# -# Patch Window Thursday -# -# would turn into the Ansible group: -# -# foreman_hostcollection_patchwindowthursday -# -# If the parameter host_filters is set, it will be used as the -# "search" parameter for the /api/v2/hosts call. This can be used to -# restrict the list of returned host, as shown below. -# -# Furthermore Ansible groups can be created on the fly using the -# *group_patterns* variable in *foreman.ini* so that you can build up -# hierarchies using parameters on the hostgroup and host variables. 
-# -# Lets assume you have a host that is built using this nested hostgroup: -# -# myapp / webtier / datacenter1 -# -# and each of the hostgroups defines a parameters respectively: -# -# myapp: app_param = myapp -# webtier: tier_param = webtier -# datacenter1: dc_param = datacenter1 -# -# The host is also in a subnet called "mysubnet" and provisioned via an image -# then *group_patterns* like: -# -# [ansible] -# group_patterns = ["{app_param}-{tier_param}-{dc_param}", -# "{app_param}-{tier_param}", -# "{app_param}", -# "{subnet_name}-{provision_method}"] -# -# would put the host into the additional Ansible groups: -# -# - myapp-webtier-datacenter1 -# - myapp-webtier -# - myapp -# - mysubnet-image -# -# by recursively resolving the hostgroups, getting the parameter keys -# and values and doing a Python *string.format()* like replacement on -# it. -# -[foreman] -url = http://localhost:3000/ -user = foreman -password = secret -ssl_verify = True - -# Foreman 1.24 introduces a new reports API to improve performance of the inventory script. -# Note: This requires foreman_ansible plugin installed. -# Set to False if you want to use the old API. Defaults to True. - -use_reports_api = True - -# Retrieve only hosts from the organization "Web Engineering". -# host_filters = organization="Web Engineering" - -# Retrieve only hosts from the organization "Web Engineering" that are -# also in the host collection "Apache Servers". -# host_filters = organization="Web Engineering" and host_collection="Apache Servers" - -# Foreman Inventory report related configuration options. 
-# Configs that default to True : -# want_organization , want_location, want_ipv4, want_host_group, want_subnet, want_smart_proxies, want_facts -# Configs that default to False : -# want_ipv6, want_subnet_v6, want_content_facet_attributes, want_host_params - -[report] -# want_organization = True -# want_location = True -# want_ipv4 = True -# want_ipv6 = False -# want_host_group = True -# want_subnet = True -# want_subnet_v6 = False -# want_smart_proxies = True -# want_content_facet_attributes = False -# want_host_params = False - -# use this config to determine if facts are to be fetched in the report and stored on the hosts. -# want_facts = False - -# Upon receiving a request to return inventory report, Foreman schedules a report generation job. -# The script then polls the report_data endpoint repeatedly to check if the job is complete and retrieves data -# poll_interval allows to define the polling interval between 2 calls to the report_data endpoint while polling. -# Defaults to 10 seconds - -# poll_interval = 10 - -[ansible] -group_patterns = ["{app}-{tier}-{color}", - "{app}-{color}", - "{app}", - "{tier}"] -group_prefix = foreman_ - -# Whether to fetch facts from Foreman and store them on the host -want_facts = True - -# Whether to create Ansible groups for host collections. Only tested -# with Katello (Red Hat Satellite). Disabled by default to not break -# the script for stand-alone Foreman. -want_hostcollections = False - -# Whether to interpret global parameters value as JSON (if possible, else -# take as is). Only tested with Katello (Red Hat Satellite). -# This allows to define lists and dictionaries (and more complicated structures) -# variables by entering them as JSON string in Foreman parameters. -# Disabled by default as the change would else not be backward compatible. -rich_params = False - -# Whether to populate the ansible_ssh_host variable to explicitly specify the -# connection target. Only tested with Katello (Red Hat Satellite). 
-# If the foreman 'ip' fact exists then the ansible_ssh_host varibale is populated -# to permit connections where DNS resolution fails. -want_ansible_ssh_host = False - -[cache] -path = . -max_age = 60 - -# Whether to scan foreman to add recently created hosts in inventory cache -scan_new_hosts = True diff --git a/scripts/inventory/foreman.py b/scripts/inventory/foreman.py deleted file mode 100755 index f2e729b6a7..0000000000 --- a/scripts/inventory/foreman.py +++ /dev/null @@ -1,651 +0,0 @@ -#!/usr/bin/env python -# vim: set fileencoding=utf-8 : -# -# Copyright (C) 2016 Guido Günther , -# Daniel Lobato Garcia -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# This is somewhat based on cobbler inventory - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import argparse -import copy -import os -import re -import sys -from time import time, sleep -from collections import defaultdict -from distutils.version import LooseVersion, StrictVersion - -# 3rd party imports -import requests -if LooseVersion(requests.__version__) < LooseVersion('1.1.0'): - print('This script requires python-requests 1.1 as a minimum version') - sys.exit(1) - -from requests.auth import HTTPBasicAuth - -from ansible.module_utils._text import to_text -from ansible.module_utils.six.moves import configparser as ConfigParser - - -def json_format_dict(data, pretty=False): - """Converts a dict to a JSON object and dumps it as a formatted string""" - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -class ForemanInventory(object): - - def __init__(self): - self.inventory = defaultdict(list) # A list of groups and the hosts in that group - self.cache = dict() # Details about hosts in the inventory - self.params = dict() # Params of each host - self.facts = dict() # Facts of each host - self.hostgroups = dict() # host groups - 
self.hostcollections = dict() # host collections - self.session = None # Requests session - self.config_paths = [ - "/etc/ansible/foreman.ini", - os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini', - ] - env_value = os.environ.get('FOREMAN_INI_PATH') - if env_value is not None: - self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value))) - - def read_settings(self): - """Reads the settings from the foreman.ini file""" - - config = ConfigParser.SafeConfigParser() - config.read(self.config_paths) - - # Foreman API related - try: - self.foreman_url = config.get('foreman', 'url') - self.foreman_user = config.get('foreman', 'user') - self.foreman_pw = config.get('foreman', 'password', raw=True) - self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e: - print("Error parsing configuration: %s" % e, file=sys.stderr) - return False - - # Inventory Report Related - try: - self.foreman_use_reports_api = config.getboolean('foreman', 'use_reports_api') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.foreman_use_reports_api = True - - try: - self.want_organization = config.getboolean('report', 'want_organization') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_organization = True - - try: - self.want_location = config.getboolean('report', 'want_location') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_location = True - - try: - self.want_IPv4 = config.getboolean('report', 'want_ipv4') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_IPv4 = True - - try: - self.want_IPv6 = config.getboolean('report', 'want_ipv6') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_IPv6 = False - - try: - self.want_host_group = config.getboolean('report', 'want_host_group') - except (ConfigParser.NoOptionError, 
ConfigParser.NoSectionError): - self.want_host_group = True - - try: - self.want_host_params = config.getboolean('report', 'want_host_params') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_host_params = False - - try: - self.want_subnet = config.getboolean('report', 'want_subnet') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_subnet = True - - try: - self.want_subnet_v6 = config.getboolean('report', 'want_subnet_v6') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_subnet_v6 = False - - try: - self.want_smart_proxies = config.getboolean('report', 'want_smart_proxies') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_smart_proxies = True - - try: - self.want_content_facet_attributes = config.getboolean('report', 'want_content_facet_attributes') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_content_facet_attributes = False - - try: - self.report_want_facts = config.getboolean('report', 'want_facts') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.report_want_facts = True - - try: - self.poll_interval = config.getint('report', 'poll_interval') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.poll_interval = 10 - - # Ansible related - try: - group_patterns = config.get('ansible', 'group_patterns') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - group_patterns = "[]" - - self.group_patterns = json.loads(group_patterns) - - try: - self.group_prefix = config.get('ansible', 'group_prefix') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.group_prefix = "foreman_" - - try: - self.want_facts = config.getboolean('ansible', 'want_facts') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_facts = True - - self.want_facts = self.want_facts and self.report_want_facts - - try: - 
self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_hostcollections = False - - try: - self.want_ansible_ssh_host = config.getboolean('ansible', 'want_ansible_ssh_host') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.want_ansible_ssh_host = False - - # Do we want parameters to be interpreted if possible as JSON? (no by default) - try: - self.rich_params = config.getboolean('ansible', 'rich_params') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.rich_params = False - - try: - self.host_filters = config.get('foreman', 'host_filters') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.host_filters = None - - # Cache related - try: - cache_path = os.path.expanduser(config.get('cache', 'path')) - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - cache_path = '.' - (script, ext) = os.path.splitext(os.path.basename(__file__)) - self.cache_path_cache = cache_path + "/%s.cache" % script - self.cache_path_inventory = cache_path + "/%s.index" % script - self.cache_path_params = cache_path + "/%s.params" % script - self.cache_path_facts = cache_path + "/%s.facts" % script - self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script - try: - self.cache_max_age = config.getint('cache', 'max_age') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.cache_max_age = 60 - try: - self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts') - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - self.scan_new_hosts = False - - return True - - def parse_cli_args(self): - """Command line argument processing""" - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - 
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to foreman (default: False - use cache files)') - self.args = parser.parse_args() - - def _get_session(self): - if not self.session: - self.session = requests.session() - self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw) - self.session.verify = self.foreman_ssl_verify - return self.session - - def _get_json(self, url, ignore_errors=None, params=None): - if params is None: - params = {} - params['per_page'] = 250 - - page = 1 - results = [] - s = self._get_session() - while True: - params['page'] = page - ret = s.get(url, params=params) - if ignore_errors and ret.status_code in ignore_errors: - break - ret.raise_for_status() - json = ret.json() - # /hosts/:id has not results key - if 'results' not in json: - return json - # Facts are returned as dict in results not list - if isinstance(json['results'], dict): - return json['results'] - # List of all hosts is returned paginaged - results = results + json['results'] - if len(results) >= json['subtotal']: - break - page += 1 - if len(json['results']) == 0: - print("Did not make any progress during loop. 
" - "expected %d got %d" % (json['total'], len(results)), - file=sys.stderr) - break - return results - - def _use_inventory_report(self): - if not self.foreman_use_reports_api: - return False - status_url = "%s/api/v2/status" % self.foreman_url - result = self._get_json(status_url) - foreman_version = (LooseVersion(result.get('version')) >= LooseVersion('1.24.0')) - return foreman_version - - def _fetch_params(self): - options, params = ("no", "yes"), dict() - params["Organization"] = options[self.want_organization] - params["Location"] = options[self.want_location] - params["IPv4"] = options[self.want_IPv4] - params["IPv6"] = options[self.want_IPv6] - params["Facts"] = options[self.want_facts] - params["Host Group"] = options[self.want_host_group] - params["Host Collections"] = options[self.want_hostcollections] - params["Subnet"] = options[self.want_subnet] - params["Subnet v6"] = options[self.want_subnet_v6] - params["Smart Proxies"] = options[self.want_smart_proxies] - params["Content Attributes"] = options[self.want_content_facet_attributes] - params["Host Parameters"] = options[self.want_host_params] - if self.host_filters: - params["Hosts"] = self.host_filters - return params - - def _post_request(self): - url = "%s/ansible/api/v2/ansible_inventories/schedule" % self.foreman_url - session = self._get_session() - params = {'input_values': self._fetch_params()} - ret = session.post(url, json=params) - if not ret: - raise Exception("Error scheduling inventory report on foreman. Please check foreman logs!") - url = "{0}/{1}".format(self.foreman_url, ret.json().get('data_url')) - response = session.get(url) - while response: - if response.status_code != 204: - break - else: - sleep(self.poll_interval) - response = session.get(url) - if not response: - raise Exception("Error receiving inventory report from foreman. 
Please check foreman logs!") - else: - return response.json() - - def _get_hosts(self): - url = "%s/api/v2/hosts" % self.foreman_url - - params = {} - if self.host_filters: - params['search'] = self.host_filters - - return self._get_json(url, params=params) - - def _get_host_data_by_id(self, hid): - url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid) - return self._get_json(url) - - def _get_facts_by_id(self, hid): - url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid) - return self._get_json(url) - - def _resolve_params(self, host_params): - """Convert host params to dict""" - params = {} - - for param in host_params: - name = param['name'] - if self.rich_params: - try: - params[name] = json.loads(param['value']) - except ValueError: - params[name] = param['value'] - else: - params[name] = param['value'] - - return params - - def _get_facts(self, host): - """Fetch all host facts of the host""" - if not self.want_facts: - return {} - - ret = self._get_facts_by_id(host['id']) - if len(ret.values()) == 0: - facts = {} - elif len(ret.values()) == 1: - facts = list(ret.values())[0] - else: - raise ValueError("More than one set of facts returned for '%s'" % host) - return facts - - def write_to_cache(self, data, filename): - """Write data in JSON format to a file""" - json_data = json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def _write_cache(self): - self.write_to_cache(self.cache, self.cache_path_cache) - self.write_to_cache(self.inventory, self.cache_path_inventory) - self.write_to_cache(self.params, self.cache_path_params) - self.write_to_cache(self.facts, self.cache_path_facts) - self.write_to_cache(self.hostcollections, self.cache_path_hostcollections) - - def to_safe(self, word): - '''Converts 'bad' characters in a string to underscores - so they can be used as Ansible groups - - >>> ForemanInventory.to_safe("foo-bar baz") - 'foo_barbaz' - ''' - regex = r"[^A-Za-z0-9\_]" - return re.sub(regex, "_", 
word.replace(" ", "")) - - def update_cache(self, scan_only_new_hosts=False): - """Make calls to foreman and save the output in a cache""" - use_inventory_report = self._use_inventory_report() - if use_inventory_report: - self._update_cache_inventory(scan_only_new_hosts) - else: - self._update_cache_host_api(scan_only_new_hosts) - - def _update_cache_inventory(self, scan_only_new_hosts): - self.groups = dict() - self.hosts = dict() - try: - inventory_report_response = self._post_request() - except Exception: - self._update_cache_host_api(scan_only_new_hosts) - return - host_data = json.loads(inventory_report_response) - for host in host_data: - if not(host) or (host["name"] in self.cache.keys() and scan_only_new_hosts): - continue - dns_name = host['name'] - - host_params = host.pop('host_parameters', {}) - fact_list = host.pop('facts', {}) - content_facet_attributes = host.get('content_attributes', {}) or {} - - # Create ansible groups for hostgroup - group = 'host_group' - val = host.get(group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - # Create ansible groups for environment, location and organization - for group in ['environment', 'location', 'organization']: - val = host.get('%s' % group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - for group in ['lifecycle_environment', 'content_view']: - val = content_facet_attributes.get('%s_name' % group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - params = host_params - - # Ansible groups by parameters in host groups and Foreman host - # attributes. 
- groupby = dict() - for k, v in params.items(): - groupby[k] = self.to_safe(to_text(v)) - - # The name of the ansible groups is given by group_patterns: - for pattern in self.group_patterns: - try: - key = pattern.format(**groupby) - self.inventory[key].append(dns_name) - except KeyError: - pass # Host not part of this group - - if self.want_hostcollections: - hostcollections = host.get('host_collections') - - if hostcollections: - # Create Ansible groups for host collections - for hostcollection in hostcollections: - safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection.lower())) - self.inventory[safe_key].append(dns_name) - - self.hostcollections[dns_name] = hostcollections - - self.cache[dns_name] = host - self.params[dns_name] = params - self.facts[dns_name] = fact_list - self.inventory['all'].append(dns_name) - self._write_cache() - - def _update_cache_host_api(self, scan_only_new_hosts): - """Make calls to foreman and save the output in a cache""" - - self.groups = dict() - self.hosts = dict() - - for host in self._get_hosts(): - if host['name'] in self.cache.keys() and scan_only_new_hosts: - continue - dns_name = host['name'] - - host_data = self._get_host_data_by_id(host['id']) - host_params = host_data.get('all_parameters', {}) - - # Create ansible groups for hostgroup - group = 'hostgroup' - val = host.get('%s_title' % group) or host.get('%s_name' % group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - # Create ansible groups for environment, location and organization - for group in ['environment', 'location', 'organization']: - val = host.get('%s_name' % group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - for group in ['lifecycle_environment', 'content_view']: - val = 
host.get('content_facet_attributes', {}).get('%s_name' % group) - if val: - safe_key = self.to_safe('%s%s_%s' % ( - to_text(self.group_prefix), - group, - to_text(val).lower() - )) - self.inventory[safe_key].append(dns_name) - - params = self._resolve_params(host_params) - - # Ansible groups by parameters in host groups and Foreman host - # attributes. - groupby = dict() - for k, v in params.items(): - groupby[k] = self.to_safe(to_text(v)) - - # The name of the ansible groups is given by group_patterns: - for pattern in self.group_patterns: - try: - key = pattern.format(**groupby) - self.inventory[key].append(dns_name) - except KeyError: - pass # Host not part of this group - - if self.want_hostcollections: - hostcollections = host_data.get('host_collections') - - if hostcollections: - # Create Ansible groups for host collections - for hostcollection in hostcollections: - safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower())) - self.inventory[safe_key].append(dns_name) - - self.hostcollections[dns_name] = hostcollections - - self.cache[dns_name] = host - self.params[dns_name] = params - self.facts[dns_name] = self._get_facts(host) - self.inventory['all'].append(dns_name) - self._write_cache() - - def is_cache_valid(self): - """Determines if the cache is still valid""" - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if (os.path.isfile(self.cache_path_inventory) and - os.path.isfile(self.cache_path_params) and - os.path.isfile(self.cache_path_facts)): - return True - return False - - def load_inventory_from_cache(self): - """Read the index from the cache file sets self.index""" - - with open(self.cache_path_inventory, 'r') as fp: - self.inventory = json.load(fp) - - def load_params_from_cache(self): - """Read the index from the cache file sets self.index""" - - with open(self.cache_path_params, 
'r') as fp: - self.params = json.load(fp) - - def load_facts_from_cache(self): - """Read the index from the cache file sets self.facts""" - - if not self.want_facts: - return - with open(self.cache_path_facts, 'r') as fp: - self.facts = json.load(fp) - - def load_hostcollections_from_cache(self): - """Read the index from the cache file sets self.hostcollections""" - - if not self.want_hostcollections: - return - with open(self.cache_path_hostcollections, 'r') as fp: - self.hostcollections = json.load(fp) - - def load_cache_from_cache(self): - """Read the cache from the cache file sets self.cache""" - - with open(self.cache_path_cache, 'r') as fp: - self.cache = json.load(fp) - - def get_inventory(self): - if self.args.refresh_cache or not self.is_cache_valid(): - self.update_cache() - else: - self.load_inventory_from_cache() - self.load_params_from_cache() - self.load_facts_from_cache() - self.load_hostcollections_from_cache() - self.load_cache_from_cache() - if self.scan_new_hosts: - self.update_cache(True) - - def get_host_info(self): - """Get variables about a specific host""" - - if not self.cache or len(self.cache) == 0: - # Need to load index from cache - self.load_cache_from_cache() - - if self.args.host not in self.cache: - # try updating the cache - self.update_cache() - - if self.args.host not in self.cache: - # host might not exist anymore - return json_format_dict({}, True) - - return json_format_dict(self.cache[self.args.host], True) - - def _print_data(self): - data_to_print = "" - if self.args.host: - data_to_print += self.get_host_info() - else: - self.inventory['_meta'] = {'hostvars': {}} - for hostname in self.cache: - self.inventory['_meta']['hostvars'][hostname] = { - 'foreman': self.cache[hostname], - 'foreman_params': self.params[hostname], - } - if self.want_ansible_ssh_host and 'ip' in self.cache[hostname]: - self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.cache[hostname]['ip'] - if self.want_facts: - 
self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname] - - data_to_print += json_format_dict(self.inventory, True) - - print(data_to_print) - - def run(self): - # Read settings and parse CLI arguments - if not self.read_settings(): - return False - self.parse_cli_args() - self.get_inventory() - self._print_data() - return True - - -if __name__ == '__main__': - sys.exit(not ForemanInventory().run()) diff --git a/scripts/inventory/freeipa.py b/scripts/inventory/freeipa.py deleted file mode 100755 index f7ffe1d223..0000000000 --- a/scripts/inventory/freeipa.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import argparse -from distutils.version import LooseVersion -import json -import os -import sys -from ipalib import api, errors, __version__ as IPA_VERSION -from ansible.module_utils.six import u - - -def initialize(): - ''' - This function initializes the FreeIPA/IPA API. This function requires - no arguments. A kerberos key must be present in the users keyring in - order for this to work. IPA default configuration directory is /etc/ipa, - this path could be overridden with IPA_CONFDIR environment variable. - ''' - - api.bootstrap(context='cli') - - if not os.path.isdir(api.env.confdir): - print("WARNING: IPA configuration directory (%s) is missing. " - "Environment variable IPA_CONFDIR could be used to override " - "default path." % api.env.confdir) - - if LooseVersion(IPA_VERSION) >= LooseVersion('4.6.2'): - # With ipalib < 4.6.0 'server' and 'domain' have default values - # ('localhost:8888', 'example.com'), newer versions don't and - # DNS autodiscovery is broken, then one of jsonrpc_uri / xmlrpc_uri is - # required. 
- # ipalib 4.6.0 is unusable (https://pagure.io/freeipa/issue/7132) - # that's why 4.6.2 is explicitely tested. - if 'server' not in api.env or 'domain' not in api.env: - sys.exit("ERROR: ('jsonrpc_uri' or 'xmlrpc_uri') or 'domain' are not " - "defined in '[global]' section of '%s' nor in '%s'." % - (api.env.conf, api.env.conf_default)) - - api.finalize() - try: - api.Backend.rpcclient.connect() - except AttributeError: - # FreeIPA < 4.0 compatibility - api.Backend.xmlclient.connect() - - return api - - -def list_groups(api): - ''' - This function prints a list of all host groups. This function requires - one argument, the FreeIPA/IPA API object. - ''' - - inventory = {} - hostvars = {} - - result = api.Command.hostgroup_find(all=True)['result'] - - for hostgroup in result: - # Get direct and indirect members (nested hostgroups) of hostgroup - members = [] - - if 'member_host' in hostgroup: - members = list(hostgroup['member_host']) - if 'memberindirect_host' in hostgroup: - members += (host for host in hostgroup['memberindirect_host']) - inventory[hostgroup['cn'][0]] = {'hosts': list(members)} - - for member in members: - hostvars[member] = {} - - inventory['_meta'] = {'hostvars': hostvars} - inv_string = json.dumps(inventory, indent=1, sort_keys=True) - print(inv_string) - - return None - - -def parse_args(): - ''' - This function parses the arguments that were passed in via the command line. - This function expects no arguments. - ''' - - parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA ' - 'inventory module') - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('--list', action='store_true', - help='List active servers') - group.add_argument('--host', help='List details about the specified host') - - return parser.parse_args() - - -def get_host_attributes(api, host): - """ - This function expects one string, this hostname to lookup variables for. 
- Args: - api: FreeIPA API Object - host: Name of Hostname - - Returns: Dict of Host vars if found else None - """ - try: - result = api.Command.host_show(u(host))['result'] - if 'usercertificate' in result: - del result['usercertificate'] - return json.dumps(result, indent=1) - except errors.NotFound as e: - return {} - - -if __name__ == '__main__': - args = parse_args() - api = initialize() - - if args.host: - print(get_host_attributes(api, args.host)) - elif args.list: - list_groups(api) diff --git a/scripts/inventory/infoblox.py b/scripts/inventory/infoblox.py deleted file mode 100755 index 209509025e..0000000000 --- a/scripts/inventory/infoblox.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python -# -# (c) 2018, Red Hat, Inc. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -import sys -import json -import argparse - -from ansible.parsing.dataloader import DataLoader -from ansible.module_utils.six import iteritems, raise_from -from ansible.module_utils._text import to_text -try: - from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiInventory - from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs -except ImportError as exc: - try: - # Fallback for Ansible 2.9 - from ansible.module_utils.net_tools.nios.api import WapiInventory - from ansible.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs - except ImportError: - raise_from( - Exception( - 'This inventory plugin only works with Ansible 2.9, 2.10, or 3, or when community.general is installed correctly in PYTHONPATH.' 
- ' Try using the inventory plugin from infoblox.nios_modules instead.'), - exc) - - -CONFIG_FILES = [ - os.environ.get('INFOBLOX_CONFIG_FILE', ''), - '/etc/ansible/infoblox.yaml', - '/etc/ansible/infoblox.yml' -] - - -def parse_args(): - parser = argparse.ArgumentParser() - - parser.add_argument('--list', action='store_true', - help='List host records from NIOS for use in Ansible') - - parser.add_argument('--host', - help='List meta data about single host (not used)') - - return parser.parse_args() - - -def main(): - args = parse_args() - - for config_file in CONFIG_FILES: - if os.path.exists(config_file): - break - else: - sys.stderr.write('unable to locate config file at /etc/ansible/infoblox.yaml\n') - sys.exit(-1) - - try: - loader = DataLoader() - config = loader.load_from_file(config_file) - provider = config.get('provider') or {} - wapi = WapiInventory(provider) - except Exception as exc: - sys.stderr.write(to_text(exc)) - sys.exit(-1) - - if args.host: - host_filter = {'name': args.host} - else: - host_filter = {} - - config_filters = config.get('filters') - - if config_filters.get('view') is not None: - host_filter['view'] = config_filters['view'] - - if config_filters.get('extattrs'): - extattrs = normalize_extattrs(config_filters['extattrs']) - else: - extattrs = {} - - hostvars = {} - inventory = { - '_meta': { - 'hostvars': hostvars - } - } - - return_fields = ['name', 'view', 'extattrs', 'ipv4addrs'] - - hosts = wapi.get_object('record:host', - host_filter, - extattrs=extattrs, - return_fields=return_fields) - - if hosts: - for item in hosts: - view = item['view'] - name = item['name'] - - if view not in inventory: - inventory[view] = {'hosts': []} - - inventory[view]['hosts'].append(name) - - hostvars[name] = { - 'view': view - } - - if item.get('extattrs'): - for key, value in iteritems(flatten_extattrs(item['extattrs'])): - if key.startswith('ansible_'): - hostvars[name][key] = value - else: - if 'extattrs' not in hostvars[name]: - 
hostvars[name]['extattrs'] = {} - hostvars[name]['extattrs'][key] = value - - sys.stdout.write(json.dumps(inventory, indent=4)) - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/infoblox.yaml b/scripts/inventory/infoblox.yaml deleted file mode 100644 index c1be5324ac..0000000000 --- a/scripts/inventory/infoblox.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# This file provides the configuration information for the Infoblox dynamic -# inventory script that is used to dynamically pull host information from NIOS. -# This file should be copied to /etc/ansible/infoblox.yaml in order for the -# dynamic script to find it. - -# Sets the provider arguments for authenticating to the Infoblox server to -# retrieve inventory hosts. Provider arguments can also be set using -# environment variables. Supported environment variables all start with -# INFOBLOX_{{ name }}. For instance, to set the host provider value, the -# environment variable would be INFOBLOX_HOST. -provider: - host: - username: - password: - -# Filters allow the dynamic inventory script to restrict the set of hosts that -# are returned from the Infoblox server. 
-filters: - # restrict returned hosts by extensible attributes - extattrs: {} - - # restrict returned hosts to a specified DNS view - view: null diff --git a/scripts/inventory/jail.py b/scripts/inventory/jail.py deleted file mode 100755 index a28b923b10..0000000000 --- a/scripts/inventory/jail.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python - -# (c) 2013, Michael Scherer -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from subprocess import Popen, PIPE -import sys -import json - -result = {} -result['all'] = {} - -pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True) -result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()] -result['all']['vars'] = {} -result['all']['vars']['ansible_connection'] = 'jail' - -if len(sys.argv) == 2 and sys.argv[1] == '--list': - print(json.dumps(result)) -elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print(json.dumps({'ansible_connection': 'jail'})) -else: - sys.stderr.write("Need an argument, either --list or --host \n") diff --git a/scripts/inventory/landscape.py b/scripts/inventory/landscape.py deleted file mode 100755 index 8301e00b71..0000000000 --- a/scripts/inventory/landscape.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python - -# (c) 2015, Marc Abramowitz -# -# This file is part of Ansible. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Dynamic inventory script which lets you use nodes discovered by Canonical's -# Landscape (http://www.ubuntu.com/management/landscape-features). 
-# -# Requires the `landscape_api` Python module -# See: -# - https://landscape.canonical.com/static/doc/api/api-client-package.html -# - https://landscape.canonical.com/static/doc/api/python-api.html -# -# Environment variables -# --------------------- -# - `LANDSCAPE_API_URI` -# - `LANDSCAPE_API_KEY` -# - `LANDSCAPE_API_SECRET` -# - `LANDSCAPE_API_SSL_CA_FILE` (optional) - - -import argparse -import collections -import os -import sys - -from landscape_api.base import API, HTTPError - -import json - -_key = 'landscape' - - -class EnvironmentConfig(object): - uri = os.getenv('LANDSCAPE_API_URI') - access_key = os.getenv('LANDSCAPE_API_KEY') - secret_key = os.getenv('LANDSCAPE_API_SECRET') - ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE') - - -def _landscape_client(): - env = EnvironmentConfig() - return API( - uri=env.uri, - access_key=env.access_key, - secret_key=env.secret_key, - ssl_ca_file=env.ssl_ca_file) - - -def get_landscape_members_data(): - return _landscape_client().get_computers() - - -def get_nodes(data): - return [node['hostname'] for node in data] - - -def get_groups(data): - groups = collections.defaultdict(list) - - for node in data: - for value in node['tags']: - groups[value].append(node['hostname']) - - return groups - - -def get_meta(data): - meta = {'hostvars': {}} - for node in data: - meta['hostvars'][node['hostname']] = {'tags': node['tags']} - return meta - - -def print_list(): - data = get_landscape_members_data() - nodes = get_nodes(data) - groups = get_groups(data) - meta = get_meta(data) - inventory_data = {_key: nodes, '_meta': meta} - inventory_data.update(groups) - print(json.dumps(inventory_data)) - - -def print_host(host): - data = get_landscape_members_data() - meta = get_meta(data) - print(json.dumps(meta['hostvars'][host])) - - -def get_args(args_list): - parser = argparse.ArgumentParser( - description='ansible inventory script reading from landscape cluster') - mutex_group = 
parser.add_mutually_exclusive_group(required=True) - help_list = 'list all hosts from landscape cluster' - mutex_group.add_argument('--list', action='store_true', help=help_list) - help_host = 'display variables for a host' - mutex_group.add_argument('--host', help=help_host) - return parser.parse_args(args_list) - - -def main(args_list): - args = get_args(args_list) - if args.list: - print_list() - if args.host: - print_host(args.host) - - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/scripts/inventory/libcloud.ini b/scripts/inventory/libcloud.ini deleted file mode 100644 index 7592c41cd0..0000000000 --- a/scripts/inventory/libcloud.ini +++ /dev/null @@ -1,15 +0,0 @@ -# Ansible Apache Libcloud Generic inventory script - -[driver] -provider = CLOUDSTACK -host = -path = -secure = True -verify_ssl_cert = True - -key = -secret = - -[cache] -cache_path=/path/to/your/cache -cache_max_age=60 diff --git a/scripts/inventory/linode.ini b/scripts/inventory/linode.ini deleted file mode 100644 index c925d970e9..0000000000 --- a/scripts/inventory/linode.ini +++ /dev/null @@ -1,18 +0,0 @@ -# Ansible Linode external inventory script settings -# - -[linode] - -# API calls to Linode are slow. For this reason, we cache the results of an API -# call. Set this to the path you want cache files to be written to. Two files -# will be written to this directory: -# - ansible-Linode.cache -# - ansible-Linode.index -cache_path = /tmp - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. 
-cache_max_age = 300 - -# If set to true use the hosts public ip in the dictionary instead of the label -use_public_ip = false \ No newline at end of file diff --git a/scripts/inventory/linode.py b/scripts/inventory/linode.py deleted file mode 100755 index 2972725d88..0000000000 --- a/scripts/inventory/linode.py +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/env python - -''' -Linode external inventory script -================================= - -Generates inventory that Ansible can understand by making API request to -Linode using the Chube library. - -NOTE: This script assumes Ansible is being executed where Chube is already -installed and has a valid config at ~/.chube. If not, run: - - pip install chube - echo -e "---\napi_key: " > ~/.chube - -For more details, see: https://github.com/exosite/chube - -NOTE: By default, this script also assumes that the Linodes in your account all have -labels that correspond to hostnames that are in your resolver search path. -Your resolver search path resides in /etc/hosts. -Optionally, if you would like to use the hosts public IP instead of it's label use -the following setting in linode.ini: - - use_public_ip = true - -When run against a specific host, this script returns the following variables: - - - api_id - - datacenter_id - - datacenter_city (lowercase city name of data center, e.g. 'tokyo') - - label - - display_group - - create_dt - - total_hd - - total_xfer - - total_ram - - status - - public_ip (The first public IP found) - - private_ip (The first private IP found, or empty string if none) - - alert_cpu_enabled - - alert_cpu_threshold - - alert_diskio_enabled - - alert_diskio_threshold - - alert_bwin_enabled - - alert_bwin_threshold - - alert_bwout_enabled - - alert_bwout_threshold - - alert_bwquota_enabled - - alert_bwquota_threshold - - backup_weekly_daily - - backup_window - - watchdog - -Peter Sankauskas did most of the legwork here with his linode plugin; I -just adapted that for Linode. 
-''' - -# (c) 2013, Dan Slimmon -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -# Standard imports -import os -import re -import sys -import argparse -from time import time - -import json - -try: - from chube import load_chube_config - from chube import api as chube_api - from chube.datacenter import Datacenter - from chube.linode_obj import Linode -except Exception: - try: - # remove local paths and other stuff that may - # cause an import conflict, as chube is sensitive - # to name collisions on importing - old_path = sys.path - sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))] - - from chube import load_chube_config - from chube import api as chube_api - from chube.datacenter import Datacenter - from chube.linode_obj import Linode - - sys.path = old_path - except Exception as e: - raise Exception("could not import chube") - -load_chube_config() - -# Imports for ansible -from ansible.module_utils.six.moves import configparser as ConfigParser - - -class LinodeInventory(object): - def _empty_inventory(self): - return {"_meta": {"hostvars": {}}} - - def __init__(self): - """Main execution path.""" - # Inventory grouped by display group - self.inventory = self._empty_inventory() - # Index of label to Linode ID - self.index = {} - # Local cache of Datacenter objects populated by populate_datacenter_cache() - self._datacenter_cache = None - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - # Cache - if self.args.refresh_cache: - self.do_api_calls_update_cache() - elif not self.is_cache_valid(): - self.do_api_calls_update_cache() - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - elif self.args.list: - # Display list of nodes 
for inventory - if len(self.inventory) == 1: - data_to_print = self.get_inventory_from_cache() - else: - data_to_print = self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def is_cache_valid(self): - """Determines if the cache file has expired, or if it is still valid.""" - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_index): - return True - return False - - def read_settings(self): - """Reads the settings from the .ini file.""" - config = ConfigParser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini') - - # Cache related - cache_path = config.get('linode', 'cache_path') - self.cache_path_cache = cache_path + "/ansible-linode.cache" - self.cache_path_index = cache_path + "/ansible-linode.index" - self.cache_max_age = config.getint('linode', 'cache_max_age') - self.use_public_ip = config.getboolean('linode', 'use_public_ip') - - def parse_cli_args(self): - """Command line argument processing""" - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode') - parser.add_argument('--list', action='store_true', default=True, - help='List nodes (default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific node') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to Linode (default: False - use cache files)') - self.args = parser.parse_args() - - def do_api_calls_update_cache(self): - """Do API calls, and save data in cache files.""" - self.get_nodes() - self.write_to_cache(self.inventory, self.cache_path_cache) - self.write_to_cache(self.index, self.cache_path_index) - - def get_nodes(self): - """Makes an Linode API call to get the list of nodes.""" - try: - for node in 
Linode.search(status=Linode.STATUS_RUNNING): - self.add_node(node) - except chube_api.linode_api.ApiError as e: - sys.exit("Looks like Linode's API is down:\n %s" % e) - - def get_node(self, linode_id): - """Gets details about a specific node.""" - try: - return Linode.find(api_id=linode_id) - except chube_api.linode_api.ApiError as e: - sys.exit("Looks like Linode's API is down:\n%s" % e) - - def populate_datacenter_cache(self): - """Creates self._datacenter_cache, containing all Datacenters indexed by ID.""" - self._datacenter_cache = {} - dcs = Datacenter.search() - for dc in dcs: - self._datacenter_cache[dc.api_id] = dc - - def get_datacenter_city(self, node): - """Returns a the lowercase city name of the node's data center.""" - if self._datacenter_cache is None: - self.populate_datacenter_cache() - location = self._datacenter_cache[node.datacenter_id].location - location = location.lower() - location = location.split(",")[0] - return location - - def add_node(self, node): - """Adds an node to the inventory and index.""" - if self.use_public_ip: - dest = self.get_node_public_ip(node) - else: - dest = node.label - - # Add to index - self.index[dest] = node.api_id - - # Inventory: Group by node ID (always a group of 1) - self.inventory[node.api_id] = [dest] - - # Inventory: Group by datacenter city - self.push(self.inventory, self.get_datacenter_city(node), dest) - - # Inventory: Group by display group - self.push(self.inventory, node.display_group, dest) - - # Inventory: Add a "linode" global tag group - self.push(self.inventory, "linode", dest) - - # Add host info to hostvars - self.inventory["_meta"]["hostvars"][dest] = self._get_host_info(node) - - def get_node_public_ip(self, node): - """Returns a the public IP address of the node""" - return [addr.address for addr in node.ipaddresses if addr.is_public][0] - - def get_host_info(self): - """Get variables about a specific host.""" - - if len(self.index) == 0: - # Need to load index from cache - 
self.load_index_from_cache() - - if self.args.host not in self.index: - # try updating the cache - self.do_api_calls_update_cache() - if self.args.host not in self.index: - # host might not exist anymore - return self.json_format_dict({}, True) - - node_id = self.index[self.args.host] - node = self.get_node(node_id) - - return self.json_format_dict(self._get_host_info(node), True) - - def _get_host_info(self, node): - node_vars = {} - for direct_attr in [ - "api_id", - "datacenter_id", - "label", - "display_group", - "create_dt", - "total_hd", - "total_xfer", - "total_ram", - "status", - "alert_cpu_enabled", - "alert_cpu_threshold", - "alert_diskio_enabled", - "alert_diskio_threshold", - "alert_bwin_enabled", - "alert_bwin_threshold", - "alert_bwout_enabled", - "alert_bwout_threshold", - "alert_bwquota_enabled", - "alert_bwquota_threshold", - "backup_weekly_daily", - "backup_window", - "watchdog" - ]: - node_vars[direct_attr] = getattr(node, direct_attr) - - node_vars["datacenter_city"] = self.get_datacenter_city(node) - node_vars["public_ip"] = self.get_node_public_ip(node) - - # Set the SSH host information, so these inventory items can be used if - # their labels aren't FQDNs - node_vars['ansible_ssh_host'] = node_vars["public_ip"] - node_vars['ansible_host'] = node_vars["public_ip"] - - private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public] - - if private_ips: - node_vars["private_ip"] = private_ips[0] - - return node_vars - - def push(self, my_dict, key, element): - """Pushed an element onto an array that may not have been defined in the dict.""" - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def get_inventory_from_cache(self): - """Reads the inventory from the cache file and returns it as a JSON object.""" - cache = open(self.cache_path_cache, 'r') - json_inventory = cache.read() - return json_inventory - - def load_index_from_cache(self): - """Reads the index from the cache file and sets 
self.index.""" - cache = open(self.cache_path_index, 'r') - json_index = cache.read() - self.index = json.loads(json_index) - - def write_to_cache(self, data, filename): - """Writes data in JSON format to a file.""" - json_data = self.json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def to_safe(self, word): - """Escapes any characters that would be invalid in an ansible group name.""" - return re.sub(r"[^A-Za-z0-9\-]", "_", word) - - def json_format_dict(self, data, pretty=False): - """Converts a dict to a JSON object and dumps it as a formatted string.""" - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -LinodeInventory() diff --git a/scripts/inventory/lxc_inventory.py b/scripts/inventory/lxc_inventory.py deleted file mode 100755 index 5a40b40837..0000000000 --- a/scripts/inventory/lxc_inventory.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python -# -# (c) 2015-16 Florian Haas, hastexo Professional Services GmbH -# -# Based in part on: -# libvirt_lxc.py, (c) 2013, Michael Scherer -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -""" -Ansible inventory script for LXC containers. Requires Python -bindings for LXC API. - -In LXC, containers can be grouped by setting the lxc.group option, -which may be found more than once in a container's -configuration. So, we enumerate all containers, fetch their list -of groups, and then build the dictionary in the way Ansible expects -it. -""" - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import lxc -import json - - -def build_dict(): - """Returns a dictionary keyed to the defined LXC groups. All - containers, including the ones not in any group, are included in the - "all" group.""" - # Enumerate all containers, and list the groups they are in. Also, - # implicitly add every container to the 'all' group. 
- containers = dict([(c, - ['all'] + - (lxc.Container(c).get_config_item('lxc.group') or [])) - for c in lxc.list_containers()]) - - # Extract the groups, flatten the list, and remove duplicates - groups = set(sum(containers.values(), [])) - - # Create a dictionary for each group (including the 'all' group - return dict([(g, {'hosts': [k for k, v in containers.items() if g in v], - 'vars': {'ansible_connection': 'lxc'}}) for g in groups]) - - -def main(argv): - """Returns a JSON dictionary as expected by Ansible""" - result = build_dict() - if len(argv) == 2 and argv[1] == '--list': - json.dump(result, sys.stdout) - elif len(argv) == 3 and argv[1] == '--host': - json.dump({'ansible_connection': 'lxc'}, sys.stdout) - else: - print("Need an argument, either --list or --host ", file=sys.stderr) - - -if __name__ == '__main__': - main(sys.argv) diff --git a/scripts/inventory/lxd.ini b/scripts/inventory/lxd.ini deleted file mode 100644 index 5398e7d021..0000000000 --- a/scripts/inventory/lxd.ini +++ /dev/null @@ -1,13 +0,0 @@ -# LXD external inventory script settings - -[lxd] - -# The default resource -#resource = local: - -# The group name to add the hosts to -#group = lxd - -# The connection type to return for these hosts - lxd hasn't been tested yet -#connection = lxd -connection = smart diff --git a/scripts/inventory/lxd.py b/scripts/inventory/lxd.py deleted file mode 100755 index 8e8794eb81..0000000000 --- a/scripts/inventory/lxd.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python - -# (c) 2013, Michael Scherer -# (c) 2014, Hiroaki Nakamura -# (c) 2016, Andew Clarke -# -# This file is based on https://github.com/ansible/ansible/blob/devel/plugins/inventory/libvirt_lxc.py which is part of Ansible, -# and https://github.com/hnakamur/lxc-ansible-playbooks/blob/master/provisioning/inventory-lxc.py -# -# NOTE, this file has some obvious limitations, improvements welcome -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 
- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -from subprocess import Popen, PIPE -import distutils.spawn -import sys -import json - -from ansible.module_utils.six.moves import configparser - -# Set up defaults -resource = 'local:' -group = 'lxd' -connection = 'lxd' -hosts = {} -result = {} - -# Read the settings from the lxd.ini file -config = configparser.SafeConfigParser() -config.read(os.path.dirname(os.path.realpath(__file__)) + '/lxd.ini') -if config.has_option('lxd', 'resource'): - resource = config.get('lxd', 'resource') -if config.has_option('lxd', 'group'): - group = config.get('lxd', 'group') -if config.has_option('lxd', 'connection'): - connection = config.get('lxd', 'connection') - -# Ensure executable exists -if distutils.spawn.find_executable('lxc'): - - # Set up containers result and hosts array - result[group] = {} - result[group]['hosts'] = [] - - # Run the command and load json result - pipe = Popen(['lxc', 'list', resource, '--format', 'json'], stdout=PIPE, universal_newlines=True) - lxdjson = json.load(pipe.stdout) - - # Iterate the json lxd output - for item in lxdjson: - - # Check state and network - if 'state' in item and item['state'] is not None and 'network' in item['state']: - network = item['state']['network'] - - # Check for eth0 and addresses - if 'eth0' in network and 'addresses' in network['eth0']: - addresses = network['eth0']['addresses'] - - # Iterate addresses - for address in addresses: - - # Only return inet family addresses - if 'family' in address and address['family'] == 'inet': - if 'address' in address: - ip = address['address'] - name = item['name'] - - # Add the host to the results and the host array - result[group]['hosts'].append(name) - hosts[name] = ip - - # Set the other containers result values - result[group]['vars'] = {} - result[group]['vars']['ansible_connection'] = connection - -# Process arguments -if len(sys.argv) == 2 and sys.argv[1] == '--list': - 
print(json.dumps(result)) -elif len(sys.argv) == 3 and sys.argv[1] == '--host': - if sys.argv[2] == 'localhost': - print(json.dumps({'ansible_connection': 'local'})) - else: - if connection == 'lxd': - print(json.dumps({'ansible_connection': connection})) - else: - print(json.dumps({'ansible_connection': connection, 'ansible_host': hosts[sys.argv[2]]})) -else: - print("Need an argument, either --list or --host ") diff --git a/scripts/inventory/mdt.ini b/scripts/inventory/mdt.ini deleted file mode 100644 index c401c0ce17..0000000000 --- a/scripts/inventory/mdt.ini +++ /dev/null @@ -1,17 +0,0 @@ -[mdt] - -# Set the MDT server to connect to -server = localhost.example.com - -# Set the MDT Instance -instance = EXAMPLEINSTANCE - -# Set the MDT database -database = MDTDB - -# Configure login credentials -user = local.domain\admin -password = adminpassword - -[tower] -groupname = mdt diff --git a/scripts/inventory/mdt_dynamic_inventory.py b/scripts/inventory/mdt_dynamic_inventory.py deleted file mode 100755 index 6409e37fe1..0000000000 --- a/scripts/inventory/mdt_dynamic_inventory.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python - -# (c) 2016, Julian Barnett -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -MDT external inventory script -================================= -author: J Barnett 06/23/2016 01:15 -maintainer: J Barnett (github @jbarnett1981) -''' - -import argparse -import json -import pymssql -from ansible.module_utils.six.moves import configparser - - -class MDTInventory(object): - - def __init__(self): - ''' Main execution path ''' - self.conn = None - - # Initialize empty inventory - self.inventory = self._empty_inventory() - - # Read CLI arguments - self.read_settings() - self.parse_cli_args() - - # Get Hosts - if self.args.list: - self.get_hosts() - - # Get specific host vars - if self.args.host: - 
self.get_hosts(self.args.host) - - def _connect(self, query): - ''' - Connect to MDT and dump contents of dbo.ComputerIdentity database - ''' - if not self.conn: - self.conn = pymssql.connect(server=self.mdt_server + "\\" + self.mdt_instance, user=self.mdt_user, password=self.mdt_password, - database=self.mdt_database) - cursor = self.conn.cursor() - cursor.execute(query) - self.mdt_dump = cursor.fetchall() - self.conn.close() - - def get_hosts(self, hostname=False): - ''' - Gets host from MDT Database - ''' - if hostname: - query = ("SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role " - "FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID where t1.Description = '%s'" % hostname) - else: - query = 'SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID' - self._connect(query) - - # Configure to group name configured in Ansible Tower for this inventory - groupname = self.mdt_groupname - - # Initialize empty host list - hostlist = [] - - # Parse through db dump and populate inventory - for hosts in self.mdt_dump: - self.inventory['_meta']['hostvars'][hosts[1]] = {'id': hosts[0], 'name': hosts[1], 'mac': hosts[2], 'role': hosts[3]} - hostlist.append(hosts[1]) - self.inventory[groupname] = hostlist - - # Print it all out - print(json.dumps(self.inventory, indent=2)) - - def _empty_inventory(self): - ''' - Create empty inventory dictionary - ''' - return {"_meta": {"hostvars": {}}} - - def read_settings(self): - ''' - Reads the settings from the mdt.ini file - ''' - config = configparser.SafeConfigParser() - config.read('mdt.ini') - - # MDT Server and instance and database - self.mdt_server = config.get('mdt', 'server') - self.mdt_instance = config.get('mdt', 'instance') - self.mdt_database = config.get('mdt', 'database') - - # MDT Login credentials - if config.has_option('mdt', 'user'): - self.mdt_user = config.get('mdt', 'user') - if config.has_option('mdt', 'password'): - 
self.mdt_password = config.get('mdt', 'password') - - # Group name in Tower - if config.has_option('tower', 'groupname'): - self.mdt_groupname = config.get('tower', 'groupname') - - def parse_cli_args(self): - ''' - Command line argument processing - ''' - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on MDT') - parser.add_argument('--list', action='store_true', default=False, help='List instances') - parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') - self.args = parser.parse_args() - - -if __name__ == "__main__": - # Run the script - MDTInventory() diff --git a/scripts/inventory/nagios_livestatus.ini b/scripts/inventory/nagios_livestatus.ini deleted file mode 100644 index 320f11f35c..0000000000 --- a/scripts/inventory/nagios_livestatus.ini +++ /dev/null @@ -1,41 +0,0 @@ -# Ansible Nagios external inventory script settings -# -# To get all available possibilities, check following URL: -# http://www.naemon.org/documentation/usersguide/livestatus.html -# https://mathias-kettner.de/checkmk_livestatus.html -# - -[local] -# Livestatus URI -# Example for default naemon livestatus unix socket : -# livestatus_uri=unix:/var/cache/naemon/live - -[remote] - -# default field name for host: name -# Uncomment to override: -# host_field=address -# -# default field group for host: groups -# Uncomment to override: -# group_field=state -# default fields retrieved: address, alias, display_name, children, parents -# To override, uncomment the following line -# fields_to_retrieve=address,alias,display_name -# -# default variable prefix: livestatus_ -# To override, uncomment the following line -# var_prefix=naemon_ -# -# default filter: None -# -# Uncomment to override -# -# All host with state = OK -# host_filter=state = 0 -# Warning: for the moment, you can use only one filter at a time. You cannot combine various conditions. 
-# -# All host in groups Linux -# host_filter=groups >= Linux -# -livestatus_uri=tcp:192.168.66.137:6557 diff --git a/scripts/inventory/nagios_livestatus.py b/scripts/inventory/nagios_livestatus.py deleted file mode 100755 index bdf9f673de..0000000000 --- a/scripts/inventory/nagios_livestatus.py +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env python - -# (c) 2015, Yannig Perre -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Nagios livestatus inventory script. Before using this script, please -update nagios_livestatus.ini file. - -Livestatus is a nagios/naemon/shinken module which let you retrieve -informations stored in the monitoring core. - -This plugin inventory need livestatus API for python. Please install it -before using this script (apt/pip/yum/...). - -Checkmk livestatus: https://mathias-kettner.de/checkmk_livestatus.html -Livestatus API: http://www.naemon.org/documentation/usersguide/livestatus.html -''' - -import os -import re -import argparse -import sys - -from ansible.module_utils.six.moves import configparser -import json - -try: - from mk_livestatus import Socket -except ImportError: - sys.exit("Error: mk_livestatus is needed. 
Try something like: pip install python-mk-livestatus") - - -class NagiosLivestatusInventory(object): - - def parse_ini_file(self): - config = configparser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_livestatus.ini') - for section in config.sections(): - if not config.has_option(section, 'livestatus_uri'): - continue - - # If fields_to_retrieve is not set, using default fields - fields_to_retrieve = self.default_fields_to_retrieve - if config.has_option(section, 'fields_to_retrieve'): - fields_to_retrieve = [field.strip() for field in config.get(section, 'fields_to_retrieve').split(',')] - fields_to_retrieve = tuple(fields_to_retrieve) - - # default section values - section_values = { - 'var_prefix': 'livestatus_', - 'host_filter': None, - 'host_field': 'name', - 'group_field': 'groups' - } - for key, value in section_values.items(): - if config.has_option(section, key): - section_values[key] = config.get(section, key).strip() - - # Retrieving livestatus string connection - livestatus_uri = config.get(section, 'livestatus_uri') - backend_definition = None - - # Local unix socket - unix_match = re.match('unix:(.*)', livestatus_uri) - if unix_match is not None: - backend_definition = {'connection': unix_match.group(1)} - - # Remote tcp connection - tcp_match = re.match('tcp:(.*):([^:]*)', livestatus_uri) - if tcp_match is not None: - backend_definition = {'connection': (tcp_match.group(1), int(tcp_match.group(2)))} - - # No valid livestatus_uri => exiting - if backend_definition is None: - raise Exception('livestatus_uri field is invalid (%s). 
Expected: unix:/path/to/live or tcp:host:port' % livestatus_uri) - - # Updating backend_definition with current value - backend_definition['name'] = section - backend_definition['fields'] = fields_to_retrieve - for key, value in section_values.items(): - backend_definition[key] = value - - self.backends.append(backend_definition) - - def parse_options(self): - parser = argparse.ArgumentParser() - parser.add_argument('--host', nargs=1) - parser.add_argument('--list', action='store_true') - parser.add_argument('--pretty', action='store_true') - self.options = parser.parse_args() - - def add_host(self, hostname, group): - if group not in self.result: - self.result[group] = {} - self.result[group]['hosts'] = [] - if hostname not in self.result[group]['hosts']: - self.result[group]['hosts'].append(hostname) - - def query_backend(self, backend, host=None): - '''Query a livestatus backend''' - hosts_request = Socket(backend['connection']).hosts.columns(backend['host_field'], backend['group_field']) - - if backend['host_filter'] is not None: - hosts_request = hosts_request.filter(backend['host_filter']) - - if host is not None: - hosts_request = hosts_request.filter('name = ' + host[0]) - - hosts_request._columns += backend['fields'] - - hosts = hosts_request.call() - for host in hosts: - hostname = host[backend['host_field']] - hostgroups = host[backend['group_field']] - if not isinstance(hostgroups, list): - hostgroups = [hostgroups] - self.add_host(hostname, 'all') - self.add_host(hostname, backend['name']) - for group in hostgroups: - self.add_host(hostname, group) - for field in backend['fields']: - var_name = backend['var_prefix'] + field - if hostname not in self.result['_meta']['hostvars']: - self.result['_meta']['hostvars'][hostname] = {} - self.result['_meta']['hostvars'][hostname][var_name] = host[field] - - def __init__(self): - - self.defaultgroup = 'group_all' - self.default_fields_to_retrieve = ('address', 'alias', 'display_name', 'childs', 'parents') - 
self.backends = [] - self.options = None - - self.parse_ini_file() - self.parse_options() - - self.result = {} - self.result['_meta'] = {} - self.result['_meta']['hostvars'] = {} - self.json_indent = None - if self.options.pretty: - self.json_indent = 2 - - if len(self.backends) == 0: - sys.exit("Error: Livestatus configuration is missing. See nagios_livestatus.ini.") - - for backend in self.backends: - self.query_backend(backend, self.options.host) - - if self.options.host: - print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent=self.json_indent)) - elif self.options.list: - print(json.dumps(self.result, indent=self.json_indent)) - else: - sys.exit("usage: --list or --host HOSTNAME [--pretty]") - - -NagiosLivestatusInventory() diff --git a/scripts/inventory/nagios_ndo.ini b/scripts/inventory/nagios_ndo.ini deleted file mode 100644 index 1e133a29f3..0000000000 --- a/scripts/inventory/nagios_ndo.ini +++ /dev/null @@ -1,10 +0,0 @@ -# Ansible Nagios external inventory script settings -# - -[ndo] -# NDO database URI -# Make sure that data is returned as strings and not bytes if using python 3. -# See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html -# for supported databases and URI format. -# Example for mysqlclient module : -database_uri=mysql+mysqldb://user:passwd@hostname/ndo?charset=utf8&use_unicode=1 diff --git a/scripts/inventory/nagios_ndo.py b/scripts/inventory/nagios_ndo.py deleted file mode 100755 index ffd5500f8e..0000000000 --- a/scripts/inventory/nagios_ndo.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python - -# (c) 2014, Jonathan Lestrelin -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -Nagios NDO external inventory script. -======================================== - -Returns hosts and hostgroups from Nagios NDO. - -Configuration is read from `nagios_ndo.ini`. 
-""" - -import os -import argparse -import sys -from ansible.module_utils.six.moves import configparser -import json - -try: - from sqlalchemy import text - from sqlalchemy.engine import create_engine -except ImportError: - sys.exit("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy") - - -class NagiosNDOInventory(object): - - def read_settings(self): - config = configparser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini') - if config.has_option('ndo', 'database_uri'): - self.ndo_database_uri = config.get('ndo', 'database_uri') - - def read_cli(self): - parser = argparse.ArgumentParser() - parser.add_argument('--host', nargs=1) - parser.add_argument('--list', action='store_true') - self.options = parser.parse_args() - - def get_hosts(self): - engine = create_engine(self.ndo_database_uri) - connection = engine.connect() - select_hosts = text("SELECT display_name \ - FROM nagios_hosts") - select_hostgroups = text("SELECT alias \ - FROM nagios_hostgroups") - select_hostgroup_hosts = text("SELECT h.display_name \ - FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \ - WHERE hgm.hostgroup_id = hg.hostgroup_id \ - AND hgm.host_object_id = h.host_object_id \ - AND hg.alias =:hostgroup_alias") - - hosts = connection.execute(select_hosts) - self.result['all']['hosts'] = [host['display_name'] for host in hosts] - - for hostgroup in connection.execute(select_hostgroups): - hostgroup_alias = hostgroup['alias'] - self.result[hostgroup_alias] = {} - hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias) - self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts] - - def __init__(self): - - self.defaultgroup = 'group_all' - self.ndo_database_uri = None - self.options = None - - self.read_settings() - self.read_cli() - - self.result = {} - self.result['all'] = {} - self.result['all']['hosts'] = [] - self.result['_meta'] = {} - 
self.result['_meta']['hostvars'] = {} - - if self.ndo_database_uri: - self.get_hosts() - if self.options.host: - print(json.dumps({})) - elif self.options.list: - print(json.dumps(self.result)) - else: - sys.exit("usage: --list or --host HOSTNAME") - else: - sys.exit("Error: Database configuration is missing. See nagios_ndo.ini.") - - -NagiosNDOInventory() diff --git a/scripts/inventory/nsot.py b/scripts/inventory/nsot.py deleted file mode 100755 index 10f24ad48b..0000000000 --- a/scripts/inventory/nsot.py +++ /dev/null @@ -1,346 +0,0 @@ -#!/usr/bin/env python - -''' -nsot -==== - -Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox - -Features --------- - -* Define host groups in form of NSoT device attribute criteria - -* All parameters defined by the spec as of 2015-09-05 are supported. - - + ``--list``: Returns JSON hash of host groups -> hosts and top-level - ``_meta`` -> ``hostvars`` which correspond to all device attributes. - - Group vars can be specified in the YAML configuration, noted below. - - + ``--host ``: Returns JSON hash where every item is a device - attribute. - -* In addition to all attributes assigned to resource being returned, script - will also append ``site_id`` and ``id`` as facts to utilize. - - -Configuration ------------- - -Since it'd be annoying and failure prone to guess where you're configuration -file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it. - -This file should adhere to the YAML spec. All top-level variable must be -desired Ansible group-name hashed with single 'query' item to define the NSoT -attribute query. - -Queries follow the normal NSoT query syntax, `shown here`_ - -.. _shown here: https://github.com/dropbox/pynsot#set-queries - -.. 
code:: yaml - - routers: - query: 'deviceType=ROUTER' - vars: - a: b - c: d - - juniper_fw: - query: 'deviceType=FIREWALL manufacturer=JUNIPER' - - not_f10: - query: '-manufacturer=FORCE10' - -The inventory will automatically use your ``.pynsotrc`` like normal pynsot from -cli would, so make sure that's configured appropriately. - -.. note:: - - Attributes I'm showing above are influenced from ones that the Trigger - project likes. As is the spirit of NSoT, use whichever attributes work best - for your workflow. - -If config file is blank or absent, the following default groups will be -created: - -* ``routers``: deviceType=ROUTER -* ``switches``: deviceType=SWITCH -* ``firewalls``: deviceType=FIREWALL - -These are likely not useful for everyone so please use the configuration. :) - -.. note:: - - By default, resources will only be returned for what your default - site is set for in your ``~/.pynsotrc``. - - If you want to specify, add an extra key under the group for ``site: n``. - -Output Examples ---------------- - -Here are some examples shown from just calling the command directly:: - - $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.' - { - "routers": { - "hosts": [ - "test1.example.com" - ], - "vars": { - "cool_level": "very", - "group": "routers" - } - }, - "firewalls": { - "hosts": [ - "test2.example.com" - ], - "vars": { - "cool_level": "enough", - "group": "firewalls" - } - }, - "_meta": { - "hostvars": { - "test2.example.com": { - "make": "SRX", - "site_id": 1, - "id": 108 - }, - "test1.example.com": { - "make": "MX80", - "site_id": 1, - "id": 107 - } - } - }, - "rtr_and_fw": { - "hosts": [ - "test1.example.com", - "test2.example.com" - ], - "vars": {} - } - } - - - $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.' 
- { - "make": "MX80", - "site_id": 1, - "id": 107 - } - -''' - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import os -import pkg_resources -import argparse -import json -import yaml -from textwrap import dedent -from pynsot.client import get_api_client -from pynsot.app import HttpServerError -from click.exceptions import UsageError - -from ansible.module_utils.six import string_types - - -def warning(*objs): - print("WARNING: ", *objs, file=sys.stderr) - - -class NSoTInventory(object): - '''NSoT Client object for gather inventory''' - - def __init__(self): - self.config = dict() - config_env = os.environ.get('NSOT_INVENTORY_CONFIG') - if config_env: - try: - config_file = os.path.abspath(config_env) - except IOError: # If file non-existent, use default config - self._config_default() - except Exception as e: - sys.exit('%s\n' % e) - - with open(config_file) as f: - try: - self.config.update(yaml.safe_load(f)) - except TypeError: # If empty file, use default config - warning('Empty config file') - self._config_default() - except Exception as e: - sys.exit('%s\n' % e) - else: # Use defaults if env var missing - self._config_default() - self.groups = list(self.config.keys()) - self.client = get_api_client() - self._meta = {'hostvars': dict()} - - def _config_default(self): - default_yaml = ''' - --- - routers: - query: deviceType=ROUTER - switches: - query: deviceType=SWITCH - firewalls: - query: deviceType=FIREWALL - ''' - self.config = yaml.safe_load(dedent(default_yaml)) - - def do_list(self): - '''Direct callback for when ``--list`` is provided - - Relies on the configuration generated from init to run - _inventory_group() - ''' - inventory = dict() - for group, contents in self.config.items(): - group_response = self._inventory_group(group, contents) - inventory.update(group_response) - inventory.update({'_meta': self._meta}) - return json.dumps(inventory) - - def do_host(self, host): - return 
json.dumps(self._hostvars(host)) - - def _hostvars(self, host): - '''Return dictionary of all device attributes - - Depending on number of devices in NSoT, could be rather slow since this - has to request every device resource to filter through - ''' - device = [i for i in self.client.devices.get() - if host in i['hostname']][0] - attributes = device['attributes'] - attributes.update({'site_id': device['site_id'], 'id': device['id']}) - return attributes - - def _inventory_group(self, group, contents): - '''Takes a group and returns inventory for it as dict - - :param group: Group name - :type group: str - :param contents: The contents of the group's YAML config - :type contents: dict - - contents param should look like:: - - { - 'query': 'xx', - 'vars': - 'a': 'b' - } - - Will return something like:: - - { group: { - hosts: [], - vars: {}, - } - ''' - query = contents.get('query') - hostvars = contents.get('vars', dict()) - site = contents.get('site', dict()) - obj = {group: dict()} - obj[group]['hosts'] = [] - obj[group]['vars'] = hostvars - try: - assert isinstance(query, string_types) - except Exception: - sys.exit('ERR: Group queries must be a single string\n' - ' Group: %s\n' - ' Query: %s\n' % (group, query) - ) - try: - if site: - site = self.client.sites(site) - devices = site.devices.query.get(query=query) - else: - devices = self.client.devices.query.get(query=query) - except HttpServerError as e: - if '500' in str(e.response): - _site = 'Correct site id?' - _attr = 'Queried attributes actually exist?' - questions = _site + '\n' + _attr - sys.exit('ERR: 500 from server.\n%s' % questions) - else: - raise - except UsageError: - sys.exit('ERR: Could not connect to server. 
Running?') - - # Would do a list comprehension here, but would like to save code/time - # and also acquire attributes in this step - for host in devices: - # Iterate through each device that matches query, assign hostname - # to the group's hosts array and then use this single iteration as - # a chance to update self._meta which will be used in the final - # return - hostname = host['hostname'] - obj[group]['hosts'].append(hostname) - attributes = host['attributes'] - attributes.update({'site_id': host['site_id'], 'id': host['id']}) - self._meta['hostvars'].update({hostname: attributes}) - - return obj - - -def parse_args(): - desc = __doc__.splitlines()[4] # Just to avoid being redundant - - # Establish parser with options and error out if no action provided - parser = argparse.ArgumentParser( - description=desc, - conflict_handler='resolve', - ) - - # Arguments - # - # Currently accepting (--list | -l) and (--host | -h) - # These must not be allowed together - parser.add_argument( - '--list', '-l', - help='Print JSON object containing hosts to STDOUT', - action='store_true', - dest='list_', # Avoiding syntax highlighting for list - ) - - parser.add_argument( - '--host', '-h', - help='Print JSON object containing hostvars for ', - action='store', - ) - args = parser.parse_args() - - if not args.list_ and not args.host: # Require at least one option - parser.exit(status=1, message='No action requested') - - if args.list_ and args.host: # Do not allow multiple options - parser.exit(status=1, message='Too many actions requested') - - return args - - -def main(): - '''Set up argument handling and callback routing''' - args = parse_args() - client = NSoTInventory() - - # Callback condition - if args.list_: - print(client.do_list()) - elif args.host: - print(client.do_host(args.host)) - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/nsot.yaml b/scripts/inventory/nsot.yaml deleted file mode 100644 index ebddbc8234..0000000000 --- 
a/scripts/inventory/nsot.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -juniper_routers: - query: 'deviceType=ROUTER manufacturer=JUNIPER' - vars: - group: juniper_routers - netconf: true - os: junos - -cisco_asa: - query: 'manufacturer=CISCO deviceType=FIREWALL' - vars: - group: cisco_asa - routed_vpn: false - stateful: true - -old_cisco_asa: - query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+' - vars: - old_nat: true - -not_f10: - query: '-manufacturer=FORCE10' diff --git a/scripts/inventory/openshift.py b/scripts/inventory/openshift.py deleted file mode 100755 index 85ea00cb1d..0000000000 --- a/scripts/inventory/openshift.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python - -# (c) 2013, Michael Scherer -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' ---- -inventory: openshift -short_description: Openshift gears external inventory script -description: - - Generates inventory of Openshift gears using the REST interface - - this permit to reuse playbook to setup an Openshift gear -author: Michael Scherer -''' - -import json -import os -import os.path -import sys -import StringIO - -from ansible.module_utils.urls import open_url -from ansible.module_utils.six.moves import configparser as ConfigParser - -configparser = None - - -def get_from_rhc_config(variable): - global configparser - CONF_FILE = os.path.expanduser('~/.openshift/express.conf') - if os.path.exists(CONF_FILE): - if not configparser: - ini_str = '[root]\n' + open(CONF_FILE, 'r').read() - configparser = ConfigParser.SafeConfigParser() - configparser.readfp(StringIO.StringIO(ini_str)) - try: - return configparser.get('root', variable) - except ConfigParser.NoOptionError: - return None - - -def get_config(env_var, config_var): - result = os.getenv(env_var) - if not result: - result = get_from_rhc_config(config_var) - if not 
result: - sys.exit("failed=True msg='missing %s'" % env_var) - return result - - -def get_json_from_api(url, username, password): - headers = {'Accept': 'application/json; version=1.5'} - response = open_url(url, headers=headers, url_username=username, url_password=password) - return json.loads(response.read())['data'] - - -username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin') -password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password') -broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server') - - -response = get_json_from_api(broker_url + '/domains', username, password) - -response = get_json_from_api("%s/domains/%s/applications" % - (broker_url, response[0]['id']), username, password) - -result = {} -for app in response: - - # ssh://520311404832ce3e570000ff@blog-johndoe.example.org - (user, host) = app['ssh_url'][6:].split('@') - app_name = host.split('-')[0] - - result[app_name] = {} - result[app_name]['hosts'] = [] - result[app_name]['hosts'].append(host) - result[app_name]['vars'] = {} - result[app_name]['vars']['ansible_ssh_user'] = user - -if len(sys.argv) == 2 and sys.argv[1] == '--list': - print(json.dumps(result)) -elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print(json.dumps({})) -else: - print("Need an argument, either --list or --host ") diff --git a/scripts/inventory/openvz.py b/scripts/inventory/openvz.py deleted file mode 100755 index 95eec83912..0000000000 --- a/scripts/inventory/openvz.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# openvz.py -# -# Copyright 2014 jordonr -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# Inspired by libvirt_lxc.py inventory script -# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py -# -# Groups are determined by the description field of openvz guests -# multiple groups can be separated by 
commas: webserver,dbserver - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from subprocess import Popen, PIPE -import sys -import json - - -# List openvz hosts -vzhosts = ['vzhost1', 'vzhost2', 'vzhost3'] -# Add openvz hosts to the inventory and Add "_meta" trick -inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}} -# default group, when description not defined -default_group = ['vzguest'] - - -def get_guests(): - # Loop through vzhosts - for h in vzhosts: - # SSH to vzhost and get the list of guests in json - pipe = Popen(['ssh', h, 'vzlist', '-j'], stdout=PIPE, universal_newlines=True) - - # Load Json info of guests - json_data = json.loads(pipe.stdout.read()) - - # loop through guests - for j in json_data: - # Add information to host vars - inventory['_meta']['hostvars'][j['hostname']] = { - 'ctid': j['ctid'], - 'veid': j['veid'], - 'vpsid': j['vpsid'], - 'private_path': j['private'], - 'root_path': j['root'], - 'ip': j['ip'] - } - - # determine group from guest description - if j['description'] is not None: - groups = j['description'].split(",") - else: - groups = default_group - - # add guest to inventory - for g in groups: - if g not in inventory: - inventory[g] = {'hosts': []} - - inventory[g]['hosts'].append(j['hostname']) - - return inventory - - -if len(sys.argv) == 2 and sys.argv[1] == '--list': - inv_json = get_guests() - print(json.dumps(inv_json, sort_keys=True)) -elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print(json.dumps({})) -else: - print("Need an argument, either --list or --host ") diff --git a/scripts/inventory/ovirt.ini b/scripts/inventory/ovirt.ini deleted file mode 100644 index d9aaf8a73e..0000000000 --- a/scripts/inventory/ovirt.ini +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2013 Google Inc. 
-# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -# Author: Josha Inglis based on the gce.ini by Eric Johnson - -[ovirt] -# For ovirt.py script, which can be used with Python SDK version 3 -# Service Account configuration information can be stored in the -# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already -# exist in your PYTHONPATH and be picked up automatically with an import -# statement in the inventory script. However, you can specify an absolute -# path to the secrets.py file with 'libcloud_secrets' parameter. -ovirt_api_secrets = - -# If you are not going to use a 'secrets.py' file, you can set the necessary -# authorization parameters here. -ovirt_url = -ovirt_username = -ovirt_password = -ovirt_ca_file = diff --git a/scripts/inventory/ovirt.py b/scripts/inventory/ovirt.py deleted file mode 100755 index 04f7fc58ae..0000000000 --- a/scripts/inventory/ovirt.py +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/env python -# Copyright 2015 IIX Inc. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -ovirt external inventory script -================================= - -Generates inventory that Ansible can understand by making API requests to -oVirt via the ovirt-engine-sdk-python library. 
- -When run against a specific host, this script returns the following variables -based on the data obtained from the ovirt_sdk Node object: - - ovirt_uuid - - ovirt_id - - ovirt_image - - ovirt_machine_type - - ovirt_ips - - ovirt_name - - ovirt_description - - ovirt_status - - ovirt_zone - - ovirt_tags - - ovirt_stats - -When run in --list mode, instances are grouped by the following categories: - - - zone: - zone group name. - - instance tags: - An entry is created for each tag. For example, if you have two instances - with a common tag called 'foo', they will both be grouped together under - the 'tag_foo' name. - - network name: - the name of the network is appended to 'network_' (e.g. the 'default' - network will result in a group named 'network_default') - - running status: - group name prefixed with 'status_' (e.g. status_up, status_down,..) - -Examples: - Execute uname on all instances in the us-central1-a zone - $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a" - - Use the ovirt inventory script to print out instance specific information - $ contrib/inventory/ovirt.py --host my_instance - -Author: Josha Inglis based on the gce.py by Eric Johnson -Version: 0.0.1 -""" - -USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin" -USER_AGENT_VERSION = "v1" - -import sys -import os -import argparse -from collections import defaultdict -from ansible.module_utils.six.moves import configparser as ConfigParser - -import json - -try: - # noinspection PyUnresolvedReferences - from ovirtsdk.api import API - # noinspection PyUnresolvedReferences - from ovirtsdk.xml import params -except ImportError: - print("ovirt inventory script requires ovirt-engine-sdk-python") - sys.exit(1) - - -class OVirtInventory(object): - def __init__(self): - # Read settings and parse CLI arguments - self.args = self.parse_cli_args() - self.driver = self.get_ovirt_driver() - - # Just display data for specific host - if self.args.host: - print(self.json_format_dict( - 
self.node_to_dict(self.get_instance(self.args.host)), - pretty=self.args.pretty - )) - sys.exit(0) - - # Otherwise, assume user wants all instances grouped - print( - self.json_format_dict( - data=self.group_instances(), - pretty=self.args.pretty - ) - ) - sys.exit(0) - - @staticmethod - def get_ovirt_driver(): - """ - Determine the ovirt authorization settings and return a ovirt_sdk driver. - - :rtype : ovirtsdk.api.API - """ - kwargs = {} - - ovirt_ini_default_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "ovirt.ini") - ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path) - - # Create a ConfigParser. - # This provides empty defaults to each key, so that environment - # variable configuration (as opposed to INI configuration) is able - # to work. - config = ConfigParser.SafeConfigParser(defaults={ - 'ovirt_url': '', - 'ovirt_username': '', - 'ovirt_password': '', - 'ovirt_api_secrets': '', - }) - if 'ovirt' not in config.sections(): - config.add_section('ovirt') - config.read(ovirt_ini_path) - - # Attempt to get ovirt params from a configuration file, if one - # exists. 
- secrets_path = config.get('ovirt', 'ovirt_api_secrets') - secrets_found = False - try: - # noinspection PyUnresolvedReferences,PyPackageRequirements - import secrets - - kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) - secrets_found = True - except ImportError: - pass - - if not secrets_found and secrets_path: - if not secrets_path.endswith('secrets.py'): - err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py" - print(err) - sys.exit(1) - sys.path.append(os.path.dirname(secrets_path)) - try: - # noinspection PyUnresolvedReferences,PyPackageRequirements - import secrets - - kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) - except ImportError: - pass - if not secrets_found: - kwargs = { - 'url': config.get('ovirt', 'ovirt_url'), - 'username': config.get('ovirt', 'ovirt_username'), - 'password': config.get('ovirt', 'ovirt_password'), - } - - # If the appropriate environment variables are set, they override - # other configuration; process those into our args and kwargs. - kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url']) - kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None) - kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None) - - # Retrieve and return the ovirt driver. 
- return API(insecure=True, **kwargs) - - @staticmethod - def parse_cli_args(): - """ - Command line argument processing - - :rtype : argparse.Namespace - """ - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt') - parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') - parser.add_argument('--host', action='store', help='Get all information about an instance') - parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') - return parser.parse_args() - - def node_to_dict(self, inst): - """ - :type inst: params.VM - """ - if inst is None: - return {} - - inst.get_custom_properties() - ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \ - if inst.get_guest_info() is not None else [] - stats = {} - for stat in inst.get_statistics().list(): - stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum() - - return { - 'ovirt_uuid': inst.get_id(), - 'ovirt_id': inst.get_id(), - 'ovirt_image': inst.get_os().get_type(), - 'ovirt_machine_type': self.get_machine_type(inst), - 'ovirt_ips': ips, - 'ovirt_name': inst.get_name(), - 'ovirt_description': inst.get_description(), - 'ovirt_status': inst.get_status().get_state(), - 'ovirt_zone': inst.get_cluster().get_id(), - 'ovirt_tags': self.get_tags(inst), - 'ovirt_stats': stats, - # Hosts don't have a public name, so we add an IP - 'ansible_ssh_host': ips[0] if len(ips) > 0 else None - } - - @staticmethod - def get_tags(inst): - """ - :type inst: params.VM - """ - return [x.get_name() for x in inst.get_tags().list()] - - def get_machine_type(self, inst): - inst_type = inst.get_instance_type() - if inst_type: - return self.driver.instancetypes.get(id=inst_type.id).name - - # noinspection PyBroadException,PyUnusedLocal - def get_instance(self, instance_name): - """Gets details about a specific instance """ - try: - return 
self.driver.vms.get(name=instance_name) - except Exception as e: - return None - - def group_instances(self): - """Group all instances""" - groups = defaultdict(list) - meta = {"hostvars": {}} - - for node in self.driver.vms.list(): - assert isinstance(node, params.VM) - name = node.get_name() - - meta["hostvars"][name] = self.node_to_dict(node) - - zone = node.get_cluster().get_name() - groups[zone].append(name) - - tags = self.get_tags(node) - for t in tags: - tag = 'tag_%s' % t - groups[tag].append(name) - - nets = [x.get_name() for x in node.get_nics().list()] - for net in nets: - net = 'network_%s' % net - groups[net].append(name) - - status = node.get_status().get_state() - stat = 'status_%s' % status.lower() - if stat in groups: - groups[stat].append(name) - else: - groups[stat] = [name] - - groups["_meta"] = meta - - return groups - - @staticmethod - def json_format_dict(data, pretty=False): - """ Converts a dict to a JSON object and dumps it as a formatted - string """ - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -# Run the script -OVirtInventory() diff --git a/scripts/inventory/ovirt4.py b/scripts/inventory/ovirt4.py deleted file mode 100755 index 84b68a1258..0000000000 --- a/scripts/inventory/ovirt4.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2016 Red Hat, Inc. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -""" -oVirt dynamic inventory script -================================= - -Generates dynamic inventory file for oVirt. 
- -Script will return following attributes for each virtual machine: - - id - - name - - host - - cluster - - status - - description - - fqdn - - os_type - - template - - tags - - statistics - - devices - -When run in --list mode, virtual machines are grouped by the following categories: - - cluster - - tag - - status - - Note: If there is some virtual machine which has has more tags it will be in both tag - records. - -Examples: - # Execute update of system on webserver virtual machine: - - $ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest" - - # Get webserver virtual machine information: - - $ contrib/inventory/ovirt4.py --host webserver - -Author: Ondra Machacek (@machacekondra) -""" - -import argparse -import os -import sys - -from collections import defaultdict - -from ansible.module_utils.six.moves import configparser -from ansible.module_utils.six import PY2 - -import json - -try: - import ovirtsdk4 as sdk - import ovirtsdk4.types as otypes -except ImportError: - print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0') - sys.exit(1) - - -def parse_args(): - """ - Create command line parser for oVirt dynamic inventory script. - """ - parser = argparse.ArgumentParser( - description='Ansible dynamic inventory script for oVirt.', - ) - parser.add_argument( - '--list', - action='store_true', - default=True, - help='Get data of all virtual machines (default: True).', - ) - parser.add_argument( - '--host', - help='Get data of virtual machines running on specified host.', - ) - parser.add_argument( - '--pretty', - action='store_true', - default=False, - help='Pretty format (default: False).', - ) - return parser.parse_args() - - -def create_connection(): - """ - Create a connection to oVirt engine API. 
- """ - # Get the path of the configuration file, by default use - # 'ovirt.ini' file in script directory: - default_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'ovirt.ini', - ) - config_path = os.environ.get('OVIRT_INI_PATH', default_path) - - # Create parser and add ovirt section if it doesn't exist: - if PY2: - config = configparser.SafeConfigParser( - defaults={ - 'ovirt_url': os.environ.get('OVIRT_URL'), - 'ovirt_username': os.environ.get('OVIRT_USERNAME'), - 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), - 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''), - }, allow_no_value=True - ) - else: - config = configparser.ConfigParser( - defaults={ - 'ovirt_url': os.environ.get('OVIRT_URL'), - 'ovirt_username': os.environ.get('OVIRT_USERNAME'), - 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), - 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''), - }, allow_no_value=True - ) - if not config.has_section('ovirt'): - config.add_section('ovirt') - config.read(config_path) - - # Create a connection with options defined in ini file: - return sdk.Connection( - url=config.get('ovirt', 'ovirt_url'), - username=config.get('ovirt', 'ovirt_username'), - password=config.get('ovirt', 'ovirt_password', raw=True), - ca_file=config.get('ovirt', 'ovirt_ca_file') or None, - insecure=not config.get('ovirt', 'ovirt_ca_file'), - ) - - -def get_dict_of_struct(connection, vm): - """ - Transform SDK Vm Struct type to Python dictionary. 
- """ - if vm is None: - return dict() - - vms_service = connection.system_service().vms_service() - clusters_service = connection.system_service().clusters_service() - vm_service = vms_service.vm_service(vm.id) - devices = vm_service.reported_devices_service().list() - tags = vm_service.tags_service().list() - stats = vm_service.statistics_service().list() - labels = vm_service.affinity_labels_service().list() - groups = clusters_service.cluster_service( - vm.cluster.id - ).affinity_groups_service().list() - - return { - 'id': vm.id, - 'name': vm.name, - 'host': connection.follow_link(vm.host).name if vm.host else None, - 'cluster': connection.follow_link(vm.cluster).name, - 'status': str(vm.status), - 'description': vm.description, - 'fqdn': vm.fqdn, - 'os_type': vm.os.type, - 'template': connection.follow_link(vm.template).name, - 'tags': [tag.name for tag in tags], - 'affinity_labels': [label.name for label in labels], - 'affinity_groups': [ - group.name for group in groups - if vm.name in [vm.name for vm in connection.follow_link(group.vms)] - ], - 'statistics': dict( - (stat.name, stat.values[0].datum) for stat in stats if stat.values - ), - 'devices': dict( - (device.name, [ip.address for ip in device.ips]) for device in devices if device.ips - ), - 'ansible_host': next((device.ips[0].address for device in devices if device.ips), None) - } - - -def get_data(connection, vm_name=None): - """ - Obtain data of `vm_name` if specified, otherwise obtain data of all vms. 
- """ - vms_service = connection.system_service().vms_service() - clusters_service = connection.system_service().clusters_service() - - if vm_name: - vm = vms_service.list(search='name=%s' % vm_name) or [None] - data = get_dict_of_struct( - connection=connection, - vm=vm[0], - ) - else: - vms = dict() - data = defaultdict(list) - for vm in vms_service.list(): - name = vm.name - vm_service = vms_service.vm_service(vm.id) - cluster_service = clusters_service.cluster_service(vm.cluster.id) - - # Add vm to vms dict: - vms[name] = get_dict_of_struct(connection, vm) - - # Add vm to cluster group: - cluster_name = connection.follow_link(vm.cluster).name - data['cluster_%s' % cluster_name].append(name) - - # Add vm to tag group: - tags_service = vm_service.tags_service() - for tag in tags_service.list(): - data['tag_%s' % tag.name].append(name) - - # Add vm to status group: - data['status_%s' % vm.status].append(name) - - # Add vm to affinity group: - for group in cluster_service.affinity_groups_service().list(): - if vm.name in [ - v.name for v in connection.follow_link(group.vms) - ]: - data['affinity_group_%s' % group.name].append(vm.name) - - # Add vm to affinity label group: - affinity_labels_service = vm_service.affinity_labels_service() - for label in affinity_labels_service.list(): - data['affinity_label_%s' % label.name].append(name) - - data["_meta"] = { - 'hostvars': vms, - } - - return data - - -def main(): - args = parse_args() - connection = create_connection() - - print( - json.dumps( - obj=get_data( - connection=connection, - vm_name=args.host, - ), - sort_keys=args.pretty, - indent=args.pretty * 2, - ) - ) - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/packet_net.ini b/scripts/inventory/packet_net.ini deleted file mode 100644 index 6dcc027b15..0000000000 --- a/scripts/inventory/packet_net.ini +++ /dev/null @@ -1,53 +0,0 @@ -# Ansible Packet.net external inventory script settings -# - -[packet] - -# Packet projects to get info for. 
Set this to 'all' to get info for all -# projects in Packet and merge the results together. Alternatively, set -# this to a comma separated list of projects. E.g. 'project-1,project-3,project-4' -projects = all -projects_exclude = - -# By default, packet devices in all state are returned. Specify -# packet device states to return as a comma-separated list. -# device_states = active, inactive, queued, provisioning - -# items per page to retrieve from packet api at a time -items_per_page = 999 - -# API calls to Packet are costly. For this reason, we cache the results of an API -# call. Set this to the path you want cache files to be written to. Two files -# will be written to this directory: -# - ansible-packet.cache -# - ansible-packet.index -cache_path = ~/.ansible/tmp - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. -# To disable the cache, set this value to 0 -cache_max_age = 300 - -# Organize groups into a nested/hierarchy instead of a flat namespace. -nested_groups = False - -# Replace - tags when creating groups to avoid issues with ansible -replace_dash_in_groups = True - -# The packet inventory output can become very large. To manage its size, -# configure which groups should be created. 
-group_by_device_id = True -group_by_hostname = True -group_by_facility = True -group_by_project = True -group_by_operating_system = True -group_by_plan_type = True -group_by_tags = True -group_by_tag_none = True - -# If you only want to include hosts that match a certain regular expression -# pattern_include = staging-* - -# If you want to exclude any hosts that match a certain regular expression -# pattern_exclude = staging-* - diff --git a/scripts/inventory/packet_net.py b/scripts/inventory/packet_net.py deleted file mode 100755 index 196e26869d..0000000000 --- a/scripts/inventory/packet_net.py +++ /dev/null @@ -1,496 +0,0 @@ -#!/usr/bin/env python - -''' -Packet.net external inventory script -================================= - -Generates inventory that Ansible can understand by making API request to -Packet.net using the Packet library. - -NOTE: This script assumes Ansible is being executed where the environment -variable needed for Packet API Token already been set: - export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs - -This script also assumes there is a packet_net.ini file alongside it. 
To specify a -different path to packet_net.ini, define the PACKET_NET_INI_PATH environment variable: - - export PACKET_NET_INI_PATH=/path/to/my_packet_net.ini - -''' - -# (c) 2016, Peter Sankauskas -# (c) 2017, Tomas Karasek -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -import sys -import os -import argparse -import re -from time import time - -from ansible.module_utils import six -from ansible.module_utils.six.moves import configparser - -try: - import packet -except ImportError as e: - sys.exit("failed=True msg='`packet-python` library required for this script'") - -import traceback - - -import json - - -ini_section = 'packet' - - -class PacketInventory(object): - - def _empty_inventory(self): - return {"_meta": {"hostvars": {}}} - - def __init__(self): - ''' Main execution path ''' - - # Inventory grouped by device IDs, tags, security groups, regions, - # and availability zones - self.inventory = self._empty_inventory() - - # Index of hostname (address) to device ID - self.index = {} - - # Read settings and parse CLI arguments - self.parse_cli_args() - self.read_settings() - - # Cache - if self.args.refresh_cache: - self.do_api_calls_update_cache() - elif not self.is_cache_valid(): - self.do_api_calls_update_cache() - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of devices for inventory - if self.inventory == self._empty_inventory(): - data_to_print = self.get_inventory_from_cache() - else: - data_to_print = self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - - if os.path.isfile(self.cache_path_cache): - mod_time = 
os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_index): - return True - - return False - - def read_settings(self): - ''' Reads the settings from the packet_net.ini file ''' - if six.PY3: - config = configparser.ConfigParser() - else: - config = configparser.SafeConfigParser() - - _ini_path_raw = os.environ.get('PACKET_NET_INI_PATH') - - if _ini_path_raw: - packet_ini_path = os.path.expanduser(os.path.expandvars(_ini_path_raw)) - else: - packet_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini') - config.read(packet_ini_path) - - # items per page - self.items_per_page = 999 - if config.has_option(ini_section, 'items_per_page'): - config.get(ini_section, 'items_per_page') - - # Instance states to be gathered in inventory. Default is all of them. - packet_valid_device_states = [ - 'active', - 'inactive', - 'queued', - 'provisioning' - ] - self.packet_device_states = [] - if config.has_option(ini_section, 'device_states'): - for device_state in config.get(ini_section, 'device_states').split(','): - device_state = device_state.strip() - if device_state not in packet_valid_device_states: - continue - self.packet_device_states.append(device_state) - else: - self.packet_device_states = packet_valid_device_states - - # Cache related - cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path')) - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - - self.cache_path_cache = cache_dir + "/ansible-packet.cache" - self.cache_path_index = cache_dir + "/ansible-packet.index" - self.cache_max_age = config.getint(ini_section, 'cache_max_age') - - # Configure nested groups instead of flat namespace. 
- if config.has_option(ini_section, 'nested_groups'): - self.nested_groups = config.getboolean(ini_section, 'nested_groups') - else: - self.nested_groups = False - - # Replace dash or not in group names - if config.has_option(ini_section, 'replace_dash_in_groups'): - self.replace_dash_in_groups = config.getboolean(ini_section, 'replace_dash_in_groups') - else: - self.replace_dash_in_groups = True - - # Configure which groups should be created. - group_by_options = [ - 'group_by_device_id', - 'group_by_hostname', - 'group_by_facility', - 'group_by_project', - 'group_by_operating_system', - 'group_by_plan_type', - 'group_by_tags', - 'group_by_tag_none', - ] - for option in group_by_options: - if config.has_option(ini_section, option): - setattr(self, option, config.getboolean(ini_section, option)) - else: - setattr(self, option, True) - - # Do we need to just include hosts that match a pattern? - try: - pattern_include = config.get(ini_section, 'pattern_include') - if pattern_include and len(pattern_include) > 0: - self.pattern_include = re.compile(pattern_include) - else: - self.pattern_include = None - except configparser.NoOptionError: - self.pattern_include = None - - # Do we need to exclude hosts that match a pattern? 
- try: - pattern_exclude = config.get(ini_section, 'pattern_exclude') - if pattern_exclude and len(pattern_exclude) > 0: - self.pattern_exclude = re.compile(pattern_exclude) - else: - self.pattern_exclude = None - except configparser.NoOptionError: - self.pattern_exclude = None - - # Projects - self.projects = [] - configProjects = config.get(ini_section, 'projects') - configProjects_exclude = config.get(ini_section, 'projects_exclude') - if (configProjects == 'all'): - for projectInfo in self.get_projects(): - if projectInfo.name not in configProjects_exclude: - self.projects.append(projectInfo.name) - else: - self.projects = configProjects.split(",") - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Packet') - parser.add_argument('--list', action='store_true', default=True, - help='List Devices (default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific device') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to Packet (default: False - use cache files)') - self.args = parser.parse_args() - - def do_api_calls_update_cache(self): - ''' Do API calls to each region, and save data in cache files ''' - - for projectInfo in self.get_projects(): - if projectInfo.name in self.projects: - self.get_devices_by_project(projectInfo) - - self.write_to_cache(self.inventory, self.cache_path_cache) - self.write_to_cache(self.index, self.cache_path_index) - - def connect(self): - ''' create connection to api server''' - token = os.environ.get('PACKET_API_TOKEN') - if token is None: - raise Exception("Error reading token from environment (PACKET_API_TOKEN)!") - manager = packet.Manager(auth_token=token) - return manager - - def get_projects(self): - '''Makes a Packet API call to get the list of projects''' - - params = { - 'per_page': 
self.items_per_page - } - - try: - manager = self.connect() - projects = manager.list_projects(params=params) - return projects - except Exception as e: - traceback.print_exc() - self.fail_with_error(e, 'getting Packet projects') - - def get_devices_by_project(self, project): - ''' Makes an Packet API call to the list of devices in a particular - project ''' - - params = { - 'per_page': self.items_per_page - } - - try: - manager = self.connect() - devices = manager.list_devices(project_id=project.id, params=params) - - for device in devices: - self.add_device(device, project) - - except Exception as e: - traceback.print_exc() - self.fail_with_error(e, 'getting Packet devices') - - def fail_with_error(self, err_msg, err_operation=None): - '''log an error to std err for ansible-playbook to consume and exit''' - if err_operation: - err_msg = 'ERROR: "{err_msg}", while: {err_operation}\n'.format( - err_msg=err_msg, err_operation=err_operation) - sys.stderr.write(err_msg) - sys.exit(1) - - def get_device(self, device_id): - manager = self.connect() - - device = manager.get_device(device_id) - return device - - def add_device(self, device, project): - ''' Adds a device to the inventory and index, as long as it is - addressable ''' - - # Only return devices with desired device states - if device.state not in self.packet_device_states: - return - - # Select the best destination address. Only include management - # addresses as non-management (elastic) addresses need manual - # host configuration to be routable. - # See https://help.packet.net/article/54-elastic-ips. - dest = None - for ip_address in device.ip_addresses: - if ip_address['public'] is True and \ - ip_address['address_family'] == 4 and \ - ip_address['management'] is True: - dest = ip_address['address'] - - if not dest: - # Skip devices we cannot address (e.g. 
private VPC subnet) - return - - # if we only want to include hosts that match a pattern, skip those that don't - if self.pattern_include and not self.pattern_include.match(device.hostname): - return - - # if we need to exclude hosts that match a pattern, skip those - if self.pattern_exclude and self.pattern_exclude.match(device.hostname): - return - - # Add to index - self.index[dest] = [project.id, device.id] - - # Inventory: Group by device ID (always a group of 1) - if self.group_by_device_id: - self.inventory[device.id] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'devices', device.id) - - # Inventory: Group by device name (hopefully a group of 1) - if self.group_by_hostname: - self.push(self.inventory, device.hostname, dest) - if self.nested_groups: - self.push_group(self.inventory, 'hostnames', project.name) - - # Inventory: Group by project - if self.group_by_project: - self.push(self.inventory, project.name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'projects', project.name) - - # Inventory: Group by facility - if self.group_by_facility: - self.push(self.inventory, device.facility['code'], dest) - if self.nested_groups: - if self.group_by_facility: - self.push_group(self.inventory, project.name, device.facility['code']) - - # Inventory: Group by OS - if self.group_by_operating_system: - self.push(self.inventory, device.operating_system.slug, dest) - if self.nested_groups: - self.push_group(self.inventory, 'operating_systems', device.operating_system.slug) - - # Inventory: Group by plan type - if self.group_by_plan_type: - self.push(self.inventory, device.plan['slug'], dest) - if self.nested_groups: - self.push_group(self.inventory, 'plans', device.plan['slug']) - - # Inventory: Group by tag keys - if self.group_by_tags: - for k in device.tags: - key = self.to_safe("tag_" + k) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) - - # 
Global Tag: devices without tags - if self.group_by_tag_none and len(device.tags) == 0: - self.push(self.inventory, 'tag_none', dest) - if self.nested_groups: - self.push_group(self.inventory, 'tags', 'tag_none') - - # Global Tag: tag all Packet devices - self.push(self.inventory, 'packet', dest) - - self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device) - - def get_host_info_dict_from_device(self, device): - device_vars = {} - for key in vars(device): - value = getattr(device, key) - key = self.to_safe('packet_' + key) - - # Handle complex types - if key == 'packet_state': - device_vars[key] = device.state or '' - elif key == 'packet_hostname': - device_vars[key] = value - elif isinstance(value, (int, bool)): - device_vars[key] = value - elif isinstance(value, six.string_types): - device_vars[key] = value.strip() - elif value is None: - device_vars[key] = '' - elif key == 'packet_facility': - device_vars[key] = value['code'] - elif key == 'packet_operating_system': - device_vars[key] = value.slug - elif key == 'packet_plan': - device_vars[key] = value['slug'] - elif key == 'packet_tags': - for k in value: - key = self.to_safe('packet_tag_' + k) - device_vars[key] = k - else: - pass - # print key - # print type(value) - # print value - - return device_vars - - def get_host_info(self): - ''' Get variables about a specific host ''' - - if len(self.index) == 0: - # Need to load index from cache - self.load_index_from_cache() - - if self.args.host not in self.index: - # try updating the cache - self.do_api_calls_update_cache() - if self.args.host not in self.index: - # host might not exist anymore - return self.json_format_dict({}, True) - - (project_id, device_id) = self.index[self.args.host] - - device = self.get_device(device_id) - return self.json_format_dict(self.get_host_info_dict_from_device(device), True) - - def push(self, my_dict, key, element): - ''' Push an element onto an array that may not have been defined in - the dict 
''' - group_info = my_dict.setdefault(key, []) - if isinstance(group_info, dict): - host_list = group_info.setdefault('hosts', []) - host_list.append(element) - else: - group_info.append(element) - - def push_group(self, my_dict, key, element): - ''' Push a group as a child of another group. ''' - parent_group = my_dict.setdefault(key, {}) - if not isinstance(parent_group, dict): - parent_group = my_dict[key] = {'hosts': parent_group} - child_groups = parent_group.setdefault('children', []) - if element not in child_groups: - child_groups.append(element) - - def get_inventory_from_cache(self): - ''' Reads the inventory from the cache file and returns it as a JSON - object ''' - - cache = open(self.cache_path_cache, 'r') - json_inventory = cache.read() - return json_inventory - - def load_index_from_cache(self): - ''' Reads the index from the cache file sets self.index ''' - - cache = open(self.cache_path_index, 'r') - json_index = cache.read() - self.index = json.loads(json_index) - - def write_to_cache(self, data, filename): - ''' Writes data in JSON format to a file ''' - - json_data = self.json_format_dict(data, True) - cache = open(filename, 'w') - cache.write(json_data) - cache.close() - - def uncammelize(self, key): - temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - regex = r"[^A-Za-z0-9\_" - if not self.replace_dash_in_groups: - regex += r"\-" - return re.sub(regex + "]", "_", word) - - def json_format_dict(self, data, pretty=False): - ''' Converts a dict to a JSON object and dumps it as a formatted - string ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - -# Run the script -PacketInventory() diff --git a/scripts/inventory/proxmox.py b/scripts/inventory/proxmox.py deleted file mode 100755 index 
2196934115..0000000000 --- a/scripts/inventory/proxmox.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/env python - -# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Updated 2016 by Matt Harris -# -# Added support for Proxmox VE 4.x -# Added support for using the Notes field of a VM to define groups and variables: -# A well-formatted JSON object in the Notes field will be added to the _meta -# section for that VM. In addition, the "groups" key of this JSON object may be -# used to specify group membership: -# -# { "groups": ["utility", "databases"], "a": false, "b": true } - -import json -import os -import sys -from optparse import OptionParser - -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves.urllib.parse import urlencode - -from ansible.module_utils.urls import open_url - - -class ProxmoxNodeList(list): - def get_names(self): - return [node['node'] for node in self] - - -class ProxmoxVM(dict): - def get_variables(self): - variables = {} - for key, value in iteritems(self): - variables['proxmox_' + key] = value - return variables - - -class ProxmoxVMList(list): - def __init__(self, data=None): - data = [] if data is None else data - - for item in data: - self.append(ProxmoxVM(item)) - - def get_names(self): - return [vm['name'] for vm in self if vm['template'] != 1] - - def get_by_name(self, name): - results = [vm for vm in self if vm['name'] == name] - return results[0] if len(results) > 0 else None - - def get_variables(self): - variables = {} - for vm in self: - variables[vm['name']] = vm.get_variables() - - return variables - - -class ProxmoxPoolList(list): - def get_names(self): - return [pool['poolid'] for pool in self] - - -class ProxmoxPool(dict): - def get_members_name(self): - return [member['name'] for member in self['members'] if 
member['template'] != 1] - - -class ProxmoxAPI(object): - def __init__(self, options): - self.options = options - self.credentials = None - - if not options.url: - raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).') - elif not options.username: - raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).') - elif not options.password: - raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).') - - def auth(self): - request_path = '{0}api2/json/access/ticket'.format(self.options.url) - - request_params = urlencode({ - 'username': self.options.username, - 'password': self.options.password, - }) - - data = json.load(open_url(request_path, data=request_params)) - - self.credentials = { - 'ticket': data['data']['ticket'], - 'CSRFPreventionToken': data['data']['CSRFPreventionToken'], - } - - def get(self, url, data=None): - request_path = '{0}{1}'.format(self.options.url, url) - - headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])} - request = open_url(request_path, data=data, headers=headers) - - response = json.load(request) - return response['data'] - - def nodes(self): - return ProxmoxNodeList(self.get('api2/json/nodes')) - - def vms_by_type(self, node, type): - return ProxmoxVMList(self.get('api2/json/nodes/{0}/{1}'.format(node, type))) - - def vm_description_by_type(self, node, vm, type): - return self.get('api2/json/nodes/{0}/{1}/{2}/config'.format(node, type, vm)) - - def node_qemu(self, node): - return self.vms_by_type(node, 'qemu') - - def node_qemu_description(self, node, vm): - return self.vm_description_by_type(node, vm, 'qemu') - - def node_lxc(self, node): - return self.vms_by_type(node, 'lxc') - - def node_lxc_description(self, node, vm): - return self.vm_description_by_type(node, vm, 'lxc') - - def pools(self): - return ProxmoxPoolList(self.get('api2/json/pools')) - - def pool(self, poolid): - return ProxmoxPool(self.get('api2/json/pools/{0}'.format(poolid))) - - -def 
main_list(options): - results = { - 'all': { - 'hosts': [], - }, - '_meta': { - 'hostvars': {}, - } - } - - proxmox_api = ProxmoxAPI(options) - proxmox_api.auth() - - for node in proxmox_api.nodes().get_names(): - qemu_list = proxmox_api.node_qemu(node) - results['all']['hosts'] += qemu_list.get_names() - results['_meta']['hostvars'].update(qemu_list.get_variables()) - lxc_list = proxmox_api.node_lxc(node) - results['all']['hosts'] += lxc_list.get_names() - results['_meta']['hostvars'].update(lxc_list.get_variables()) - - for vm in results['_meta']['hostvars']: - vmid = results['_meta']['hostvars'][vm]['proxmox_vmid'] - try: - type = results['_meta']['hostvars'][vm]['proxmox_type'] - except KeyError: - type = 'qemu' - try: - description = proxmox_api.vm_description_by_type(node, vmid, type)['description'] - except KeyError: - description = None - - try: - metadata = json.loads(description) - except TypeError: - metadata = {} - except ValueError: - metadata = { - 'notes': description - } - - if 'groups' in metadata: - # print metadata - for group in metadata['groups']: - if group not in results: - results[group] = { - 'hosts': [] - } - results[group]['hosts'] += [vm] - - results['_meta']['hostvars'][vm].update(metadata) - - # pools - for pool in proxmox_api.pools().get_names(): - results[pool] = { - 'hosts': proxmox_api.pool(pool).get_members_name(), - } - - return results - - -def main_host(options): - proxmox_api = ProxmoxAPI(options) - proxmox_api.auth() - - for node in proxmox_api.nodes().get_names(): - qemu_list = proxmox_api.node_qemu(node) - qemu = qemu_list.get_by_name(options.host) - if qemu: - return qemu.get_variables() - - return {} - - -def main(): - parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME') - parser.add_option('--list', action="store_true", default=False, dest="list") - parser.add_option('--host', dest="host") - parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url') - parser.add_option('--username', 
default=os.environ.get('PROXMOX_USERNAME'), dest='username') - parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password') - parser.add_option('--pretty', action="store_true", default=False, dest='pretty') - (options, args) = parser.parse_args() - - if options.list: - data = main_list(options) - elif options.host: - data = main_host(options) - else: - parser.print_help() - sys.exit(1) - - indent = None - if options.pretty: - indent = 2 - - print(json.dumps(data, indent=indent)) - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/rackhd.py b/scripts/inventory/rackhd.py deleted file mode 100755 index 9b4372f679..0000000000 --- a/scripts/inventory/rackhd.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import os -import requests -import argparse - -RACKHD_URL = 'http://localhost:8080' - - -class RackhdInventory(object): - def __init__(self, nodeids): - self._inventory = {} - for nodeid in nodeids: - self._load_inventory_data(nodeid) - inventory = {} - for (nodeid, info) in self._inventory.items(): - inventory[nodeid] = (self._format_output(nodeid, info)) - print(json.dumps(inventory)) - - def _load_inventory_data(self, nodeid): - info = {} - info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid) - info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid) - - results = {} - for (key, url) in info.items(): - r = requests.get(url, verify=False) - results[key] = r.text - self._inventory[nodeid] = results - - def _format_output(self, nodeid, info): - try: - node_info = json.loads(info['lookup']) - ipaddress = '' - if len(node_info) > 0: - ipaddress = node_info[0]['ipAddress'] - output = {'hosts': [ipaddress], 'vars': {}} - for (key, result) in info.items(): - 
output['vars'][key] = json.loads(result) - output['vars']['ansible_ssh_user'] = 'monorail' - except KeyError: - pass - return output - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument('--host') - parser.add_argument('--list', action='store_true') - return parser.parse_args() - - -try: - # check if rackhd url(ie:10.1.1.45:8080) is specified in the environment - RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL']) -except Exception: - # use default values - pass - -# Use the nodeid specified in the environment to limit the data returned -# or return data for all available nodes -nodeids = [] - -if (parse_args().host): - try: - nodeids += parse_args().host.split(',') - RackhdInventory(nodeids) - except Exception: - pass -if (parse_args().list): - try: - url = RACKHD_URL + '/api/common/nodes' - r = requests.get(url, verify=False) - data = json.loads(r.text) - for entry in data: - if entry['type'] == 'compute': - nodeids.append(entry['id']) - RackhdInventory(nodeids) - except Exception: - pass diff --git a/scripts/inventory/rax.ini b/scripts/inventory/rax.ini deleted file mode 100644 index 15948e7b2e..0000000000 --- a/scripts/inventory/rax.ini +++ /dev/null @@ -1,66 +0,0 @@ -# Ansible Rackspace external inventory script settings -# - -[rax] - -# Environment Variable: RAX_CREDS_FILE -# -# An optional configuration that points to a pyrax-compatible credentials -# file. -# -# If not supplied, rax.py will look for a credentials file -# at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, -# and therefore requires a file formatted per the SDK's specifications. -# -# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md -# creds_file = ~/.rackspace_cloud_credentials - -# Environment Variable: RAX_REGION -# -# An optional environment variable to narrow inventory search -# scope. If used, needs a value like ORD, DFW, SYD (a Rackspace -# datacenter) and optionally accepts a comma-separated list. 
-# regions = IAD,ORD,DFW - -# Environment Variable: RAX_ENV -# -# A configuration that will use an environment as configured in -# ~/.pyrax.cfg, see -# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md -# env = prod - -# Environment Variable: RAX_META_PREFIX -# Default: meta -# -# A configuration that changes the prefix used for meta key/value groups. -# For compatibility with ec2.py set to "tag" -# meta_prefix = meta - -# Environment Variable: RAX_ACCESS_NETWORK -# Default: public -# -# A configuration that will tell the inventory script to use a specific -# server network to determine the ansible_ssh_host value. If no address -# is found, ansible_ssh_host will not be set. Accepts a comma-separated -# list of network names, the first found wins. -# access_network = public - -# Environment Variable: RAX_ACCESS_IP_VERSION -# Default: 4 -# -# A configuration related to "access_network" that will attempt to -# determine the ansible_ssh_host value for either IPv4 or IPv6. If no -# address is found, ansible_ssh_host will not be set. -# Acceptable values are: 4 or 6. Values other than 4 or 6 -# will be ignored, and 4 will be used. Accepts a comma separated list, -# the first found wins. -# access_ip_version = 4 - -# Environment Variable: RAX_CACHE_MAX_AGE -# Default: 600 -# -# A configuration the changes the behavior or the inventory cache. -# Inventory listing performed before this value will be returned from -# the cache instead of making a full request for all inventory. Setting -# this value to 0 will force a full request. 
-# cache_max_age = 600 \ No newline at end of file diff --git a/scripts/inventory/rax.py b/scripts/inventory/rax.py deleted file mode 100755 index 0cac0f002c..0000000000 --- a/scripts/inventory/rax.py +++ /dev/null @@ -1,460 +0,0 @@ -#!/usr/bin/env python - -# (c) 2013, Jesse Keating , -# Matt Martz -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -""" -Rackspace Cloud Inventory - -Authors: - Jesse Keating , - Matt Martz - - -Description: - Generates inventory that Ansible can understand by making API request to - Rackspace Public Cloud API - - When run against a specific host, this script returns variables similar to: - rax_os-ext-sts_task_state - rax_addresses - rax_links - rax_image - rax_os-ext-sts_vm_state - rax_flavor - rax_id - rax_rax-bandwidth_bandwidth - rax_user_id - rax_os-dcf_diskconfig - rax_accessipv4 - rax_accessipv6 - rax_progress - rax_os-ext-sts_power_state - rax_metadata - rax_status - rax_updated - rax_hostid - rax_name - rax_created - rax_tenant_id - rax_loaded - -Configuration: - rax.py can be configured using a rax.ini file or via environment - variables. The rax.ini file should live in the same directory along side - this script. - - The section header for configuration values related to this - inventory plugin is [rax] - - [rax] - creds_file = ~/.rackspace_cloud_credentials - regions = IAD,ORD,DFW - env = prod - meta_prefix = meta - access_network = public - access_ip_version = 4 - - Each of these configurations also has a corresponding environment variable. - An environment variable will override a configuration file value. - - creds_file: - Environment Variable: RAX_CREDS_FILE - - An optional configuration that points to a pyrax-compatible credentials - file. - - If not supplied, rax.py will look for a credentials file - at ~/.rackspace_cloud_credentials. 
It uses the Rackspace Python SDK, - and therefore requires a file formatted per the SDK's specifications. - - https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md - - regions: - Environment Variable: RAX_REGION - - An optional environment variable to narrow inventory search - scope. If used, needs a value like ORD, DFW, SYD (a Rackspace - datacenter) and optionally accepts a comma-separated list. - - environment: - Environment Variable: RAX_ENV - - A configuration that will use an environment as configured in - ~/.pyrax.cfg, see - https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md - - meta_prefix: - Environment Variable: RAX_META_PREFIX - Default: meta - - A configuration that changes the prefix used for meta key/value groups. - For compatibility with ec2.py set to "tag" - - access_network: - Environment Variable: RAX_ACCESS_NETWORK - Default: public - - A configuration that will tell the inventory script to use a specific - server network to determine the ansible_ssh_host value. If no address - is found, ansible_ssh_host will not be set. Accepts a comma-separated - list of network names, the first found wins. - - access_ip_version: - Environment Variable: RAX_ACCESS_IP_VERSION - Default: 4 - - A configuration related to "access_network" that will attempt to - determine the ansible_ssh_host value for either IPv4 or IPv6. If no - address is found, ansible_ssh_host will not be set. - Acceptable values are: 4 or 6. Values other than 4 or 6 - will be ignored, and 4 will be used. Accepts a comma-separated list, - the first found wins. 
- -Examples: - List server instances - $ RAX_CREDS_FILE=~/.raxpub rax.py --list - - List servers in ORD datacenter only - $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list - - List servers in ORD and DFW datacenters - $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list - - Get server details for server named "server.example.com" - $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com - - Use the instance private IP to connect (instead of public IP) - $ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list -""" - -import os -import re -import sys -import argparse -import warnings -import collections - -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import configparser as ConfigParser - -import json - -try: - import pyrax - from pyrax.utils import slugify -except ImportError: - sys.exit('pyrax is required for this module') - -from time import time - -from ansible.constants import get_config -from ansible.module_utils.parsing.convert_bool import boolean -from ansible.module_utils.six import text_type - -NON_CALLABLES = (text_type, str, bool, dict, int, list, type(None)) - - -def load_config_file(): - p = ConfigParser.ConfigParser() - config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'rax.ini') - try: - p.read(config_file) - except ConfigParser.Error: - return None - else: - return p - - -def rax_slugify(value): - return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) - - -def to_dict(obj): - instance = {} - for key in dir(obj): - value = getattr(obj, key) - if isinstance(value, NON_CALLABLES) and not key.startswith('_'): - key = rax_slugify(key) - instance[key] = value - - return instance - - -def host(regions, hostname): - hostvars = {} - - for region in regions: - # Connect to the region - cs = pyrax.connect_to_cloudservers(region=region) - for server in cs.servers.list(): - if server.name == hostname: - for key, value in to_dict(server).items(): - 
hostvars[key] = value - - # And finally, add an IP address - hostvars['ansible_ssh_host'] = server.accessIPv4 - print(json.dumps(hostvars, sort_keys=True, indent=4)) - - -def _list_into_cache(regions): - groups = collections.defaultdict(list) - hostvars = collections.defaultdict(dict) - images = {} - cbs_attachments = collections.defaultdict(dict) - - prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') - - try: - # Ansible 2.3+ - networks = get_config(p, 'rax', 'access_network', - 'RAX_ACCESS_NETWORK', 'public', value_type='list') - except TypeError: - # Ansible 2.2.x and below - # pylint: disable=unexpected-keyword-arg - networks = get_config(p, 'rax', 'access_network', - 'RAX_ACCESS_NETWORK', 'public', islist=True) - try: - try: - # Ansible 2.3+ - ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', - 'RAX_ACCESS_IP_VERSION', 4, value_type='list')) - except TypeError: - # Ansible 2.2.x and below - # pylint: disable=unexpected-keyword-arg - ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', - 'RAX_ACCESS_IP_VERSION', 4, islist=True)) - except Exception: - ip_versions = [4] - else: - ip_versions = [v for v in ip_versions if v in [4, 6]] - if not ip_versions: - ip_versions = [4] - - # Go through all the regions looking for servers - for region in regions: - # Connect to the region - cs = pyrax.connect_to_cloudservers(region=region) - if cs is None: - warnings.warn( - 'Connecting to Rackspace region "%s" has caused Pyrax to ' - 'return None. Is this a valid region?' 
% region, - RuntimeWarning) - continue - for server in cs.servers.list(): - # Create a group on region - groups[region].append(server.name) - - # Check if group metadata key in servers' metadata - group = server.metadata.get('group') - if group: - groups[group].append(server.name) - - for extra_group in server.metadata.get('groups', '').split(','): - if extra_group: - groups[extra_group].append(server.name) - - # Add host metadata - for key, value in to_dict(server).items(): - hostvars[server.name][key] = value - - hostvars[server.name]['rax_region'] = region - - for key, value in iteritems(server.metadata): - groups['%s_%s_%s' % (prefix, key, value)].append(server.name) - - groups['instance-%s' % server.id].append(server.name) - groups['flavor-%s' % server.flavor['id']].append(server.name) - - # Handle boot from volume - if not server.image: - if not cbs_attachments[region]: - cbs = pyrax.connect_to_cloud_blockstorage(region) - for vol in cbs.list(): - if boolean(vol.bootable, strict=False): - for attachment in vol.attachments: - metadata = vol.volume_image_metadata - server_id = attachment['server_id'] - cbs_attachments[region][server_id] = { - 'id': metadata['image_id'], - 'name': slugify(metadata['image_name']) - } - image = cbs_attachments[region].get(server.id) - if image: - server.image = {'id': image['id']} - hostvars[server.name]['rax_image'] = server.image - hostvars[server.name]['rax_boot_source'] = 'volume' - images[image['id']] = image['name'] - else: - hostvars[server.name]['rax_boot_source'] = 'local' - - try: - imagegroup = 'image-%s' % images[server.image['id']] - groups[imagegroup].append(server.name) - groups['image-%s' % server.image['id']].append(server.name) - except KeyError: - try: - image = cs.images.get(server.image['id']) - except cs.exceptions.NotFound: - groups['image-%s' % server.image['id']].append(server.name) - else: - images[image.id] = image.human_id - groups['image-%s' % image.human_id].append(server.name) - groups['image-%s' % 
server.image['id']].append(server.name) - - # And finally, add an IP address - ansible_ssh_host = None - # use accessIPv[46] instead of looping address for 'public' - for network_name in networks: - if ansible_ssh_host: - break - if network_name == 'public': - for version_name in ip_versions: - if ansible_ssh_host: - break - if version_name == 6 and server.accessIPv6: - ansible_ssh_host = server.accessIPv6 - elif server.accessIPv4: - ansible_ssh_host = server.accessIPv4 - if not ansible_ssh_host: - addresses = server.addresses.get(network_name, []) - for address in addresses: - for version_name in ip_versions: - if ansible_ssh_host: - break - if address.get('version') == version_name: - ansible_ssh_host = address.get('addr') - break - if ansible_ssh_host: - hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host - - if hostvars: - groups['_meta'] = {'hostvars': hostvars} - - with open(get_cache_file_path(regions), 'w') as cache_file: - json.dump(groups, cache_file) - - -def get_cache_file_path(regions): - regions_str = '.'.join([reg.strip().lower() for reg in regions]) - ansible_tmp_path = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp') - if not os.path.exists(ansible_tmp_path): - os.makedirs(ansible_tmp_path) - return os.path.join(ansible_tmp_path, - 'ansible-rax-%s-%s.cache' % ( - pyrax.identity.username, regions_str)) - - -def _list(regions, refresh_cache=True): - cache_max_age = int(get_config(p, 'rax', 'cache_max_age', - 'RAX_CACHE_MAX_AGE', 600)) - - if (not os.path.exists(get_cache_file_path(regions)) or - refresh_cache or - (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age): - # Cache file doesn't exist or older than 10m or refresh cache requested - _list_into_cache(regions) - - with open(get_cache_file_path(regions), 'r') as cache_file: - groups = json.load(cache_file) - print(json.dumps(groups, sort_keys=True, indent=4)) - - -def parse_args(): - parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud ' - 
'inventory module') - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('--list', action='store_true', - help='List active servers') - group.add_argument('--host', help='List details about the specific host') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help=('Force refresh of cache, making API requests to' - 'RackSpace (default: False - use cache files)')) - return parser.parse_args() - - -def setup(): - default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials') - - env = get_config(p, 'rax', 'environment', 'RAX_ENV', None) - if env: - pyrax.set_environment(env) - - keyring_username = pyrax.get_setting('keyring_username') - - # Attempt to grab credentials from environment first - creds_file = get_config(p, 'rax', 'creds_file', - 'RAX_CREDS_FILE', None) - if creds_file is not None: - creds_file = os.path.expanduser(creds_file) - else: - # But if that fails, use the default location of - # ~/.rackspace_cloud_credentials - if os.path.isfile(default_creds_file): - creds_file = default_creds_file - elif not keyring_username: - sys.exit('No value in environment variable %s and/or no ' - 'credentials file at %s' - % ('RAX_CREDS_FILE', default_creds_file)) - - identity_type = pyrax.get_setting('identity_type') - pyrax.set_setting('identity_type', identity_type or 'rackspace') - - region = pyrax.get_setting('region') - - try: - if keyring_username: - pyrax.keyring_auth(keyring_username, region=region) - else: - pyrax.set_credential_file(creds_file, region=region) - except Exception as e: - sys.exit("%s: %s" % (e, e.message)) - - regions = [] - if region: - regions.append(region) - else: - try: - # Ansible 2.3+ - region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', - value_type='list') - except TypeError: - # Ansible 2.2.x and below - # pylint: disable=unexpected-keyword-arg - region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', - islist=True) - - for region in 
region_list: - region = region.strip().upper() - if region == 'ALL': - regions = pyrax.regions - break - elif region not in pyrax.regions: - sys.exit('Unsupported region %s' % region) - elif region not in regions: - regions.append(region) - - return regions - - -def main(): - args = parse_args() - regions = setup() - if args.list: - _list(regions, refresh_cache=args.refresh_cache) - elif args.host: - host(regions, args.host) - sys.exit(0) - - -p = load_config_file() -if __name__ == '__main__': - main() diff --git a/scripts/inventory/rhv.py b/scripts/inventory/rhv.py deleted file mode 120000 index e66635dd42..0000000000 --- a/scripts/inventory/rhv.py +++ /dev/null @@ -1 +0,0 @@ -ovirt4.py \ No newline at end of file diff --git a/scripts/inventory/rudder.ini b/scripts/inventory/rudder.ini deleted file mode 100644 index 748b3d2121..0000000000 --- a/scripts/inventory/rudder.ini +++ /dev/null @@ -1,35 +0,0 @@ -# Rudder external inventory script settings -# - -[rudder] - -# Your Rudder server API URL, typically: -# https://rudder.local/rudder/api -uri = https://rudder.local/rudder/api - -# By default, Rudder uses a self-signed certificate. Set this to True -# to disable certificate validation. -disable_ssl_certificate_validation = True - -# Your Rudder API token, created in the Web interface. -token = aaabbbccc - -# Rudder API version to use, use "latest" for latest available -# version. -version = latest - -# Property to use as group name in the output. -# Can generally be "id" or "displayName". -group_name = displayName - -# Fail if there are two groups with the same name or two hosts with the -# same hostname in the output. -fail_if_name_collision = True - -# We cache the results of Rudder API in a local file -cache_path = /tmp/ansible-rudder.cache - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. -# Set to 0 to disable cache. 
-cache_max_age = 500 diff --git a/scripts/inventory/rudder.py b/scripts/inventory/rudder.py deleted file mode 100755 index 9a65aca99a..0000000000 --- a/scripts/inventory/rudder.py +++ /dev/null @@ -1,286 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2015, Normation SAS -# -# Inspired by the EC2 inventory plugin: -# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -###################################################################### - -''' -Rudder external inventory script -================================= - -Generates inventory that Ansible can understand by making API request to -a Rudder server. This script is compatible with Rudder 2.10 or later. - -The output JSON includes all your Rudder groups, containing the hostnames of -their nodes. Groups and nodes have a variable called rudder_group_id and -rudder_node_id, which is the Rudder internal id of the item, allowing to identify -them uniquely. Hosts variables also include your node properties, which are -key => value properties set by the API and specific to each node. - -This script assumes there is an rudder.ini file alongside it. 
To specify a -different path to rudder.ini, define the RUDDER_INI_PATH environment variable: - - export RUDDER_INI_PATH=/path/to/my_rudder.ini - -You have to configure your Rudder server information, either in rudder.ini or -by overriding it with environment variables: - - export RUDDER_API_VERSION='latest' - export RUDDER_API_TOKEN='my_token' - export RUDDER_API_URI='https://rudder.local/rudder/api' -''' - - -import sys -import os -import re -import argparse -import httplib2 as http -from time import time -from ansible.module_utils import six -from ansible.module_utils.six.moves import configparser -from ansible.module_utils.six.moves.urllib.parse import urlparse - -import json - - -class RudderInventory(object): - def __init__(self): - ''' Main execution path ''' - - # Empty inventory by default - self.inventory = {} - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - # Create connection - self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation) - - # Cache - if self.args.refresh_cache: - self.update_cache() - elif not self.is_cache_valid(): - self.update_cache() - else: - self.load_cache() - - data_to_print = {} - - if self.args.host: - data_to_print = self.get_host_info(self.args.host) - elif self.args.list: - data_to_print = self.get_list_info() - - print(self.json_format_dict(data_to_print, True)) - - def read_settings(self): - ''' Reads the settings from the rudder.ini file ''' - if six.PY2: - config = configparser.SafeConfigParser() - else: - config = configparser.ConfigParser() - rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini') - rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path))) - config.read(rudder_ini_path) - - self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token')) - self.version = os.environ.get('RUDDER_API_VERSION', config.get('rudder', 
'version')) - self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri')) - - self.disable_ssl_validation = config.getboolean('rudder', 'disable_ssl_certificate_validation') - self.group_name = config.get('rudder', 'group_name') - self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision') - - self.cache_path = config.get('rudder', 'cache_path') - self.cache_max_age = config.getint('rudder', 'cache_max_age') - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)') - self.args = parser.parse_args() - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - - if os.path.isfile(self.cache_path): - mod_time = os.path.getmtime(self.cache_path) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - return True - - return False - - def load_cache(self): - ''' Reads the cache from the cache file sets self.cache ''' - - cache = open(self.cache_path, 'r') - json_cache = cache.read() - - try: - self.inventory = json.loads(json_cache) - except ValueError as e: - self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache') - - def write_cache(self): - ''' Writes data in JSON format to a file ''' - - json_data = self.json_format_dict(self.inventory, True) - cache = open(self.cache_path, 'w') - cache.write(json_data) - cache.close() - - def get_nodes(self): - ''' Gets the nodes list from Rudder ''' - - path = 
'/nodes?select=nodeAndPolicyServer' - result = self.api_call(path) - - nodes = {} - - for node in result['data']['nodes']: - nodes[node['id']] = {} - nodes[node['id']]['hostname'] = node['hostname'] - if 'properties' in node: - nodes[node['id']]['properties'] = node['properties'] - else: - nodes[node['id']]['properties'] = [] - - return nodes - - def get_groups(self): - ''' Gets the groups list from Rudder ''' - - path = '/groups' - result = self.api_call(path) - - groups = {} - - for group in result['data']['groups']: - groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])} - - return groups - - def update_cache(self): - ''' Fetches the inventory information from Rudder and creates the inventory ''' - - nodes = self.get_nodes() - groups = self.get_groups() - - inventory = {} - - for group in groups: - # Check for name collision - if self.fail_if_name_collision: - if groups[group]['name'] in inventory: - self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups') - # Add group to inventory - inventory[groups[group]['name']] = {} - inventory[groups[group]['name']]['hosts'] = [] - inventory[groups[group]['name']]['vars'] = {} - inventory[groups[group]['name']]['vars']['rudder_group_id'] = group - for node in groups[group]['hosts']: - # Add node to group - inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname']) - - properties = {} - - for node in nodes: - # Check for name collision - if self.fail_if_name_collision: - if nodes[node]['hostname'] in properties: - self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts') - # Add node properties to inventory - properties[nodes[node]['hostname']] = {} - properties[nodes[node]['hostname']]['rudder_node_id'] = node - for node_property in nodes[node]['properties']: - properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value'] - 
- inventory['_meta'] = {} - inventory['_meta']['hostvars'] = properties - - self.inventory = inventory - - if self.cache_max_age > 0: - self.write_cache() - - def get_list_info(self): - ''' Gets inventory information from local cache ''' - - return self.inventory - - def get_host_info(self, hostname): - ''' Gets information about a specific host from local cache ''' - - if hostname in self.inventory['_meta']['hostvars']: - return self.inventory['_meta']['hostvars'][hostname] - else: - return {} - - def api_call(self, path): - ''' Performs an API request ''' - - headers = { - 'X-API-Token': self.token, - 'X-API-Version': self.version, - 'Content-Type': 'application/json;charset=utf-8' - } - - target = urlparse(self.uri + path) - method = 'GET' - body = '' - - try: - response, content = self.conn.request(target.geturl(), method, body, headers) - except Exception: - self.fail_with_error('Error connecting to Rudder server') - - try: - data = json.loads(content) - except ValueError as e: - self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response') - - return data - - def fail_with_error(self, err_msg, err_operation=None): - ''' Logs an error to std err for ansible-playbook to consume and exit ''' - if err_operation: - err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( - err_msg=err_msg, err_operation=err_operation) - sys.stderr.write(err_msg) - sys.exit(1) - - def json_format_dict(self, data, pretty=False): - ''' Converts a dict to a JSON object and dumps it as a formatted - string ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be - used as Ansible variable names ''' - - return re.sub(r'[^A-Za-z0-9\_]', '_', word) - - -# Run the script -RudderInventory() diff --git a/scripts/inventory/scaleway.ini b/scripts/inventory/scaleway.ini deleted file mode 100644 index 
99615a124c..0000000000 --- a/scripts/inventory/scaleway.ini +++ /dev/null @@ -1,37 +0,0 @@ -# Ansible dynamic inventory script for Scaleway cloud provider -# - -[compute] -# Fetch inventory for regions. If not defined will read the SCALEWAY_REGION environment variable -# -# regions = all -# regions = ams1 -# regions = par1, ams1 -regions = par1 - - -# Define a Scaleway token to perform required queries on the API -# in order to generate inventory output. -# -[auth] -# Token to authenticate with Scaleway's API. -# If not defined will read the SCALEWAY_TOKEN environment variable -# -api_token = mysecrettoken - - -# To avoid performing excessive calls to Scaleway API you can define a -# cache for the plugin output. Within the time defined in seconds, latest -# output will be reused. After that time, the cache will be refreshed. -# -[cache] -cache_max_age = 60 -cache_dir = '~/.ansible/tmp' - - -[defaults] -# You may want to use only public IP addresses or private IP addresses. -# You can set public_ip_only configuration to get public IPs only. -# If not defined defaults to retrieving private IP addresses. -# -public_ip_only = false diff --git a/scripts/inventory/scaleway.py b/scripts/inventory/scaleway.py deleted file mode 100755 index f68eb128a5..0000000000 --- a/scripts/inventory/scaleway.py +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -''' -External inventory script for Scaleway -==================================== - -Shamelessly copied from an existing inventory script. - -This script generates an inventory that Ansible can understand by making API requests to Scaleway API - -Requires some python libraries, ensure to have them installed when using this script. (pip install requests https://pypi.org/project/requests/) - -Before using this script you may want to modify scaleway.ini config file. - -This script generates an Ansible hosts file with these host groups: - -: Defines host itself with Scaleway's hostname as group name. 
-: Contains all hosts which has "" as tag. -: Contains all hosts which are in the "" region. -all: Contains all hosts defined in Scaleway. -''' - -# (c) 2017, Paul B. -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import copy -import os -import requests -from ansible.module_utils import six -from ansible.module_utils.six.moves import configparser -import sys -import time -import traceback - -import json - -EMPTY_GROUP = { - 'children': [], - 'hosts': [] -} - - -class ScalewayAPI: - REGIONS = ['par1', 'ams1'] - - def __init__(self, auth_token, region): - self.session = requests.session() - self.session.headers.update({ - 'User-Agent': 'Ansible Python/%s' % (sys.version.split(' ')[0]) - }) - self.session.headers.update({ - 'X-Auth-Token': auth_token.encode('latin1') - }) - self.base_url = 'https://cp-%s.scaleway.com' % (region) - - def servers(self): - raw = self.session.get('/'.join([self.base_url, 'servers'])) - - try: - response = raw.json() - return self.get_resource('servers', response, raw) - except ValueError: - return [] - - def get_resource(self, resource, response, raw): - raw.raise_for_status() - - if resource in response: - return response[resource] - else: - raise ValueError( - "Resource %s not found in Scaleway API response" % (resource)) - - -def env_or_param(env_key, param=None, fallback=None): - env_value = os.environ.get(env_key) - - if (param, env_value) == (None, None): - return fallback - elif env_value is not None: - return env_value - else: - return param - - -def save_cache(data, config): - ''' saves item to cache ''' - dpath = config.get('cache', 'cache_dir') - try: - cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'w') - cache.write(json.dumps(data)) - cache.close() - except IOError as e: - pass # not really sure what to do here - - -def get_cache(cache_item, config): - ''' 
returns cached item ''' - dpath = config.get('cache', 'cache_dir') - inv = {} - try: - cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'r') - inv = cache.read() - cache.close() - except IOError as e: - pass # not really sure what to do here - - return inv - - -def cache_available(config): - ''' checks if we have a 'fresh' cache available for item requested ''' - - if config.has_option('cache', 'cache_dir'): - dpath = config.get('cache', 'cache_dir') - - try: - existing = os.stat( - '/'.join([dpath, 'scaleway_ansible_inventory.json'])) - except OSError: - return False - - if config.has_option('cache', 'cache_max_age'): - maxage = config.get('cache', 'cache_max_age') - else: - maxage = 60 - if (int(time.time()) - int(existing.st_mtime)) <= int(maxage): - return True - - return False - - -def generate_inv_from_api(config): - try: - inventory['scaleway'] = copy.deepcopy(EMPTY_GROUP) - - auth_token = None - if config.has_option('auth', 'api_token'): - auth_token = config.get('auth', 'api_token') - auth_token = env_or_param('SCALEWAY_TOKEN', param=auth_token) - if auth_token is None: - sys.stderr.write('ERROR: missing authentication token for Scaleway API') - sys.exit(1) - - if config.has_option('compute', 'regions'): - regions = config.get('compute', 'regions') - if regions == 'all': - regions = ScalewayAPI.REGIONS - else: - regions = map(str.strip, regions.split(',')) - else: - regions = [ - env_or_param('SCALEWAY_REGION', fallback='par1') - ] - - for region in regions: - api = ScalewayAPI(auth_token, region) - - for server in api.servers(): - hostname = server['hostname'] - if config.has_option('defaults', 'public_ip_only') and config.getboolean('defaults', 'public_ip_only'): - ip = server['public_ip']['address'] - else: - ip = server['private_ip'] - for server_tag in server['tags']: - if server_tag not in inventory: - inventory[server_tag] = copy.deepcopy(EMPTY_GROUP) - inventory[server_tag]['children'].append(hostname) - if region not in 
inventory: - inventory[region] = copy.deepcopy(EMPTY_GROUP) - inventory[region]['children'].append(hostname) - inventory['scaleway']['children'].append(hostname) - inventory[hostname] = [] - inventory[hostname].append(ip) - - return inventory - except Exception: - # Return empty hosts output - traceback.print_exc() - return {'scaleway': {'hosts': []}, '_meta': {'hostvars': {}}} - - -def get_inventory(config): - ''' Reads the inventory from cache or Scaleway api ''' - - if cache_available(config): - inv = get_cache('scaleway_ansible_inventory.json', config) - else: - inv = generate_inv_from_api(config) - - save_cache(inv, config) - return json.dumps(inv) - - -if __name__ == '__main__': - inventory = {} - - # Read config - if six.PY3: - config = configparser.ConfigParser() - else: - config = configparser.SafeConfigParser() - for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']: - if os.path.exists(configfilename): - config.read(configfilename) - break - - if cache_available(config): - inventory = get_cache('scaleway_ansible_inventory.json', config) - else: - inventory = get_inventory(config) - - # return to ansible - sys.stdout.write(str(inventory)) - sys.stdout.flush() diff --git a/scripts/inventory/serf.py b/scripts/inventory/serf.py deleted file mode 100755 index df917ef554..0000000000 --- a/scripts/inventory/serf.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python - -# (c) 2015, Marc Abramowitz -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Dynamic inventory script which lets you use nodes discovered by Serf -# (https://serfdom.io/). 
-# -# Requires the `serfclient` Python module from -# https://pypi.org/project/serfclient/ -# -# Environment variables -# --------------------- -# - `SERF_RPC_ADDR` -# - `SERF_RPC_AUTH` -# -# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr - -import argparse -import collections -import os -import sys - -# https://pypi.org/project/serfclient/ -from serfclient import SerfClient, EnvironmentConfig - -import json - -_key = 'serf' - - -def _serf_client(): - env = EnvironmentConfig() - return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key) - - -def get_serf_members_data(): - return _serf_client().members().body['Members'] - - -def get_nodes(data): - return [node['Name'] for node in data] - - -def get_groups(data): - groups = collections.defaultdict(list) - - for node in data: - for key, value in node['Tags'].items(): - groups[value].append(node['Name']) - - return groups - - -def get_meta(data): - meta = {'hostvars': {}} - for node in data: - meta['hostvars'][node['Name']] = node['Tags'] - return meta - - -def print_list(): - data = get_serf_members_data() - nodes = get_nodes(data) - groups = get_groups(data) - meta = get_meta(data) - inventory_data = {_key: nodes, '_meta': meta} - inventory_data.update(groups) - print(json.dumps(inventory_data)) - - -def print_host(host): - data = get_serf_members_data() - meta = get_meta(data) - print(json.dumps(meta['hostvars'][host])) - - -def get_args(args_list): - parser = argparse.ArgumentParser( - description='ansible inventory script reading from serf cluster') - mutex_group = parser.add_mutually_exclusive_group(required=True) - help_list = 'list all hosts from serf cluster' - mutex_group.add_argument('--list', action='store_true', help=help_list) - help_host = 'display variables for a host' - mutex_group.add_argument('--host', help=help_host) - return parser.parse_args(args_list) - - -def main(args_list): - args = get_args(args_list) - if args.list: - print_list() - 
if args.host: - print_host(args.host) - - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/scripts/inventory/softlayer.py b/scripts/inventory/softlayer.py deleted file mode 100755 index 03f9820ad2..0000000000 --- a/scripts/inventory/softlayer.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env python -""" -SoftLayer external inventory script. - -The SoftLayer Python API client is required. Use `pip install softlayer` to install it. -You have a few different options for configuring your username and api_key. You can pass -environment variables (SL_USERNAME and SL_API_KEY). You can also write INI file to -~/.softlayer or /etc/softlayer.conf. For more information see the SL API at: -- https://softlayer-python.readthedocs.io/en/latest/config_file.html - -The SoftLayer Python client has a built in command for saving this configuration file -via the command `sl config setup`. -""" - -# Copyright (C) 2014 AJ Bourg -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# -# I found the structure of the ec2.py script very helpful as an example -# as I put this together. Thanks to whoever wrote that script! 
-# - -import SoftLayer -import re -import argparse -import itertools -import json - - -class SoftLayerInventory(object): - common_items = [ - 'id', - 'globalIdentifier', - 'hostname', - 'domain', - 'fullyQualifiedDomainName', - 'primaryBackendIpAddress', - 'primaryIpAddress', - 'datacenter', - 'tagReferences', - 'userData.value', - ] - - vs_items = [ - 'lastKnownPowerState.name', - 'powerState', - 'maxCpu', - 'maxMemory', - 'activeTransaction.transactionStatus[friendlyName,name]', - 'status', - ] - - hw_items = [ - 'hardwareStatusId', - 'processorPhysicalCoreAmount', - 'memoryCapacity', - ] - - def _empty_inventory(self): - return {"_meta": {"hostvars": {}}} - - def __init__(self): - '''Main path''' - - self.inventory = self._empty_inventory() - - self.parse_options() - - if self.args.list: - self.get_all_servers() - print(self.json_format_dict(self.inventory, True)) - elif self.args.host: - self.get_all_servers() - print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True)) - - def to_safe(self, word): - '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups''' - - return re.sub(r"[^A-Za-z0-9\-\.]", "_", word) - - def push(self, my_dict, key, element): - '''Push an element onto an array that may not have been defined in the dict''' - - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def parse_options(self): - '''Parse all the arguments from the CLI''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer') - parser.add_argument('--list', action='store_true', default=False, - help='List instances (default: False)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - self.args = parser.parse_args() - - def json_format_dict(self, data, pretty=False): - '''Converts a dict to a JSON object and dumps it as a formatted string''' - - if pretty: - return 
json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - def process_instance(self, instance, instance_type="virtual"): - '''Populate the inventory dictionary with any instance information''' - - # only want active instances - if 'status' in instance and instance['status']['name'] != 'Active': - return - - # and powered on instances - if 'powerState' in instance and instance['powerState']['name'] != 'Running': - return - - # 5 is active for hardware... see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid - if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5: - return - - # if there's no IP address, we can't reach it - if 'primaryIpAddress' not in instance: - return - - instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else '' - - dest = instance['primaryIpAddress'] - - instance['tags'] = list() - for tag in instance['tagReferences']: - instance['tags'].append(tag['tag']['name']) - - del instance['tagReferences'] - - self.inventory["_meta"]["hostvars"][dest] = instance - - # Inventory: group by memory - if 'maxMemory' in instance: - self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest) - elif 'memoryCapacity' in instance: - self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest) - - # Inventory: group by cpu count - if 'maxCpu' in instance: - self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest) - elif 'processorPhysicalCoreAmount' in instance: - self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest) - - # Inventory: group by datacenter - self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest) - - # Inventory: group by hostname - self.push(self.inventory, self.to_safe(instance['hostname']), dest) - - # Inventory: group by FQDN - self.push(self.inventory, 
self.to_safe(instance['fullyQualifiedDomainName']), dest) - - # Inventory: group by domain - self.push(self.inventory, self.to_safe(instance['domain']), dest) - - # Inventory: group by type (hardware/virtual) - self.push(self.inventory, instance_type, dest) - - for tag in instance['tags']: - self.push(self.inventory, tag, dest) - - def get_virtual_servers(self): - '''Get all the CCI instances''' - vs = SoftLayer.VSManager(self.client) - mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items)) - instances = vs.list_instances(mask=mask) - - for instance in instances: - self.process_instance(instance) - - def get_physical_servers(self): - '''Get all the hardware instances''' - hw = SoftLayer.HardwareManager(self.client) - mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items)) - instances = hw.list_hardware(mask=mask) - - for instance in instances: - self.process_instance(instance, 'hardware') - - def get_all_servers(self): - self.client = SoftLayer.Client() - self.get_virtual_servers() - self.get_physical_servers() - - -SoftLayerInventory() diff --git a/scripts/inventory/spacewalk.ini b/scripts/inventory/spacewalk.ini deleted file mode 100644 index 5433c4221b..0000000000 --- a/scripts/inventory/spacewalk.ini +++ /dev/null @@ -1,16 +0,0 @@ -# Put this ini-file in the same directory as spacewalk.py -# Command line options have precedence over options defined in here. - -[spacewalk] -# To limit the script on one organization in spacewalk, uncomment org_number -# and fill in the organization ID: -# org_number=2 - -# To prefix the group names with the organization ID set prefix_org_name=true. -# This is convenient when org_number is not set and you have the same group names -# in multiple organizations within spacewalk -# The prefix is "org_number-" -prefix_org_name=false - -# Default cache_age for files created with spacewalk-report is 300sec. 
-cache_age=300 diff --git a/scripts/inventory/spacewalk.py b/scripts/inventory/spacewalk.py deleted file mode 100755 index b3b8cf8e7f..0000000000 --- a/scripts/inventory/spacewalk.py +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/env python - -""" -Spacewalk external inventory script -================================= - -Ansible has a feature where instead of reading from /etc/ansible/hosts -as a text file, it can query external programs to obtain the list -of hosts, groups the hosts are in, and even variables to assign to each host. - -To use this, copy this file over /etc/ansible/hosts and chmod +x the file. -This, more or less, allows you to keep one central database containing -info about all of your managed instances. - -This script is dependent upon the spacealk-reports package being installed -on the same machine. It is basically a CSV-to-JSON converter from the -output of "spacewalk-report system-groups-systems|inventory". - -Tested with Ansible 1.9.2 and spacewalk 2.3 -""" -# -# Author:: Jon Miller -# Copyright:: Copyright (c) 2013, Jon Miller -# -# Extended for support of multiple organizations and -# adding the "_meta" dictionary to --list output by -# Bernhard Lichtinger 2015 -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import os -import time -from optparse import OptionParser -import subprocess -import json - -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import configparser as ConfigParser - - -base_dir = os.path.dirname(os.path.realpath(__file__)) -default_ini_file = os.path.join(base_dir, "spacewalk.ini") - -SW_REPORT = '/usr/bin/spacewalk-report' -CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports") -CACHE_AGE = 300 # 5min -INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file))) - - -# Sanity check -if 
not os.path.exists(SW_REPORT): - print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr) - sys.exit(1) - -# Pre-startup work -if not os.path.exists(CACHE_DIR): - os.mkdir(CACHE_DIR) - os.chmod(CACHE_DIR, 0o2775) - -# Helper functions -# ------------------------------ - - -def spacewalk_report(name): - """Yield a dictionary form of each CSV output produced by the specified - spacewalk-report - """ - cache_filename = os.path.join(CACHE_DIR, name) - if not os.path.exists(cache_filename) or \ - (time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE: - # Update the cache - fh = open(cache_filename, 'w') - p = subprocess.Popen([SW_REPORT, name], stdout=fh) - p.wait() - fh.close() - - with open(cache_filename, 'r') as f: - lines = f.readlines() - keys = lines[0].strip().split(',') - # add 'spacewalk_' prefix to the keys - keys = ['spacewalk_' + key for key in keys] - for line in lines[1:]: - values = line.strip().split(',') - if len(keys) == len(values): - yield dict(zip(keys, values)) - - -# Options -# ------------------------------ - -parser = OptionParser(usage="%prog [options] --list | --host ") -parser.add_option('--list', default=False, dest="list", action="store_true", - help="Produce a JSON consumable grouping of servers for Ansible") -parser.add_option('--host', default=None, dest="host", - help="Generate additional host specific details for given host for Ansible") -parser.add_option('-H', '--human', dest="human", - default=False, action="store_true", - help="Produce a friendlier version of either server list or host detail") -parser.add_option('-o', '--org', default=None, dest="org_number", - help="Limit to spacewalk organization number") -parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true", - help="Prefix the group name with the organization number") -(options, args) = parser.parse_args() - - -# read spacewalk.ini if present -# ------------------------------ -if os.path.exists(INI_FILE): - config = 
ConfigParser.SafeConfigParser() - config.read(INI_FILE) - if config.has_option('spacewalk', 'cache_age'): - CACHE_AGE = config.get('spacewalk', 'cache_age') - if not options.org_number and config.has_option('spacewalk', 'org_number'): - options.org_number = config.get('spacewalk', 'org_number') - if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'): - options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name') - - -# Generate dictionary for mapping group_id to org_id -# ------------------------------ -org_groups = {} -try: - for group in spacewalk_report('system-groups'): - org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id'] - -except (OSError) as e: - print('Problem executing the command "%s system-groups": %s' % - (SW_REPORT, str(e)), file=sys.stderr) - sys.exit(2) - - -# List out the known server from Spacewalk -# ------------------------------ -if options.list: - - # to build the "_meta"-Group with hostvars first create dictionary for later use - host_vars = {} - try: - for item in spacewalk_report('inventory'): - host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items()) - - except (OSError) as e: - print('Problem executing the command "%s inventory": %s' % - (SW_REPORT, str(e)), file=sys.stderr) - sys.exit(2) - - groups = {} - meta = {"hostvars": {}} - try: - for system in spacewalk_report('system-groups-systems'): - # first get org_id of system - org_id = org_groups[system['spacewalk_group_id']] - - # shall we add the org_id as prefix to the group name: - if options.prefix_org_name: - prefix = org_id + "-" - group_name = prefix + system['spacewalk_group_name'] - else: - group_name = system['spacewalk_group_name'] - - # if we are limited to one organization: - if options.org_number: - if org_id == options.org_number: - if group_name not in groups: - groups[group_name] = set() - - 
groups[group_name].add(system['spacewalk_server_name']) - if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]: - meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']] - # or we list all groups and systems: - else: - if group_name not in groups: - groups[group_name] = set() - - groups[group_name].add(system['spacewalk_server_name']) - if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]: - meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']] - - except (OSError) as e: - print('Problem executing the command "%s system-groups-systems": %s' % - (SW_REPORT, str(e)), file=sys.stderr) - sys.exit(2) - - if options.human: - for group, systems in iteritems(groups): - print('[%s]\n%s\n' % (group, '\n'.join(systems))) - else: - final = dict([(k, list(s)) for k, s in iteritems(groups)]) - final["_meta"] = meta - print(json.dumps(final)) - # print(json.dumps(groups)) - sys.exit(0) - - -# Return a details information concerning the spacewalk server -# ------------------------------ -elif options.host: - - host_details = {} - try: - for system in spacewalk_report('inventory'): - if system['spacewalk_hostname'] == options.host: - host_details = system - break - - except (OSError) as e: - print('Problem executing the command "%s inventory": %s' % - (SW_REPORT, str(e)), file=sys.stderr) - sys.exit(2) - - if options.human: - print('Host: %s' % options.host) - for k, v in iteritems(host_details): - print(' %s: %s' % (k, '\n '.join(v.split(';')))) - else: - print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items()))) - sys.exit(0) - -else: - - parser.print_help() - sys.exit(1) diff --git a/scripts/inventory/ssh_config.py b/scripts/inventory/ssh_config.py deleted file mode 100755 index ad56a53ebb..0000000000 --- 
a/scripts/inventory/ssh_config.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env python - -# (c) 2014, Tomas Karasek -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Dynamic inventory script which lets you use aliases from ~/.ssh/config. -# -# There were some issues with various Paramiko versions. I took a deeper look -# and tested heavily. Now, ansible parses this alright with Paramiko versions -# 1.7.2 to 1.15.2. -# -# It prints inventory based on parsed ~/.ssh/config. You can refer to hosts -# with their alias, rather than with the IP or hostname. It takes advantage -# of the ansible_ssh_{host,port,user,private_key_file}. -# -# If you have in your .ssh/config: -# Host git -# HostName git.domain.org -# User tkarasek -# IdentityFile /home/tomk/keys/thekey -# -# You can do -# $ ansible git -m ping -# -# Example invocation: -# ssh_config.py --list -# ssh_config.py --host - -import argparse -import os.path -import sys - -import json - -import paramiko - -from ansible.module_utils.common._collections_compat import MutableSequence - -SSH_CONF = '~/.ssh/config' - -_key = 'ssh_config' - -_ssh_to_ansible = [('user', 'ansible_ssh_user'), - ('hostname', 'ansible_ssh_host'), - ('identityfile', 'ansible_ssh_private_key_file'), - ('port', 'ansible_ssh_port')] - - -def get_config(): - if not os.path.isfile(os.path.expanduser(SSH_CONF)): - return {} - with open(os.path.expanduser(SSH_CONF)) as f: - cfg = paramiko.SSHConfig() - cfg.parse(f) - ret_dict = {} - for d in cfg._config: - if isinstance(d['host'], MutableSequence): - alias = d['host'][0] - else: - alias = d['host'] - if ('?' 
in alias) or ('*' in alias): - continue - _copy = dict(d) - del _copy['host'] - if 'config' in _copy: - ret_dict[alias] = _copy['config'] - else: - ret_dict[alias] = _copy - return ret_dict - - -def print_list(): - cfg = get_config() - meta = {'hostvars': {}} - for alias, attributes in cfg.items(): - tmp_dict = {} - for ssh_opt, ans_opt in _ssh_to_ansible: - if ssh_opt in attributes: - # If the attribute is a list, just take the first element. - # Private key is returned in a list for some reason. - attr = attributes[ssh_opt] - if isinstance(attr, MutableSequence): - attr = attr[0] - tmp_dict[ans_opt] = attr - if tmp_dict: - meta['hostvars'][alias] = tmp_dict - - print(json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta})) - - -def print_host(host): - cfg = get_config() - print(json.dumps(cfg[host])) - - -def get_args(args_list): - parser = argparse.ArgumentParser( - description='ansible inventory script parsing .ssh/config') - mutex_group = parser.add_mutually_exclusive_group(required=True) - help_list = 'list all hosts from .ssh/config inventory' - mutex_group.add_argument('--list', action='store_true', help=help_list) - help_host = 'display variables for a host' - mutex_group.add_argument('--host', help=help_host) - return parser.parse_args(args_list) - - -def main(args_list): - - args = get_args(args_list) - if args.list: - print_list() - if args.host: - print_host(args.host) - - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/scripts/inventory/stacki.py b/scripts/inventory/stacki.py deleted file mode 100755 index 2c6bb37c9a..0000000000 --- a/scripts/inventory/stacki.py +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2016, Hugh Ma -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Stacki inventory script -# Configure stacki.yml with proper auth information and place 
in the following: -# - ../inventory/stacki.yml -# - /etc/stacki/stacki.yml -# - /etc/ansible/stacki.yml -# The stacki.yml file can contain entries for authentication information -# regarding the Stacki front-end node. -# -# use_hostnames uses hostname rather than interface ip as connection -# -# - -""" -Example Usage: - List Stacki Nodes - $ ./stack.py --list - - -Example Configuration: ---- -stacki: - auth: - stacki_user: admin - stacki_password: abc12345678910 - stacki_endpoint: http://192.168.200.50/stack -use_hostnames: false -""" - -import argparse -import os -import sys -import yaml -from distutils.version import StrictVersion - -import json - -try: - import requests -except Exception: - sys.exit('requests package is required for this inventory script') - - -CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml'] - - -def stack_auth(params): - endpoint = params['stacki_endpoint'] - auth_creds = {'USERNAME': params['stacki_user'], - 'PASSWORD': params['stacki_password']} - - client = requests.session() - client.get(endpoint) - - init_csrf = client.cookies['csrftoken'] - - header = {'csrftoken': init_csrf, 'X-CSRFToken': init_csrf, - 'Content-type': 'application/x-www-form-urlencoded'} - - login_endpoint = endpoint + "/login" - - login_req = client.post(login_endpoint, data=auth_creds, headers=header) - - csrftoken = login_req.cookies['csrftoken'] - sessionid = login_req.cookies['sessionid'] - - auth_creds.update(CSRFTOKEN=csrftoken, SESSIONID=sessionid) - - return client, auth_creds - - -def stack_build_header(auth_creds): - header = {'csrftoken': auth_creds['CSRFTOKEN'], - 'X-CSRFToken': auth_creds['CSRFTOKEN'], - 'sessionid': auth_creds['SESSIONID'], - 'Content-type': 'application/json'} - - return header - - -def stack_host_list(endpoint, header, client): - - stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}), - headers=header) - return json.loads(stack_r.json()) - - -def stack_net_list(endpoint, header, client): - - stack_r 
= client.post(endpoint, data=json.dumps({"cmd": "list host interface"}), - headers=header) - return json.loads(stack_r.json()) - - -def format_meta(hostdata, intfdata, config): - use_hostnames = config['use_hostnames'] - meta = dict(all=dict(hosts=list()), - frontends=dict(hosts=list()), - backends=dict(hosts=list()), - _meta=dict(hostvars=dict())) - - # Iterate through list of dicts of hosts and remove - # environment key as it causes conflicts - for host in hostdata: - del host['environment'] - meta['_meta']['hostvars'][host['host']] = host - meta['_meta']['hostvars'][host['host']]['interfaces'] = list() - - # @bbyhuy to improve readability in next iteration - - for intf in intfdata: - if intf['host'] in meta['_meta']['hostvars']: - meta['_meta']['hostvars'][intf['host']]['interfaces'].append(intf) - if intf['default'] is True: - meta['_meta']['hostvars'][intf['host']]['ansible_host'] = intf['ip'] - if not use_hostnames: - meta['all']['hosts'].append(intf['ip']) - if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend': - meta['backends']['hosts'].append(intf['ip']) - else: - meta['frontends']['hosts'].append(intf['ip']) - else: - meta['all']['hosts'].append(intf['host']) - if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend': - meta['backends']['hosts'].append(intf['host']) - else: - meta['frontends']['hosts'].append(intf['host']) - return meta - - -def parse_args(): - parser = argparse.ArgumentParser(description='Stacki Inventory Module') - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('--list', action='store_true', - help='List active hosts') - group.add_argument('--host', help='List details about the specific host') - - return parser.parse_args() - - -def main(): - args = parse_args() - - if StrictVersion(requests.__version__) < StrictVersion("2.4.3"): - sys.exit('requests>=2.4.3 is required for this inventory script') - - try: - config_files = CONFIG_FILES - 
config_files.append(os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml') - config = None - for cfg_file in config_files: - if os.path.isfile(cfg_file): - stream = open(cfg_file, 'r') - config = yaml.safe_load(stream) - break - if not config: - sys.stderr.write("No config file found at {0}\n".format(config_files)) - sys.exit(1) - client, auth_creds = stack_auth(config['stacki']['auth']) - header = stack_build_header(auth_creds) - host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client) - intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client) - final_meta = format_meta(host_list, intf_list, config) - print(json.dumps(final_meta, indent=4)) - except Exception as e: - sys.stderr.write('%s\n' % e.message) - sys.exit(1) - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/scripts/inventory/stacki.yml b/scripts/inventory/stacki.yml deleted file mode 100644 index 2e31c72cbc..0000000000 --- a/scripts/inventory/stacki.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -stacki: - auth: - stacki_user: admin - stacki_password: GhYgWut1hfGbbnstmbW3m-bJbeME-3EvC20rF1LHrDM - stacki_endpoint: http://192.168.200.50/stack -use_hostnames: false \ No newline at end of file diff --git a/scripts/inventory/vagrant.py b/scripts/inventory/vagrant.py deleted file mode 100755 index 74db0212c5..0000000000 --- a/scripts/inventory/vagrant.py +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env python -""" -Vagrant external inventory script. 
Automatically finds the IP of the booted vagrant vm(s), and -returns it under the host group 'vagrant' - -Example Vagrant configuration using this script: - - config.vm.provision :ansible do |ansible| - ansible.playbook = "./provision/your_playbook.yml" - ansible.inventory_path = "./provision/inventory/vagrant.py" - ansible.verbose = true - end -""" - -# Copyright (C) 2013 Mark Mandel -# 2015 Igor Khomyakov -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# -# Thanks to the spacewalk.py inventory script for giving me the basic structure -# of this. -# - -import sys -import os.path -import subprocess -import re -from paramiko import SSHConfig -from optparse import OptionParser -from collections import defaultdict -import json - -from ansible.module_utils._text import to_text -from ansible.module_utils.six.moves import StringIO - - -_group = 'vagrant' # a default group -_ssh_to_ansible = [('user', 'ansible_user'), - ('hostname', 'ansible_host'), - ('identityfile', 'ansible_ssh_private_key_file'), - ('port', 'ansible_port')] - -# Options -# ------------------------------ - -parser = OptionParser(usage="%prog [options] --list | --host ") -parser.add_option('--list', default=False, dest="list", action="store_true", - help="Produce a JSON consumable grouping of Vagrant servers for Ansible") -parser.add_option('--host', default=None, dest="host", - help="Generate additional host specific details for given host for Ansible") -(options, args) = parser.parse_args() - -# -# helper functions -# - - -# get all the ssh configs for all boxes in an array of dictionaries. 
-def get_ssh_config(): - return dict((k, get_a_ssh_config(k)) for k in list_running_boxes()) - - -# list all the running boxes -def list_running_boxes(): - - output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n') - - boxes = [] - - for line in output: - matcher = re.search(r"([^\s]+)[\s]+running \(.+", line) - if matcher: - boxes.append(matcher.group(1)) - - return boxes - - -# get the ssh config for a single box -def get_a_ssh_config(box_name): - """Gives back a map of all the machine's ssh configurations""" - - output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict') - config = SSHConfig() - config.parse(StringIO(output)) - host_config = config.lookup(box_name) - - # man 5 ssh_config: - # > It is possible to have multiple identity files ... - # > all these identities will be tried in sequence. - for id in host_config['identityfile']: - if os.path.isfile(id): - host_config['identityfile'] = id - - return dict((v, host_config[k]) for k, v in _ssh_to_ansible) - - -# List out servers that vagrant has running -# ------------------------------ -if options.list: - ssh_config = get_ssh_config() - meta = defaultdict(dict) - - for host in ssh_config: - meta['hostvars'][host] = ssh_config[host] - - print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta})) - sys.exit(0) - -# Get out the host details -# ------------------------------ -elif options.host: - print(json.dumps(get_a_ssh_config(options.host))) - sys.exit(0) - -# Print out help -# ------------------------------ -else: - parser.print_help() - sys.exit(0) diff --git a/scripts/inventory/vbox.py b/scripts/inventory/vbox.py deleted file mode 100755 index 110ead1471..0000000000 --- a/scripts/inventory/vbox.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, 
print_function) -__metaclass__ = type - -import sys -from subprocess import Popen, PIPE - -import json - - -class SetEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, set): - return list(obj) - return json.JSONEncoder.default(self, obj) - - -VBOX = "VBoxManage" - - -def get_hosts(host=None): - - returned = {} - try: - if host: - p = Popen([VBOX, 'showvminfo', host], stdout=PIPE) - else: - returned = {'all': set(), '_metadata': {}} - p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE) - except Exception: - sys.exit(1) - - hostvars = {} - prevkey = pref_k = '' - - for line in p.stdout.readlines(): - - try: - k, v = line.split(':', 1) - except Exception: - continue - - if k == '': - continue - - v = v.strip() - if k.startswith('Name'): - if v not in hostvars: - curname = v - hostvars[curname] = {} - try: # try to get network info - x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=PIPE) - ipinfo = x.stdout.read() - if 'Value' in ipinfo: - a, ip = ipinfo.split(':', 1) - hostvars[curname]['ansible_ssh_host'] = ip.strip() - except Exception: - pass - - continue - - if not host: - if k == 'Groups': - for group in v.split('/'): - if group: - if group not in returned: - returned[group] = set() - returned[group].add(curname) - returned['all'].add(curname) - continue - - pref_k = 'vbox_' + k.strip().replace(' ', '_') - if k.startswith(' '): - if prevkey not in hostvars[curname]: - hostvars[curname][prevkey] = {} - hostvars[curname][prevkey][pref_k] = v - else: - if v != '': - hostvars[curname][pref_k] = v - - prevkey = pref_k - - if not host: - returned['_metadata']['hostvars'] = hostvars - else: - returned = hostvars[host] - return returned - - -if __name__ == '__main__': - - inventory = {} - hostname = None - - if len(sys.argv) > 1: - if sys.argv[1] == "--host": - hostname = sys.argv[2] - - if hostname: - inventory = get_hosts(hostname) - else: - inventory = get_hosts() - - 
sys.stdout.write(json.dumps(inventory, indent=2, cls=SetEncoder)) diff --git a/scripts/inventory/zone.py b/scripts/inventory/zone.py deleted file mode 100755 index 9020f9ea79..0000000000 --- a/scripts/inventory/zone.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python - -# (c) 2015, Dagobert Michelsen -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from subprocess import Popen, PIPE -import sys -import json - -result = {} -result['all'] = {} - -pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True) -result['all']['hosts'] = [] -for l in pipe.stdout.readlines(): - # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared - s = l.split(':') - if s[1] != 'global': - result['all']['hosts'].append(s[1]) - -result['all']['vars'] = {} -result['all']['vars']['ansible_connection'] = 'zone' - -if len(sys.argv) == 2 and sys.argv[1] == '--list': - print(json.dumps(result)) -elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print(json.dumps({'ansible_connection': 'zone'})) -else: - sys.stderr.write("Need an argument, either --list or --host \n") diff --git a/scripts/vault/__init__.py b/scripts/vault/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/scripts/vault/azure_vault.ini b/scripts/vault/azure_vault.ini deleted file mode 100644 index d47f976201..0000000000 --- a/scripts/vault/azure_vault.ini +++ /dev/null @@ -1,10 +0,0 @@ -[azure_keyvault] # Used with Azure KeyVault -vault_name=django-keyvault -secret_name=vaultpw -secret_version=9k1e6c7367b33eac8ee241b3698009f3 - -[azure] # Used by Dynamic Inventory -group_by_resource_group=yes -group_by_location=yes -group_by_security_group=yes -group_by_tag=yes \ No newline at end of file diff --git a/scripts/vault/azure_vault.py b/scripts/vault/azure_vault.py deleted file mode 100755 index 
c27418f34f..0000000000 --- a/scripts/vault/azure_vault.py +++ /dev/null @@ -1,595 +0,0 @@ -#!/usr/bin/env python -# -# This script borrows a great deal of code from the azure_rm.py dynamic inventory script -# that is packaged with Ansible. This can be found in the Ansible GitHub project at: -# https://github.com/ansible/ansible/blob/devel/contrib/inventory/azure_rm.py -# -# The Azure Dynamic Inventory script was written by: -# Copyright (c) 2016 Matt Davis, -# Chris Houseknecht, -# Altered/Added for Vault functionality: -# Austin Hobbs, GitHub: @OxHobbs - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Ansible Vault Password with Azure Key Vault Secret Script -========================================================= -This script is designed to be used with Ansible Vault. It provides the -capability to provide this script as the password file to the ansible-vault -command. This script uses the Azure Python SDK. For instruction on installing -the Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/ - -Authentication --------------- -The order of precedence is command line arguments, environment variables, -and finally the [default] profile found in ~/.azure/credentials for all -authentication parameters. - -If using a credentials file, it should be an ini formatted file with one or -more sections, which we refer to as profiles. The script looks for a -[default] section, if a profile is not specified either on the command line -or with an environment variable. The keys in a profile will match the -list of command line arguments below. - -For command line arguments and environment variables specify a profile found -in your ~/.azure/credentials file, or a service principal or Active Directory -user. 
- -Command line arguments: - - profile - - client_id - - secret - - subscription_id - - tenant - - ad_user - - password - - cloud_environment - - adfs_authority_url - - vault-name - - secret-name - - secret-version - -Environment variables: - - AZURE_PROFILE - - AZURE_CLIENT_ID - - AZURE_SECRET - - AZURE_SUBSCRIPTION_ID - - AZURE_TENANT - - AZURE_AD_USER - - AZURE_PASSWORD - - AZURE_CLOUD_ENVIRONMENT - - AZURE_ADFS_AUTHORITY_URL - - AZURE_VAULT_NAME - - AZURE_VAULT_SECRET_NAME - - AZURE_VAULT_SECRET_VERSION - - -Vault ------ - -The order of precedence of Azure Key Vault Secret information is the same. -Command line arguments, environment variables, and finally the azure_vault.ini -file with the [azure_keyvault] section. - -azure_vault.ini (or azure_rm.ini if merged with Azure Dynamic Inventory Script) ------------------------------------------------------------------------------- -As mentioned above, you can control execution using environment variables or a .ini file. A sample -azure_vault.ini is included. The name of the .ini file is the basename of the inventory script (in this case -'azure_vault') with a .ini extension. It also assumes the .ini file is alongside the script. To specify -a different path for the .ini file, define the AZURE_VAULT_INI_PATH environment variable: - - export AZURE_VAULT_INI_PATH=/path/to/custom.ini - or - export AZURE_VAULT_INI_PATH=[same path as azure_rm.ini if merged] - - __NOTE__: If using the azure_rm.py dynamic inventory script, it is possible to use the same .ini - file for both the azure_rm dynamic inventory and the azure_vault password file. Simply add a section - named [azure_keyvault] to the ini file with the following properties: vault_name, secret_name and - secret_version. 
- -Examples: ---------- - Validate the vault_pw script with Python - $ python azure_vault.py -n mydjangovault -s vaultpw -v 6b6w7f7252b44eac8ee726b3698009f3 - $ python azure_vault.py --vault-name 'mydjangovault' --secret-name 'vaultpw' \ - --secret-version 6b6w7f7252b44eac8ee726b3698009f3 - - Use with a playbook - $ ansible-playbook -i ./azure_rm.py my_playbook.yml --limit galaxy-qa --vault-password-file ./azure_vault.py - - -Insecure Platform Warning -------------------------- -If you receive InsecurePlatformWarning from urllib3, install the -requests security packages: - - pip install requests[security] - - -author: - - Chris Houseknecht (@chouseknecht) - - Matt Davis (@nitzmahone) - - Austin Hobbs (@OxHobbs) - -Company: Ansible by Red Hat, Microsoft - -Version: 0.1.0 -''' - -import argparse -import os -import re -import sys -import inspect -from azure.keyvault import KeyVaultClient - -from ansible.module_utils.six.moves import configparser as cp - -from os.path import expanduser -import ansible.module_utils.six.moves.urllib.parse as urlparse - -HAS_AZURE = True -HAS_AZURE_EXC = None -HAS_AZURE_CLI_CORE = True -CLIError = None - -try: - from msrestazure.azure_active_directory import AADTokenCredentials - from msrestazure.azure_exceptions import CloudError - from msrestazure.azure_active_directory import MSIAuthentication - from msrestazure import azure_cloud - from azure.mgmt.compute import __version__ as azure_compute_version - from azure.common import AzureMissingResourceHttpError, AzureHttpError - from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials - from azure.mgmt.network import NetworkManagementClient - from azure.mgmt.resource.resources import ResourceManagementClient - from azure.mgmt.resource.subscriptions import SubscriptionClient - from azure.mgmt.compute import ComputeManagementClient - from adal.authentication_context import AuthenticationContext -except ImportError as exc: - HAS_AZURE_EXC = exc - HAS_AZURE = False - 
-try: - from azure.cli.core.util import CLIError - from azure.common.credentials import get_azure_cli_credentials, get_cli_profile - from azure.common.cloud import get_cli_active_cloud -except ImportError: - HAS_AZURE_CLI_CORE = False - CLIError = Exception - -try: - from ansible.release import __version__ as ansible_version -except ImportError: - ansible_version = 'unknown' - - -AZURE_CREDENTIAL_ENV_MAPPING = dict( - profile='AZURE_PROFILE', - subscription_id='AZURE_SUBSCRIPTION_ID', - client_id='AZURE_CLIENT_ID', - secret='AZURE_SECRET', - tenant='AZURE_TENANT', - ad_user='AZURE_AD_USER', - password='AZURE_PASSWORD', - cloud_environment='AZURE_CLOUD_ENVIRONMENT', - adfs_authority_url='AZURE_ADFS_AUTHORITY_URL' -) - -AZURE_VAULT_SETTINGS = dict( - vault_name='AZURE_VAULT_NAME', - secret_name='AZURE_VAULT_SECRET_NAME', - secret_version='AZURE_VAULT_SECRET_VERSION', -) - -AZURE_MIN_VERSION = "2.0.0" -ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version) - - -class AzureRM(object): - - def __init__(self, args): - self._args = args - self._cloud_environment = None - self._compute_client = None - self._resource_client = None - self._network_client = None - self._adfs_authority_url = None - self._vault_client = None - self._resource = None - - self.debug = False - if args.debug: - self.debug = True - - self.credentials = self._get_credentials(args) - if not self.credentials: - self.fail("Failed to get credentials. 
Either pass as parameters, set environment variables, " - "or define a profile in ~/.azure/credentials.") - - # if cloud_environment specified, look up/build Cloud object - raw_cloud_env = self.credentials.get('cloud_environment') - if not raw_cloud_env: - self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default - else: - # try to look up "well-known" values via the name attribute on azure_cloud members - all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)] - matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env] - if len(matched_clouds) == 1: - self._cloud_environment = matched_clouds[0] - elif len(matched_clouds) > 1: - self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format( - raw_cloud_env)) - else: - if not urlparse.urlparse(raw_cloud_env).scheme: - self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format( - [x.name for x in all_clouds])) - try: - self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env) - except Exception as e: - self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message)) - - if self.credentials.get('subscription_id', None) is None: - self.fail("Credentials did not include a subscription_id value.") - self.log("setting subscription_id") - self.subscription_id = self.credentials['subscription_id'] - - # get authentication authority - # for adfs, user could pass in authority or not. 
- # for others, use default authority from cloud environment - if self.credentials.get('adfs_authority_url'): - self._adfs_authority_url = self.credentials.get('adfs_authority_url') - else: - self._adfs_authority_url = self._cloud_environment.endpoints.active_directory - - # get resource from cloud environment - self._resource = self._cloud_environment.endpoints.active_directory_resource_id - - if self.credentials.get('credentials'): - self.azure_credentials = self.credentials.get('credentials') - elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'): - self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'], - secret=self.credentials['secret'], - tenant=self.credentials['tenant'], - cloud_environment=self._cloud_environment) - - elif self.credentials.get('ad_user') is not None and \ - self.credentials.get('password') is not None and \ - self.credentials.get('client_id') is not None and \ - self.credentials.get('tenant') is not None: - - self.azure_credentials = self.acquire_token_with_username_password( - self._adfs_authority_url, - self._resource, - self.credentials['ad_user'], - self.credentials['password'], - self.credentials['client_id'], - self.credentials['tenant']) - - elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None: - tenant = self.credentials.get('tenant') - if not tenant: - tenant = 'common' - self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], - self.credentials['password'], - tenant=tenant, - cloud_environment=self._cloud_environment) - - else: - self.fail("Failed to authenticate with provided credentials. Some attributes were missing. 
" - "Credentials must include client_id, secret and tenant or ad_user and password, or " - "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, " - "or be logged in using AzureCLI.") - - def log(self, msg): - if self.debug: - print(msg + u'\n') - - def fail(self, msg): - raise Exception(msg) - - def _get_profile(self, profile="default"): - path = expanduser("~") - path += "/.azure/credentials" - try: - config = cp.ConfigParser() - config.read(path) - except Exception as exc: - self.fail("Failed to access {0}. Check that the file exists and you have read " - "access. {1}".format(path, str(exc))) - credentials = dict() - for key in AZURE_CREDENTIAL_ENV_MAPPING: - try: - credentials[key] = config.get(profile, key, raw=True) - except Exception: - pass - - if credentials.get('client_id') is not None or credentials.get('ad_user') is not None: - return credentials - - return None - - def _get_env_credentials(self): - env_credentials = dict() - for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): - env_credentials[attribute] = os.environ.get(env_variable, None) - - if env_credentials['profile'] is not None: - credentials = self._get_profile(env_credentials['profile']) - return credentials - - if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None: - return env_credentials - - return None - - def _get_azure_cli_credentials(self): - credentials, subscription_id = get_azure_cli_credentials() - cloud_environment = get_cli_active_cloud() - - cli_credentials = { - 'credentials': credentials, - 'subscription_id': subscription_id, - 'cloud_environment': cloud_environment - } - return cli_credentials - - def _get_msi_credentials(self, subscription_id_param=None): - credentials = MSIAuthentication() - try: - # try to get the subscription in MSI to test whether MSI is enabled - subscription_client = SubscriptionClient(credentials) - subscription = next(subscription_client.subscriptions.list()) - 
subscription_id = str(subscription.subscription_id) - return { - 'credentials': credentials, - 'subscription_id': subscription_id_param or subscription_id - } - except Exception as exc: - return None - - def _get_credentials(self, params): - # Get authentication credentials. - # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials. - - self.log('Getting credentials') - - arg_credentials = dict() - for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): - arg_credentials[attribute] = getattr(params, attribute) - - # try module params - if arg_credentials['profile'] is not None: - self.log('Retrieving credentials with profile parameter.') - credentials = self._get_profile(arg_credentials['profile']) - return credentials - - if arg_credentials['client_id'] is not None: - self.log('Received credentials from parameters.') - return arg_credentials - - if arg_credentials['ad_user'] is not None: - self.log('Received credentials from parameters.') - return arg_credentials - - # try environment - env_credentials = self._get_env_credentials() - if env_credentials: - self.log('Received credentials from env.') - return env_credentials - - # try default profile from ~./azure/credentials - default_credentials = self._get_profile() - if default_credentials: - self.log('Retrieved default profile credentials from ~/.azure/credentials.') - return default_credentials - - msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id')) - if msi_credentials: - self.log('Retrieved credentials from MSI.') - return msi_credentials - - try: - if HAS_AZURE_CLI_CORE: - self.log('Retrieving credentials from AzureCLI profile') - cli_credentials = self._get_azure_cli_credentials() - return cli_credentials - except CLIError as ce: - self.log('Error getting AzureCLI profile credentials - {0}'.format(ce)) - - return None - - def acquire_token_with_username_password(self, authority, resource, username, password, client_id, 
tenant): - authority_uri = authority - - if tenant is not None: - authority_uri = authority + '/' + tenant - - context = AuthenticationContext(authority_uri) - token_response = context.acquire_token_with_username_password(resource, username, password, client_id) - return AADTokenCredentials(token_response) - - def _register(self, key): - try: - # We have to perform the one-time registration here. Otherwise, we receive an error the first - # time we attempt to use the requested client. - resource_client = self.rm_client - resource_client.providers.register(key) - except Exception as exc: - self.log("One-time registration of {0} failed - {1}".format(key, str(exc))) - self.log("You might need to register {0} using an admin account".format(key)) - self.log(("To register a provider using the Python CLI: " - "https://docs.microsoft.com/azure/azure-resource-manager/" - "resource-manager-common-deployment-errors#noregisteredproviderfound")) - - def get_mgmt_svc_client(self, client_type, base_url, api_version): - client = client_type(self.azure_credentials, - self.subscription_id, - base_url=base_url, - api_version=api_version) - client.config.add_user_agent(ANSIBLE_USER_AGENT) - return client - - def get_vault_client(self): - return KeyVaultClient(self.azure_credentials) - - def get_vault_suffix(self): - return self._cloud_environment.suffixes.keyvault_dns - - @property - def network_client(self): - self.log('Getting network client') - if not self._network_client: - self._network_client = self.get_mgmt_svc_client(NetworkManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-06-01') - self._register('Microsoft.Network') - return self._network_client - - @property - def rm_client(self): - self.log('Getting resource manager client') - if not self._resource_client: - self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-05-10') - return self._resource_client - - 
@property - def compute_client(self): - self.log('Getting compute client') - if not self._compute_client: - self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient, - self._cloud_environment.endpoints.resource_manager, - '2017-03-30') - self._register('Microsoft.Compute') - return self._compute_client - - @property - def vault_client(self): - self.log('Getting the Key Vault client') - if not self._vault_client: - self._vault_client = self.get_vault_client() - - return self._vault_client - - -class AzureKeyVaultSecret: - - def __init__(self): - - self._args = self._parse_cli_args() - - try: - rm = AzureRM(self._args) - except Exception as e: - sys.exit("{0}".format(str(e))) - - self._get_vault_settings() - - if self._args.vault_name: - self.vault_name = self._args.vault_name - - if self._args.secret_name: - self.secret_name = self._args.secret_name - - if self._args.secret_version: - self.secret_version = self._args.secret_version - - self._vault_suffix = rm.get_vault_suffix() - self._vault_client = rm.vault_client - - print(self.get_password_from_vault()) - - def _parse_cli_args(self): - parser = argparse.ArgumentParser( - description='Obtain the vault password used to secure your Ansilbe secrets' - ) - parser.add_argument('-n', '--vault-name', action='store', help='Name of Azure Key Vault') - parser.add_argument('-s', '--secret-name', action='store', - help='Name of the secret stored in Azure Key Vault') - parser.add_argument('-v', '--secret-version', action='store', - help='Version of the secret to be retrieved') - parser.add_argument('--debug', action='store_true', default=False, - help='Send the debug messages to STDOUT') - parser.add_argument('--profile', action='store', - help='Azure profile contained in ~/.azure/credentials') - parser.add_argument('--subscription_id', action='store', - help='Azure Subscription Id') - parser.add_argument('--client_id', action='store', - help='Azure Client Id ') - parser.add_argument('--secret', action='store', 
- help='Azure Client Secret') - parser.add_argument('--tenant', action='store', - help='Azure Tenant Id') - parser.add_argument('--ad_user', action='store', - help='Active Directory User') - parser.add_argument('--password', action='store', - help='password') - parser.add_argument('--adfs_authority_url', action='store', - help='Azure ADFS authority url') - parser.add_argument('--cloud_environment', action='store', - help='Azure Cloud Environment name or metadata discovery URL') - - return parser.parse_args() - - def get_password_from_vault(self): - vault_url = 'https://{0}{1}'.format(self.vault_name, self._vault_suffix) - secret = self._vault_client.get_secret(vault_url, self.secret_name, self.secret_version) - return secret.value - - def _get_vault_settings(self): - env_settings = self._get_vault_env_settings() - if None not in set(env_settings.values()): - for key in AZURE_VAULT_SETTINGS: - setattr(self, key, env_settings.get(key, None)) - else: - file_settings = self._load_vault_settings() - if not file_settings: - return - - for key in AZURE_VAULT_SETTINGS: - if file_settings.get(key): - setattr(self, key, file_settings.get(key)) - - def _get_vault_env_settings(self): - env_settings = dict() - for attribute, env_variable in AZURE_VAULT_SETTINGS.items(): - env_settings[attribute] = os.environ.get(env_variable, None) - return env_settings - - def _load_vault_settings(self): - basename = os.path.splitext(os.path.basename(__file__))[0] - default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini')) - path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_VAULT_INI_PATH', default_path))) - config = None - settings = None - try: - config = cp.ConfigParser() - config.read(path) - except Exception: - pass - - if config is not None: - settings = dict() - for key in AZURE_VAULT_SETTINGS: - try: - settings[key] = config.get('azure_keyvault', key, raw=True) - except Exception: - pass - - return settings - - -def main(): - if not HAS_AZURE: - 
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format( - AZURE_MIN_VERSION, HAS_AZURE_EXC)) - - AzureKeyVaultSecret() - - -if __name__ == '__main__': - main() diff --git a/scripts/vault/vault-keyring-client.py b/scripts/vault/vault-keyring-client.py deleted file mode 100755 index 8332b228f9..0000000000 --- a/scripts/vault/vault-keyring-client.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# (c) 2014, Matt Martz -# (c) 2016, Justin Mayer -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# ============================================================================= -# -# This script is to be used with ansible-vault's --vault-id arg -# to retrieve the vault password via your OS's native keyring application. -# -# This file *MUST* be saved with executable permissions. Otherwise, Ansible -# will try to parse as a password file and display: "ERROR! Decryption failed" -# -# The `keyring` Python module is required: https://pypi.org/project/keyring/ -# -# By default, this script will store the specified password in the keyring of -# the user that invokes the script. To specify a user keyring, add a [vault] -# section to your ansible.cfg file with a 'username' option. Example: -# -# [vault] -# username = 'ansible-vault' -# -# In useage like: -# -# ansible-vault --vault-id keyring_id@contrib/vault/vault-keyring-client.py view some_encrypted_file -# -# --vault-id will call this script like: -# -# contrib/vault/vault-keyring-client.py --vault-id keyring_id -# -# That will retrieve the password from users keyring for the -# keyring service 'keyring_id'. 
The equivalent of: -# -# keyring get keyring_id $USER -# -# If no vault-id name is specified to ansible command line, the vault-keyring-client.py -# script will be called without a '--vault-id' and will default to the keyring service 'ansible' -# This is equivalent to: -# -# keyring get ansible $USER -# -# You can configure the `vault_password_file` option in ansible.cfg: -# -# [defaults] -# ... -# vault_password_file = /path/to/vault-keyring-client.py -# ... -# -# To set your password, `cd` to your project directory and run: -# -# # will use default keyring service / vault-id of 'ansible' -# /path/to/vault-keyring-client.py --set -# -# or to specify the keyring service / vault-id of 'my_ansible_secret': -# -# /path/to/vault-keyring-client.py --vault-id my_ansible_secret --set -# -# If you choose not to configure the path to `vault_password_file` in -# ansible.cfg, your `ansible-playbook` command might look like: -# -# ansible-playbook --vault-id=keyring_id@/path/to/vault-keyring-client.py site.yml - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import argparse -import sys -import getpass -import keyring - -from ansible.config.manager import ConfigManager - -KEYNAME_UNKNOWN_RC = 2 - - -def build_arg_parser(): - parser = argparse.ArgumentParser(description='Get a vault password from user keyring') - - parser.add_argument('--vault-id', action='store', default=None, - dest='vault_id', - help='name of the vault secret to get from keyring') - parser.add_argument('--username', action='store', default=None, - help='the username whose keyring is queried') - parser.add_argument('--set', action='store_true', default=False, - dest='set_password', - help='set the password instead of getting it') - return parser - - -def main(): - config_manager = ConfigManager() - username = config_manager.data.get_setting('vault.username') - if not username: - username = getpass.getuser() - - keyname = 
config_manager.data.get_setting('vault.keyname') - if not keyname: - keyname = 'ansible' - - arg_parser = build_arg_parser() - args = arg_parser.parse_args() - - username = args.username or username - keyname = args.vault_id or keyname - - # print('username: %s keyname: %s' % (username, keyname)) - - if args.set_password: - intro = 'Storing password in "{}" user keyring using key name: {}\n' - sys.stdout.write(intro.format(username, keyname)) - password = getpass.getpass() - confirm = getpass.getpass('Confirm password: ') - if password == confirm: - keyring.set_password(keyname, username, password) - else: - sys.stderr.write('Passwords do not match\n') - sys.exit(1) - else: - secret = keyring.get_password(keyname, username) - if secret is None: - sys.stderr.write('vault-keyring-client could not find key="%s" for user="%s" via backend="%s"\n' % - (keyname, username, keyring.get_keyring().name)) - sys.exit(KEYNAME_UNKNOWN_RC) - - # print('secret: %s' % secret) - sys.stdout.write('%s\n' % secret) - - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/scripts/vault/vault-keyring.py b/scripts/vault/vault-keyring.py deleted file mode 100755 index 45188b122d..0000000000 --- a/scripts/vault/vault-keyring.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# (c) 2014, Matt Martz -# (c) 2016, Justin Mayer -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# ============================================================================= -# -# This script is to be used with vault_password_file or --vault-password-file -# to retrieve the vault password via your OS's native keyring application. -# -# This file *MUST* be saved with executable permissions. Otherwise, Ansible -# will try to parse as a password file and display: "ERROR! 
Decryption failed" -# -# The `keyring` Python module is required: https://pypi.org/project/keyring/ -# -# By default, this script will store the specified password in the keyring of -# the user that invokes the script. To specify a user keyring, add a [vault] -# section to your ansible.cfg file with a 'username' option. Example: -# -# [vault] -# username = 'ansible-vault' -# -# Another optional setting is for the key name, which allows you to use this -# script to handle multiple project vaults with different passwords: -# -# [vault] -# keyname = 'ansible-vault-yourproject' -# -# You can configure the `vault_password_file` option in ansible.cfg: -# -# [defaults] -# ... -# vault_password_file = /path/to/vault-keyring.py -# ... -# -# To set your password, `cd` to your project directory and run: -# -# python /path/to/vault-keyring.py set -# -# If you choose not to configure the path to `vault_password_file` in -# ansible.cfg, your `ansible-playbook` command might look like: -# -# ansible-playbook --vault-password-file=/path/to/vault-keyring.py site.yml - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import getpass -import keyring - -from ansible.config.manager import ConfigManager, get_ini_config_value - - -def main(): - config = ConfigManager() - username = get_ini_config_value( - config._parsers[config._config_file], - dict(section='vault', key='username') - ) or getpass.getuser() - - keyname = get_ini_config_value( - config._parsers[config._config_file], - dict(section='vault', key='keyname') - ) or 'ansible' - - if len(sys.argv) == 2 and sys.argv[1] == 'set': - intro = 'Storing password in "{}" user keyring using key name: {}\n' - sys.stdout.write(intro.format(username, keyname)) - password = getpass.getpass() - confirm = getpass.getpass('Confirm password: ') - if password == confirm: - keyring.set_password(keyname, username, password) - else: - sys.stderr.write('Passwords do not match\n') - sys.exit(1) - 
else: - sys.stdout.write('{0}\n'.format(keyring.get_password(keyname, - username))) - - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/tests/integration/targets/script_inventory_foreman/aliases b/tests/integration/targets/script_inventory_foreman/aliases deleted file mode 100644 index a965d6e836..0000000000 --- a/tests/integration/targets/script_inventory_foreman/aliases +++ /dev/null @@ -1,3 +0,0 @@ -shippable/cloud/group1 -cloud/foreman -needs/file/scripts/inventory/foreman.py diff --git a/tests/integration/targets/script_inventory_foreman/foreman.sh b/tests/integration/targets/script_inventory_foreman/foreman.sh deleted file mode 100755 index 1b3e70fb1a..0000000000 --- a/tests/integration/targets/script_inventory_foreman/foreman.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash -# Wrapper to use the correct Python interpreter and support code coverage. - -REL_SCRIPT="../../../../scripts/inventory/foreman.py" -ABS_SCRIPT="$("${ANSIBLE_TEST_PYTHON_INTERPRETER}" -c "import os; print(os.path.abspath('${REL_SCRIPT}'))")" - -# Make sure output written to current directory ends up in the temp dir. 
-cd "${OUTPUT_DIR}" - -python.py "${ABS_SCRIPT}" "$@" diff --git a/tests/integration/targets/script_inventory_foreman/runme.sh b/tests/integration/targets/script_inventory_foreman/runme.sh deleted file mode 100755 index a9c94fbe7d..0000000000 --- a/tests/integration/targets/script_inventory_foreman/runme.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -export FOREMAN_HOST="${FOREMAN_HOST:-localhost}" -export FOREMAN_PORT="${FOREMAN_PORT:-8080}" -export FOREMAN_INI_PATH="${OUTPUT_DIR}/foreman.ini" - - -############################################ -# SMOKETEST WITH SIMPLE INI -############################################ - -cat > "$FOREMAN_INI_PATH" < "$FOREMAN_INI_PATH" < 900 }}" From db713bd0f5c88b2ffbc27d5c4eb123b35972af8f Mon Sep 17 00:00:00 2001 From: Anup Chenthamarakshan Date: Sun, 20 Jun 2021 03:42:19 -0700 Subject: [PATCH 0389/3093] proxmox_kvm: Fix ZFS device string parsing (#2841) ZFS-backed block devices may contain just the bare device name and not have extra options like `,size=foo`, `,format=qcow2` etc. This breaks an assumption in existing regex (which expects a comma). Support such device strings and add a couple of testcases to validate. 
--- .../fragments/2841-proxmox_kvm_zfs_devstr.yml | 4 ++++ plugins/modules/cloud/misc/proxmox_kvm.py | 20 ++++++++++--------- .../modules/cloud/misc/test_proxmox_kvm.py | 17 ++++++++++++++++ 3 files changed, 32 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/2841-proxmox_kvm_zfs_devstr.yml create mode 100644 tests/unit/plugins/modules/cloud/misc/test_proxmox_kvm.py diff --git a/changelogs/fragments/2841-proxmox_kvm_zfs_devstr.yml b/changelogs/fragments/2841-proxmox_kvm_zfs_devstr.yml new file mode 100644 index 0000000000..7b61f175c6 --- /dev/null +++ b/changelogs/fragments/2841-proxmox_kvm_zfs_devstr.yml @@ -0,0 +1,4 @@ +bugfixes: + - "proxmox_kvm - fix parsing of Proxmox VM information with device info not containing + a comma, like disks backed by ZFS zvols + (https://github.com/ansible-collections/community.general/issues/2840)." diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index a664279e57..0fb486600c 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -818,23 +818,25 @@ def get_vminfo(module, proxmox, node, vmid, **kwargs): # Split information by type re_net = re.compile(r'net[0-9]') re_dev = re.compile(r'(virtio|ide|scsi|sata)[0-9]') - for k, v in kwargs.items(): + for k in kwargs.keys(): if re_net.match(k): - interface = k - k = vm[k] - k = re.search('=(.*?),', k).group(1) - mac[interface] = k + mac[k] = parse_mac(vm[k]) elif re_dev.match(k): - device = k - k = vm[k] - k = re.search('(.*?),', k).group(1) - devices[device] = k + devices[k] = parse_dev(vm[k]) results['mac'] = mac results['devices'] = devices results['vmid'] = int(vmid) +def parse_mac(netstr): + return re.search('=(.*?),', netstr).group(1) + + +def parse_dev(devstr): + return re.search('(.*?)(,|$)', devstr).group(1) + + def settings(proxmox, vmid, node, **kwargs): proxmox_node = proxmox.nodes(node) diff --git 
a/tests/unit/plugins/modules/cloud/misc/test_proxmox_kvm.py b/tests/unit/plugins/modules/cloud/misc/test_proxmox_kvm.py new file mode 100644 index 0000000000..d486000ed1 --- /dev/null +++ b/tests/unit/plugins/modules/cloud/misc/test_proxmox_kvm.py @@ -0,0 +1,17 @@ +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.community.general.plugins.modules.cloud.misc.proxmox_kvm import parse_dev, parse_mac + + +def test_parse_mac(): + assert parse_mac('virtio=00:11:22:AA:BB:CC,bridge=vmbr0,firewall=1') == '00:11:22:AA:BB:CC' + + +def test_parse_dev(): + assert parse_dev('local-lvm:vm-1000-disk-0,format=qcow2') == 'local-lvm:vm-1000-disk-0' + assert parse_dev('local-lvm:vm-101-disk-1,size=8G') == 'local-lvm:vm-101-disk-1' + assert parse_dev('local-zfs:vm-1001-disk-0') == 'local-zfs:vm-1001-disk-0' From 2768eda89573e8ec82ddc046f9d754bc131dfa3e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 20 Jun 2021 23:07:45 +1200 Subject: [PATCH 0390/3093] serverless - deprecating unused param (#2845) * deprecating unused param * added changelog fragment * deprecate param in arg_spec * Update plugins/modules/cloud/misc/serverless.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../2845-serverless-deprecate-functions-param.yml | 2 ++ plugins/modules/cloud/misc/serverless.py | 11 +++-------- 2 files changed, 5 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2845-serverless-deprecate-functions-param.yml diff --git a/changelogs/fragments/2845-serverless-deprecate-functions-param.yml b/changelogs/fragments/2845-serverless-deprecate-functions-param.yml new file mode 100644 index 0000000000..6565b18974 --- /dev/null +++ b/changelogs/fragments/2845-serverless-deprecate-functions-param.yml @@ -0,0 +1,2 @@ 
+deprecated_features: + - serverless - deprecating parameter ``functions`` because it was not used in the code (https://github.com/ansible-collections/community.general/pull/2845). diff --git a/plugins/modules/cloud/misc/serverless.py b/plugins/modules/cloud/misc/serverless.py index 1b2f8b62a6..878621c38c 100644 --- a/plugins/modules/cloud/misc/serverless.py +++ b/plugins/modules/cloud/misc/serverless.py @@ -38,6 +38,7 @@ options: description: - A list of specific functions to deploy. - If this is not provided, all functions in the service will be deployed. + - Deprecated parameter, it will be removed in community.general 5.0.0. type: list elements: str default: [] @@ -79,13 +80,6 @@ EXAMPLES = r''' service_path: '{{ project_dir }}' state: present -- name: Deploy specific functions - community.general.serverless: - service_path: '{{ project_dir }}' - functions: - - my_func_one - - my_func_two - - name: Deploy a project, then pull its resource list back into Ansible community.general.serverless: stage: dev @@ -165,7 +159,8 @@ def main(): argument_spec=dict( service_path=dict(type='path', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), - functions=dict(type='list', elements='str'), + functions=dict(type='list', elements='str', + removed_in_version="5.0.0", removed_from_collection="community.general"), region=dict(type='str', default=''), stage=dict(type='str', default=''), deploy=dict(type='bool', default=True), From 519411e026760477113de5332ba6f514e9599108 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 20 Jun 2021 23:17:58 +1200 Subject: [PATCH 0391/3093] ali_instance_info - marked parameters for deprecation in c.g. 5.0.0 (#2844) * marked parameters for deprecation in c.g. 
5.0.0 * added changelog fragment * deprecate params in arg_spec * doc adjusment per PR --- .../2844-ali_instance_info-deprecate-params.yml | 2 ++ plugins/modules/cloud/alicloud/ali_instance_info.py | 12 ++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2844-ali_instance_info-deprecate-params.yml diff --git a/changelogs/fragments/2844-ali_instance_info-deprecate-params.yml b/changelogs/fragments/2844-ali_instance_info-deprecate-params.yml new file mode 100644 index 0000000000..a37555edcd --- /dev/null +++ b/changelogs/fragments/2844-ali_instance_info-deprecate-params.yml @@ -0,0 +1,2 @@ +deprecated_features: + - ali_instance_info - marked removal version of deprecated parameters ``availability_zone`` and ``instance_names`` (https://github.com/ansible-collections/community.general/issues/2429). diff --git a/plugins/modules/cloud/alicloud/ali_instance_info.py b/plugins/modules/cloud/alicloud/ali_instance_info.py index 8a3b8aeed0..23665bbcad 100644 --- a/plugins/modules/cloud/alicloud/ali_instance_info.py +++ b/plugins/modules/cloud/alicloud/ali_instance_info.py @@ -35,12 +35,14 @@ description: options: availability_zone: description: - - (Deprecated) Aliyun availability zone ID in which to launch the instance. Please use filter item 'zone_id' instead. + - Aliyun availability zone ID in which to launch the instance. + - Deprecated parameter, it will be removed in community.general 5.0.0. Please use filter item I(zone_id) instead. aliases: ['alicloud_zone'] type: str instance_names: description: - - (Deprecated) A list of ECS instance names. Please use filter item 'instance_name' instead. + - A list of ECS instance names. + - Deprecated parameter, it will be removed in community.general 5.0.0. Please use filter item I(instance_name) instead. 
aliases: ["names"] type: list elements: str @@ -374,8 +376,10 @@ except ImportError: def main(): argument_spec = ecs_argument_spec() argument_spec.update(dict( - availability_zone=dict(aliases=['alicloud_zone']), - instance_ids=dict(type='list', elements='str', aliases=['ids']), + availability_zone=dict(aliases=['alicloud_zone'], + removed_in_version="5.0.0", removed_from_collection="community.general"), + instance_ids=dict(type='list', elements='str', aliases=['ids'], + removed_in_version="5.0.0", removed_from_collection="community.general"), instance_names=dict(type='list', elements='str', aliases=['names']), name_prefix=dict(type='str'), tags=dict(type='dict', aliases=['instance_tags']), From ce35d8809474d2a1dee1f2cefd5416c491b97594 Mon Sep 17 00:00:00 2001 From: Stanislav German-Evtushenko Date: Mon, 21 Jun 2021 16:53:03 +0900 Subject: [PATCH 0392/3093] gem_module: Add bindir option (#2837) * gem_module: Add bindir option This option allows to specify directory to install executables, e.g. `/home/user/bin` or `/home/user/.local/bin`. This comes especially handy when used with user_install option as the default path of executables is not in PATH. * Update changelogs/fragments/gem_module_add_bindir_option.yml Co-authored-by: Ajpantuso * gem_module: Integration tests for bindir option * gem_module: Update Integration tests for bindir option * gem_module: Update Integration tests for bindir option Make sure gist is not installed system-wide prior the tests * Revert "gem_module: Update Integration tests for bindir option" This reverts commit 04eec6db27aa90d2b23ead7941aeb5889a7c6437. * Do not check "install_gem_result is changed" for ansible develop on openSUSE * Revert "Do not check "install_gem_result is changed" for ansible develop on openSUSE" This reverts commit 48ecb27889a6d86b91eb70a5b1432a5649846b99. * gem_module: Use --norc to avoid surprises Run install and uninstall actions with `--norc`. This way ansible has more control over the way gems are installed. 
* Revert "gem_module: Use --norc to avoid surprises" This reverts commit 66f40bcfe684ba306759a0fdc028a21ba73ba1dd. * gem_module: bindir - Ignore openSUSE Leap * Update plugins/modules/packaging/language/gem.py Co-authored-by: Felix Fontein * gem_module: Use --norc to avoid surprises Run install and uninstall actions with `--norc` when supported (rubygems >= 2.5.2). This way ansible has more control over the way gems are installed. * Try distutils.version instead of packaging * ver is an list, not string * ver is not list either but tuple * Update changelogs/fragments/gem_module_add_bindir_option.yml Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> * ver can be None (when can this happen?) * gem: Add norc option * Apply suggestions from code review Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/gem.py Co-authored-by: Felix Fontein * Use tuples to compare versions * Apply suggestions from code review Co-authored-by: Amin Vakil * Update plugins/modules/packaging/language/gem.py Co-authored-by: Amin Vakil * lost norc option check is back * Move handling norc option to separate function * cosmetic * fix for the previos commit * Apply suggestions from code review Co-authored-by: Felix Fontein * Cache result of get_rubygems_version Co-authored-by: Ajpantuso Co-authored-by: Felix Fontein Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Amin Vakil --- .../gem_module_add_bindir_option.yml | 3 ++ plugins/modules/packaging/language/gem.py | 47 ++++++++++++++++--- tests/integration/targets/gem/tasks/main.yml | 41 ++++++++++++++++ 3 files changed, 84 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/gem_module_add_bindir_option.yml diff --git a/changelogs/fragments/gem_module_add_bindir_option.yml b/changelogs/fragments/gem_module_add_bindir_option.yml new file mode 100644 index 0000000000..f47b6deb27 --- /dev/null +++ 
b/changelogs/fragments/gem_module_add_bindir_option.yml @@ -0,0 +1,3 @@ +minor_changes: + - gem - add ``bindir`` option to specify an installation path for executables such as ``/home/user/bin`` or ``/home/user/.local/bin`` (https://github.com/ansible-collections/community.general/pull/2837). + - gem - add ``norc`` option to avoid loading any ``.gemrc`` file (https://github.com/ansible-collections/community.general/pull/2837). diff --git a/plugins/modules/packaging/language/gem.py b/plugins/modules/packaging/language/gem.py index 516c9b0a41..c7ccdec498 100644 --- a/plugins/modules/packaging/language/gem.py +++ b/plugins/modules/packaging/language/gem.py @@ -62,6 +62,19 @@ options: These gems will be independent from the global installed ones. Specifying this requires user_install to be false. required: false + bindir: + type: path + description: + - Install executables into a specific directory. + version_added: 3.3.0 + norc: + type: bool + default: false + description: + - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2. + - "The current default value will be deprecated in community.general 4.0.0: if the value is not explicitly specified, a deprecation message will be shown." + - From community.general 5.0.0 on, the default will be changed to C(true). + version_added: 3.3.0 env_shebang: description: - Rewrite the shebang line on installed scripts to use /usr/bin/env. 
@@ -134,6 +147,9 @@ def get_rubygems_path(module): def get_rubygems_version(module): + if hasattr(get_rubygems_version, "ver"): + return get_rubygems_version.ver + cmd = get_rubygems_path(module) + ['--version'] (rc, out, err) = module.run_command(cmd, check_rc=True) @@ -141,7 +157,10 @@ def get_rubygems_version(module): if not match: return None - return tuple(int(x) for x in match.groups()) + ver = tuple(int(x) for x in match.groups()) + get_rubygems_version.ver = ver + + return ver def get_rubygems_environ(module): @@ -154,6 +173,7 @@ def get_installed_versions(module, remote=False): cmd = get_rubygems_path(module) cmd.append('query') + cmd.extend(common_opts(module)) if remote: cmd.append('--remote') if module.params['repository']: @@ -188,6 +208,14 @@ def exists(module): return False +def common_opts(module): + opts = [] + ver = get_rubygems_version(module) + if module.params['norc'] and ver and ver >= (2, 5, 2): + opts.append('--norc') + return opts + + def uninstall(module): if module.check_mode: @@ -195,9 +223,13 @@ def uninstall(module): cmd = get_rubygems_path(module) environ = get_rubygems_environ(module) cmd.append('uninstall') + cmd.extend(common_opts(module)) if module.params['install_dir']: cmd.extend(['--install-dir', module.params['install_dir']]) + if module.params['bindir']: + cmd.extend(['--bindir', module.params['bindir']]) + if module.params['version']: cmd.extend(['--version', module.params['version']]) else: @@ -213,13 +245,10 @@ def install(module): return ver = get_rubygems_version(module) - if ver: - major = ver[0] - else: - major = None cmd = get_rubygems_path(module) cmd.append('install') + cmd.extend(common_opts(module)) if module.params['version']: cmd.extend(['--version', module.params['version']]) if module.params['repository']: @@ -227,7 +256,7 @@ def install(module): if not module.params['include_dependencies']: cmd.append('--ignore-dependencies') else: - if major and major < 2: + if ver and ver < (2, 0, 0): 
cmd.append('--include-dependencies') if module.params['user_install']: cmd.append('--user-install') @@ -235,10 +264,12 @@ def install(module): cmd.append('--no-user-install') if module.params['install_dir']: cmd.extend(['--install-dir', module.params['install_dir']]) + if module.params['bindir']: + cmd.extend(['--bindir', module.params['bindir']]) if module.params['pre_release']: cmd.append('--pre') if not module.params['include_doc']: - if major and major < 2: + if ver and ver < (2, 0, 0): cmd.append('--no-rdoc') cmd.append('--no-ri') else: @@ -265,6 +296,8 @@ def main(): state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'), user_install=dict(required=False, default=True, type='bool'), install_dir=dict(required=False, type='path'), + bindir=dict(type='path'), + norc=dict(default=False, type='bool'), pre_release=dict(required=False, default=False, type='bool'), include_doc=dict(required=False, default=False, type='bool'), env_shebang=dict(required=False, default=False, type='bool'), diff --git a/tests/integration/targets/gem/tasks/main.yml b/tests/integration/targets/gem/tasks/main.yml index ce64364d78..499057775c 100644 --- a/tests/integration/targets/gem/tasks/main.yml +++ b/tests/integration/targets/gem/tasks/main.yml @@ -178,3 +178,44 @@ that: - install_gem_result is changed - gem_search.files | length == 0 + +# Custom directory for executables (--bindir) +- name: Install gem with custom bindir + gem: + name: gist + state: present + bindir: "{{ output_dir }}/custom_bindir" + norc: yes + user_install: no # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL + register: install_gem_result + +- name: Get stats of gem executable + stat: + path: "{{ output_dir }}/custom_bindir/gist" + register: gem_bindir_stat + +- name: Ensure gem executable was installed in custom directory + assert: + that: + - install_gem_result is changed + - gem_bindir_stat.stat.exists and 
gem_bindir_stat.stat.isreg + +- name: Remove gem with custom bindir + gem: + name: gist + state: absent + bindir: "{{ output_dir }}/custom_bindir" + norc: yes + user_install: no # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL + register: install_gem_result + +- name: Get stats of gem executable + stat: + path: "{{ output_dir }}/custom_bindir/gist" + register: gem_bindir_stat + +- name: Ensure gem executable was removed from custom directory + assert: + that: + - install_gem_result is changed + - not gem_bindir_stat.stat.exists From d6d0b6f0c1e760bfe4343457d64d0f40ab8a0b15 Mon Sep 17 00:00:00 2001 From: Lennert Mertens Date: Mon, 21 Jun 2021 21:32:07 +0200 Subject: [PATCH 0393/3093] gitlab_user: add support for identity provider (#2691) * Add identity functionality * Add functionality for user without provider or extern_uid * Fix missing key error and documentation * Fix failing tests * Update docs * Add changelog fragment * Update plugins/modules/source_control/gitlab/gitlab_user.py Add version Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_user.py Update boolean default Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_user.py Fix syntax Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_user.py Remove no_log Co-authored-by: Felix Fontein * Update changelogs/fragments/2691-gitlab_user-support-identity-provider.yml Update syntax Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_user.py Update syntax Co-authored-by: Felix Fontein * Update docs * Add functionality to add multiple identities at once * Fix identity example * Add suboptions * Add elements * Update plugins/modules/source_control/gitlab/gitlab_user.py Co-authored-by: Felix Fontein * Apply comma's at the end of dictionaries Co-authored-by: Felix Fontein * Add check mode * Change checkmode for user add and identity 
delete * Update plugins/modules/source_control/gitlab/gitlab_user.py * Update changelogs/fragments/2691-gitlab_user-support-identity-provider.yml Add more features to changelog as suggested here https://github.com/ansible-collections/community.general/pull/2691#discussion_r653250717 Co-authored-by: Felix Fontein * Add better description for identities list and overwrite_identities boolean Co-authored-by: Felix Fontein Co-authored-by: lennert.mertens Co-authored-by: Felix Fontein Co-authored-by: stef.graces Co-authored-by: Stef Graces Co-authored-by: Stef Graces Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- ...-gitlab_user-support-identity-provider.yml | 5 + .../source_control/gitlab/gitlab_user.py | 122 +++++++++++++++++- 2 files changed, 123 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2691-gitlab_user-support-identity-provider.yml diff --git a/changelogs/fragments/2691-gitlab_user-support-identity-provider.yml b/changelogs/fragments/2691-gitlab_user-support-identity-provider.yml new file mode 100644 index 0000000000..065b524c86 --- /dev/null +++ b/changelogs/fragments/2691-gitlab_user-support-identity-provider.yml @@ -0,0 +1,5 @@ +--- +minor_changes: + - "gitlab_user - specifying a password is no longer necessary (https://github.com/ansible-collections/community.general/pull/2691)." + - "gitlab_user - allow to reset an existing password with the new ``reset_password`` option (https://github.com/ansible-collections/community.general/pull/2691)." + - "gitlab_user - add functionality for adding external identity providers to a GitLab user (https://github.com/ansible-collections/community.general/pull/2691)." 
diff --git a/plugins/modules/source_control/gitlab/gitlab_user.py b/plugins/modules/source_control/gitlab/gitlab_user.py index 4d300ea842..8770a041b4 100644 --- a/plugins/modules/source_control/gitlab/gitlab_user.py +++ b/plugins/modules/source_control/gitlab/gitlab_user.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright: (c) 2021, Lennert Mertens (lennert@nubera.be) # Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -22,6 +23,8 @@ notes: author: - Werner Dijkerman (@dj-wasabi) - Guillaume Martinez (@Lunik) + - Lennert Mertens (@LennertMertens) + - Stef Graces (@stgrace) requirements: - python >= 2.7 - python-gitlab python module @@ -50,6 +53,12 @@ options: - GitLab server enforces minimum password length to 8, set this value with 8 or more characters. - Required only if C(state) is set to C(present). type: str + reset_password: + description: + - Whether the user can change its password or not. + default: false + type: bool + version_added: 3.3.0 email: description: - The email that belongs to the user. @@ -107,6 +116,30 @@ options: - Define external parameter for this user. type: bool default: no + identities: + description: + - List of identities to be added/updated for this user. + - To remove all other identities from this user, set I(overwrite_identities=true). + type: list + elements: dict + suboptions: + provider: + description: + - The name of the external identity provider + type: str + extern_uid: + description: + - User ID for external identity. + type: str + version_added: 3.3.0 + overwrite_identities: + description: + - Overwrite identities with identities added in this module. + - This means that all identities that the user has and that are not listed in I(identities) are removed from the user. 
+ - This is only done if a list is provided for I(identities). To remove all identities, provide an empty list. + type: bool + default: false + version_added: 3.3.0 ''' EXAMPLES = ''' @@ -134,6 +167,22 @@ EXAMPLES = ''' group: super_group/mon_group access_level: owner +- name: "Create GitLab User using external identity provider" + community.general.gitlab_user: + api_url: https://gitlab.example.com/ + validate_certs: True + api_token: "{{ access_token }}" + name: My Name + username: myusername + password: mysecretpassword + email: me@example.com + identities: + - provider: Keycloak + extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc + state: present + group: super_group/mon_group + access_level: owner + - name: "Block GitLab User" community.general.gitlab_user: api_url: https://gitlab.example.com/ @@ -219,10 +268,13 @@ class GitLabUser(object): 'name': options['name'], 'username': username, 'password': options['password'], + 'reset_password': options['reset_password'], 'email': options['email'], 'skip_confirmation': not options['confirm'], 'admin': options['isadmin'], - 'external': options['external']}) + 'external': options['external'], + 'identities': options['identities'], + }) changed = True else: changed, user = self.updateUser( @@ -240,6 +292,7 @@ class GitLabUser(object): 'value': options['isadmin'], 'setter': 'admin' }, 'external': {'value': options['external']}, + 'identities': {'value': options['identities']}, }, { # put "uncheckable" params here, this means params @@ -247,6 +300,8 @@ class GitLabUser(object): # not return any information about it 'skip_reconfirmation': {'value': not options['confirm']}, 'password': {'value': options['password']}, + 'reset_password': {'value': options['reset_password']}, + 'overwrite_identities': {'value': options['overwrite_identities']}, } ) @@ -393,7 +448,10 @@ class GitLabUser(object): av = arg_value['value'] if av is not None: - if getattr(user, arg_key) != av: + if arg_key == "identities": + changed = 
self.addIdentities(user, av, uncheckable_args['overwrite_identities']['value']) + + elif getattr(user, arg_key) != av: setattr(user, arg_value.get('setter', arg_key), av) changed = True @@ -412,13 +470,53 @@ class GitLabUser(object): if self._module.check_mode: return True + identities = None + if 'identities' in arguments: + identities = arguments['identities'] + del arguments['identities'] + try: user = self._gitlab.users.create(arguments) + if identities: + self.addIdentities(user, identities) + except (gitlab.exceptions.GitlabCreateError) as e: self._module.fail_json(msg="Failed to create user: %s " % to_native(e)) return user + ''' + @param user User object + @param identites List of identities to be added/updated + @param overwrite_identities Overwrite user identities with identities passed to this module + ''' + def addIdentities(self, user, identities, overwrite_identities=False): + changed = False + if overwrite_identities: + changed = self.deleteIdentities(user, identities) + + for identity in identities: + if identity not in user.identities: + setattr(user, 'provider', identity['provider']) + setattr(user, 'extern_uid', identity['extern_uid']) + if not self._module.check_mode: + user.save() + changed = True + return changed + + ''' + @param user User object + @param identites List of identities to be added/updated + ''' + def deleteIdentities(self, user, identities): + changed = False + for identity in user.identities: + if identity not in identities: + if not self._module.check_mode: + user.identityproviders.delete(identity['provider']) + changed = True + return changed + ''' @param username Username of the user ''' @@ -471,6 +569,13 @@ class GitLabUser(object): return user.unblock() +def sanitize_arguments(arguments): + for key, value in list(arguments.items()): + if value is None: + del arguments[key] + return arguments + + def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(dict( @@ -479,6 +584,7 @@ def main(): 
state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]), username=dict(type='str', required=True), password=dict(type='str', no_log=True), + reset_password=dict(type='bool', default=False, no_log=False), email=dict(type='str'), sshkey_name=dict(type='str'), sshkey_file=dict(type='str', no_log=False), @@ -488,6 +594,8 @@ def main(): confirm=dict(type='bool', default=True), isadmin=dict(type='bool', default=False), external=dict(type='bool', default=False), + identities=dict(type='list', elements='dict'), + overwrite_identities=dict(type='bool', default=False), )) module = AnsibleModule( @@ -504,7 +612,7 @@ def main(): ], supports_check_mode=True, required_if=( - ('state', 'present', ['name', 'email', 'password']), + ('state', 'present', ['name', 'email']), ) ) @@ -512,6 +620,7 @@ def main(): state = module.params['state'] user_username = module.params['username'].lower() user_password = module.params['password'] + user_reset_password = module.params['reset_password'] user_email = module.params['email'] user_sshkey_name = module.params['sshkey_name'] user_sshkey_file = module.params['sshkey_file'] @@ -521,6 +630,8 @@ def main(): confirm = module.params['confirm'] user_isadmin = module.params['isadmin'] user_external = module.params['external'] + user_identities = module.params['identities'] + overwrite_identities = module.params['overwrite_identities'] if not HAS_GITLAB_PACKAGE: module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) @@ -559,6 +670,7 @@ def main(): if gitlab_user.createOrUpdateUser(user_username, { "name": user_name, "password": user_password, + "reset_password": user_reset_password, "email": user_email, "sshkey_name": user_sshkey_name, "sshkey_file": user_sshkey_file, @@ -567,7 +679,9 @@ def main(): "access_level": access_level, "confirm": confirm, "isadmin": user_isadmin, - "external": user_external}): + "external": user_external, + "identities": user_identities, + 
"overwrite_identities": overwrite_identities}): module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.userObject._attrs) else: module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.userObject._attrs) From 07085785a38ed456742ec893481fe7f7746d0e30 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 22 Jun 2021 08:16:26 +0200 Subject: [PATCH 0394/3093] Dynamically add meta/runtime.yml redirects before integration tests. (#2633) ci_coverage --- tests/utils/shippable/shippable.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/utils/shippable/shippable.sh b/tests/utils/shippable/shippable.sh index 472bfca1ca..3a00812f12 100755 --- a/tests/utils/shippable/shippable.sh +++ b/tests/utils/shippable/shippable.sh @@ -97,6 +97,13 @@ fi # END: HACK +if [ "${script}" != "sanity" ] && [ "${script}" != "units" ]; then + # Adds meta/runtime.yml redirects for all modules before running integration tests. + # This ensures that ansible-base and ansible-core will use the "real" modules instead of the + # symbolic links, which results in coverage to be reported correctly. + "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/internal_test_tools/tools/meta_runtime.py" redirect --target both --flatmap +fi + export PYTHONIOENCODING='utf-8' if [ "${JOB_TRIGGERED_BY_NAME:-}" == "nightly-trigger" ]; then From 860b2b89a308549d4225e0b3a8576e0c8f35f3d2 Mon Sep 17 00:00:00 2001 From: Tong He <68936428+unnecessary-username@users.noreply.github.com> Date: Wed, 23 Jun 2021 17:29:50 -0400 Subject: [PATCH 0395/3093] jenkins_build: Support stop a running Jenkins build (#2850) * Support stop a running Jenkins build. Meanwhile enrich document content and test cases. * Fix the inconsistencies regarding the function name. * Submit the changelog and fix a PEP8 issue. * Remedy whitespace related PEP8 issues. * Implement the idempotent test case for the stop build function. 
* Make sure it returns proper changed status when we stop a build repeatedly. * Fix incorrect usages on comparison with True or False and incorrect usages on validating the changed status. * In this mocking situation, adjust the mock return value and test case to perform unit testing. * Implement JenkinsMockIdempotent() to mock return value in idempotent test cases. * Fix issues reported by CI. * Refactor the code to avoid CI exception and remove get_build_status() from mock function as they should not be there. * Update plugins/modules/web_infrastructure/jenkins_build.py Co-authored-by: Felix Fontein --- ...nkins_build-support-stop-jenkins-build.yml | 4 + .../web_infrastructure/jenkins_build.py | 59 +++++++++++--- .../web_infrastructure/test_jenkins_build.py | 76 ++++++++++++++++++- 3 files changed, 126 insertions(+), 13 deletions(-) create mode 100644 changelogs/fragments/2850-jenkins_build-support-stop-jenkins-build.yml diff --git a/changelogs/fragments/2850-jenkins_build-support-stop-jenkins-build.yml b/changelogs/fragments/2850-jenkins_build-support-stop-jenkins-build.yml new file mode 100644 index 0000000000..ad64e58eec --- /dev/null +++ b/changelogs/fragments/2850-jenkins_build-support-stop-jenkins-build.yml @@ -0,0 +1,4 @@ +minor_changes: + - jenkins_build - support stopping a running jenkins build (https://github.com/ansible-collections/community.general/pull/2850). +bugfixes: + - jenkins_build - examine presence of ``build_number`` before deleting a jenkins build (https://github.com/ansible-collections/community.general/pull/2850). \ No newline at end of file diff --git a/plugins/modules/web_infrastructure/jenkins_build.py b/plugins/modules/web_infrastructure/jenkins_build.py index 7f1d32b602..68f64f7a7b 100644 --- a/plugins/modules/web_infrastructure/jenkins_build.py +++ b/plugins/modules/web_infrastructure/jenkins_build.py @@ -15,7 +15,9 @@ description: - Manage Jenkins builds with Jenkins REST API. 
requirements: - "python-jenkins >= 0.4.12" -author: Brett Milford (@brettmilford) +author: + - Brett Milford (@brettmilford) + - Tong He (@unnecessary-username) options: args: description: @@ -36,9 +38,10 @@ options: type: str state: description: - - Attribute that specifies if the build is to be created or deleted. + - Attribute that specifies if the build is to be created, deleted or stopped. + - The C(stopped) state has been added in community.general 3.3.0. default: present - choices: ['present', 'absent'] + choices: ['present', 'absent', 'stopped'] type: str token: description: @@ -62,9 +65,26 @@ EXAMPLES = ''' args: cloud: "test" availability_zone: "test_az" + state: present user: admin password: asdfg url: http://localhost:8080 + +- name: Stop a running jenkins build anonymously + community.general.jenkins_build: + name: "stop-check" + build_number: 3 + state: stopped + url: http://localhost:8080 + +- name: Delete a jenkins build using token authentication + community.general.jenkins_build: + name: "delete-experiment" + build_number: 30 + state: absent + user: Jenkins + token: abcdefghijklmnopqrstuvwxyz123456 + url: http://localhost:8080 ''' RETURN = ''' @@ -152,7 +172,8 @@ class JenkinsBuild: try: build_number = self.server.get_job_info(self.name)['nextBuildNumber'] except Exception as e: - self.module.fail_json(msg='Unable to get job info from Jenkins server, %s' % to_native(e), exception=traceback.format_exc()) + self.module.fail_json(msg='Unable to get job info from Jenkins server, %s' % to_native(e), + exception=traceback.format_exc()) return build_number @@ -162,7 +183,8 @@ class JenkinsBuild: return response except Exception as e: - self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e), exception=traceback.format_exc()) + self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e), + exception=traceback.format_exc()) def present_build(self): self.build_number = self.get_next_build() @@ -176,6 +198,19 @@ 
class JenkinsBuild: self.module.fail_json(msg='Unable to create build for %s: %s' % (self.jenkins_url, to_native(e)), exception=traceback.format_exc()) + def stopped_build(self): + build_info = None + try: + build_info = self.server.get_build_info(self.name, self.build_number) + if build_info['building'] is True: + self.server.stop_build(self.name, self.build_number) + except Exception as e: + self.module.fail_json(msg='Unable to stop build for %s: %s' % (self.jenkins_url, to_native(e)), + exception=traceback.format_exc()) + else: + if build_info['building'] is False: + self.module.exit_json(**self.result) + def absent_build(self): try: self.server.delete_build(self.name, self.build_number) @@ -191,7 +226,10 @@ class JenkinsBuild: sleep(10) self.get_result() else: - if build_status['result'] == "SUCCESS": + if self.state == "stopped" and build_status['result'] == "ABORTED": + result['changed'] = True + result['build_info'] = build_status + elif build_status['result'] == "SUCCESS": result['changed'] = True result['build_info'] = build_status else: @@ -216,14 +254,13 @@ def main(): build_number=dict(type='int'), name=dict(required=True), password=dict(no_log=True), - state=dict(choices=['present', 'absent'], default="present"), + state=dict(choices=['present', 'absent', 'stopped'], default="present"), token=dict(no_log=True), url=dict(default="http://localhost:8080"), user=dict(), ), - mutually_exclusive=[ - ['password', 'token'], - ], + mutually_exclusive=[['password', 'token']], + required_if=[['state', 'absent', ['build_number'], True], ['state', 'stopped', ['build_number'], True]], ) test_dependencies(module) @@ -231,6 +268,8 @@ def main(): if module.params.get('state') == "present": jenkins_build.present_build() + elif module.params.get('state') == "stopped": + jenkins_build.stopped_build() else: jenkins_build.absent_build() diff --git a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py 
b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py index d0bbafcc91..3774871329 100644 --- a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py +++ b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py @@ -50,18 +50,42 @@ class JenkinsMock(): def get_build_info(self, name, build_number): return { + "building": True, "result": "SUCCESS" } - def get_build_status(self): - pass - def build_job(self, *args): return None def delete_build(self, name, build_number): return None + def stop_build(self, name, build_number): + return None + + +class JenkinsMockIdempotent(): + + def get_job_info(self, name): + return { + "nextBuildNumber": 1235 + } + + def get_build_info(self, name, build_number): + return { + "building": False, + "result": "ABORTED" + } + + def build_job(self, *args): + return None + + def delete_build(self, name, build_number): + return None + + def stop_build(self, name, build_number): + return None + class TestJenkinsBuild(unittest.TestCase): @@ -79,6 +103,16 @@ class TestJenkinsBuild(unittest.TestCase): set_module_args({}) jenkins_build.main() + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') + def test_module_fail_when_missing_build_number(self, test_deps): + test_deps.return_value = None + with self.assertRaises(AnsibleFailJson): + set_module_args({ + "name": "required-if", + "state": "stopped" + }) + jenkins_build.main() + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.JenkinsBuild.get_jenkins_connection') def test_module_create_build(self, jenkins_connection, test_deps): @@ -93,6 +127,42 @@ class TestJenkinsBuild(unittest.TestCase): }) jenkins_build.main() + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') + 
@patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.JenkinsBuild.get_jenkins_connection') + def test_module_stop_build(self, jenkins_connection, test_deps): + test_deps.return_value = None + jenkins_connection.return_value = JenkinsMock() + + with self.assertRaises(AnsibleExitJson) as return_json: + set_module_args({ + "name": "host-check", + "build_number": "1234", + "state": "stopped", + "user": "abc", + "token": "xyz" + }) + jenkins_build.main() + + self.assertTrue(return_json.exception.args[0]['changed']) + + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.JenkinsBuild.get_jenkins_connection') + def test_module_stop_build_again(self, jenkins_connection, test_deps): + test_deps.return_value = None + jenkins_connection.return_value = JenkinsMockIdempotent() + + with self.assertRaises(AnsibleExitJson) as return_json: + set_module_args({ + "name": "host-check", + "build_number": "1234", + "state": "stopped", + "user": "abc", + "password": "xyz" + }) + jenkins_build.main() + + self.assertFalse(return_json.exception.args[0]['changed']) + @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.test_dependencies') @patch('ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_build.JenkinsBuild.get_jenkins_connection') def test_module_delete_build(self, jenkins_connection, test_deps): From 24dabda95b4bf6340436a445c13cf9689029b51f Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Thu, 24 Jun 2021 07:33:10 -0400 Subject: [PATCH 0396/3093] archive - refactor and bugfix (#2816) * Initial Commit * Further refinement * Fixing archive name distortion for single file zips * Applying initial review suggestions * Updating path value for single target * Adding test case for single target zip archiving * Fixing 
integration for RHEL/FreeBSD on ansible 2.x * Fixing integration second attempt * Adding changelog fragment * Updating changelog fragment --- .../fragments/2816-archive-refactor.yml | 5 + plugins/modules/files/archive.py | 719 +++++++++--------- .../targets/archive/files/sub/subfile.txt | 0 .../targets/archive/tasks/main.yml | 96 ++- .../targets/archive/tasks/remove.yml | 31 + 5 files changed, 475 insertions(+), 376 deletions(-) create mode 100644 changelogs/fragments/2816-archive-refactor.yml create mode 100644 tests/integration/targets/archive/files/sub/subfile.txt diff --git a/changelogs/fragments/2816-archive-refactor.yml b/changelogs/fragments/2816-archive-refactor.yml new file mode 100644 index 0000000000..75c30bcdfc --- /dev/null +++ b/changelogs/fragments/2816-archive-refactor.yml @@ -0,0 +1,5 @@ +--- +bugfixes: + - archive - fixed incorrect ``state`` result value documentation (https://github.com/ansible-collections/community.general/pull/2816). + - archive - fixed ``exclude_path`` values causing incorrect archive root (https://github.com/ansible-collections/community.general/pull/2816). + - archive - fixed improper file names for single file zip archives (https://github.com/ansible-collections/community.general/issues/2818). diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 8d4afa58a5..5cdd6630d1 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -44,6 +44,7 @@ options: - Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list. type: list elements: path + default: [] exclusion_patterns: description: - Glob style patterns to exclude files or directories from the resulting archive. @@ -133,11 +134,7 @@ EXAMPLES = r''' RETURN = r''' state: description: - The current state of the archived file. - If 'absent', then no source files were found and the archive does not exist. 
- If 'compress', then the file source file is in the compressed state. - If 'archive', then the source file or paths are currently archived. - If 'incomplete', then an archive was created, but not all source paths were found. + The state of the input C(path). type: str returned: always missing: @@ -162,6 +159,7 @@ expanded_exclude_paths: returned: always ''' +import abc import bz2 import glob import gzip @@ -176,12 +174,12 @@ from sys import version_info from traceback import format_exc from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_bytes, to_native -from ansible.module_utils.six import PY3 +from ansible.module_utils.common.text.converters import to_bytes, to_native +from ansible.module_utils import six LZMA_IMP_ERR = None -if PY3: +if six.PY3: try: import lzma HAS_LZMA = True @@ -196,18 +194,24 @@ else: LZMA_IMP_ERR = format_exc() HAS_LZMA = False +PATH_SEP = to_bytes(os.sep) PY27 = version_info[0:2] >= (2, 7) +STATE_ABSENT = 'absent' +STATE_ARCHIVED = 'archive' +STATE_COMPRESSED = 'compress' +STATE_INCOMPLETE = 'incomplete' -def to_b(s): + +def _to_bytes(s): return to_bytes(s, errors='surrogate_or_strict') -def to_n(s): +def _to_native(s): return to_native(s, errors='surrogate_or_strict') -def to_na(s): +def _to_native_ascii(s): return to_native(s, errors='surrogate_or_strict', encoding='ascii') @@ -215,68 +219,330 @@ def expand_paths(paths): expanded_path = [] is_globby = False for path in paths: - b_path = to_b(path) + b_path = _to_bytes(path) if b'*' in b_path or b'?' 
in b_path: e_paths = glob.glob(b_path) is_globby = True - else: e_paths = [b_path] expanded_path.extend(e_paths) return expanded_path, is_globby +def is_archive(path): + return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE) + + +def legacy_filter(path, exclusion_patterns): + return matches_exclusion_patterns(path, exclusion_patterns) + + def matches_exclusion_patterns(path, exclusion_patterns): return any(fnmatch(path, p) for p in exclusion_patterns) -def get_filter(exclusion_patterns, format): - def zip_filter(path): - return matches_exclusion_patterns(path, exclusion_patterns) +@six.add_metaclass(abc.ABCMeta) +class Archive(object): + def __init__(self, module): + self.module = module - def tar_filter(tarinfo): - return None if matches_exclusion_patterns(tarinfo.name, exclusion_patterns) else tarinfo + self.destination = _to_bytes(module.params['dest']) if module.params['dest'] else None + self.exclusion_patterns = module.params['exclusion_patterns'] or [] + self.format = module.params['format'] + self.must_archive = module.params['force_archive'] + self.remove = module.params['remove'] - return zip_filter if format == 'zip' or not PY27 else tar_filter + self.changed = False + self.destination_state = STATE_ABSENT + self.errors = [] + self.file = None + self.root = b'' + self.successes = [] + self.targets = [] + self.not_found = [] + paths = module.params['path'] + self.expanded_paths, has_globs = expand_paths(paths) + self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0] -def get_archive_contains(format): - def archive_contains(archive, name): + self.paths = list(set(self.expanded_paths) - set(self.expanded_exclude_paths)) + + if not self.paths: + module.fail_json( + path=', '.join(paths), + expanded_paths=_to_native(b', '.join(self.expanded_paths)), + expanded_exclude_paths=_to_native(b', '.join(self.expanded_exclude_paths)), + msg='Error, no source paths were found' + ) + + if not 
self.must_archive: + self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1]) + + if not self.destination and not self.must_archive: + self.destination = b'%s.%s' % (self.paths[0], _to_bytes(self.format)) + + if self.must_archive and not self.destination: + module.fail_json( + dest=_to_native(self.destination), + path=', '.join(paths), + msg='Error, must specify "dest" when archiving multiple files or trees' + ) + + def add(self, path, archive_name): try: - if format == 'zip': - archive.getinfo(name) + self._add(_to_native_ascii(path), _to_native(archive_name)) + if self.contains(_to_native(archive_name)): + self.successes.append(path) + except Exception as e: + self.errors.append('%s: %s' % (_to_native_ascii(path), _to_native(e))) + + def add_single_target(self, path): + if self.format in ('zip', 'tar'): + archive_name = re.sub(br'^%s' % re.escape(self.root), b'', path) + self.open() + self.add(path, archive_name) + self.close() + self.destination_state = STATE_ARCHIVED + else: + try: + f_out = self._open_compressed_file(_to_native_ascii(self.destination)) + with open(path, 'rb') as f_in: + shutil.copyfileobj(f_in, f_out) + f_out.close() + self.successes.append(path) + self.destination_state = STATE_COMPRESSED + except (IOError, OSError) as e: + self.module.fail_json( + path=_to_native(path), + dest=_to_native(self.destination), + msg='Unable to write to compressed file: %s' % _to_native(e), exception=format_exc() + ) + + def add_targets(self): + self.open() + try: + match_root = re.compile(br'^%s' % re.escape(self.root)) + for target in self.targets: + if os.path.isdir(target): + for directory_path, directory_names, file_names in os.walk(target, topdown=True): + if not directory_path.endswith(PATH_SEP): + directory_path += PATH_SEP + + for directory_name in directory_names: + full_path = directory_path + directory_name + archive_name = match_root.sub(b'', full_path) + self.add(full_path, archive_name) + + for file_name in 
file_names: + full_path = directory_path + file_name + archive_name = match_root.sub(b'', full_path) + self.add(full_path, archive_name) + else: + archive_name = match_root.sub(b'', target) + self.add(target, archive_name) + except Exception as e: + if self.format in ('zip', 'tar'): + archive_format = self.format else: - archive.getmember(name) + archive_format = 'tar.' + self.format + self.module.fail_json( + msg='Error when writing %s archive at %s: %s' % ( + archive_format, _to_native(self.destination), _to_native(e) + ), + exception=format_exc() + ) + self.close() + + if self.errors: + self.module.fail_json( + msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors)) + ) + + def destination_exists(self): + return self.destination and os.path.exists(self.destination) + + def destination_size(self): + return os.path.getsize(self.destination) if self.destination_exists() else 0 + + def find_targets(self): + for path in self.paths: + # Use the longest common directory name among all the files as the archive root path + if self.root == b'': + self.root = os.path.dirname(path) + PATH_SEP + else: + for i in range(len(self.root)): + if path[i] != self.root[i]: + break + + if i < len(self.root): + self.root = os.path.dirname(self.root[0:i + 1]) + + self.root += PATH_SEP + # Don't allow archives to be created anywhere within paths to be removed + if self.remove and os.path.isdir(path): + prefix = path if path.endswith(PATH_SEP) else path + PATH_SEP + if self.destination.startswith(prefix): + self.module.fail_json( + path=', '.join(self.paths), + msg='Error, created archive can not be contained in source paths when remove=true' + ) + if not os.path.lexists(path): + self.not_found.append(path) + else: + self.targets.append(path) + + def has_targets(self): + return bool(self.targets) + + def has_unfound_targets(self): + return bool(self.not_found) + + def remove_targets(self): + for path in self.successes: + try: + if 
os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + except OSError: + self.errors.append(_to_native(path)) + for path in self.paths: + try: + if os.path.isdir(path): + shutil.rmtree(path) + except OSError: + self.errors.append(_to_native(path)) + + if self.errors: + self.module.fail_json( + dest=_to_native(self.destination), msg='Error deleting some source files: ', files=self.errors + ) + + def update_permissions(self): + try: + file_args = self.module.load_file_common_arguments(self.module.params, path=self.destination) + except TypeError: + # The path argument is only supported in Ansible-base 2.10+. Fall back to + # pre-2.10 behavior for older Ansible versions. + self.module.params['path'] = self.destination + file_args = self.module.load_file_common_arguments(self.module.params) + + self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed) + + @property + def result(self): + return { + 'archived': [_to_native(p) for p in self.successes], + 'dest': _to_native(self.destination), + 'changed': self.changed, + 'arcroot': _to_native(self.root), + 'missing': [_to_native(p) for p in self.not_found], + 'expanded_paths': [_to_native(p) for p in self.expanded_paths], + 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths], + } + + def _open_compressed_file(self, path): + f = None + if self.format == 'gz': + f = gzip.open(path, 'wb') + elif self.format == 'bz2': + f = bz2.BZ2File(path, 'wb') + elif self.format == 'xz': + f = lzma.LZMAFile(path, 'wb') + else: + self.module.fail_json(msg="%s is not a valid format" % self.format) + + return f + + @abc.abstractmethod + def close(self): + pass + + @abc.abstractmethod + def contains(self, name): + pass + + @abc.abstractmethod + def open(self): + pass + + @abc.abstractmethod + def _add(self, path, archive_name): + pass + + +class ZipArchive(Archive): + def __init__(self, module): + super(ZipArchive, self).__init__(module) + + def close(self): + 
self.file.close() + + def contains(self, name): + try: + self.file.getinfo(name) except KeyError: return False - return True - return archive_contains + def open(self): + self.file = zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, True) + + def _add(self, path, archive_name): + if not legacy_filter(path, self.exclusion_patterns): + self.file.write(path, archive_name) -def get_add_to_archive(format, filter): - def add_to_zip_archive(archive_file, path, archive_name): +class TarArchive(Archive): + def __init__(self, module): + super(TarArchive, self).__init__(module) + self.fileIO = None + + def close(self): + self.file.close() + if self.format == 'xz': + with lzma.open(_to_native(self.destination), 'wb') as f: + f.write(self.fileIO.getvalue()) + self.fileIO.close() + + def contains(self, name): try: - if not filter(path): - archive_file.write(path, archive_name) - except Exception as e: - return e + self.file.getmember(name) + except KeyError: + return False + return True - return None + def open(self): + if self.format in ('gz', 'bz2'): + self.file = tarfile.open(_to_native_ascii(self.destination), 'w|' + self.format) + # python3 tarfile module allows xz format but for python2 we have to create the tarfile + # in memory and then compress it with lzma. 
+ elif self.format == 'xz': + self.fileIO = io.BytesIO() + self.file = tarfile.open(fileobj=self.fileIO, mode='w') + elif self.format == 'tar': + self.file = tarfile.open(_to_native_ascii(self.destination), 'w') + else: + self.module.fail_json(msg="%s is not a valid archive format" % self.format) - def add_to_tar_archive(archive_file, path, archive_name): - try: - if PY27: - archive_file.add(path, archive_name, recursive=False, filter=filter) - else: - archive_file.add(path, archive_name, recursive=False, exclude=filter) - except Exception as e: - return e + def _add(self, path, archive_name): + def py27_filter(tarinfo): + return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo - return None + def py26_filter(path): + return matches_exclusion_patterns(path, self.exclusion_patterns) - return add_to_zip_archive if format == 'zip' else add_to_tar_archive + if PY27: + self.file.add(path, archive_name, recursive=False, filter=py27_filter) + else: + self.file.add(path, archive_name, recursive=False, exclude=py26_filter) + + +def get_archive(module): + if module.params['format'] == 'zip': + return ZipArchive(module) + else: + return TarArchive(module) def main(): @@ -285,7 +551,7 @@ def main(): path=dict(type='list', elements='path', required=True), format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), dest=dict(type='path'), - exclude_path=dict(type='list', elements='path'), + exclude_path=dict(type='list', elements='path', default=[]), exclusion_patterns=dict(type='list', elements='path'), force_archive=dict(type='bool', default=False), remove=dict(type='bool', default=False), @@ -294,349 +560,52 @@ def main(): supports_check_mode=True, ) - params = module.params - check_mode = module.check_mode - paths = params['path'] - dest = params['dest'] - b_dest = None if not dest else to_b(dest) - exclude_paths = params['exclude_path'] - remove = params['remove'] - - fmt = params['format'] - b_fmt = to_b(fmt) - 
force_archive = params['force_archive'] - changed = False - state = 'absent' - - exclusion_patterns = params['exclusion_patterns'] or [] - - # Simple or archive file compression (inapplicable with 'zip' since it's always an archive) - b_successes = [] - - # Fail early - if not HAS_LZMA and fmt == 'xz': - module.fail_json(msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), - exception=LZMA_IMP_ERR) - module.fail_json(msg="lzma or backports.lzma is required when using xz format.") - - b_expanded_paths, globby = expand_paths(paths) - if not b_expanded_paths: - return module.fail_json( - path=', '.join(paths), - expanded_paths=to_native(b', '.join(b_expanded_paths), errors='surrogate_or_strict'), - msg='Error, no source paths were found' + if not HAS_LZMA and module.params['format'] == 'xz': + module.fail_json( + msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), exception=LZMA_IMP_ERR ) - # Only attempt to expand the exclude paths if it exists - b_expanded_exclude_paths = expand_paths(exclude_paths)[0] if exclude_paths else [] + check_mode = module.check_mode - filter = get_filter(exclusion_patterns, fmt) - archive_contains = get_archive_contains(fmt) - add_to_archive = get_add_to_archive(fmt, filter) + archive = get_archive(module) + size = archive.destination_size() + archive.find_targets() - # Only try to determine if we are working with an archive or not if we haven't set archive to true - if not force_archive: - # If we actually matched multiple files or TRIED to, then - # treat this as a multi-file archive - archive = globby or os.path.isdir(b_expanded_paths[0]) or len(b_expanded_paths) > 1 + if not archive.has_targets(): + if archive.destination_exists(): + archive.destination_state = STATE_ARCHIVED if is_archive(archive.destination) else STATE_COMPRESSED + elif archive.has_targets() and archive.must_archive: + if check_mode: + archive.changed = True + else: + archive.add_targets() + 
archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED + if archive.remove: + archive.remove_targets() + if archive.destination_size() != size: + archive.changed = True else: - archive = True - - # Default created file name (for single-file archives) to - # . - if not b_dest and not archive: - b_dest = b'%s.%s' % (b_expanded_paths[0], b_fmt) - - # Force archives to specify 'dest' - if archive and not b_dest: - module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees') - - b_sep = to_b(os.sep) - - b_archive_paths = [] - b_missing = [] - b_arcroot = b'' - - for b_path in b_expanded_paths: - # Use the longest common directory name among all the files - # as the archive root path - if b_arcroot == b'': - b_arcroot = os.path.dirname(b_path) + b_sep + if check_mode: + if not archive.destination_exists(): + archive.changed = True else: - for i in range(len(b_arcroot)): - if b_path[i] != b_arcroot[i]: - break - - if i < len(b_arcroot): - b_arcroot = os.path.dirname(b_arcroot[0:i + 1]) - - b_arcroot += b_sep - - # Don't allow archives to be created anywhere within paths to be removed - if remove and os.path.isdir(b_path): - b_path_dir = b_path - if not b_path.endswith(b'/'): - b_path_dir += b'/' - - if b_dest.startswith(b_path_dir): - module.fail_json( - path=', '.join(paths), - msg='Error, created archive can not be contained in source paths when remove=True' - ) - - if os.path.lexists(b_path) and b_path not in b_expanded_exclude_paths: - b_archive_paths.append(b_path) - else: - b_missing.append(b_path) - - # No source files were found but the named archive exists: are we 'compress' or 'archive' now? 
- if len(b_missing) == len(b_expanded_paths) and b_dest and os.path.exists(b_dest): - # Just check the filename to know if it's an archive or simple compressed file - if re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(b_dest), re.IGNORECASE): - state = 'archive' - else: - state = 'compress' - - # Multiple files, or globbiness - elif archive: - if not b_archive_paths: - # No source files were found, but the archive is there. - if os.path.lexists(b_dest): - state = 'archive' - elif b_missing: - # SOME source files were found, but not all of them - state = 'incomplete' - - archive = None - size = 0 - errors = [] - - if os.path.lexists(b_dest): - size = os.path.getsize(b_dest) - - if state != 'archive': - if check_mode: - changed = True - - else: + path = archive.paths[0] + archive.add_single_target(path) + if archive.destination_size() != size: + archive.changed = True + if archive.remove: try: - # Slightly more difficult (and less efficient!) compression using zipfile module - if fmt == 'zip': - arcfile = zipfile.ZipFile( - to_na(b_dest), - 'w', - zipfile.ZIP_DEFLATED, - True - ) - - # Easier compression using tarfile module - elif fmt == 'gz' or fmt == 'bz2': - arcfile = tarfile.open(to_na(b_dest), 'w|' + fmt) - - # python3 tarfile module allows xz format but for python2 we have to create the tarfile - # in memory and then compress it with lzma. 
- elif fmt == 'xz': - arcfileIO = io.BytesIO() - arcfile = tarfile.open(fileobj=arcfileIO, mode='w') - - # Or plain tar archiving - elif fmt == 'tar': - arcfile = tarfile.open(to_na(b_dest), 'w') - - b_match_root = re.compile(br'^%s' % re.escape(b_arcroot)) - for b_path in b_archive_paths: - if os.path.isdir(b_path): - # Recurse into directories - for b_dirpath, b_dirnames, b_filenames in os.walk(b_path, topdown=True): - if not b_dirpath.endswith(b_sep): - b_dirpath += b_sep - - for b_dirname in b_dirnames: - b_fullpath = b_dirpath + b_dirname - n_fullpath = to_na(b_fullpath) - n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict') - - err = add_to_archive(arcfile, n_fullpath, n_arcname) - if err: - errors.append('%s: %s' % (n_fullpath, to_native(err))) - - for b_filename in b_filenames: - b_fullpath = b_dirpath + b_filename - n_fullpath = to_na(b_fullpath) - n_arcname = to_n(b_match_root.sub(b'', b_fullpath)) - - err = add_to_archive(arcfile, n_fullpath, n_arcname) - if err: - errors.append('Adding %s: %s' % (to_native(b_path), to_native(err))) - - if archive_contains(arcfile, n_arcname): - b_successes.append(b_fullpath) - else: - path = to_na(b_path) - arcname = to_n(b_match_root.sub(b'', b_path)) - - err = add_to_archive(arcfile, path, arcname) - if err: - errors.append('Adding %s: %s' % (to_native(b_path), to_native(err))) - - if archive_contains(arcfile, arcname): - b_successes.append(b_path) - - except Exception as e: - expanded_fmt = 'zip' if fmt == 'zip' else ('tar.' 
+ fmt) - module.fail_json( - msg='Error when writing %s archive at %s: %s' % (expanded_fmt, dest, to_native(e)), - exception=format_exc() - ) - - if arcfile: - arcfile.close() - state = 'archive' - - if fmt == 'xz': - with lzma.open(b_dest, 'wb') as f: - f.write(arcfileIO.getvalue()) - arcfileIO.close() - - if errors: - module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors))) - - if state in ['archive', 'incomplete'] and remove: - for b_path in b_successes: - try: - if os.path.isdir(b_path): - shutil.rmtree(b_path) - elif not check_mode: - os.remove(b_path) - except OSError: - errors.append(to_native(b_path)) - - for b_path in b_expanded_paths: - try: - if os.path.isdir(b_path): - shutil.rmtree(b_path) - except OSError: - errors.append(to_native(b_path)) - - if errors: - module.fail_json(dest=dest, msg='Error deleting some source files: ', files=errors) - - # Rudimentary check: If size changed then file changed. Not perfect, but easy. - if not check_mode and os.path.getsize(b_dest) != size: - changed = True - - if b_successes and state != 'incomplete': - state = 'archive' - - # Simple, single-file compression - else: - b_path = b_expanded_paths[0] - - # No source or compressed file - if not (os.path.exists(b_path) or os.path.lexists(b_dest)): - state = 'absent' - - # if it already exists and the source file isn't there, consider this done - elif not os.path.lexists(b_path) and os.path.lexists(b_dest): - state = 'compress' - - else: - if module.check_mode: - if not os.path.exists(b_dest): - changed = True - else: - size = 0 - f_in = f_out = arcfile = None - - if os.path.lexists(b_dest): - size = os.path.getsize(b_dest) - - try: - if fmt == 'zip': - arcfile = zipfile.ZipFile( - to_na(b_dest), - 'w', - zipfile.ZIP_DEFLATED, - True - ) - arcfile.write( - to_na(b_path), - to_n(b_path[len(b_arcroot):]) - ) - arcfile.close() - state = 'archive' # because all zip files are archives - elif fmt == 'tar': - arcfile = 
tarfile.open(to_na(b_dest), 'w') - arcfile.add(to_na(b_path)) - arcfile.close() - else: - f_in = open(b_path, 'rb') - - n_dest = to_na(b_dest) - if fmt == 'gz': - f_out = gzip.open(n_dest, 'wb') - elif fmt == 'bz2': - f_out = bz2.BZ2File(n_dest, 'wb') - elif fmt == 'xz': - f_out = lzma.LZMAFile(n_dest, 'wb') - else: - raise OSError("Invalid format") - - shutil.copyfileobj(f_in, f_out) - - b_successes.append(b_path) - + os.remove(path) except OSError as e: module.fail_json( - path=to_native(b_path), - dest=dest, - msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc() + path=_to_native(path), + msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc() ) - if arcfile: - arcfile.close() - if f_in: - f_in.close() - if f_out: - f_out.close() + if archive.destination_exists(): + archive.update_permissions() - # Rudimentary check: If size changed then file changed. Not perfect, but easy. - if os.path.getsize(b_dest) != size: - changed = True - - state = 'compress' - - if remove and not check_mode: - try: - os.remove(b_path) - - except OSError as e: - module.fail_json( - path=to_native(b_path), - msg='Unable to remove source file: %s' % to_native(e), exception=format_exc() - ) - - try: - file_args = module.load_file_common_arguments(params, path=b_dest) - except TypeError: - # The path argument is only supported in Ansible-base 2.10+. Fall back to - # pre-2.10 behavior for older Ansible versions. 
- params['path'] = b_dest - file_args = module.load_file_common_arguments(params) - - if not check_mode: - changed = module.set_fs_attributes_if_different(file_args, changed) - - module.exit_json( - archived=[to_n(p) for p in b_successes], - dest=dest, - changed=changed, - state=state, - arcroot=to_n(b_arcroot), - missing=[to_n(p) for p in b_missing], - expanded_paths=[to_n(p) for p in b_expanded_paths], - expanded_exclude_paths=[to_n(p) for p in b_expanded_exclude_paths], - ) + module.exit_json(**archive.result) if __name__ == '__main__': diff --git a/tests/integration/targets/archive/files/sub/subfile.txt b/tests/integration/targets/archive/files/sub/subfile.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml index 761f9eb7b8..35a8f1edf3 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -79,6 +79,8 @@ - foo.txt - bar.txt - empty.txt + - sub + - sub/subfile.txt - name: archive using gz archive: @@ -366,7 +368,7 @@ - name: Test exclusion_patterns option archive: path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/test-archive-exclustion-patterns.tgz" + dest: "{{ output_dir }}/test-archive-exclusion-patterns.tgz" exclusion_patterns: b?r.* register: exclusion_patterns_result @@ -376,6 +378,98 @@ - exclusion_patterns_result is changed - "'bar.txt' not in exclusion_patterns_result.archived" +- name: Test that excluded paths do not influence archive root + archive: + path: + - "{{ output_dir }}/sub/subfile.txt" + - "{{ output_dir }}" + exclude_path: + - "{{ output_dir }}" + dest: "{{ output_dir }}/test-archive-root.tgz" + register: archive_root_result + +- name: Assert that excluded paths do not influence archive root + assert: + that: + - archive_root_result.arcroot != output_dir + +- name: Remove archive root test + file: + path: "{{ output_dir }}/test-archive-root.tgz" + state: 
absent + +- name: Test Single Target with format={{ item }} + archive: + path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/test-single-target.{{ item }}" + format: "{{ item }}" + register: "single_target_test" + loop: + - zip + - tar + - gz + - bz2 + - xz + +# Dummy tests until ``dest_state`` result value can be implemented +- name: Assert that single target tests are effective + assert: + that: + - single_target_test.results[0] is changed + - single_target_test.results[1] is changed + - single_target_test.results[2] is changed + - single_target_test.results[3] is changed + - single_target_test.results[4] is changed + +- name: Retrieve contents of single target archives + ansible.builtin.unarchive: + src: "{{ output_dir }}/test-single-target.zip" + dest: . + list_files: true + check_mode: true + ignore_errors: true + register: single_target_test_contents + +- name: Assert that file names in single-file zip archives are preserved + assert: + that: + - "'oo.txt' not in single_target_test_contents.files" + - "'foo.txt' in single_target_test_contents.files" + # ``unarchive`` fails for RHEL and FreeBSD on ansible 2.x + when: single_target_test_contents is success and single_target_test_contents is not skipped + +- name: Remove single target test with format={{ item }} + file: + path: "{{ output_dir }}/test-single-target.{{ item }}" + state: absent + loop: + - zip + - tar + - gz + - bz2 + - xz + +- name: Test that missing files result in incomplete state + archive: + path: + - "{{ output_dir }}/*.txt" + - "{{ output_dir }}/dne.txt" + exclude_path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/test-incomplete-archive.tgz" + register: incomplete_archive_result + +- name: Assert that incomplete archive has incomplete state + assert: + that: + - incomplete_archive_result is changed + - "'{{ output_dir }}/dne.txt' in incomplete_archive_result.missing" + - "'{{ output_dir }}/foo.txt' not in incomplete_archive_result.missing" + +- name: Remove incomplete archive 
+ file: + path: "{{ output_dir }}/test-incomplete-archive.tgz" + state: absent + - name: Remove backports.lzma if previously installed (pip) pip: name=backports.lzma state=absent when: backports_lzma_pip is changed diff --git a/tests/integration/targets/archive/tasks/remove.yml b/tests/integration/targets/archive/tasks/remove.yml index 44d2024068..9600eb9f6d 100644 --- a/tests/integration/targets/archive/tasks/remove.yml +++ b/tests/integration/targets/archive/tasks/remove.yml @@ -117,6 +117,37 @@ - name: verify that excluded file is still present file: path={{ output_dir }}/tmpdir/empty.txt state=file +- name: prep our files in tmpdir again + copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} + with_items: + - foo.txt + - bar.txt + - empty.txt + - sub + - sub/subfile.txt + +- name: archive using gz and remove src directory + archive: + path: + - "{{ output_dir }}/tmpdir/*.txt" + - "{{ output_dir }}/tmpdir/sub/*" + dest: "{{ output_dir }}/archive_remove_04.gz" + format: gz + remove: yes + exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + register: archive_remove_result_04 + +- debug: msg="{{ archive_remove_result_04 }}" + +- name: verify that the files archived + file: path={{ output_dir }}/archive_remove_04.gz state=file + +- name: remove our gz + file: path="{{ output_dir }}/archive_remove_04.gz" state=absent + +- name: verify that excluded sub file is still present + file: path={{ output_dir }}/tmpdir/sub/subfile.txt state=file + - name: remove temporary directory file: path: "{{ output_dir }}/tmpdir" From 24c5d4320f64ec3ec6a155e6be0a1b5f8be08a5a Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Thu, 24 Jun 2021 13:35:00 +0200 Subject: [PATCH 0397/3093] Keycloak: add authentication management (#2456) * Allow keycloak_group.py to take token as parameter for the authentification Refactor get_token to pass module.params + Documentation Fix unit test and add new one for token as param Fix identation 
Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_clienttemplate.py Co-authored-by: Felix Fontein Allow keycloak_group.py to take token as parameter for the authentification Refactor get_token to pass module.params + Documentation * Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Check if base_url is None before to check format Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Amin Vakil Update plugins/modules/identity/keycloak/keycloak_clienttemplate.py Co-authored-by: Amin Vakil Switch to modern syntax for the documentation (e.g. community.general.keycloak_client) Update keycloak_client.py Update keycloak_clienttemplate.py Add keycloak_authentication module to manage authentication Minor fixex Fix indent * Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Removing variable ANSIBLE_METADATA from beginning of file Minor fix Refactoring create_or_update_executions :add change_execution_priority function Refactoring create_or_update_executions :add create_execution function Refactoring create_or_update_executions: add create_subflow Refactoring create_or_update_executions: add update_authentication_executions function Minor fix * Using FQCN for the examples Minor fix Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update 
plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Felix Fontein Refactoring: rename isDictEquals into is_dict_equals Refactoring: rename variable as authentication_flow Refactoring: rename variable as new_name Refactoring: rename variable as flow_list Refactoring: rename variable as new_flow Refactoring: changing construction of dict newAuthenticationRepresentation and renaming as new_auth_repr Minor fix * Refactoring: rename variables with correct Python syntax (auth_repr, exec_repr) Move create_or_update_executions function from keycloak.py to keycloak_authentication.py Minor fix Remove mock_create_or_update_executions not needed anymore Fix unit test Update plugins/module_utils/identity/keycloak/keycloak.py is_dict_equals function return True if value1 empty Update plugins/module_utils/identity/keycloak/keycloak.py Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Rename is_dict_equal as is_struct_included and rename params as struct1 and struct2 Rename variables according to Python naming conventions Refactoring: add find_exec_in_executions function in keycloak_authentication to remove code duplication typo Add blank line Add required parameter, either creds or token Typo try/except only surround for loop containing struct2[key] Add sub-options to meta_args assigment of result['changed'] after if-elif-else block Fix CI error: parameter-type-not-in-doc Fix unit test: none value 
excluded from comparison Minor fix Simplify is_struct_included function Replace 'type(..) is' by isinstance(..) Remove redundant required=True and redundant parenthesis Add check_mode, check if value is None (None value added by argument spec checker) Apply suggestions from code review Update plugins/modules/identity/keycloak/keycloak_authentication.py * Update plugins/modules/identity/keycloak/keycloak_authentication.py * Add index paramter to configure the priority order of the execution * Minor fix: authenticationConfig dict instead of str Co-authored-by: Felix Fontein --- .../identity/keycloak/keycloak.py | 319 ++++++++- .../keycloak/keycloak_authentication.py | 383 +++++++++++ plugins/modules/keycloak_authentication.py | 1 + .../keycloak/test_keycloak_authentication.py | 622 ++++++++++++++++++ 4 files changed, 1323 insertions(+), 2 deletions(-) create mode 100644 plugins/modules/identity/keycloak/keycloak_authentication.py create mode 120000 plugins/modules/keycloak_authentication.py create mode 100644 tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index c0a1c2a158..ae002a7c94 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -33,9 +33,9 @@ import json import traceback from ansible.module_utils.urls import open_url -from ansible.module_utils.six.moves.urllib.parse import urlencode +from ansible.module_utils.six.moves.urllib.parse import urlencode, quote from ansible.module_utils.six.moves.urllib.error import HTTPError -from ansible.module_utils._text import to_native +from ansible.module_utils._text import to_native, to_text URL_REALMS = "{url}/admin/realms" URL_REALM = "{url}/admin/realms/{realm}" @@ -51,6 +51,17 @@ URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates" URL_GROUPS = "{url}/admin/realms/{realm}/groups" URL_GROUP = 
"{url}/admin/realms/{realm}/groups/{groupid}"
+URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows"
+URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}"
+URL_AUTHENTICATION_FLOW_COPY = "{url}/admin/realms/{realm}/authentication/flows/{copyfrom}/copy"
+URL_AUTHENTICATION_FLOW_EXECUTIONS = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions"
+URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/execution"
+URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/flow"
+URL_AUTHENTICATION_EXECUTION_CONFIG = "{url}/admin/realms/{realm}/authentication/executions/{id}/config"
+URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/raise-priority"
+URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority"
+URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}"
+

 def keycloak_argument_spec():
     """
@@ -132,6 +143,59 @@ def get_token(module_params):
     }


+def is_struct_included(struct1, struct2, exclude=None):
+    """
+    This function compares whether the first parameter structure is included in the second.
+    The function uses every element of struct1 and validates it is present in the struct2 structure.
+    The two structures do not need to be equal for this function to return true.
+    Each element is compared recursively.
+    :param struct1:
+        type:
+            dict for the initial call, can be dict, list, bool, int or str for recursive calls
+        description:
+            reference structure
+    :param struct2:
+        type:
+            dict for the initial call, can be dict, list, bool, int or str for recursive calls
+        description:
+            structure to compare with first parameter.
+    :param exclude:
+        type:
+            list
+        description:
+            Keys to exclude from the comparison.
+ default: None + :return: + type: + bool + description: + Return True if all element of dict 1 are present in dict 2, return false otherwise. + """ + if isinstance(struct1, list) and isinstance(struct2, list): + for item1 in struct1: + if isinstance(item1, (list, dict)): + for item2 in struct2: + if not is_struct_included(item1, item2, exclude): + return False + else: + if item1 not in struct2: + return False + return True + elif isinstance(struct1, dict) and isinstance(struct2, dict): + try: + for key in struct1: + if not (exclude and key in exclude): + if not is_struct_included(struct1[key], struct2[key], exclude): + return False + return True + except KeyError: + return False + elif isinstance(struct1, bool) and isinstance(struct2, bool): + return struct1 == struct2 + else: + return to_text(struct1, 'utf-8') == to_text(struct2, 'utf-8') + + class KeycloakAPI(object): """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which is obtained through OpenID connect @@ -571,3 +635,254 @@ class KeycloakAPI(object): except Exception as e: self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e))) + + def get_authentication_flow_by_alias(self, alias, realm='master'): + """ + Get an authentication flow by it's alias + :param alias: Alias of the authentication flow to get. + :param realm: Realm. + :return: Authentication flow representation. 
+ """ + try: + authentication_flow = {} + # Check if the authentication flow exists on the Keycloak serveraders + authentications = json.load(open_url(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET', headers=self.restheaders)) + for authentication in authentications: + if authentication["alias"] == alias: + authentication_flow = authentication + break + return authentication_flow + except Exception as e: + self.module.fail_json(msg="Unable get authentication flow %s: %s" % (alias, str(e))) + + def delete_authentication_flow_by_id(self, id, realm='master'): + """ + Delete an authentication flow from Keycloak + :param id: id of authentication flow to be deleted + :param realm: realm of client to be deleted + :return: HTTPResponse object on success + """ + flow_url = URL_AUTHENTICATION_FLOW.format(url=self.baseurl, realm=realm, id=id) + + try: + return open_url(flow_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not delete authentication flow %s in realm %s: %s' + % (id, realm, str(e))) + + def copy_auth_flow(self, config, realm='master'): + """ + Create a new authentication flow from a copy of another. + :param config: Representation of the authentication flow to create. + :param realm: Realm. + :return: Representation of the new authentication flow. 
+ """ + try: + new_name = dict( + newName=config["alias"] + ) + open_url( + URL_AUTHENTICATION_FLOW_COPY.format( + url=self.baseurl, + realm=realm, + copyfrom=quote(config["copyFrom"])), + method='POST', + headers=self.restheaders, + data=json.dumps(new_name)) + flow_list = json.load( + open_url( + URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, + realm=realm), + method='GET', + headers=self.restheaders)) + for flow in flow_list: + if flow["alias"] == config["alias"]: + return flow + return None + except Exception as e: + self.module.fail_json(msg='Could not copy authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) + + def create_empty_auth_flow(self, config, realm='master'): + """ + Create a new empty authentication flow. + :param config: Representation of the authentication flow to create. + :param realm: Realm. + :return: Representation of the new authentication flow. + """ + try: + new_flow = dict( + alias=config["alias"], + providerId=config["providerId"], + description=config["description"], + topLevel=True + ) + open_url( + URL_AUTHENTICATION_FLOWS.format( + url=self.baseurl, + realm=realm), + method='POST', + headers=self.restheaders, + data=json.dumps(new_flow)) + flow_list = json.load( + open_url( + URL_AUTHENTICATION_FLOWS.format( + url=self.baseurl, + realm=realm), + method='GET', + headers=self.restheaders)) + for flow in flow_list: + if flow["alias"] == config["alias"]: + return flow + return None + except Exception as e: + self.module.fail_json(msg='Could not create empty authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) + + def update_authentication_executions(self, flowAlias, updatedExec, realm='master'): + """ Update authentication executions + + :param flowAlias: name of the parent flow + :param updatedExec: JSON containing updated execution + :return: HTTPResponse object on success + """ + try: + open_url( + URL_AUTHENTICATION_FLOW_EXECUTIONS.format( + url=self.baseurl, + realm=realm, + 
flowalias=quote(flowAlias)), + method='PUT', + headers=self.restheaders, + data=json.dumps(updatedExec)) + except Exception as e: + self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e))) + + def add_authenticationConfig_to_execution(self, executionId, authenticationConfig, realm='master'): + """ Add autenticatorConfig to the execution + + :param executionId: id of execution + :param authenticationConfig: config to add to the execution + :return: HTTPResponse object on success + """ + try: + open_url( + URL_AUTHENTICATION_EXECUTION_CONFIG.format( + url=self.baseurl, + realm=realm, + id=executionId), + method='POST', + headers=self.restheaders, + data=json.dumps(authenticationConfig)) + except Exception as e: + self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e))) + + def create_subflow(self, subflowName, flowAlias, realm='master'): + """ Create new sublow on the flow + + :param subflowName: name of the subflow to create + :param flowAlias: name of the parent flow + :return: HTTPResponse object on success + """ + try: + newSubFlow = {} + newSubFlow["alias"] = subflowName + newSubFlow["provider"] = "registration-page-form" + newSubFlow["type"] = "basic-flow" + open_url( + URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format( + url=self.baseurl, + realm=realm, + flowalias=quote(flowAlias)), + method='POST', + headers=self.restheaders, + data=json.dumps(newSubFlow)) + except Exception as e: + self.module.fail_json(msg="Unable to create new subflow %s: %s" % (subflowName, str(e))) + + def create_execution(self, execution, flowAlias, realm='master'): + """ Create new execution on the flow + + :param execution: name of execution to create + :param flowAlias: name of the parent flow + :return: HTTPResponse object on success + """ + try: + newExec = {} + newExec["provider"] = execution["providerId"] + newExec["requirement"] = execution["requirement"] + open_url( + 
URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format( + url=self.baseurl, + realm=realm, + flowalias=quote(flowAlias)), + method='POST', + headers=self.restheaders, + data=json.dumps(newExec)) + except Exception as e: + self.module.fail_json(msg="Unable to create new execution %s: %s" % (execution["provider"], str(e))) + + def change_execution_priority(self, executionId, diff, realm='master'): + """ Raise or lower execution priority of diff time + + :param executionId: id of execution to lower priority + :param realm: realm the client is in + :param diff: Integer number, raise of diff time if positive lower of diff time if negative + :return: HTTPResponse object on success + """ + try: + if diff > 0: + for i in range(diff): + open_url( + URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY.format( + url=self.baseurl, + realm=realm, + id=executionId), + method='POST', + headers=self.restheaders) + elif diff < 0: + for i in range(-diff): + open_url( + URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY.format( + url=self.baseurl, + realm=realm, + id=executionId), + method='POST', + headers=self.restheaders) + except Exception as e: + self.module.fail_json(msg="Unable to change execution priority %s: %s" % (executionId, str(e))) + + def get_executions_representation(self, config, realm='master'): + """ + Get a representation of the executions for an authentication flow. 
+ :param config: Representation of the authentication flow + :param realm: Realm + :return: Representation of the executions + """ + try: + # Get executions created + executions = json.load( + open_url( + URL_AUTHENTICATION_FLOW_EXECUTIONS.format( + url=self.baseurl, + realm=realm, + flowalias=quote(config["alias"])), + method='GET', + headers=self.restheaders)) + for execution in executions: + if "authenticationConfig" in execution: + execConfigId = execution["authenticationConfig"] + execConfig = json.load( + open_url( + URL_AUTHENTICATION_CONFIG.format( + url=self.baseurl, + realm=realm, + id=execConfigId), + method='GET', + headers=self.restheaders)) + execution["authenticationConfig"] = execConfig + return executions + except Exception as e: + self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) diff --git a/plugins/modules/identity/keycloak/keycloak_authentication.py b/plugins/modules/identity/keycloak/keycloak_authentication.py new file mode 100644 index 0000000000..98b6378dac --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_authentication.py @@ -0,0 +1,383 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2019, INSPQ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_authentication +short_description: Configure authentication in Keycloak +description: + - This module actually can only make a copy of an existing authentication flow, add an execution to it and configure it. + - It can also delete the flow. +version_added: "3.3.0" +options: + realm: + description: + - The name of the realm in which is the authentication. + required: true + type: str + alias: + description: + - Alias for the authentication flow. 
+ required: true + type: str + description: + description: + - Description of the flow. + type: str + providerId: + description: + - C(providerId) for the new flow when not copied from an existing flow. + type: str + copyFrom: + description: + - C(flowAlias) of the authentication flow to use for the copy. + type: str + authenticationExecutions: + description: + - Configuration structure for the executions. + type: list + elements: dict + suboptions: + providerId: + description: + - C(providerID) for the new flow when not copied from an existing flow. + type: str + displayName: + description: + - Name of the execution or subflow to create or update. + type: str + requirement: + description: + - Control status of the subflow or execution. + choices: [ "REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL" ] + type: str + flowAlias: + description: + - Alias of parent flow. + type: str + authenticationConfig: + description: + - Describe the config of the authentication. + type: dict + index: + description: + - Priority order of the execution. + type: int + state: + description: + - Control if the authentication flow must exists or not. + choices: [ "present", "absent" ] + default: present + type: str + force: + type: bool + default: false + description: + - If C(true), allows to remove the authentication flow and recreate it. +extends_documentation_fragment: +- community.general.keycloak + +author: + - Philippe Gauthier (@elfelip) + - Gaëtan Daubresse (@Gaetan2907) +''' + +EXAMPLES = ''' + - name: Create an authentication flow from first broker login and add an execution to it. 
+ community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + copyFrom: "first broker login" + authenticationExecutions: + - providerId: "test-execution1" + requirement: "REQUIRED" + authenticationConfig: + alias: "test.execution1.property" + config: + test1.property: "value" + - providerId: "test-execution2" + requirement: "REQUIRED" + authenticationConfig: + alias: "test.execution2.property" + config: + test2.property: "value" + state: present + + - name: Re-create the authentication flow + community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + copyFrom: "first broker login" + authenticationExecutions: + - providerId: "test-provisioning" + requirement: "REQUIRED" + authenticationConfig: + alias: "test.provisioning.property" + config: + test.provisioning.property: "value" + state: present + force: true + + - name: Create an authentication flow with subflow containing an execution. + community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + copyFrom: "first broker login" + authenticationExecutions: + - providerId: "test-execution1" + requirement: "REQUIRED" + - displayName: "New Subflow" + requirement: "REQUIRED" + - providerId: "auth-cookie" + requirement: "REQUIRED" + flowAlias: "New Sublow" + state: present + + - name: Remove authentication. 
+ community.general.keycloak_authentication: + auth_keycloak_url: http://localhost:8080/auth + auth_realm: master + auth_username: admin + auth_password: password + realm: master + alias: "Copy of first broker login" + state: absent +''' + +RETURN = ''' +flow: + description: JSON representation for the authentication. + returned: on success + type: dict +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak \ + import KeycloakAPI, camel, keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible.module_utils.basic import AnsibleModule + + +def find_exec_in_executions(searched_exec, executions): + """ + Search if exec is contained in the executions. + :param searched_exec: Execution to search for. + :param executions: List of executions. + :return: Index of the execution, -1 if not found.. + """ + for i, existing_exec in enumerate(executions, start=0): + if ("providerId" in existing_exec and "providerId" in searched_exec and + existing_exec["providerId"] == searched_exec["providerId"] or + "displayName" in existing_exec and "displayName" in searched_exec and + existing_exec["displayName"] == searched_exec["displayName"]): + return i + return -1 + + +def create_or_update_executions(kc, config, realm='master'): + """ + Create or update executions for an authentication flow. + :param kc: Keycloak API access. + :param config: Representation of the authentication flow including it's executions. + :param realm: Realm + :return: True if executions have been modified. False otherwise. 
+ """ + try: + changed = False + if "authenticationExecutions" in config: + for new_exec_index, new_exec in enumerate(config["authenticationExecutions"], start=0): + if new_exec["index"] is not None: + new_exec_index = new_exec["index"] + # Get existing executions on the Keycloak server for this alias + existing_executions = kc.get_executions_representation(config, realm=realm) + exec_found = False + # Get flowalias parent if given + if new_exec["flowAlias"] is not None: + flow_alias_parent = new_exec["flowAlias"] + else: + flow_alias_parent = config["alias"] + # Check if same providerId or displayName name between existing and new execution + exec_index = find_exec_in_executions(new_exec, existing_executions) + if exec_index != -1: + # Remove key that doesn't need to be compared with existing_exec + exclude_key = ["flowAlias"] + for index_key, key in enumerate(new_exec, start=0): + if new_exec[key] is None: + exclude_key.append(key) + # Compare the executions to see if it need changes + if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index: + changed = True + elif new_exec["providerId"] is not None: + kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm) + changed = True + elif new_exec["displayName"] is not None: + kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm) + changed = True + if changed: + # Get existing executions on the Keycloak server for this alias + existing_executions = kc.get_executions_representation(config, realm=realm) + exec_index = find_exec_in_executions(new_exec, existing_executions) + if exec_index != -1: + # Update the existing execution + updated_exec = { + "id": existing_executions[exec_index]["id"] + } + # add the execution configuration + if new_exec["authenticationConfig"] is not None: + kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm) + for key in new_exec: + # remove 
unwanted key for the next API call + if key != "flowAlias" and key != "authenticationConfig": + updated_exec[key] = new_exec[key] + if new_exec["requirement"] is not None: + kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm) + diff = exec_index - new_exec_index + kc.change_execution_priority(updated_exec["id"], diff, realm=realm) + return changed + except Exception as e: + kc.module.fail_json(msg='Could not create or update executions for authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) + + +def main(): + """ + Module execution + :return: + """ + argument_spec = keycloak_argument_spec() + meta_args = dict( + realm=dict(type='str', required=True), + alias=dict(type='str', required=True), + providerId=dict(type='str'), + description=dict(type='str'), + copyFrom=dict(type='str'), + authenticationExecutions=dict(type='list', elements='dict', + options=dict( + providerId=dict(type='str'), + displayName=dict(type='str'), + requirement=dict(choices=["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"], type='str'), + flowAlias=dict(type='str'), + authenticationConfig=dict(type='dict'), + index=dict(type='int'), + )), + state=dict(choices=["absent", "present"], default='present'), + force=dict(type='bool', default=False), + ) + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']]) + ) + + result = dict(changed=False, msg='', flow={}) + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + realm = module.params.get('realm') + state = module.params.get('state') + force = module.params.get('force') + + new_auth_repr = { + "alias": module.params.get("alias"), 
+ "copyFrom": module.params.get("copyFrom"), + "providerId": module.params.get("providerId"), + "authenticationExecutions": module.params.get("authenticationExecutions"), + "description": module.params.get("description"), + "builtIn": module.params.get("builtIn"), + "subflow": module.params.get("subflow"), + } + + auth_repr = kc.get_authentication_flow_by_alias(alias=new_auth_repr["alias"], realm=realm) + if auth_repr == {}: # Authentication flow does not exist + if state == 'present': # If desired state is present + result['changed'] = True + if module._diff: + result['diff'] = dict(before='', after=new_auth_repr) + if module.check_mode: + module.exit_json(**result) + # If copyFrom is defined, create authentication flow from a copy + if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: + auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) + else: # Create an empty authentication flow + auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) + # If the authentication still not exist on the server, raise an exception. + if auth_repr is None: + result['msg'] = "Authentication just created not found: " + str(new_auth_repr) + module.fail_json(**result) + # Configure the executions for the flow + create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) + # Get executions created + exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) + if exec_repr is not None: + auth_repr["authenticationExecutions"] = exec_repr + result['flow'] = auth_repr + elif state == 'absent': # If desired state is absent. 
+ if module._diff: + result['diff'] = dict(before='', after='') + result['msg'] = new_auth_repr["alias"] + ' absent' + else: # The authentication flow already exist + if state == 'present': # if desired state is present + if force: # If force option is true + # Delete the actual authentication flow + result['changed'] = True + if module._diff: + result['diff'] = dict(before=auth_repr, after=new_auth_repr) + if module.check_mode: + module.exit_json(**result) + kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) + # If copyFrom is defined, create authentication flow from a copy + if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None: + auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm) + else: # Create an empty authentication flow + auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm) + # If the authentication still not exist on the server, raise an exception. + if auth_repr is None: + result['msg'] = "Authentication just created not found: " + str(new_auth_repr) + module.fail_json(**result) + # Configure the executions for the flow + if module.check_mode: + module.exit_json(**result) + if create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm): + result['changed'] = True + # Get executions created + exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) + if exec_repr is not None: + auth_repr["authenticationExecutions"] = exec_repr + result['flow'] = auth_repr + elif state == 'absent': # If desired state is absent + result['changed'] = True + # Delete the authentication flow alias. 
+ if module._diff: + result['diff'] = dict(before=auth_repr, after='') + if module.check_mode: + module.exit_json(**result) + kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm) + result['msg'] = 'Authentication flow: {alias} id: {id} is deleted'.format(alias=new_auth_repr['alias'], + id=auth_repr["id"]) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py new file mode 120000 index 0000000000..e27a180a01 --- /dev/null +++ b/plugins/modules/keycloak_authentication.py @@ -0,0 +1 @@ +./identity/keycloak/keycloak_authentication.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py new file mode 100644 index 0000000000..91e34eea7b --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py @@ -0,0 +1,622 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_authentication + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_authentication_flow_by_alias=None, copy_auth_flow=None, create_empty_auth_flow=None, + 
get_executions_representation=None, delete_authentication_flow_by_id=None): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + obj = keycloak_authentication.KeycloakAPI + with patch.object(obj, 'get_authentication_flow_by_alias', side_effect=get_authentication_flow_by_alias) \ + as mock_get_authentication_flow_by_alias: + with patch.object(obj, 'copy_auth_flow', side_effect=copy_auth_flow) \ + as mock_copy_auth_flow: + with patch.object(obj, 'create_empty_auth_flow', side_effect=create_empty_auth_flow) \ + as mock_create_empty_auth_flow: + with patch.object(obj, 'get_executions_representation', return_value=get_executions_representation) \ + as mock_get_executions_representation: + with patch.object(obj, 'delete_authentication_flow_by_id', side_effect=delete_authentication_flow_by_id) \ + as mock_delete_authentication_flow_by_id: + yield mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, \ + mock_get_executions_representation, mock_delete_authentication_flow_by_id + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def 
_mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakAuthentication(ModuleTestCase): + def setUp(self): + super(TestKeycloakAuthentication, self).setUp() + self.module = keycloak_authentication + + def test_create_auth_flow_from_copy(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'copyFrom': 'first broker login', + 'authenticationExecutions': [ + { + 'providerId': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + }, + ], + 'state': 'present', + } + return_value_auth_flow_before = [{}] + return_value_copied = [{ + 'id': '2ac059fc-c548-414f-9c9e-84d42bd4944e', + 'alias': 'first broker login', + 'description': 'browser based authentication', + 'providerId': 'basic-flow', + 'topLevel': True, + 'builtIn': False, + 'authenticationExecutions': [ + { + 'authenticator': 'auth-cookie', + 'requirement': 'ALTERNATIVE', + 'priority': 10, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + ], + }] 
+ return_value_executions_after = [ + { + 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Identity Provider Redirector', + 'requirementChoices': ['REQUIRED', 'DISABLED'], + 'configurable': True, + 'providerId': 'identity-provider-redirector', + 'level': 0, + 'index': 0 + }, + { + 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Cookie', + 'requirementChoices': [ + 'REQUIRED', + 'ALTERNATIVE', + 'DISABLED' + ], + 'configurable': False, + 'providerId': 'auth-cookie', + 'level': 0, + 'index': 1 + }, + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, copy_auth_flow=return_value_copied, + get_executions_representation=return_value_executions_after) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 1) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_auth_flow_from_copy_idempotency(self): + """Add an already existing authentication flow from copy of an other flow to test idempotency""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 
'realm-name', + 'alias': 'Test create authentication flow copy', + 'copyFrom': 'first broker login', + 'authenticationExecutions': [ + { + 'providerId': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + }, + ], + 'state': 'present', + } + return_value_auth_flow_before = [{ + 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', + 'alias': 'Test create authentication flow copy', + 'description': '', + 'providerId': 'basic-flow', + 'topLevel': True, + 'builtIn': False, + 'authenticationExecutions': [ + { + 'authenticator': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + 'priority': 0, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + { + 'authenticator': 'auth-cookie', + 'requirement': 'ALTERNATIVE', + 'priority': 0, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + ], + }] + return_value_executions_after = [ + { + 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Identity Provider Redirector', + 'requirementChoices': ['REQUIRED', 'DISABLED'], + 'configurable': True, + 'providerId': 'identity-provider-redirector', + 'level': 0, + 'index': 0 + }, + { + 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Cookie', + 'requirementChoices': [ + 'REQUIRED', + 'ALTERNATIVE', + 'DISABLED' + ], + 'configurable': False, + 'providerId': 'auth-cookie', + 'level': 0, + 'index': 1 + }, + ] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + 
self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_auth_flow_without_copy(self): + """Add authentication without copy""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'authenticationExecutions': [ + { + 'providerId': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + 'authenticationConfig': { + 'alias': 'name', + 'config': { + 'defaultProvider': 'value' + }, + }, + }, + ], + 'state': 'present', + } + return_value_auth_flow_before = [{}] + return_value_created_empty_flow = [ + { + "alias": "Test of the keycloak_auth module", + "authenticationExecutions": [], + "builtIn": False, + "description": "", + "id": "513f5baa-cc42-47bf-b4b6-1d23ccc0a67f", + "providerId": "basic-flow", + "topLevel": True + }, + ] + return_value_executions_after = [ + { + 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Identity Provider Redirector', + 'requirementChoices': ['REQUIRED', 'DISABLED'], + 'configurable': True, + 'providerId': 'identity-provider-redirector', + 'level': 0, + 'index': 0 + }, + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after, 
create_empty_auth_flow=return_value_created_empty_flow) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_update_auth_flow_adding_exec(self): + """Update authentication flow by adding execution""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'authenticationExecutions': [ + { + 'providerId': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + 'authenticationConfig': { + 'alias': 'name', + 'config': { + 'defaultProvider': 'value' + }, + }, + }, + ], + 'state': 'present', + } + return_value_auth_flow_before = [{ + 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', + 'alias': 'Test create authentication flow copy', + 'description': '', + 'providerId': 'basic-flow', + 'topLevel': True, + 'builtIn': False, + 'authenticationExecutions': [ + { + 'authenticator': 'auth-cookie', + 'requirement': 'ALTERNATIVE', + 'priority': 0, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + ], + }] + return_value_executions_after = [ + { + 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', + 'requirement': 'DISABLED', + 'displayName': 'Identity Provider 
Redirector', + 'requirementChoices': ['REQUIRED', 'DISABLED'], + 'configurable': True, + 'providerId': 'identity-provider-redirector', + 'level': 0, + 'index': 0 + }, + { + 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893', + 'requirement': 'ALTERNATIVE', + 'displayName': 'Cookie', + 'requirementChoices': [ + 'REQUIRED', + 'ALTERNATIVE', + 'DISABLED' + ], + 'configurable': False, + 'providerId': 'auth-cookie', + 'level': 0, + 'index': 1 + }, + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_auth_flow(self): + """Delete authentication flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'state': 'absent', + } + return_value_auth_flow_before = [{ + 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', + 'alias': 'Test create authentication flow copy', + 'description': '', + 'providerId': 'basic-flow', + 
'topLevel': True, + 'builtIn': False, + 'authenticationExecutions': [ + { + 'authenticator': 'auth-cookie', + 'requirement': 'ALTERNATIVE', + 'priority': 0, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + ], + }] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 0) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_auth_flow_idempotency(self): + """Delete second time authentication flow to test idempotency""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'state': 'absent', + } + return_value_auth_flow_before = [{}] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with 
self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 0) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_force_update_auth_flow(self): + """Delete authentication flow and create new one""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'alias': 'Test create authentication flow copy', + 'authenticationExecutions': [ + { + 'providerId': 'identity-provider-redirector', + 'requirement': 'ALTERNATIVE', + 'authenticationConfig': { + 'alias': 'name', + 'config': { + 'defaultProvider': 'value' + }, + }, + }, + ], + 'state': 'present', + 'force': 'yes', + } + return_value_auth_flow_before = [{ + 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4', + 'alias': 'Test create authentication flow copy', + 'description': '', + 'providerId': 'basic-flow', + 'topLevel': True, + 'builtIn': False, + 'authenticationExecutions': [ + { + 'authenticator': 'auth-cookie', + 'requirement': 'ALTERNATIVE', + 'priority': 0, + 'userSetupAllowed': False, + 'autheticatorFlow': False + }, + ], + }] + return_value_created_empty_flow = [ + { + "alias": "Test of the keycloak_auth module", + "authenticationExecutions": [], + "builtIn": False, + "description": "", + "id": "513f5baa-cc42-47bf-b4b6-1d23ccc0a67f", + "providerId": "basic-flow", + "topLevel": True + }, + ] + return_value_executions_after = [ + { + 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591', + 'requirement': 
'DISABLED', + 'displayName': 'Identity Provider Redirector', + 'requirementChoices': ['REQUIRED', 'DISABLED'], + 'configurable': True, + 'providerId': 'identity-provider-redirector', + 'level': 0, + 'index': 0 + }, + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, + get_executions_representation=return_value_executions_after, create_empty_auth_flow=return_value_created_empty_flow) \ + as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, + mock_get_executions_representation, mock_delete_authentication_flow_by_id): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) + self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) + self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 2d1f5408d3f1bc370502aebc39a3f18c0fc6715d Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 24 Jun 2021 22:33:29 +0200 Subject: [PATCH 0398/3093] Redis: slave -> replica (#2867) * Redis: slave -> replica * Fallback for old Redis versions in CI. 
--- .../fragments/2867-redis-terminology.yml | 2 + plugins/modules/database/misc/redis.py | 66 ++++++++++--------- .../targets/redis_info/defaults/main.yml | 2 +- .../targets/redis_info/tasks/main.yml | 6 +- .../setup_redis_replication/defaults/main.yml | 17 +++-- .../setup_redis_replication/handlers/main.yml | 10 +-- .../tasks/setup_redis_cluster.yml | 22 +++---- 7 files changed, 70 insertions(+), 55 deletions(-) create mode 100644 changelogs/fragments/2867-redis-terminology.yml diff --git a/changelogs/fragments/2867-redis-terminology.yml b/changelogs/fragments/2867-redis-terminology.yml new file mode 100644 index 0000000000..add76c0f91 --- /dev/null +++ b/changelogs/fragments/2867-redis-terminology.yml @@ -0,0 +1,2 @@ +minor_changes: +- "redis - allow to use the term ``replica`` instead of ``slave``, which has been the official Redis terminology since 2018 (https://github.com/ansible-collections/community.general/pull/2867)." diff --git a/plugins/modules/database/misc/redis.py b/plugins/modules/database/misc/redis.py index 5ffbd7db57..602aaf6c74 100644 --- a/plugins/modules/database/misc/redis.py +++ b/plugins/modules/database/misc/redis.py @@ -10,17 +10,17 @@ __metaclass__ = type DOCUMENTATION = ''' --- module: redis -short_description: Various redis commands, slave and flush +short_description: Various redis commands, replica and flush description: - Unified utility to interact with redis instances. options: command: description: - The selected redis command - - C(config) (new in 1.6), ensures a configuration setting on an instance. + - C(config) ensures a configuration setting on an instance. - C(flush) flushes all the instance or a specified db. - - C(slave) sets a redis instance in slave or master mode. - choices: [ config, flush, slave ] + - C(replica) sets a redis instance in replica or master mode. (C(slave) is an alias for C(replica).) 
+ choices: [ config, flush, replica, slave ] type: str login_password: description: @@ -38,18 +38,21 @@ options: type: int master_host: description: - - The host of the master instance [slave command] + - The host of the master instance [replica command] type: str master_port: description: - - The port of the master instance [slave command] + - The port of the master instance [replica command] type: int - slave_mode: + replica_mode: description: - - the mode of the redis instance [slave command] - default: slave - choices: [ master, slave ] + - The mode of the redis instance [replica command] + - C(slave) is an alias for C(replica). + default: replica + choices: [ master, replica, slave ] type: str + aliases: + - slave_mode db: description: - The database to flush (used in db mode) [flush command] @@ -76,7 +79,7 @@ notes: - Requires the redis-py Python package on the remote host. You can install it with pip (pip install redis) or with a package manager. https://github.com/andymccurdy/redis-py - - If the redis master instance we are making slave of is password protected + - If the redis master instance we are making replica of is password protected this needs to be in the redis.conf in the masterauth variable seealso: @@ -86,16 +89,16 @@ author: "Xabier Larrakoetxea (@slok)" ''' EXAMPLES = ''' -- name: Set local redis instance to be slave of melee.island on port 6377 +- name: Set local redis instance to be a replica of melee.island on port 6377 community.general.redis: - command: slave + command: replica master_host: melee.island master_port: 6377 -- name: Deactivate slave mode +- name: Deactivate replica mode community.general.redis: - command: slave - slave_mode: master + command: replica + replica_mode: master - name: Flush all the redis db community.general.redis: @@ -145,7 +148,7 @@ import re # Redis module specific support methods. 
-def set_slave_mode(client, master_host, master_port): +def set_replica_mode(client, master_host, master_port): try: return client.slaveof(master_host, master_port) except Exception: @@ -174,13 +177,13 @@ def flush(client, db=None): def main(): module = AnsibleModule( argument_spec=dict( - command=dict(type='str', choices=['config', 'flush', 'slave']), + command=dict(type='str', choices=['config', 'flush', 'replica', 'slave']), login_password=dict(type='str', no_log=True), login_host=dict(type='str', default='localhost'), login_port=dict(type='int', default=6379), master_host=dict(type='str'), master_port=dict(type='int'), - slave_mode=dict(type='str', default='slave', choices=['master', 'slave']), + replica_mode=dict(type='str', default='replica', choices=['master', 'replica', 'slave'], aliases=["slave_mode"]), db=dict(type='int'), flush_mode=dict(type='str', default='all', choices=['all', 'db']), name=dict(type='str'), @@ -196,20 +199,24 @@ def main(): login_host = module.params['login_host'] login_port = module.params['login_port'] command = module.params['command'] - - # Slave Command section ----------- if command == "slave": + command = "replica" + + # Replica Command section ----------- + if command == "replica": master_host = module.params['master_host'] master_port = module.params['master_port'] - mode = module.params['slave_mode'] + mode = module.params['replica_mode'] + if mode == "slave": + mode = "replica" # Check if we have all the data - if mode == "slave": # Only need data if we want to be slave + if mode == "replica": # Only need data if we want to be replica if not master_host: - module.fail_json(msg='In slave mode master host must be provided') + module.fail_json(msg='In replica mode master host must be provided') if not master_port: - module.fail_json(msg='In slave mode master port must be provided') + module.fail_json(msg='In replica mode master port must be provided') # Connect and check r = redis.StrictRedis(host=login_host, port=login_port, 
password=login_password) @@ -223,7 +230,7 @@ def main(): if mode == "master" and info["role"] == "master": module.exit_json(changed=False, mode=mode) - elif mode == "slave" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port: + elif mode == "replica" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port: status = dict( status=mode, master_host=master_host, @@ -234,9 +241,8 @@ def main(): # Do the stuff # (Check Check_mode before commands so the commands aren't evaluated # if not necessary) - if mode == "slave": - if module.check_mode or\ - set_slave_mode(r, master_host, master_port): + if mode == "replica": + if module.check_mode or set_replica_mode(r, master_host, master_port): info = r.info() status = { 'status': mode, @@ -245,7 +251,7 @@ def main(): } module.exit_json(changed=True, mode=status) else: - module.fail_json(msg='Unable to set slave mode') + module.fail_json(msg='Unable to set replica mode') else: if module.check_mode or set_master_mode(r): diff --git a/tests/integration/targets/redis_info/defaults/main.yml b/tests/integration/targets/redis_info/defaults/main.yml index 1352c55cc3..e1f03ee7ed 100644 --- a/tests/integration/targets/redis_info/defaults/main.yml +++ b/tests/integration/targets/redis_info/defaults/main.yml @@ -1,4 +1,4 @@ --- redis_password: PASS master_port: 6379 -slave_port: 6380 +replica_port: 6380 diff --git a/tests/integration/targets/redis_info/tasks/main.yml b/tests/integration/targets/redis_info/tasks/main.yml index d02775200c..dc76101157 100644 --- a/tests/integration/targets/redis_info/tasks/main.yml +++ b/tests/integration/targets/redis_info/tasks/main.yml @@ -33,9 +33,9 @@ - result.info.tcp_port == master_port - result.info.role == 'master' -- name: redis_info - connect to slave +- name: redis_info - connect to replica community.general.redis_info: - login_port: "{{ slave_port }}" + login_port: "{{ replica_port }}" 
login_password: "{{ redis_password }}" register: result @@ -43,5 +43,5 @@ that: - result is not changed - result.info is defined - - result.info.tcp_port == slave_port + - result.info.tcp_port == replica_port - result.info.role == 'slave' diff --git a/tests/integration/targets/setup_redis_replication/defaults/main.yml b/tests/integration/targets/setup_redis_replication/defaults/main.yml index bdbbbb2cac..5855519fc9 100644 --- a/tests/integration/targets/setup_redis_replication/defaults/main.yml +++ b/tests/integration/targets/setup_redis_replication/defaults/main.yml @@ -22,14 +22,21 @@ redis_module: "{{ (ansible_python_version is version('2.7', '>=')) | ternary('re redis_password: PASS +old_redis: >- + {{ + (ansible_distribution == 'CentOS' and ansible_distribution_major_version|int <= 7) or + (ansible_distribution == 'Ubuntu' and ansible_distribution_major_version|int <= 18) or + (ansible_os_family == 'FreeBSD' and ansible_distribution_major_version|int <= 12) + }} + # Master master_port: 6379 master_conf: /etc/redis-master.conf master_datadir: /var/lib/redis-master master_logdir: /var/log/redis-master -# Slave -slave_port: 6380 -slave_conf: /etc/redis-slave.conf -slave_datadir: /var/lib/redis-slave -slave_logdir: /var/log/redis-slave +# Replica +replica_port: 6380 +replica_conf: /etc/redis-replica.conf +replica_datadir: /var/lib/redis-replica +replica_logdir: /var/log/redis-replica diff --git a/tests/integration/targets/setup_redis_replication/handlers/main.yml b/tests/integration/targets/setup_redis_replication/handlers/main.yml index d4d535cdf7..1b3cd57912 100644 --- a/tests/integration/targets/setup_redis_replication/handlers/main.yml +++ b/tests/integration/targets/setup_redis_replication/handlers/main.yml @@ -1,7 +1,7 @@ - name: stop redis services shell: | kill -TERM $(cat /var/run/redis_{{ master_port }}.pid) - kill -TERM $(cat /var/run/redis_{{ slave_port }}.pid) + kill -TERM $(cat /var/run/redis_{{ replica_port }}.pid) listen: cleanup redis - name: 
remove redis packages @@ -27,8 +27,8 @@ - "{{ master_datadir }}" - "{{ master_logdir }}" - /var/run/redis_{{ master_port }}.pid - - "{{ slave_conf }}" - - "{{ slave_datadir }}" - - "{{ slave_logdir }}" - - /var/run/redis_{{ slave_port }}.pid + - "{{ replica_conf }}" + - "{{ replica_datadir }}" + - "{{ replica_logdir }}" + - /var/run/redis_{{ replica_port }}.pid listen: cleanup redis diff --git a/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml b/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml index 2445ba242a..03cb9da6ab 100644 --- a/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml +++ b/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml @@ -1,5 +1,5 @@ # We run two servers listening different ports -# to be able to check replication (one server for master, another for slave). +# to be able to check replication (one server for master, another for replica). - name: Install redis server apt dependencies apt: @@ -56,8 +56,8 @@ loop: - "{{ master_datadir }}" - "{{ master_logdir }}" - - "{{ slave_datadir }}" - - "{{ slave_logdir }}" + - "{{ replica_datadir }}" + - "{{ replica_logdir }}" - name: Create redis configs copy: @@ -75,16 +75,16 @@ port: "{{ master_port }}" logdir: "{{ master_logdir }}" datadir: "{{ master_datadir }}" - - file: "{{ slave_conf }}" - port: "{{ slave_port }}" - logdir: "{{ slave_logdir }}" - datadir: "{{ slave_datadir }}" + - file: "{{ replica_conf }}" + port: "{{ replica_port }}" + logdir: "{{ replica_logdir }}" + datadir: "{{ replica_datadir }}" - name: Start redis master shell: "{{ redis_bin[ansible_distribution] }} {{ master_conf }}" -- name: Start redis slave - shell: "{{ redis_bin[ansible_distribution] }} {{ slave_conf }} --slaveof 127.0.0.1 {{ master_port }}" +- name: Start redis replica + shell: "{{ redis_bin[ansible_distribution] }} {{ replica_conf }} --{% if old_redis %}slaveof{% else %}replicaof{% endif %} 127.0.0.1 
{{ master_port }}" - name: Wait for redis master to be started ansible.builtin.wait_for: @@ -95,10 +95,10 @@ connect_timeout: 5 timeout: 30 -- name: Wait for redis slave to be started +- name: Wait for redis replica to be started ansible.builtin.wait_for: host: 127.0.0.1 - port: "{{ slave_port }}" + port: "{{ replica_port }}" state: started delay: 1 connect_timeout: 5 From d2a984ded1b9f59ab5ac1c8588d82ee53ed97af2 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 24 Jun 2021 22:57:40 +0200 Subject: [PATCH 0399/3093] Adjust example to remove unnecessary offensive language. (#2869) --- plugins/modules/cloud/softlayer/sl_vm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/modules/cloud/softlayer/sl_vm.py b/plugins/modules/cloud/softlayer/sl_vm.py index c8db13d815..825d82e173 100644 --- a/plugins/modules/cloud/softlayer/sl_vm.py +++ b/plugins/modules/cloud/softlayer/sl_vm.py @@ -217,7 +217,7 @@ EXAMPLES = ''' datacenter: dal09 tags: - ansible-module-test - - ansible-module-test-slaves + - ansible-module-test-replicas hourly: yes private: no dedicated: no @@ -235,7 +235,7 @@ EXAMPLES = ''' datacenter: dal09 tags: - ansible-module-test - - ansible-module-test-slaves + - ansible-module-test-replicas hourly: yes private: no dedicated: no From d180390dbc99e9cfd0cffaee3edb6e9d8eee406c Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sat, 26 Jun 2021 07:27:41 -0400 Subject: [PATCH 0400/3093] modprobe - fix task status when module cannot be loaded (#2843) * Initial Commit * Adding changelog fragment * Ensured params are present during verbose output and enhanced check_mode * Making specific to builtins * Removing unneccessary external call * Acutal bugfix --- .../2843-modprobe-failure-conditions.yml | 3 + plugins/modules/system/modprobe.py | 139 ++++++++------ .../plugins/modules/system/test_modprobe.py | 174 ++++++++++++++++++ 3 files changed, 263 insertions(+), 53 deletions(-) create mode 100644 
changelogs/fragments/2843-modprobe-failure-conditions.yml create mode 100644 tests/unit/plugins/modules/system/test_modprobe.py diff --git a/changelogs/fragments/2843-modprobe-failure-conditions.yml b/changelogs/fragments/2843-modprobe-failure-conditions.yml new file mode 100644 index 0000000000..78ee5ce1e9 --- /dev/null +++ b/changelogs/fragments/2843-modprobe-failure-conditions.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - modprobe - added additional checks to ensure module load/unload is effective (https://github.com/ansible-collections/community.general/issues/1608). diff --git a/plugins/modules/system/modprobe.py b/plugins/modules/system/modprobe.py index 0ab7523537..07f7cd8cc3 100644 --- a/plugins/modules/system/modprobe.py +++ b/plugins/modules/system/modprobe.py @@ -50,11 +50,90 @@ EXAMPLES = ''' ''' import os.path +import platform import shlex import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native + +RELEASE_VER = platform.release() + + +class Modprobe(object): + def __init__(self, module): + self.module = module + self.modprobe_bin = module.get_bin_path('modprobe', True) + + self.check_mode = module.check_mode + self.desired_state = module.params['state'] + self.name = module.params['name'] + self.params = module.params['params'] + + self.changed = False + + def load_module(self): + command = [self.modprobe_bin] + if self.check_mode: + command.append('-n') + command.extend([self.name] + shlex.split(self.params)) + + rc, out, err = self.module.run_command(command) + + if rc != 0: + return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result) + + if self.check_mode or self.module_loaded(): + self.changed = True + else: + rc, stdout, stderr = self.module.run_command( + [self.modprobe_bin, '-n', '--first-time', self.name] + shlex.split(self.params) + ) + if rc != 0: + self.module.warn(stderr) + + def 
module_loaded(self): + is_loaded = False + try: + with open('/proc/modules') as modules: + module_name = self.name.replace('-', '_') + ' ' + for line in modules: + if line.startswith(module_name): + is_loaded = True + break + + if not is_loaded: + module_file = '/' + self.name + '.ko' + builtin_path = os.path.join('/lib/modules/', RELEASE_VER, 'modules.builtin') + with open(builtin_path) as builtins: + for line in builtins: + if line.rstrip().endswith(module_file): + is_loaded = True + break + except (IOError, OSError) as e: + self.module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **self.result) + + return is_loaded + + def unload_module(self): + command = [self.modprobe_bin, '-r', self.name] + if self.check_mode: + command.append('-n') + + rc, out, err = self.module.run_command(command) + if rc != 0: + return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result) + + self.changed = True + + @property + def result(self): + return { + 'changed': self.changed, + 'name': self.name, + 'params': self.params, + 'state': self.desired_state, + } def main(): @@ -67,60 +146,14 @@ def main(): supports_check_mode=True, ) - name = module.params['name'] - params = module.params['params'] - state = module.params['state'] + modprobe = Modprobe(module) - # FIXME: Adding all parameters as result values is useless - result = dict( - changed=False, - name=name, - params=params, - state=state, - ) + if modprobe.desired_state == 'present' and not modprobe.module_loaded(): + modprobe.load_module() + elif modprobe.desired_state == 'absent' and modprobe.module_loaded(): + modprobe.unload_module() - # Check if module is present - try: - present = False - with open('/proc/modules') as modules: - module_name = name.replace('-', '_') + ' ' - for line in modules: - if line.startswith(module_name): - present = True - break - if not present: - command = [module.get_bin_path('uname', True), '-r'] - rc, uname_kernel_release, err = module.run_command(command) 
- module_file = '/' + name + '.ko' - builtin_path = os.path.join('/lib/modules/', uname_kernel_release.strip(), - 'modules.builtin') - with open(builtin_path) as builtins: - for line in builtins: - if line.endswith(module_file): - present = True - break - except IOError as e: - module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **result) - - # Add/remove module as needed - if state == 'present': - if not present: - if not module.check_mode: - command = [module.get_bin_path('modprobe', True), name] - command.extend(shlex.split(params)) - rc, out, err = module.run_command(command) - if rc != 0: - module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result) - result['changed'] = True - elif state == 'absent': - if present: - if not module.check_mode: - rc, out, err = module.run_command([module.get_bin_path('modprobe', True), '-r', name]) - if rc != 0: - module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result) - result['changed'] = True - - module.exit_json(**result) + module.exit_json(**modprobe.result) if __name__ == '__main__': diff --git a/tests/unit/plugins/modules/system/test_modprobe.py b/tests/unit/plugins/modules/system/test_modprobe.py new file mode 100644 index 0000000000..6f2c6b3d19 --- /dev/null +++ b/tests/unit/plugins/modules/system/test_modprobe.py @@ -0,0 +1,174 @@ +# -*- coding: utf-8 -*- +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args +from ansible_collections.community.general.tests.unit.compat.mock import patch +from ansible_collections.community.general.tests.unit.compat.mock import Mock +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.modules.system.modprobe import Modprobe + + +class 
TestLoadModule(ModuleTestCase): + def setUp(self): + super(TestLoadModule, self).setUp() + + self.mock_module_loaded = patch( + 'ansible_collections.community.general.plugins.modules.system.modprobe.Modprobe.module_loaded' + ) + self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') + self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + + self.module_loaded = self.mock_module_loaded.start() + self.run_command = self.mock_run_command.start() + self.get_bin_path = self.mock_get_bin_path.start() + + def tearDown(self): + """Teardown.""" + super(TestLoadModule, self).tearDown() + self.mock_module_loaded.stop() + self.mock_run_command.stop() + self.mock_get_bin_path.stop() + + def test_load_module_success(self): + set_module_args(dict( + name='test', + state='present', + )) + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + ), + supports_check_mode=True, + ) + + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [True] + self.run_command.side_effect = [(0, '', '')] + + modprobe = Modprobe(module) + modprobe.load_module() + + assert modprobe.result == { + 'changed': True, + 'name': 'test', + 'params': '', + 'state': 'present', + } + + def test_load_module_unchanged(self): + set_module_args(dict( + name='test', + state='present', + )) + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + ), + supports_check_mode=True, + ) + + module.warn = Mock() + + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [False] + self.run_command.side_effect = [(0, '', ''), (1, '', '')] + + modprobe = Modprobe(module) + modprobe.load_module() + + 
module.warn.assert_called_once_with('') + + +class TestUnloadModule(ModuleTestCase): + def setUp(self): + super(TestUnloadModule, self).setUp() + + self.mock_module_loaded = patch( + 'ansible_collections.community.general.plugins.modules.system.modprobe.Modprobe.module_loaded' + ) + self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') + self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + + self.module_loaded = self.mock_module_loaded.start() + self.run_command = self.mock_run_command.start() + self.get_bin_path = self.mock_get_bin_path.start() + + def tearDown(self): + """Teardown.""" + super(TestUnloadModule, self).tearDown() + self.mock_module_loaded.stop() + self.mock_run_command.stop() + self.mock_get_bin_path.stop() + + def test_unload_module_success(self): + set_module_args(dict( + name='test', + state='absent', + )) + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + ), + supports_check_mode=True, + ) + + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [False] + self.run_command.side_effect = [(0, '', '')] + + modprobe = Modprobe(module) + modprobe.unload_module() + + assert modprobe.result == { + 'changed': True, + 'name': 'test', + 'params': '', + 'state': 'absent', + } + + def test_unload_module_failure(self): + set_module_args(dict( + name='test', + state='absent', + )) + + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['absent', 'present']), + params=dict(type='str', default=''), + ), + supports_check_mode=True, + ) + + module.fail_json = Mock() + + self.get_bin_path.side_effect = ['modprobe'] + self.module_loaded.side_effect = [True] + self.run_command.side_effect = [(1, '', '')] + + modprobe = 
Modprobe(module) + modprobe.unload_module() + + dummy_result = { + 'changed': False, + 'name': 'test', + 'state': 'absent', + 'params': '', + } + + module.fail_json.assert_called_once_with( + msg='', rc=1, stdout='', stderr='', **dummy_result + ) From fafabed9e6acc6bd49ce6e9bf266ee27f686aebe Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 26 Jun 2021 23:59:11 +0200 Subject: [PATCH 0401/3093] Replace ansible.module_utils._text by ansible.module_utils.common.text.converters (#2877) * Replace ansible.module_utils._text by ansible.module_utils.common.text.converters. * Also adjust tests. --- changelogs/fragments/ansible-core-_text.yml | 2 ++ plugins/action/system/shutdown.py | 2 +- plugins/become/doas.py | 2 +- plugins/become/ksu.py | 2 +- plugins/cache/redis.py | 2 +- plugins/callback/diy.py | 2 +- plugins/callback/log_plays.py | 2 +- plugins/callback/logentries.py | 2 +- plugins/callback/mail.py | 2 +- plugins/callback/selective.py | 2 +- plugins/callback/slack.py | 2 +- plugins/callback/unixy.py | 2 +- plugins/callback/yaml.py | 2 +- plugins/connection/chroot.py | 2 +- plugins/connection/iocage.py | 2 +- plugins/connection/jail.py | 2 +- plugins/connection/lxc.py | 2 +- plugins/connection/lxd.py | 2 +- plugins/connection/qubes.py | 2 +- plugins/connection/zone.py | 2 +- plugins/filter/from_csv.py | 2 +- plugins/inventory/cobbler.py | 2 +- plugins/inventory/gitlab_runners.py | 2 +- plugins/inventory/lxd.py | 2 +- plugins/inventory/nmap.py | 2 +- plugins/inventory/online.py | 2 +- plugins/inventory/scaleway.py | 2 +- plugins/inventory/virtualbox.py | 2 +- plugins/lookup/consul_kv.py | 2 +- plugins/lookup/cyberarkpassword.py | 2 +- plugins/lookup/dig.py | 2 +- plugins/lookup/dnstxt.py | 2 +- plugins/lookup/etcd3.py | 2 +- plugins/lookup/filetree.py | 2 +- plugins/lookup/hiera.py | 2 +- plugins/lookup/lastpass.py | 2 +- plugins/lookup/lmdb_kv.py | 2 +- plugins/lookup/nios_next_ip.py | 2 +- plugins/lookup/nios_next_network.py | 2 +- plugins/lookup/onepassword.py 
| 2 +- plugins/lookup/passwordstore.py | 2 +- plugins/lookup/random_string.py | 2 +- plugins/lookup/redis.py | 2 +- plugins/lookup/shelvefile.py | 2 +- plugins/module_utils/_netapp.py | 2 +- plugins/module_utils/csv.py | 2 +- plugins/module_utils/gandi_livedns_api.py | 2 +- plugins/module_utils/gitlab.py | 2 +- plugins/module_utils/hwc_utils.py | 2 +- plugins/module_utils/ibm_sa_utils.py | 2 +- plugins/module_utils/identity/keycloak/keycloak.py | 2 +- plugins/module_utils/ipa.py | 2 +- plugins/module_utils/ldap.py | 2 +- plugins/module_utils/lxd.py | 2 +- plugins/module_utils/net_tools/nios/api.py | 4 ++-- plugins/module_utils/oneview.py | 2 +- plugins/module_utils/oracle/oci_utils.py | 2 +- plugins/module_utils/redfish_utils.py | 4 ++-- plugins/module_utils/source_control/bitbucket.py | 2 +- plugins/module_utils/utm_utils.py | 2 +- plugins/module_utils/vexata.py | 2 +- plugins/modules/cloud/atomic/atomic_container.py | 2 +- plugins/modules/cloud/atomic/atomic_host.py | 2 +- plugins/modules/cloud/atomic/atomic_image.py | 2 +- plugins/modules/cloud/dimensiondata/dimensiondata_network.py | 2 +- plugins/modules/cloud/lxc/lxc_container.py | 2 +- plugins/modules/cloud/misc/cloud_init_data_facts.py | 2 +- plugins/modules/cloud/misc/proxmox.py | 2 +- plugins/modules/cloud/misc/proxmox_kvm.py | 2 +- plugins/modules/cloud/misc/proxmox_snap.py | 2 +- plugins/modules/cloud/packet/packet_device.py | 2 +- plugins/modules/cloud/packet/packet_ip_subnet.py | 2 +- plugins/modules/cloud/packet/packet_project.py | 2 +- plugins/modules/cloud/packet/packet_volume.py | 2 +- plugins/modules/cloud/packet/packet_volume_attachment.py | 2 +- plugins/modules/cloud/profitbricks/profitbricks.py | 2 +- plugins/modules/cloud/profitbricks/profitbricks_volume.py | 2 +- plugins/modules/cloud/pubnub/pubnub_blocks.py | 2 +- plugins/modules/cloud/rackspace/rax_cdb_user.py | 2 +- .../modules/cloud/scaleway/scaleway_security_group_rule.py | 2 +- plugins/modules/cloud/smartos/vmadm.py | 2 +- 
plugins/modules/clustering/consul/consul_kv.py | 2 +- plugins/modules/clustering/etcd3.py | 2 +- plugins/modules/clustering/nomad/nomad_job.py | 2 +- plugins/modules/clustering/nomad/nomad_job_info.py | 2 +- plugins/modules/clustering/znode.py | 2 +- plugins/modules/database/influxdb/influxdb_query.py | 2 +- .../modules/database/influxdb/influxdb_retention_policy.py | 2 +- plugins/modules/database/influxdb/influxdb_user.py | 2 +- plugins/modules/database/influxdb/influxdb_write.py | 2 +- plugins/modules/database/misc/odbc.py | 2 +- plugins/modules/database/misc/redis.py | 2 +- plugins/modules/database/misc/redis_info.py | 2 +- plugins/modules/database/saphana/hana_query.py | 2 +- plugins/modules/database/vertica/vertica_configuration.py | 2 +- plugins/modules/database/vertica/vertica_info.py | 2 +- plugins/modules/database/vertica/vertica_role.py | 2 +- plugins/modules/database/vertica/vertica_schema.py | 2 +- plugins/modules/database/vertica/vertica_user.py | 2 +- plugins/modules/files/filesize.py | 2 +- plugins/modules/files/iso_create.py | 2 +- plugins/modules/files/read_csv.py | 2 +- plugins/modules/files/sapcar_extract.py | 2 +- plugins/modules/files/xattr.py | 2 +- plugins/modules/files/xml.py | 2 +- plugins/modules/identity/ipa/ipa_config.py | 2 +- plugins/modules/identity/ipa/ipa_dnsrecord.py | 2 +- plugins/modules/identity/ipa/ipa_dnszone.py | 2 +- plugins/modules/identity/ipa/ipa_group.py | 2 +- plugins/modules/identity/ipa/ipa_hbacrule.py | 2 +- plugins/modules/identity/ipa/ipa_host.py | 2 +- plugins/modules/identity/ipa/ipa_hostgroup.py | 2 +- plugins/modules/identity/ipa/ipa_otpconfig.py | 2 +- plugins/modules/identity/ipa/ipa_otptoken.py | 2 +- plugins/modules/identity/ipa/ipa_pwpolicy.py | 2 +- plugins/modules/identity/ipa/ipa_role.py | 2 +- plugins/modules/identity/ipa/ipa_service.py | 2 +- plugins/modules/identity/ipa/ipa_subca.py | 2 +- plugins/modules/identity/ipa/ipa_sudocmd.py | 2 +- plugins/modules/identity/ipa/ipa_sudocmdgroup.py | 2 +- 
plugins/modules/identity/ipa/ipa_sudorule.py | 2 +- plugins/modules/identity/ipa/ipa_user.py | 2 +- plugins/modules/identity/ipa/ipa_vault.py | 2 +- plugins/modules/identity/onepassword_info.py | 2 +- plugins/modules/monitoring/bigpanda.py | 2 +- plugins/modules/monitoring/circonus_annotation.py | 2 +- plugins/modules/monitoring/datadog/datadog_event.py | 2 +- plugins/modules/monitoring/datadog/datadog_monitor.py | 2 +- plugins/modules/monitoring/honeybadger_deployment.py | 2 +- plugins/modules/monitoring/rollbar_deployment.py | 2 +- plugins/modules/monitoring/sensu/sensu_check.py | 2 +- plugins/modules/monitoring/sensu/sensu_silence.py | 2 +- plugins/modules/monitoring/sensu/sensu_subscription.py | 2 +- plugins/modules/monitoring/spectrum_model_attrs.py | 2 +- plugins/modules/monitoring/stackdriver.py | 2 +- plugins/modules/monitoring/statusio_maintenance.py | 2 +- plugins/modules/monitoring/uptimerobot.py | 2 +- plugins/modules/net_tools/cloudflare_dns.py | 2 +- plugins/modules/net_tools/haproxy.py | 2 +- plugins/modules/net_tools/ip_netns.py | 2 +- plugins/modules/net_tools/ipify_facts.py | 2 +- plugins/modules/net_tools/ldap/ldap_attrs.py | 2 +- plugins/modules/net_tools/ldap/ldap_entry.py | 2 +- plugins/modules/net_tools/ldap/ldap_search.py | 2 +- plugins/modules/net_tools/nmcli.py | 2 +- plugins/modules/net_tools/nsupdate.py | 2 +- plugins/modules/net_tools/omapi_host.py | 2 +- plugins/modules/net_tools/pritunl/pritunl_org.py | 2 +- plugins/modules/net_tools/pritunl/pritunl_org_info.py | 2 +- plugins/modules/net_tools/pritunl/pritunl_user.py | 2 +- plugins/modules/net_tools/pritunl/pritunl_user_info.py | 2 +- plugins/modules/net_tools/snmp_facts.py | 2 +- plugins/modules/notification/hipchat.py | 2 +- plugins/modules/notification/irc.py | 2 +- plugins/modules/notification/jabber.py | 2 +- plugins/modules/notification/mail.py | 2 +- plugins/modules/notification/mqtt.py | 2 +- plugins/modules/notification/sendgrid.py | 2 +- 
plugins/modules/notification/syslogger.py | 2 +- plugins/modules/packaging/language/maven_artifact.py | 2 +- plugins/modules/packaging/language/npm.py | 2 +- plugins/modules/packaging/language/pear.py | 2 +- plugins/modules/packaging/language/pip_package_info.py | 2 +- plugins/modules/packaging/os/flatpak_remote.py | 2 +- plugins/modules/packaging/os/homebrew_cask.py | 2 +- plugins/modules/packaging/os/mas.py | 2 +- plugins/modules/packaging/os/pacman_key.py | 2 +- plugins/modules/packaging/os/portage.py | 2 +- plugins/modules/packaging/os/redhat_subscription.py | 2 +- plugins/modules/packaging/os/rhn_channel.py | 2 +- plugins/modules/packaging/os/yum_versionlock.py | 2 +- plugins/modules/packaging/os/zypper.py | 2 +- plugins/modules/remote_management/cobbler/cobbler_sync.py | 2 +- plugins/modules/remote_management/cobbler/cobbler_system.py | 2 +- plugins/modules/remote_management/hpilo/hpilo_info.py | 2 +- .../remote_management/lenovoxcc/xcc_redfish_command.py | 2 +- .../remote_management/redfish/idrac_redfish_command.py | 2 +- .../modules/remote_management/redfish/idrac_redfish_config.py | 2 +- .../modules/remote_management/redfish/idrac_redfish_info.py | 2 +- plugins/modules/remote_management/redfish/redfish_command.py | 2 +- plugins/modules/remote_management/redfish/redfish_config.py | 2 +- plugins/modules/remote_management/wakeonlan.py | 2 +- plugins/modules/source_control/github/github_release.py | 2 +- plugins/modules/source_control/github/github_webhook.py | 2 +- plugins/modules/source_control/github/github_webhook_info.py | 2 +- plugins/modules/source_control/gitlab/gitlab_deploy_key.py | 2 +- plugins/modules/source_control/gitlab/gitlab_group.py | 2 +- plugins/modules/source_control/gitlab/gitlab_hook.py | 2 +- plugins/modules/source_control/gitlab/gitlab_project.py | 2 +- .../modules/source_control/gitlab/gitlab_project_variable.py | 2 +- plugins/modules/source_control/gitlab/gitlab_runner.py | 2 +- plugins/modules/source_control/gitlab/gitlab_user.py | 
2 +- plugins/modules/source_control/hg.py | 2 +- plugins/modules/storage/emc/emc_vnx_sg_member.py | 2 +- plugins/modules/system/crypttab.py | 2 +- plugins/modules/system/dpkg_divert.py | 2 +- plugins/modules/system/filesystem.py | 2 +- plugins/modules/system/interfaces_file.py | 2 +- plugins/modules/system/iptables_state.py | 2 +- plugins/modules/system/launchd.py | 2 +- plugins/modules/system/listen_ports_facts.py | 2 +- plugins/modules/system/locale_gen.py | 2 +- plugins/modules/system/nosh.py | 2 +- plugins/modules/system/openwrt_init.py | 2 +- plugins/modules/system/pam_limits.py | 2 +- plugins/modules/system/runit.py | 2 +- plugins/modules/system/sefcontext.py | 2 +- plugins/modules/system/selinux_permissive.py | 2 +- plugins/modules/system/selogin.py | 2 +- plugins/modules/system/seport.py | 2 +- plugins/modules/system/ssh_config.py | 2 +- plugins/modules/system/svc.py | 2 +- plugins/modules/web_infrastructure/deploy_helper.py | 2 +- plugins/modules/web_infrastructure/htpasswd.py | 2 +- plugins/modules/web_infrastructure/jenkins_build.py | 2 +- plugins/modules/web_infrastructure/jenkins_job.py | 2 +- plugins/modules/web_infrastructure/jenkins_job_info.py | 2 +- plugins/modules/web_infrastructure/jenkins_plugin.py | 2 +- plugins/modules/web_infrastructure/jenkins_script.py | 2 +- plugins/modules/web_infrastructure/jira.py | 2 +- plugins/modules/web_infrastructure/nginx_status_info.py | 2 +- plugins/modules/web_infrastructure/rundeck_acl_policy.py | 2 +- plugins/modules/web_infrastructure/rundeck_project.py | 2 +- .../modules/web_infrastructure/sophos_utm/utm_aaa_group.py | 2 +- .../web_infrastructure/sophos_utm/utm_aaa_group_info.py | 2 +- .../web_infrastructure/sophos_utm/utm_ca_host_key_cert.py | 2 +- .../sophos_utm/utm_ca_host_key_cert_info.py | 2 +- plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py | 2 +- .../sophos_utm/utm_network_interface_address.py | 2 +- .../sophos_utm/utm_network_interface_address_info.py | 2 +- 
.../web_infrastructure/sophos_utm/utm_proxy_auth_profile.py | 2 +- .../web_infrastructure/sophos_utm/utm_proxy_exception.py | 2 +- .../web_infrastructure/sophos_utm/utm_proxy_frontend.py | 2 +- .../web_infrastructure/sophos_utm/utm_proxy_frontend_info.py | 2 +- .../web_infrastructure/sophos_utm/utm_proxy_location.py | 2 +- .../web_infrastructure/sophos_utm/utm_proxy_location_info.py | 2 +- plugins/modules/web_infrastructure/taiga_issue.py | 2 +- tests/unit/mock/loader.py | 2 +- tests/unit/mock/procenv.py | 2 +- tests/unit/mock/vault_helper.py | 2 +- tests/unit/plugins/module_utils/conftest.py | 2 +- tests/unit/plugins/modules/conftest.py | 2 +- .../plugins/modules/monitoring/test_circonus_annotation.py | 2 +- tests/unit/plugins/modules/net_tools/test_nmcli.py | 2 +- tests/unit/plugins/modules/packaging/os/test_rhn_register.py | 2 +- .../remote_management/lenovoxcc/test_xcc_redfish_command.py | 2 +- tests/unit/plugins/modules/system/test_ufw.py | 2 +- tests/unit/plugins/modules/utils.py | 2 +- .../plugins/modules/web_infrastructure/test_jenkins_build.py | 2 +- 249 files changed, 252 insertions(+), 250 deletions(-) create mode 100644 changelogs/fragments/ansible-core-_text.yml diff --git a/changelogs/fragments/ansible-core-_text.yml b/changelogs/fragments/ansible-core-_text.yml new file mode 100644 index 0000000000..fae6391582 --- /dev/null +++ b/changelogs/fragments/ansible-core-_text.yml @@ -0,0 +1,2 @@ +minor_changes: +- "Avoid internal ansible-core module_utils in favor of equivalent public API available since at least Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/2877)." 
diff --git a/plugins/action/system/shutdown.py b/plugins/action/system/shutdown.py index e36397ffe7..953b73778b 100644 --- a/plugins/action/system/shutdown.py +++ b/plugins/action/system/shutdown.py @@ -7,7 +7,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.errors import AnsibleError, AnsibleConnectionFailure -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.common.collections import is_string from ansible.plugins.action import ActionBase from ansible.utils.display import Display diff --git a/plugins/become/doas.py b/plugins/become/doas.py index ec660bb763..431e33cd6d 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -81,7 +81,7 @@ DOCUMENTATION = ''' import re -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.become import BecomeBase diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py index dad2663639..f5600c1d70 100644 --- a/plugins/become/ksu.py +++ b/plugins/become/ksu.py @@ -82,7 +82,7 @@ DOCUMENTATION = ''' import re -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.become import BecomeBase diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 6af7c731e4..20616096ae 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -67,7 +67,7 @@ import json from ansible import constants as C from ansible.errors import AnsibleError -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder from ansible.plugins.cache import BaseCacheModule from ansible.release import __version__ as ansible_base_version diff --git a/plugins/callback/diy.py 
b/plugins/callback/diy.py index dfed68b791..b288ee4b97 100644 --- a/plugins/callback/diy.py +++ b/plugins/callback/diy.py @@ -792,7 +792,7 @@ from ansible.utils.color import colorize, hostcolor from ansible.template import Templar from ansible.vars.manager import VariableManager from ansible.plugins.callback.default import CallbackModule as Default -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class DummyStdout(object): diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index 7383313482..df3482f483 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -31,7 +31,7 @@ import time import json from ansible.utils.path import makedirs_safe -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.common._collections_compat import MutableMapping from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index e4a8b51e79..d78bff331c 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -111,7 +111,7 @@ try: except ImportError: HAS_FLATDICT = False -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.plugins.callback import CallbackBase # Todo: diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py index 6964528da6..e48e2de98e 100644 --- a/plugins/callback/mail.py +++ b/plugins/callback/mail.py @@ -62,7 +62,7 @@ import re import smtplib from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase diff --git 
a/plugins/callback/selective.py b/plugins/callback/selective.py index 23813b0e3c..8d882d89bd 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -40,7 +40,7 @@ import difflib from ansible import constants as C from ansible.plugins.callback import CallbackBase -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text try: codeCodes = C.COLOR_CODES diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index 5974c41a71..74d338dbcc 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -58,7 +58,7 @@ import os import uuid from ansible import context -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.urls import open_url from ansible.plugins.callback import CallbackBase diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index 783729916f..aaca1bd8cc 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' from os.path import basename from ansible import constants as C from ansible import context -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.utils.color import colorize, hostcolor from ansible.plugins.callback.default import CallbackModule as CallbackModule_default diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py index 9aa8488807..da931d6b73 100644 --- a/plugins/callback/yaml.py +++ b/plugins/callback/yaml.py @@ -25,7 +25,7 @@ import re import string import sys -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.module_utils.six import string_types from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy diff --git 
a/plugins/connection/chroot.py b/plugins/connection/chroot.py index a18506cb80..c4c427aa0a 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -54,7 +54,7 @@ from ansible.errors import AnsibleError from ansible.module_utils.basic import is_executable from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.six.moves import shlex_quote -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index beb440eae3..e97867e58f 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -32,7 +32,7 @@ DOCUMENTATION = ''' import subprocess from ansible_collections.community.general.plugins.connection.jail import Connection as Jail -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.errors import AnsibleError from ansible.utils.display import Display diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index f5d787b62f..cee08ed8fd 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -38,7 +38,7 @@ import traceback from ansible.errors import AnsibleError from ansible.module_utils.six.moves import shlex_quote -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index 6512a87c6d..b18919efd3 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -43,7 +43,7 @@ except ImportError: pass from ansible import errors -from ansible.module_utils._text 
import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.plugins.connection import ConnectionBase diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index 58bb09906e..d523234449 100644 --- a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -46,7 +46,7 @@ from distutils.spawn import find_executable from subprocess import Popen, PIPE from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.plugins.connection import ConnectionBase diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index d3f934b601..ca221a7fac 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -39,7 +39,7 @@ DOCUMENTATION = ''' import subprocess -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.connection import ConnectionBase, ensure_connect from ansible.errors import AnsibleConnectionFailure from ansible.utils.display import Display diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index b101ec5cf3..b12cffe28d 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -33,7 +33,7 @@ import traceback from ansible.errors import AnsibleError from ansible.module_utils.six.moves import shlex_quote -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display diff --git a/plugins/filter/from_csv.py b/plugins/filter/from_csv.py index 13a18aa88a..b66d47699b 100644 --- a/plugins/filter/from_csv.py +++ b/plugins/filter/from_csv.py @@ -8,7 +8,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type 
from ansible.errors import AnsibleFilterError -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, DialectNotAvailableError, diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py index 1550c41a4f..d9bc549ed6 100644 --- a/plugins/inventory/cobbler.py +++ b/plugins/inventory/cobbler.py @@ -72,7 +72,7 @@ from distutils.version import LooseVersion import socket from ansible.errors import AnsibleError -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils.common._collections_compat import MutableMapping from ansible.module_utils.six import iteritems from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py index daa3755875..ddf64cd626 100644 --- a/plugins/inventory/gitlab_runners.py +++ b/plugins/inventory/gitlab_runners.py @@ -82,7 +82,7 @@ keyed_groups: ''' from ansible.errors import AnsibleError, AnsibleParserError -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.plugins.inventory import BaseInventoryPlugin, Constructable try: diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index 06c620ac60..59bb8845ff 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -124,7 +124,7 @@ import time import os import socket from ansible.plugins.inventory import BaseInventoryPlugin -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.common.dict_transformations import dict_merge from ansible.module_utils.six import raise_from from 
ansible.errors import AnsibleError, AnsibleParserError diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 39a6ff3a67..05a83367af 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -56,7 +56,7 @@ from subprocess import Popen, PIPE from ansible import constants as C from ansible.errors import AnsibleParserError -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index f5a939b69d..2d305bb8d6 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -61,7 +61,7 @@ from sys import version as python_version from ansible.errors import AnsibleError from ansible.module_utils.urls import open_url from ansible.plugins.inventory import BaseInventoryPlugin -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.six.moves.urllib.parse import urljoin diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 843a006738..2e863a2531 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -100,7 +100,7 @@ from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link from ansible.module_utils.urls import open_url -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text import ansible.module_utils.six.moves.urllib.parse as urllib_parse diff --git 
a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 3827aa0d1a..827618131a 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -56,7 +56,7 @@ import os from subprocess import Popen, PIPE from ansible.errors import AnsibleParserError -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils.common._collections_compat import MutableMapping from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index d567b7f687..8b9e4e9102 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -106,7 +106,7 @@ import os from ansible.module_utils.six.moves.urllib.parse import urlparse from ansible.errors import AnsibleError, AnsibleAssertionError from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text try: import consul diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index f2a720a042..ec6e6fcb56 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -74,7 +74,7 @@ from subprocess import Popen from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase from ansible.parsing.splitter import parse_kv -from ansible.module_utils._text import to_bytes, to_text, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native from ansible.utils.display import Display display = Display() diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index 16e6bf4f69..b6c71954f0 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -152,7 +152,7 @@ RETURN = """ from ansible.errors import AnsibleError 
from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import socket try: diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py index 5252991c72..d52301e7fb 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -54,7 +54,7 @@ except ImportError: pass from ansible.errors import AnsibleError -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.plugins.lookup import LookupBase # ============================================================== diff --git a/plugins/lookup/etcd3.py b/plugins/lookup/etcd3.py index 333b8889d8..5b2c334c41 100644 --- a/plugins/lookup/etcd3.py +++ b/plugins/lookup/etcd3.py @@ -138,7 +138,7 @@ import re from ansible.plugins.lookup import LookupBase from ansible.utils.display import Display from ansible.module_utils.basic import missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.plugins.lookup import LookupBase from ansible.errors import AnsibleError, AnsibleLookupError diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index 40e449e600..06b89bf396 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -124,7 +124,7 @@ except ImportError: pass from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.utils.display import Display display = Display() diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index 899820191a..a4358f7b1e 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -63,7 +63,7 @@ import os from ansible.plugins.lookup import LookupBase from ansible.utils.cmd_functions import run_cmd -from ansible.module_utils._text import 
to_text +from ansible.module_utils.common.text.converters import to_text ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml') ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera') diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index e6137f4080..5e9f9907bd 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -39,7 +39,7 @@ RETURN = """ from subprocess import Popen, PIPE from ansible.errors import AnsibleError -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.plugins.lookup import LookupBase diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index 18a6a2ceac..a417874898 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -55,7 +55,7 @@ _raw: from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text HAVE_LMDB = True try: import lmdb diff --git a/plugins/lookup/nios_next_ip.py b/plugins/lookup/nios_next_ip.py index 21773cb53e..58e95c7d13 100644 --- a/plugins/lookup/nios_next_ip.py +++ b/plugins/lookup/nios_next_ip.py @@ -74,7 +74,7 @@ _list: from ansible.plugins.lookup import LookupBase from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.errors import AnsibleError diff --git a/plugins/lookup/nios_next_network.py b/plugins/lookup/nios_next_network.py index 2aa22ab704..c18c6ae993 100644 --- a/plugins/lookup/nios_next_network.py +++ b/plugins/lookup/nios_next_network.py @@ -84,7 +84,7 @@ _list: from ansible.plugins.lookup import LookupBase from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import 
WapiLookup -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.errors import AnsibleError diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index 715c337ffd..9f97a90e71 100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -103,7 +103,7 @@ from subprocess import Popen, PIPE from ansible.plugins.lookup import LookupBase from ansible.errors import AnsibleLookupError -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text class OnePass(object): diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 976dfb837e..9c545a1cb0 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -142,7 +142,7 @@ import yaml from distutils import util from ansible.errors import AnsibleError, AnsibleAssertionError -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.utils.display import Display from ansible.utils.encrypt import random_password from ansible.plugins.lookup import LookupBase diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py index 6a05cfd041..d67a75ed99 100644 --- a/plugins/lookup/random_string.py +++ b/plugins/lookup/random_string.py @@ -138,7 +138,7 @@ import string from ansible.errors import AnsibleLookupError from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text class LookupModule(LookupBase): diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index 074b9490bf..a1d5a381b2 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -80,7 +80,7 @@ try: except ImportError: pass -from ansible.module_utils._text import to_text +from 
ansible.module_utils.common.text.converters import to_text from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 808bb942b0..0067472513 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -36,7 +36,7 @@ import shelve from ansible.errors import AnsibleError, AnsibleAssertionError from ansible.plugins.lookup import LookupBase -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text class LookupModule(LookupBase): diff --git a/plugins/module_utils/_netapp.py b/plugins/module_utils/_netapp.py index d80506bb9a..81a50a336d 100644 --- a/plugins/module_utils/_netapp.py +++ b/plugins/module_utils/_netapp.py @@ -41,7 +41,7 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError from ansible.module_utils.urls import open_url from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native try: from ansible.module_utils.ansible_release import __version__ as ansible_version diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py index 426e2eb279..86c4694524 100644 --- a/plugins/module_utils/csv.py +++ b/plugins/module_utils/csv.py @@ -10,7 +10,7 @@ __metaclass__ = type import csv from io import BytesIO, StringIO -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.six import PY3 diff --git a/plugins/module_utils/gandi_livedns_api.py b/plugins/module_utils/gandi_livedns_api.py index 60e0761d26..2c785353ad 100644 --- a/plugins/module_utils/gandi_livedns_api.py +++ b/plugins/module_utils/gandi_livedns_api.py @@ -7,7 +7,7 @@ __metaclass__ = type import json 
-from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.urls import fetch_url diff --git a/plugins/module_utils/gitlab.py b/plugins/module_utils/gitlab.py index e13f38c099..5ddafa2b42 100644 --- a/plugins/module_utils/gitlab.py +++ b/plugins/module_utils/gitlab.py @@ -12,7 +12,7 @@ from distutils.version import StrictVersion from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native try: from urllib import quote_plus # Python 2.X diff --git a/plugins/module_utils/hwc_utils.py b/plugins/module_utils/hwc_utils.py index 05e0c1378d..c11cb7d4d2 100644 --- a/plugins/module_utils/hwc_utils.py +++ b/plugins/module_utils/hwc_utils.py @@ -21,7 +21,7 @@ except ImportError: from ansible.module_utils.basic import (AnsibleModule, env_fallback, missing_required_lib) -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class HwcModuleException(Exception): diff --git a/plugins/module_utils/ibm_sa_utils.py b/plugins/module_utils/ibm_sa_utils.py index c3ab4103a9..fdaa38a9fc 100644 --- a/plugins/module_utils/ibm_sa_utils.py +++ b/plugins/module_utils/ibm_sa_utils.py @@ -9,7 +9,7 @@ __metaclass__ = type import traceback from functools import wraps -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import missing_required_lib PYXCLI_INSTALLED = True diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index ae002a7c94..b11289a634 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -35,7 +35,7 @@ import traceback from 
ansible.module_utils.urls import open_url from ansible.module_utils.six.moves.urllib.parse import urlencode, quote from ansible.module_utils.six.moves.urllib.error import HTTPError -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text URL_REALMS = "{url}/admin/realms" URL_REALM = "{url}/admin/realms/{realm}" diff --git a/plugins/module_utils/ipa.py b/plugins/module_utils/ipa.py index b2b1a892cd..76fe6ca717 100644 --- a/plugins/module_utils/ipa.py +++ b/plugins/module_utils/ipa.py @@ -18,7 +18,7 @@ import socket import uuid import re -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils.six import PY3 from ansible.module_utils.six.moves.urllib.parse import quote from ansible.module_utils.urls import fetch_url, HAS_GSSAPI diff --git a/plugins/module_utils/ldap.py b/plugins/module_utils/ldap.py index 999d7e67ee..30dbaf7640 100644 --- a/plugins/module_utils/ldap.py +++ b/plugins/module_utils/ldap.py @@ -10,7 +10,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type import traceback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native try: import ldap diff --git a/plugins/module_utils/lxd.py b/plugins/module_utils/lxd.py index e835a6abca..e393090799 100644 --- a/plugins/module_utils/lxd.py +++ b/plugins/module_utils/lxd.py @@ -20,7 +20,7 @@ import ssl from ansible.module_utils.urls import generic_urlparse from ansible.module_utils.six.moves.urllib.parse import urlparse from ansible.module_utils.six.moves import http_client -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text # httplib/http.client connection using unix domain socket HTTPConnection = http_client.HTTPConnection diff --git 
a/plugins/module_utils/net_tools/nios/api.py b/plugins/module_utils/net_tools/nios/api.py index 4a771e49af..cbb8b63f3b 100644 --- a/plugins/module_utils/net_tools/nios/api.py +++ b/plugins/module_utils/net_tools/nios/api.py @@ -14,9 +14,9 @@ __metaclass__ = type import os from functools import partial -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.six import iteritems -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.basic import env_fallback from ansible.module_utils.common.validation import check_type_dict diff --git a/plugins/module_utils/oneview.py b/plugins/module_utils/oneview.py index bfa5f09102..3ebb057ca7 100644 --- a/plugins/module_utils/oneview.py +++ b/plugins/module_utils/oneview.py @@ -27,7 +27,7 @@ except ImportError: from ansible.module_utils import six from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common._collections_compat import Mapping diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py index 610366d9ba..0b82dadf0e 100644 --- a/plugins/module_utils/oracle/oci_utils.py +++ b/plugins/module_utils/oracle/oci_utils.py @@ -38,7 +38,7 @@ except ImportError: HAS_OCI_PY_SDK = False -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.six import iteritems __version__ = "1.6.0-dev" diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index df7011a0b4..c39c02a42e 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -6,8 +6,8 @@ __metaclass__ = type import json from ansible.module_utils.urls 
import open_url -from ansible.module_utils._text import to_native -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.six.moves import http_client from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError from ansible.module_utils.six.moves.urllib.parse import urlparse diff --git a/plugins/module_utils/source_control/bitbucket.py b/plugins/module_utils/source_control/bitbucket.py index c17dcb1d9e..c24a25074a 100644 --- a/plugins/module_utils/source_control/bitbucket.py +++ b/plugins/module_utils/source_control/bitbucket.py @@ -7,7 +7,7 @@ __metaclass__ = type import json -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.basic import env_fallback from ansible.module_utils.urls import fetch_url, basic_auth_header diff --git a/plugins/module_utils/utm_utils.py b/plugins/module_utils/utm_utils.py index 591305a4b3..fd196dcbca 100644 --- a/plugins/module_utils/utm_utils.py +++ b/plugins/module_utils/utm_utils.py @@ -13,7 +13,7 @@ __metaclass__ = type import json -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url diff --git a/plugins/module_utils/vexata.py b/plugins/module_utils/vexata.py index e5c9bdb819..3d6fb7aaca 100644 --- a/plugins/module_utils/vexata.py +++ b/plugins/module_utils/vexata.py @@ -13,7 +13,7 @@ try: except ImportError: HAS_VEXATAPI = False -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import env_fallback VXOS_VERSION = None diff --git a/plugins/modules/cloud/atomic/atomic_container.py 
b/plugins/modules/cloud/atomic/atomic_container.py index 273cdc8931..ca63125661 100644 --- a/plugins/modules/cloud/atomic/atomic_container.py +++ b/plugins/modules/cloud/atomic/atomic_container.py @@ -95,7 +95,7 @@ msg: import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def do_install(module, mode, rootfs, container, image, values_list, backend): diff --git a/plugins/modules/cloud/atomic/atomic_host.py b/plugins/modules/cloud/atomic/atomic_host.py index d7164a9adb..85b00f917a 100644 --- a/plugins/modules/cloud/atomic/atomic_host.py +++ b/plugins/modules/cloud/atomic/atomic_host.py @@ -52,7 +52,7 @@ import os import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def core(module): diff --git a/plugins/modules/cloud/atomic/atomic_image.py b/plugins/modules/cloud/atomic/atomic_image.py index fd99bb3bf7..350ad4c2ae 100644 --- a/plugins/modules/cloud/atomic/atomic_image.py +++ b/plugins/modules/cloud/atomic/atomic_image.py @@ -69,7 +69,7 @@ msg: import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def do_upgrade(module, image): diff --git a/plugins/modules/cloud/dimensiondata/dimensiondata_network.py b/plugins/modules/cloud/dimensiondata/dimensiondata_network.py index 246b486d06..64cc8b118a 100644 --- a/plugins/modules/cloud/dimensiondata/dimensiondata_network.py +++ b/plugins/modules/cloud/dimensiondata/dimensiondata_network.py @@ -113,7 +113,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule -from ansible.module_utils._text import 
to_native +from ansible.module_utils.common.text.converters import to_native if HAS_LIBCLOUD: from libcloud.compute.base import NodeLocation diff --git a/plugins/modules/cloud/lxc/lxc_container.py b/plugins/modules/cloud/lxc/lxc_container.py index 636508dbda..18f1d02efe 100644 --- a/plugins/modules/cloud/lxc/lxc_container.py +++ b/plugins/modules/cloud/lxc/lxc_container.py @@ -433,7 +433,7 @@ else: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE from ansible.module_utils.six.moves import xrange -from ansible.module_utils._text import to_text, to_bytes +from ansible.module_utils.common.text.converters import to_text, to_bytes # LXC_COMPRESSION_MAP is a map of available compression types when creating diff --git a/plugins/modules/cloud/misc/cloud_init_data_facts.py b/plugins/modules/cloud/misc/cloud_init_data_facts.py index 5774fa6f39..1b44c50cbe 100644 --- a/plugins/modules/cloud/misc/cloud_init_data_facts.py +++ b/plugins/modules/cloud/misc/cloud_init_data_facts.py @@ -85,7 +85,7 @@ cloud_init_data_facts: import os from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text CLOUD_INIT_PATH = "/var/lib/cloud/data" diff --git a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py index 422c108c35..21817f10dc 100644 --- a/plugins/modules/cloud/misc/proxmox.py +++ b/plugins/modules/cloud/misc/proxmox.py @@ -364,7 +364,7 @@ except ImportError: HAS_PROXMOXER = False from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native VZ_TYPE = None diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 0fb486600c..939c72a126 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ 
b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -771,7 +771,7 @@ except ImportError: HAS_PROXMOXER = False from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_nextvmid(module, proxmox): diff --git a/plugins/modules/cloud/misc/proxmox_snap.py b/plugins/modules/cloud/misc/proxmox_snap.py index 17c6ef335a..4ee2d27893 100644 --- a/plugins/modules/cloud/misc/proxmox_snap.py +++ b/plugins/modules/cloud/misc/proxmox_snap.py @@ -119,7 +119,7 @@ except ImportError: HAS_PROXMOXER = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native VZ_TYPE = None diff --git a/plugins/modules/cloud/packet/packet_device.py b/plugins/modules/cloud/packet/packet_device.py index 5dc662a255..f939572656 100644 --- a/plugins/modules/cloud/packet/packet_device.py +++ b/plugins/modules/cloud/packet/packet_device.py @@ -275,7 +275,7 @@ import uuid import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PACKET_SDK = True try: diff --git a/plugins/modules/cloud/packet/packet_ip_subnet.py b/plugins/modules/cloud/packet/packet_ip_subnet.py index fbc12698a1..718de36f22 100644 --- a/plugins/modules/cloud/packet/packet_ip_subnet.py +++ b/plugins/modules/cloud/packet/packet_ip_subnet.py @@ -151,7 +151,7 @@ import uuid import re from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PACKET_SDK = True diff --git a/plugins/modules/cloud/packet/packet_project.py b/plugins/modules/cloud/packet/packet_project.py index 38d7ca7640..c6502c6ea6 100644 --- 
a/plugins/modules/cloud/packet/packet_project.py +++ b/plugins/modules/cloud/packet/packet_project.py @@ -122,7 +122,7 @@ id: ''' from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PACKET_SDK = True diff --git a/plugins/modules/cloud/packet/packet_volume.py b/plugins/modules/cloud/packet/packet_volume.py index 2966139a43..97c1e7498d 100644 --- a/plugins/modules/cloud/packet/packet_volume.py +++ b/plugins/modules/cloud/packet/packet_volume.py @@ -168,7 +168,7 @@ description: import uuid from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PACKET_SDK = True diff --git a/plugins/modules/cloud/packet/packet_volume_attachment.py b/plugins/modules/cloud/packet/packet_volume_attachment.py index 7cda16ce86..9044fbcffa 100644 --- a/plugins/modules/cloud/packet/packet_volume_attachment.py +++ b/plugins/modules/cloud/packet/packet_volume_attachment.py @@ -130,7 +130,7 @@ device_id: import uuid from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PACKET_SDK = True diff --git a/plugins/modules/cloud/profitbricks/profitbricks.py b/plugins/modules/cloud/profitbricks/profitbricks.py index c64151d68e..4c24d6408f 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks.py +++ b/plugins/modules/cloud/profitbricks/profitbricks.py @@ -198,7 +198,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import xrange -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native LOCATIONS = ['us/las', diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume.py 
b/plugins/modules/cloud/profitbricks/profitbricks_volume.py index 0e9523c664..5fff01d3d7 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_volume.py +++ b/plugins/modules/cloud/profitbricks/profitbricks_volume.py @@ -149,7 +149,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import xrange -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native uuid_match = re.compile( diff --git a/plugins/modules/cloud/pubnub/pubnub_blocks.py b/plugins/modules/cloud/pubnub/pubnub_blocks.py index 1dbe416b9c..c8de702597 100644 --- a/plugins/modules/cloud/pubnub/pubnub_blocks.py +++ b/plugins/modules/cloud/pubnub/pubnub_blocks.py @@ -247,7 +247,7 @@ except ImportError: exceptions = None from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text def pubnub_user(module): diff --git a/plugins/modules/cloud/rackspace/rax_cdb_user.py b/plugins/modules/cloud/rackspace/rax_cdb_user.py index 2034170f42..01c10950c4 100644 --- a/plugins/modules/cloud/rackspace/rax_cdb_user.py +++ b/plugins/modules/cloud/rackspace/rax_cdb_user.py @@ -77,7 +77,7 @@ except ImportError: HAS_PYRAX = False from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py index 48e2f10ef3..118883328a 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py +++ b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py @@ -133,7 +133,7 @@ data: import traceback from 
ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.basic import AnsibleModule, missing_required_lib try: diff --git a/plugins/modules/cloud/smartos/vmadm.py b/plugins/modules/cloud/smartos/vmadm.py index 63a4c21231..03a022423e 100644 --- a/plugins/modules/cloud/smartos/vmadm.py +++ b/plugins/modules/cloud/smartos/vmadm.py @@ -404,7 +404,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # While vmadm(1M) supports a -E option to return any errors in JSON, the # generated JSON does not play well with the JSON parsers of Python. diff --git a/plugins/modules/clustering/consul/consul_kv.py b/plugins/modules/clustering/consul/consul_kv.py index 01e9be2d05..d392228146 100644 --- a/plugins/modules/clustering/consul/consul_kv.py +++ b/plugins/modules/clustering/consul/consul_kv.py @@ -136,7 +136,7 @@ EXAMPLES = ''' state: acquire ''' -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text try: import consul diff --git a/plugins/modules/clustering/etcd3.py b/plugins/modules/clustering/etcd3.py index 0f87e32d13..28c5915693 100644 --- a/plugins/modules/clustering/etcd3.py +++ b/plugins/modules/clustering/etcd3.py @@ -119,7 +119,7 @@ old_value: import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native try: diff --git a/plugins/modules/clustering/nomad/nomad_job.py b/plugins/modules/clustering/nomad/nomad_job.py index 6c28579773..a5e1cd3755 100644 --- a/plugins/modules/clustering/nomad/nomad_job.py +++ 
b/plugins/modules/clustering/nomad/nomad_job.py @@ -84,7 +84,7 @@ EXAMPLES = ''' import json from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import_nomad = None try: diff --git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py index 5e9455f77b..d913ebeb61 100644 --- a/plugins/modules/clustering/nomad/nomad_job_info.py +++ b/plugins/modules/clustering/nomad/nomad_job_info.py @@ -270,7 +270,7 @@ import os import json from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import_nomad = None try: diff --git a/plugins/modules/clustering/znode.py b/plugins/modules/clustering/znode.py index e85537e6e8..8456a187ee 100644 --- a/plugins/modules/clustering/znode.py +++ b/plugins/modules/clustering/znode.py @@ -108,7 +108,7 @@ except ImportError: KAZOO_INSTALLED = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes def main(): diff --git a/plugins/modules/database/influxdb/influxdb_query.py b/plugins/modules/database/influxdb/influxdb_query.py index d9cf500727..bff6fa989b 100644 --- a/plugins/modules/database/influxdb/influxdb_query.py +++ b/plugins/modules/database/influxdb/influxdb_query.py @@ -64,7 +64,7 @@ query_results: ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py 
b/plugins/modules/database/influxdb/influxdb_retention_policy.py index 3ff48cbad0..a145f9e32b 100644 --- a/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ b/plugins/modules/database/influxdb/influxdb_retention_policy.py @@ -145,7 +145,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$') diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py index cb35ea7ce6..8746445335 100644 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ b/plugins/modules/database/influxdb/influxdb_user.py @@ -104,7 +104,7 @@ import json from ansible.module_utils.urls import ConnectionError from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import ansible_collections.community.general.plugins.module_utils.influxdb as influx diff --git a/plugins/modules/database/influxdb/influxdb_write.py b/plugins/modules/database/influxdb/influxdb_write.py index 0dc063a7b1..e34fe9c2cf 100644 --- a/plugins/modules/database/influxdb/influxdb_write.py +++ b/plugins/modules/database/influxdb/influxdb_write.py @@ -61,7 +61,7 @@ RETURN = r''' ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb diff --git a/plugins/modules/database/misc/odbc.py b/plugins/modules/database/misc/odbc.py index 313a7f7096..5d1cdf884b 100644 --- a/plugins/modules/database/misc/odbc.py +++ 
b/plugins/modules/database/misc/odbc.py @@ -78,7 +78,7 @@ row_count: ''' from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native HAS_PYODBC = None try: diff --git a/plugins/modules/database/misc/redis.py b/plugins/modules/database/misc/redis.py index 602aaf6c74..960b072fea 100644 --- a/plugins/modules/database/misc/redis.py +++ b/plugins/modules/database/misc/redis.py @@ -143,7 +143,7 @@ else: from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.formatters import human_to_bytes -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import re diff --git a/plugins/modules/database/misc/redis_info.py b/plugins/modules/database/misc/redis_info.py index b615addbd2..9762b03c98 100644 --- a/plugins/modules/database/misc/redis_info.py +++ b/plugins/modules/database/misc/redis_info.py @@ -196,7 +196,7 @@ except ImportError: HAS_REDIS_PACKAGE = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def redis_client(**client_params): diff --git a/plugins/modules/database/saphana/hana_query.py b/plugins/modules/database/saphana/hana_query.py index ab147ef3fe..9b26134022 100644 --- a/plugins/modules/database/saphana/hana_query.py +++ b/plugins/modules/database/saphana/hana_query.py @@ -103,7 +103,7 @@ query_result: import csv from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import StringIO -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def csv_to_list(rawcsv): diff --git a/plugins/modules/database/vertica/vertica_configuration.py 
b/plugins/modules/database/vertica/vertica_configuration.py index 1d67a831d9..b210e3f6f0 100644 --- a/plugins/modules/database/vertica/vertica_configuration.py +++ b/plugins/modules/database/vertica/vertica_configuration.py @@ -76,7 +76,7 @@ else: pyodbc_found = True from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class NotSupportedError(Exception): diff --git a/plugins/modules/database/vertica/vertica_info.py b/plugins/modules/database/vertica/vertica_info.py index c0aa94be1e..feaebecbdc 100644 --- a/plugins/modules/database/vertica/vertica_info.py +++ b/plugins/modules/database/vertica/vertica_info.py @@ -74,7 +74,7 @@ else: pyodbc_found = True from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class NotSupportedError(Exception): diff --git a/plugins/modules/database/vertica/vertica_role.py b/plugins/modules/database/vertica/vertica_role.py index fc80907cc6..06dd218ed0 100644 --- a/plugins/modules/database/vertica/vertica_role.py +++ b/plugins/modules/database/vertica/vertica_role.py @@ -87,7 +87,7 @@ else: pyodbc_found = True from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class NotSupportedError(Exception): diff --git a/plugins/modules/database/vertica/vertica_schema.py b/plugins/modules/database/vertica/vertica_schema.py index 0c85e3e091..749234add0 100644 --- a/plugins/modules/database/vertica/vertica_schema.py +++ b/plugins/modules/database/vertica/vertica_schema.py @@ -109,7 +109,7 @@ else: pyodbc_found = True from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native 
+from ansible.module_utils.common.text.converters import to_native class NotSupportedError(Exception): diff --git a/plugins/modules/database/vertica/vertica_user.py b/plugins/modules/database/vertica/vertica_user.py index 791ef5fef9..fed3a2a56f 100644 --- a/plugins/modules/database/vertica/vertica_user.py +++ b/plugins/modules/database/vertica/vertica_user.py @@ -118,7 +118,7 @@ else: pyodbc_found = True from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class NotSupportedError(Exception): diff --git a/plugins/modules/files/filesize.py b/plugins/modules/files/filesize.py index 5b22fb4512..f073ff4119 100644 --- a/plugins/modules/files/filesize.py +++ b/plugins/modules/files/filesize.py @@ -224,7 +224,7 @@ import os import math from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # These are the multiplicative suffixes understood (or returned) by dd and diff --git a/plugins/modules/files/iso_create.py b/plugins/modules/files/iso_create.py index bf6359b14a..3fa456339e 100644 --- a/plugins/modules/files/iso_create.py +++ b/plugins/modules/files/iso_create.py @@ -153,7 +153,7 @@ except ImportError: HAS_PYCDLIB = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=None, use_joliet=None, use_udf=None): diff --git a/plugins/modules/files/read_csv.py b/plugins/modules/files/read_csv.py index c48efc7440..2d5644db2e 100644 --- a/plugins/modules/files/read_csv.py +++ b/plugins/modules/files/read_csv.py @@ -138,7 +138,7 @@ list: ''' from ansible.module_utils.basic import AnsibleModule -from 
ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, DialectNotAvailableError, diff --git a/plugins/modules/files/sapcar_extract.py b/plugins/modules/files/sapcar_extract.py index db0f5f9ea8..b6a76a1629 100644 --- a/plugins/modules/files/sapcar_extract.py +++ b/plugins/modules/files/sapcar_extract.py @@ -90,7 +90,7 @@ import os from tempfile import NamedTemporaryFile from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import open_url -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_list_of_files(dir_name): diff --git a/plugins/modules/files/xattr.py b/plugins/modules/files/xattr.py index 7691f30905..8578ed4c4e 100644 --- a/plugins/modules/files/xattr.py +++ b/plugins/modules/files/xattr.py @@ -94,7 +94,7 @@ import os # import module snippets from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_xattr_keys(module, path, follow): diff --git a/plugins/modules/files/xml.py b/plugins/modules/files/xml.py index e7c6ca3f1e..ffdb65400c 100644 --- a/plugins/modules/files/xml.py +++ b/plugins/modules/files/xml.py @@ -369,7 +369,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib from ansible.module_utils.six import iteritems, string_types -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.module_utils.common._collections_compat import MutableMapping _IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*" diff --git a/plugins/modules/identity/ipa/ipa_config.py b/plugins/modules/identity/ipa/ipa_config.py index 
49d46fb5b2..e8ee073d6e 100644 --- a/plugins/modules/identity/ipa/ipa_config.py +++ b/plugins/modules/identity/ipa/ipa_config.py @@ -194,7 +194,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class ConfigIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_dnsrecord.py b/plugins/modules/identity/ipa/ipa_dnsrecord.py index 635bf2ff91..73b6695698 100644 --- a/plugins/modules/identity/ipa/ipa_dnsrecord.py +++ b/plugins/modules/identity/ipa/ipa_dnsrecord.py @@ -151,7 +151,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class DNSRecordIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_dnszone.py b/plugins/modules/identity/ipa/ipa_dnszone.py index 1536866c29..3dabad8db8 100644 --- a/plugins/modules/identity/ipa/ipa_dnszone.py +++ b/plugins/modules/identity/ipa/ipa_dnszone.py @@ -71,7 +71,7 @@ zone: from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class DNSZoneIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_group.py b/plugins/modules/identity/ipa/ipa_group.py index 84ff443a59..f62d9f0a18 100644 --- a/plugins/modules/identity/ipa/ipa_group.py +++ b/plugins/modules/identity/ipa/ipa_group.py @@ -115,7 +115,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class GroupIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_hbacrule.py b/plugins/modules/identity/ipa/ipa_hbacrule.py index cb49fd53dd..5f0704d58b 100644 --- a/plugins/modules/identity/ipa/ipa_hbacrule.py +++ b/plugins/modules/identity/ipa/ipa_hbacrule.py @@ -153,7 +153,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class HBACRuleIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_host.py b/plugins/modules/identity/ipa/ipa_host.py index 80892c01c0..25c65f0b34 100644 --- a/plugins/modules/identity/ipa/ipa_host.py +++ b/plugins/modules/identity/ipa/ipa_host.py @@ -163,7 +163,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class HostIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_hostgroup.py b/plugins/modules/identity/ipa/ipa_hostgroup.py index ae1f1a6b33..9d5c6f99c7 100644 --- a/plugins/modules/identity/ipa/ipa_hostgroup.py +++ b/plugins/modules/identity/ipa/ipa_hostgroup.py @@ -86,7 +86,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class 
HostGroupIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_otpconfig.py b/plugins/modules/identity/ipa/ipa_otpconfig.py index 84a9e969cb..9a10baec0b 100644 --- a/plugins/modules/identity/ipa/ipa_otpconfig.py +++ b/plugins/modules/identity/ipa/ipa_otpconfig.py @@ -78,7 +78,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class OTPConfigIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_otptoken.py b/plugins/modules/identity/ipa/ipa_otptoken.py index f8f48d68a6..4027a1c459 100644 --- a/plugins/modules/identity/ipa/ipa_otptoken.py +++ b/plugins/modules/identity/ipa/ipa_otptoken.py @@ -168,7 +168,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule, sanitize_keys from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class OTPTokenIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_pwpolicy.py b/plugins/modules/identity/ipa/ipa_pwpolicy.py index 7c694f32ee..0f9b141b4c 100644 --- a/plugins/modules/identity/ipa/ipa_pwpolicy.py +++ b/plugins/modules/identity/ipa/ipa_pwpolicy.py @@ -127,7 +127,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class PwPolicyIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_role.py b/plugins/modules/identity/ipa/ipa_role.py index 589a6d5efe..c602614ef9 100644 --- a/plugins/modules/identity/ipa/ipa_role.py 
+++ b/plugins/modules/identity/ipa/ipa_role.py @@ -131,7 +131,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class RoleIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_service.py b/plugins/modules/identity/ipa/ipa_service.py index 088127e0c3..f85b80d44e 100644 --- a/plugins/modules/identity/ipa/ipa_service.py +++ b/plugins/modules/identity/ipa/ipa_service.py @@ -82,7 +82,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class ServiceIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_subca.py b/plugins/modules/identity/ipa/ipa_subca.py index 218951a071..3b0d3e8707 100644 --- a/plugins/modules/identity/ipa/ipa_subca.py +++ b/plugins/modules/identity/ipa/ipa_subca.py @@ -77,7 +77,7 @@ subca: from distutils.version import LooseVersion from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class SubCAIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_sudocmd.py b/plugins/modules/identity/ipa/ipa_sudocmd.py index aa09e0e44b..d75aff44ce 100644 --- a/plugins/modules/identity/ipa/ipa_sudocmd.py +++ b/plugins/modules/identity/ipa/ipa_sudocmd.py @@ -63,7 +63,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, 
ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class SudoCmdIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_sudocmdgroup.py b/plugins/modules/identity/ipa/ipa_sudocmdgroup.py index 96eb655930..65fdd4f75f 100644 --- a/plugins/modules/identity/ipa/ipa_sudocmdgroup.py +++ b/plugins/modules/identity/ipa/ipa_sudocmdgroup.py @@ -72,7 +72,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class SudoCmdGroupIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_sudorule.py b/plugins/modules/identity/ipa/ipa_sudorule.py index 4494122e8d..2054599f9d 100644 --- a/plugins/modules/identity/ipa/ipa_sudorule.py +++ b/plugins/modules/identity/ipa/ipa_sudorule.py @@ -178,7 +178,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class SudoRuleIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_user.py b/plugins/modules/identity/ipa/ipa_user.py index 847749f15e..8a7b3abea2 100644 --- a/plugins/modules/identity/ipa/ipa_user.py +++ b/plugins/modules/identity/ipa/ipa_user.py @@ -172,7 +172,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class UserIPAClient(IPAClient): diff --git a/plugins/modules/identity/ipa/ipa_vault.py 
b/plugins/modules/identity/ipa/ipa_vault.py index 3376b8c4e7..7a6a601fa9 100644 --- a/plugins/modules/identity/ipa/ipa_vault.py +++ b/plugins/modules/identity/ipa/ipa_vault.py @@ -135,7 +135,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class VaultIPAClient(IPAClient): diff --git a/plugins/modules/identity/onepassword_info.py b/plugins/modules/identity/onepassword_info.py index 42a6311c0d..95ef7c12b7 100644 --- a/plugins/modules/identity/onepassword_info.py +++ b/plugins/modules/identity/onepassword_info.py @@ -163,7 +163,7 @@ import re from subprocess import Popen, PIPE -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/monitoring/bigpanda.py b/plugins/modules/monitoring/bigpanda.py index 8392c19536..c5fe61cbf6 100644 --- a/plugins/modules/monitoring/bigpanda.py +++ b/plugins/modules/monitoring/bigpanda.py @@ -130,7 +130,7 @@ import socket import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/monitoring/circonus_annotation.py b/plugins/modules/monitoring/circonus_annotation.py index 27d2316873..8543aa00fa 100644 --- a/plugins/modules/monitoring/circonus_annotation.py +++ b/plugins/modules/monitoring/circonus_annotation.py @@ -155,7 +155,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.six import PY3 -from ansible.module_utils._text import to_native +from 
ansible.module_utils.common.text.converters import to_native def check_requests_dep(module): diff --git a/plugins/modules/monitoring/datadog/datadog_event.py b/plugins/modules/monitoring/datadog/datadog_event.py index 3f6500f11f..6284b5bf23 100644 --- a/plugins/modules/monitoring/datadog/datadog_event.py +++ b/plugins/modules/monitoring/datadog/datadog_event.py @@ -123,7 +123,7 @@ except Exception: HAS_DATADOG = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/monitoring/datadog/datadog_monitor.py b/plugins/modules/monitoring/datadog/datadog_monitor.py index 8be71297f4..6c0f8cdb02 100644 --- a/plugins/modules/monitoring/datadog/datadog_monitor.py +++ b/plugins/modules/monitoring/datadog/datadog_monitor.py @@ -198,7 +198,7 @@ except Exception: HAS_DATADOG = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/monitoring/honeybadger_deployment.py b/plugins/modules/monitoring/honeybadger_deployment.py index 0b96af04a9..2e2198e1a3 100644 --- a/plugins/modules/monitoring/honeybadger_deployment.py +++ b/plugins/modules/monitoring/honeybadger_deployment.py @@ -67,7 +67,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/monitoring/rollbar_deployment.py b/plugins/modules/monitoring/rollbar_deployment.py index 161361b774..cea3bfdf51 100644 --- a/plugins/modules/monitoring/rollbar_deployment.py +++ 
b/plugins/modules/monitoring/rollbar_deployment.py @@ -84,7 +84,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/monitoring/sensu/sensu_check.py b/plugins/modules/monitoring/sensu/sensu_check.py index 71e8f07228..ec43b60abe 100644 --- a/plugins/modules/monitoring/sensu/sensu_check.py +++ b/plugins/modules/monitoring/sensu/sensu_check.py @@ -179,7 +179,7 @@ import json import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def sensu_check(module, path, name, state='present', backup=False): diff --git a/plugins/modules/monitoring/sensu/sensu_silence.py b/plugins/modules/monitoring/sensu/sensu_silence.py index 12dc5d2068..80a5216711 100644 --- a/plugins/modules/monitoring/sensu/sensu_silence.py +++ b/plugins/modules/monitoring/sensu/sensu_silence.py @@ -97,7 +97,7 @@ RETURN = ''' import json -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/monitoring/sensu/sensu_subscription.py b/plugins/modules/monitoring/sensu/sensu_subscription.py index 6316254d7b..947c6e0de5 100644 --- a/plugins/modules/monitoring/sensu/sensu_subscription.py +++ b/plugins/modules/monitoring/sensu/sensu_subscription.py @@ -66,7 +66,7 @@ import json import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def sensu_subscription(module, path, name, state='present', 
backup=False): diff --git a/plugins/modules/monitoring/spectrum_model_attrs.py b/plugins/modules/monitoring/spectrum_model_attrs.py index d6f3948254..231352acd6 100644 --- a/plugins/modules/monitoring/spectrum_model_attrs.py +++ b/plugins/modules/monitoring/spectrum_model_attrs.py @@ -142,7 +142,7 @@ changed_attrs: from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url from ansible.module_utils.six.moves.urllib.parse import quote import json diff --git a/plugins/modules/monitoring/stackdriver.py b/plugins/modules/monitoring/stackdriver.py index 8e2d19a9ab..8eacdbfe49 100644 --- a/plugins/modules/monitoring/stackdriver.py +++ b/plugins/modules/monitoring/stackdriver.py @@ -96,7 +96,7 @@ import json import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/monitoring/statusio_maintenance.py b/plugins/modules/monitoring/statusio_maintenance.py index 3a6124f8b0..10f733d4a8 100644 --- a/plugins/modules/monitoring/statusio_maintenance.py +++ b/plugins/modules/monitoring/statusio_maintenance.py @@ -177,7 +177,7 @@ import datetime import json from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import open_url diff --git a/plugins/modules/monitoring/uptimerobot.py b/plugins/modules/monitoring/uptimerobot.py index bb4e60faee..833a7f191e 100644 --- a/plugins/modules/monitoring/uptimerobot.py +++ b/plugins/modules/monitoring/uptimerobot.py @@ -56,7 +56,7 @@ import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse 
import urlencode from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text API_BASE = "https://api.uptimerobot.com/" diff --git a/plugins/modules/net_tools/cloudflare_dns.py b/plugins/modules/net_tools/cloudflare_dns.py index ffa4e55745..4e82e0af36 100644 --- a/plugins/modules/net_tools/cloudflare_dns.py +++ b/plugins/modules/net_tools/cloudflare_dns.py @@ -360,7 +360,7 @@ import json from ansible.module_utils.basic import AnsibleModule, env_fallback from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils._text import to_native, to_text +from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/net_tools/haproxy.py b/plugins/modules/net_tools/haproxy.py index a3320b45c5..f736036671 100644 --- a/plugins/modules/net_tools/haproxy.py +++ b/plugins/modules/net_tools/haproxy.py @@ -211,7 +211,7 @@ import time from string import Template from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock" diff --git a/plugins/modules/net_tools/ip_netns.py b/plugins/modules/net_tools/ip_netns.py index 50aec392c5..9854709e82 100644 --- a/plugins/modules/net_tools/ip_netns.py +++ b/plugins/modules/net_tools/ip_netns.py @@ -58,7 +58,7 @@ RETURN = ''' ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class Namespace(object): diff --git a/plugins/modules/net_tools/ipify_facts.py b/plugins/modules/net_tools/ipify_facts.py index dcdc5ef801..2ae0348cb1 100644 --- a/plugins/modules/net_tools/ipify_facts.py +++ b/plugins/modules/net_tools/ipify_facts.py @@ -62,7 +62,7 @@ 
import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class IpifyFacts(object): diff --git a/plugins/modules/net_tools/ldap/ldap_attrs.py b/plugins/modules/net_tools/ldap/ldap_attrs.py index ae5cb7fdae..c357a83087 100644 --- a/plugins/modules/net_tools/ldap/ldap_attrs.py +++ b/plugins/modules/net_tools/ldap/ldap_attrs.py @@ -166,7 +166,7 @@ modlist: import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native, to_bytes +from ansible.module_utils.common.text.converters import to_native, to_bytes from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs import re diff --git a/plugins/modules/net_tools/ldap/ldap_entry.py b/plugins/modules/net_tools/ldap/ldap_entry.py index ac1d63ac0e..2ef06b9693 100644 --- a/plugins/modules/net_tools/ldap/ldap_entry.py +++ b/plugins/modules/net_tools/ldap/ldap_entry.py @@ -104,7 +104,7 @@ RETURN = """ import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native, to_bytes +from ansible.module_utils.common.text.converters import to_native, to_bytes from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs LDAP_IMP_ERR = None diff --git a/plugins/modules/net_tools/ldap/ldap_search.py b/plugins/modules/net_tools/ldap/ldap_search.py index f4d02c1cd2..6b83321ff9 100644 --- a/plugins/modules/net_tools/ldap/ldap_search.py +++ b/plugins/modules/net_tools/ldap/ldap_search.py @@ -77,7 +77,7 @@ EXAMPLES = r""" import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from 
ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs LDAP_IMP_ERR = None diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 30f0537e70..657df3bd2a 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -650,7 +650,7 @@ RETURN = r"""# """ from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text import re diff --git a/plugins/modules/net_tools/nsupdate.py b/plugins/modules/net_tools/nsupdate.py index b110c6fe20..520d12e803 100644 --- a/plugins/modules/net_tools/nsupdate.py +++ b/plugins/modules/net_tools/nsupdate.py @@ -198,7 +198,7 @@ except ImportError: HAVE_DNSPYTHON = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class RecordManager(object): diff --git a/plugins/modules/net_tools/omapi_host.py b/plugins/modules/net_tools/omapi_host.py index 41c68a471a..4d65fcb95d 100644 --- a/plugins/modules/net_tools/omapi_host.py +++ b/plugins/modules/net_tools/omapi_host.py @@ -140,7 +140,7 @@ except ImportError: pureomapi_found = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native class OmapiHostManager: diff --git a/plugins/modules/net_tools/pritunl/pritunl_org.py b/plugins/modules/net_tools/pritunl/pritunl_org.py index 7fa7cbc124..35796ae361 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_org.py +++ b/plugins/modules/net_tools/pritunl/pritunl_org.py @@ -78,7 +78,7 @@ response: from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters 
import to_native from ansible.module_utils.common.dict_transformations import dict_merge from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( PritunlException, diff --git a/plugins/modules/net_tools/pritunl/pritunl_org_info.py b/plugins/modules/net_tools/pritunl/pritunl_org_info.py index e0c573fb19..a7e65c80d1 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_org_info.py +++ b/plugins/modules/net_tools/pritunl/pritunl_org_info.py @@ -75,7 +75,7 @@ organizations: """ from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.dict_transformations import dict_merge from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( PritunlException, diff --git a/plugins/modules/net_tools/pritunl/pritunl_user.py b/plugins/modules/net_tools/pritunl/pritunl_user.py index 3d1c7f338f..7ea4f18a44 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_user.py +++ b/plugins/modules/net_tools/pritunl/pritunl_user.py @@ -142,7 +142,7 @@ response: from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.dict_transformations import dict_merge from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( PritunlException, diff --git a/plugins/modules/net_tools/pritunl/pritunl_user_info.py b/plugins/modules/net_tools/pritunl/pritunl_user_info.py index c00da6dc23..e8cf5e2955 100644 --- a/plugins/modules/net_tools/pritunl/pritunl_user_info.py +++ b/plugins/modules/net_tools/pritunl/pritunl_user_info.py @@ -93,7 +93,7 @@ users: """ from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import 
to_native from ansible.module_utils.common.dict_transformations import dict_merge from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import ( PritunlException, diff --git a/plugins/modules/net_tools/snmp_facts.py b/plugins/modules/net_tools/snmp_facts.py index 3918a3a1c0..221eda30f9 100644 --- a/plugins/modules/net_tools/snmp_facts.py +++ b/plugins/modules/net_tools/snmp_facts.py @@ -190,7 +190,7 @@ except Exception: HAS_PYSNMP = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class DefineOid(object): diff --git a/plugins/modules/notification/hipchat.py b/plugins/modules/notification/hipchat.py index 06c9fca4d2..76c1227af4 100644 --- a/plugins/modules/notification/hipchat.py +++ b/plugins/modules/notification/hipchat.py @@ -96,7 +96,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.six.moves.urllib.request import pathname2url -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/notification/irc.py b/plugins/modules/notification/irc.py index 1c050fc187..9b1b91f586 100644 --- a/plugins/modules/notification/irc.py +++ b/plugins/modules/notification/irc.py @@ -137,7 +137,7 @@ import ssl import time import traceback -from ansible.module_utils._text import to_native, to_bytes +from ansible.module_utils.common.text.converters import to_native, to_bytes from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/notification/jabber.py b/plugins/modules/notification/jabber.py index 68e2c5938b..9b6811b3fa 100644 --- a/plugins/modules/notification/jabber.py +++ b/plugins/modules/notification/jabber.py @@ -92,7 +92,7 @@ 
except ImportError: HAS_XMPP = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/notification/mail.py b/plugins/modules/notification/mail.py index 3b5936d134..2f03f8c239 100644 --- a/plugins/modules/notification/mail.py +++ b/plugins/modules/notification/mail.py @@ -204,7 +204,7 @@ from email.header import Header from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import PY3 -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/notification/mqtt.py b/plugins/modules/notification/mqtt.py index 0551ab203c..991114e8ae 100644 --- a/plugins/modules/notification/mqtt.py +++ b/plugins/modules/notification/mqtt.py @@ -136,7 +136,7 @@ except ImportError: HAS_PAHOMQTT = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # =========================================== diff --git a/plugins/modules/notification/sendgrid.py b/plugins/modules/notification/sendgrid.py index 02ab072270..4a63a03db7 100644 --- a/plugins/modules/notification/sendgrid.py +++ b/plugins/modules/notification/sendgrid.py @@ -136,7 +136,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/notification/syslogger.py b/plugins/modules/notification/syslogger.py index 7f4f899f8c..226126f5a9 100644 --- a/plugins/modules/notification/syslogger.py +++ 
b/plugins/modules/notification/syslogger.py @@ -98,7 +98,7 @@ import syslog import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_facility(facility): diff --git a/plugins/modules/packaging/language/maven_artifact.py b/plugins/modules/packaging/language/maven_artifact.py index 83833b0480..9e2f94190f 100644 --- a/plugins/modules/packaging/language/maven_artifact.py +++ b/plugins/modules/packaging/language/maven_artifact.py @@ -261,7 +261,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.six.moves.urllib.parse import urlparse from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text def split_pre_existing_dir(dirname): diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/packaging/language/npm.py index 5a48468970..283b8e0be7 100644 --- a/plugins/modules/packaging/language/npm.py +++ b/plugins/modules/packaging/language/npm.py @@ -141,7 +141,7 @@ import os import re from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class Npm(object): diff --git a/plugins/modules/packaging/language/pear.py b/plugins/modules/packaging/language/pear.py index fef04d325f..e8e36b3c56 100644 --- a/plugins/modules/packaging/language/pear.py +++ b/plugins/modules/packaging/language/pear.py @@ -111,7 +111,7 @@ EXAMPLES = r''' import os -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/packaging/language/pip_package_info.py 
b/plugins/modules/packaging/language/pip_package_info.py index b769afb866..cdcc9f51cc 100644 --- a/plugins/modules/packaging/language/pip_package_info.py +++ b/plugins/modules/packaging/language/pip_package_info.py @@ -89,7 +89,7 @@ packages: import json import os -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.facts.packages import CLIMgr diff --git a/plugins/modules/packaging/os/flatpak_remote.py b/plugins/modules/packaging/os/flatpak_remote.py index a7767621d7..e0e4170f47 100644 --- a/plugins/modules/packaging/os/flatpak_remote.py +++ b/plugins/modules/packaging/os/flatpak_remote.py @@ -119,7 +119,7 @@ stdout: ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native def add_remote(module, binary, name, flatpakrepo_url, method): diff --git a/plugins/modules/packaging/os/homebrew_cask.py b/plugins/modules/packaging/os/homebrew_cask.py index 498d0b8771..6c3de1c9ba 100644 --- a/plugins/modules/packaging/os/homebrew_cask.py +++ b/plugins/modules/packaging/os/homebrew_cask.py @@ -142,7 +142,7 @@ import re import tempfile from distutils import version -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems, string_types diff --git a/plugins/modules/packaging/os/mas.py b/plugins/modules/packaging/os/mas.py index bc3e6dfd66..dd394b7c43 100644 --- a/plugins/modules/packaging/os/mas.py +++ b/plugins/modules/packaging/os/mas.py @@ -96,7 +96,7 @@ EXAMPLES = ''' RETURN = r''' # ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import 
to_native from distutils.version import StrictVersion import os diff --git a/plugins/modules/packaging/os/pacman_key.py b/plugins/modules/packaging/os/pacman_key.py index 85896c211d..a40575b697 100644 --- a/plugins/modules/packaging/os/pacman_key.py +++ b/plugins/modules/packaging/os/pacman_key.py @@ -118,7 +118,7 @@ import os.path import tempfile from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class PacmanKey(object): diff --git a/plugins/modules/packaging/os/portage.py b/plugins/modules/packaging/os/portage.py index 1f0fdc682a..2a8679dbbd 100644 --- a/plugins/modules/packaging/os/portage.py +++ b/plugins/modules/packaging/os/portage.py @@ -229,7 +229,7 @@ import os import re from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def query_package(module, package, action): diff --git a/plugins/modules/packaging/os/redhat_subscription.py b/plugins/modules/packaging/os/redhat_subscription.py index c8b5e991a0..f3e5400900 100644 --- a/plugins/modules/packaging/os/redhat_subscription.py +++ b/plugins/modules/packaging/os/redhat_subscription.py @@ -277,7 +277,7 @@ import tempfile import json from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.six.moves import configparser diff --git a/plugins/modules/packaging/os/rhn_channel.py b/plugins/modules/packaging/os/rhn_channel.py index 63be03230c..f1954037fa 100644 --- a/plugins/modules/packaging/os/rhn_channel.py +++ b/plugins/modules/packaging/os/rhn_channel.py @@ -73,7 +73,7 @@ EXAMPLES = ''' ''' import ssl -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters 
import to_text from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import xmlrpc_client diff --git a/plugins/modules/packaging/os/yum_versionlock.py b/plugins/modules/packaging/os/yum_versionlock.py index 13319f6711..6dfb3d20ba 100644 --- a/plugins/modules/packaging/os/yum_versionlock.py +++ b/plugins/modules/packaging/os/yum_versionlock.py @@ -75,7 +75,7 @@ state: ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class YumVersionLock: diff --git a/plugins/modules/packaging/os/zypper.py b/plugins/modules/packaging/os/zypper.py index 9c9b12a1a5..367bd8d9a0 100644 --- a/plugins/modules/packaging/os/zypper.py +++ b/plugins/modules/packaging/os/zypper.py @@ -216,7 +216,7 @@ EXAMPLES = ''' import xml import re from xml.dom.minidom import parseString as parseXML -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # import module snippets from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/remote_management/cobbler/cobbler_sync.py b/plugins/modules/remote_management/cobbler/cobbler_sync.py index 3ce1c25564..157208216b 100644 --- a/plugins/modules/remote_management/cobbler/cobbler_sync.py +++ b/plugins/modules/remote_management/cobbler/cobbler_sync.py @@ -72,7 +72,7 @@ import ssl from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import xmlrpc_client -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text def main(): diff --git a/plugins/modules/remote_management/cobbler/cobbler_system.py b/plugins/modules/remote_management/cobbler/cobbler_system.py index 504369e56a..e97be01239 100644 --- a/plugins/modules/remote_management/cobbler/cobbler_system.py +++ b/plugins/modules/remote_management/cobbler/cobbler_system.py @@ -151,7 
+151,7 @@ import ssl from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems from ansible.module_utils.six.moves import xmlrpc_client -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text IFPROPS_MAPPING = dict( bondingopts='bonding_opts', diff --git a/plugins/modules/remote_management/hpilo/hpilo_info.py b/plugins/modules/remote_management/hpilo/hpilo_info.py index 0f204b4a15..f373b58639 100644 --- a/plugins/modules/remote_management/hpilo/hpilo_info.py +++ b/plugins/modules/remote_management/hpilo/hpilo_info.py @@ -128,7 +128,7 @@ except ImportError: HAS_HPILO = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # Suppress warnings from hpilo diff --git a/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py b/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py index d8966c6d64..1dbf4ad0b6 100644 --- a/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py +++ b/plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py @@ -283,7 +283,7 @@ redfish_facts: ''' from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_command.py b/plugins/modules/remote_management/redfish/idrac_redfish_command.py index a637d15631..5e02154ed8 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_command.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_command.py @@ -82,7 +82,7 @@ msg: import re from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class IdracRedfishUtils(RedfishUtils): diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/plugins/modules/remote_management/redfish/idrac_redfish_config.py index b16401311b..adea4b11a9 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_config.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_config.py @@ -150,7 +150,7 @@ from ansible.module_utils.common.validation import ( check_required_arguments ) from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class IdracRedfishUtils(RedfishUtils): diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/plugins/modules/remote_management/redfish/idrac_redfish_info.py index 0033db7384..cb1aa8f34f 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_info.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_info.py @@ -120,7 +120,7 @@ msg: from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class IdracRedfishUtils(RedfishUtils): diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index a2f290d16a..01f1fd771d 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -551,7 +551,7 @@ msg: from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # More will be added as module features are expanded diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py index e084c670f4..9b15a3e63e 100644 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ b/plugins/modules/remote_management/redfish/redfish_config.py @@ -204,7 +204,7 @@ msg: from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native # More will be added as module features are expanded diff --git a/plugins/modules/remote_management/wakeonlan.py b/plugins/modules/remote_management/wakeonlan.py index 2f097fcf30..725e070cd8 100644 --- a/plugins/modules/remote_management/wakeonlan.py +++ b/plugins/modules/remote_management/wakeonlan.py @@ -65,7 +65,7 @@ import struct import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def wakeonlan(module, mac, broadcast, port): diff --git a/plugins/modules/source_control/github/github_release.py b/plugins/modules/source_control/github/github_release.py index 7813ba1d89..654dce5f98 100644 --- a/plugins/modules/source_control/github/github_release.py +++ b/plugins/modules/source_control/github/github_release.py @@ -135,7 +135,7 @@ except ImportError: HAS_GITHUB_API = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git 
a/plugins/modules/source_control/github/github_webhook.py b/plugins/modules/source_control/github/github_webhook.py index 2a737ef5a4..b1f0cb7a2b 100644 --- a/plugins/modules/source_control/github/github_webhook.py +++ b/plugins/modules/source_control/github/github_webhook.py @@ -148,7 +148,7 @@ except ImportError: HAS_GITHUB = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def _create_hook_config(module): diff --git a/plugins/modules/source_control/github/github_webhook_info.py b/plugins/modules/source_control/github/github_webhook_info.py index 2e7012e631..3936cbe37b 100644 --- a/plugins/modules/source_control/github/github_webhook_info.py +++ b/plugins/modules/source_control/github/github_webhook_info.py @@ -94,7 +94,7 @@ except ImportError: HAS_GITHUB = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def _munge_hook(hook_obj): diff --git a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py b/plugins/modules/source_control/gitlab/gitlab_deploy_key.py index a75aef4e48..45149e275c 100644 --- a/plugins/modules/source_control/gitlab/gitlab_deploy_key.py +++ b/plugins/modules/source_control/gitlab/gitlab_deploy_key.py @@ -124,7 +124,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication diff --git a/plugins/modules/source_control/gitlab/gitlab_group.py b/plugins/modules/source_control/gitlab/gitlab_group.py index 0c61273363..42e1801a81 
100644 --- a/plugins/modules/source_control/gitlab/gitlab_group.py +++ b/plugins/modules/source_control/gitlab/gitlab_group.py @@ -131,7 +131,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication diff --git a/plugins/modules/source_control/gitlab/gitlab_hook.py b/plugins/modules/source_control/gitlab/gitlab_hook.py index bc4b6ecba4..5128fba9e1 100644 --- a/plugins/modules/source_control/gitlab/gitlab_hook.py +++ b/plugins/modules/source_control/gitlab/gitlab_hook.py @@ -174,7 +174,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication diff --git a/plugins/modules/source_control/gitlab/gitlab_project.py b/plugins/modules/source_control/gitlab/gitlab_project.py index 060d77ef6a..73def710c3 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project.py +++ b/plugins/modules/source_control/gitlab/gitlab_project.py @@ -181,7 +181,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, findProject, gitlabAuthentication diff --git a/plugins/modules/source_control/gitlab/gitlab_project_variable.py 
b/plugins/modules/source_control/gitlab/gitlab_project_variable.py index 2ca788a194..21821cd495 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project_variable.py +++ b/plugins/modules/source_control/gitlab/gitlab_project_variable.py @@ -129,7 +129,7 @@ project_variable: import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.six import string_types from ansible.module_utils.six import integer_types diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py index d38b4819a6..25490b00dd 100644 --- a/plugins/modules/source_control/gitlab/gitlab_runner.py +++ b/plugins/modules/source_control/gitlab/gitlab_runner.py @@ -169,7 +169,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication diff --git a/plugins/modules/source_control/gitlab/gitlab_user.py b/plugins/modules/source_control/gitlab/gitlab_user.py index 8770a041b4..c586cafd60 100644 --- a/plugins/modules/source_control/gitlab/gitlab_user.py +++ b/plugins/modules/source_control/gitlab/gitlab_user.py @@ -236,7 +236,7 @@ except Exception: from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, 
gitlabAuthentication diff --git a/plugins/modules/source_control/hg.py b/plugins/modules/source_control/hg.py index 810b918bd6..572b036e1f 100644 --- a/plugins/modules/source_control/hg.py +++ b/plugins/modules/source_control/hg.py @@ -89,7 +89,7 @@ EXAMPLES = ''' import os from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class Hg(object): diff --git a/plugins/modules/storage/emc/emc_vnx_sg_member.py b/plugins/modules/storage/emc/emc_vnx_sg_member.py index b5b68d4ef4..2698f5327a 100644 --- a/plugins/modules/storage/emc/emc_vnx_sg_member.py +++ b/plugins/modules/storage/emc/emc_vnx_sg_member.py @@ -79,7 +79,7 @@ hluid: import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec LIB_IMP_ERR = None diff --git a/plugins/modules/system/crypttab.py b/plugins/modules/system/crypttab.py index 9841a786c1..8eeec56d3d 100644 --- a/plugins/modules/system/crypttab.py +++ b/plugins/modules/system/crypttab.py @@ -76,7 +76,7 @@ import os import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native def main(): diff --git a/plugins/modules/system/dpkg_divert.py b/plugins/modules/system/dpkg_divert.py index b7b57fd321..1033f70f14 100644 --- a/plugins/modules/system/dpkg_divert.py +++ b/plugins/modules/system/dpkg_divert.py @@ -161,7 +161,7 @@ import os from distutils.version import LooseVersion from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import 
to_bytes, to_native def diversion_state(module, command, path): diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/system/filesystem.py index 97fe2dc1ab..cbb0e5e95e 100644 --- a/plugins/modules/system/filesystem.py +++ b/plugins/modules/system/filesystem.py @@ -110,7 +110,7 @@ import re import stat from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class Device(object): diff --git a/plugins/modules/system/interfaces_file.py b/plugins/modules/system/interfaces_file.py index 618a472d91..c22c0ce29e 100644 --- a/plugins/modules/system/interfaces_file.py +++ b/plugins/modules/system/interfaces_file.py @@ -145,7 +145,7 @@ import re import tempfile from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes def lineDict(line): diff --git a/plugins/modules/system/iptables_state.py b/plugins/modules/system/iptables_state.py index 66ba2c9b20..1f35edc04b 100644 --- a/plugins/modules/system/iptables_state.py +++ b/plugins/modules/system/iptables_state.py @@ -232,7 +232,7 @@ import filecmp import shutil from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native IPTABLES = dict( diff --git a/plugins/modules/system/launchd.py b/plugins/modules/system/launchd.py index 919d8d7bd2..30a5ed02b2 100644 --- a/plugins/modules/system/launchd.py +++ b/plugins/modules/system/launchd.py @@ -114,7 +114,7 @@ from abc import ABCMeta, abstractmethod from time import sleep from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class ServiceState: diff --git a/plugins/modules/system/listen_ports_facts.py 
b/plugins/modules/system/listen_ports_facts.py index 27ecca8f50..c81977d7f4 100644 --- a/plugins/modules/system/listen_ports_facts.py +++ b/plugins/modules/system/listen_ports_facts.py @@ -137,7 +137,7 @@ ansible_facts: import re import platform -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/system/locale_gen.py b/plugins/modules/system/locale_gen.py index 9a5b84f071..c142da1ceb 100644 --- a/plugins/modules/system/locale_gen.py +++ b/plugins/modules/system/locale_gen.py @@ -40,7 +40,7 @@ import re from subprocess import Popen, PIPE, call from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native LOCALE_NORMALIZATION = { ".utf8": ".UTF-8", diff --git a/plugins/modules/system/nosh.py b/plugins/modules/system/nosh.py index 0f7de471d3..4fe3020393 100644 --- a/plugins/modules/system/nosh.py +++ b/plugins/modules/system/nosh.py @@ -315,7 +315,7 @@ import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.service import fail_if_missing -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def run_sys_ctl(module, args): diff --git a/plugins/modules/system/openwrt_init.py b/plugins/modules/system/openwrt_init.py index 817ed9f4b5..afc3c3a956 100644 --- a/plugins/modules/system/openwrt_init.py +++ b/plugins/modules/system/openwrt_init.py @@ -72,7 +72,7 @@ RETURN = ''' import os import glob from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native module = None init_script = None diff --git a/plugins/modules/system/pam_limits.py b/plugins/modules/system/pam_limits.py index bde41d44f1..17b1ea1304 
100644 --- a/plugins/modules/system/pam_limits.py +++ b/plugins/modules/system/pam_limits.py @@ -138,7 +138,7 @@ import re import tempfile from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def _assert_is_valid_value(module, item, value, prefix=''): diff --git a/plugins/modules/system/runit.py b/plugins/modules/system/runit.py index 30cd611b29..053c77ff07 100644 --- a/plugins/modules/system/runit.py +++ b/plugins/modules/system/runit.py @@ -84,7 +84,7 @@ import os import re from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class Sv(object): diff --git a/plugins/modules/system/sefcontext.py b/plugins/modules/system/sefcontext.py index 457e2e236b..73c79662bc 100644 --- a/plugins/modules/system/sefcontext.py +++ b/plugins/modules/system/sefcontext.py @@ -102,7 +102,7 @@ RETURN = r''' import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native SELINUX_IMP_ERR = None try: diff --git a/plugins/modules/system/selinux_permissive.py b/plugins/modules/system/selinux_permissive.py index 0d1f9f5985..7289705192 100644 --- a/plugins/modules/system/selinux_permissive.py +++ b/plugins/modules/system/selinux_permissive.py @@ -63,7 +63,7 @@ except ImportError: SEOBJECT_IMP_ERR = traceback.format_exc() from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/system/selogin.py b/plugins/modules/system/selogin.py index 7036dad958..53b077f954 100644 --- a/plugins/modules/system/selogin.py +++ b/plugins/modules/system/selogin.py @@ 
-113,7 +113,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''): diff --git a/plugins/modules/system/seport.py b/plugins/modules/system/seport.py index 71df8d6be9..c2eee25ae1 100644 --- a/plugins/modules/system/seport.py +++ b/plugins/modules/system/seport.py @@ -109,7 +109,7 @@ except ImportError: HAVE_SEOBJECT = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_runtime_status(ignore_selinux_state=False): diff --git a/plugins/modules/system/ssh_config.py b/plugins/modules/system/ssh_config.py index be177baaaf..49525849f1 100644 --- a/plugins/modules/system/ssh_config.py +++ b/plugins/modules/system/ssh_config.py @@ -157,7 +157,7 @@ except ImportError: STORM_IMP_ERR = traceback.format_exc() from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class SSHConfig(): diff --git a/plugins/modules/system/svc.py b/plugins/modules/system/svc.py index e921567074..f49f904d93 100644 --- a/plugins/modules/system/svc.py +++ b/plugins/modules/system/svc.py @@ -91,7 +91,7 @@ import re import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def _load_dist_subclass(cls, *args, **kwargs): diff --git a/plugins/modules/web_infrastructure/deploy_helper.py b/plugins/modules/web_infrastructure/deploy_helper.py index a07281819b..f879594bc3 100644 --- a/plugins/modules/web_infrastructure/deploy_helper.py +++ 
b/plugins/modules/web_infrastructure/deploy_helper.py @@ -274,7 +274,7 @@ import time import traceback from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class DeployHelper(object): diff --git a/plugins/modules/web_infrastructure/htpasswd.py b/plugins/modules/web_infrastructure/htpasswd.py index 6ff041316f..b15a946448 100644 --- a/plugins/modules/web_infrastructure/htpasswd.py +++ b/plugins/modules/web_infrastructure/htpasswd.py @@ -97,7 +97,7 @@ import tempfile import traceback from distutils.version import LooseVersion from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native PASSLIB_IMP_ERR = None try: diff --git a/plugins/modules/web_infrastructure/jenkins_build.py b/plugins/modules/web_infrastructure/jenkins_build.py index 68f64f7a7b..43dc667ace 100644 --- a/plugins/modules/web_infrastructure/jenkins_build.py +++ b/plugins/modules/web_infrastructure/jenkins_build.py @@ -127,7 +127,7 @@ except ImportError: python_jenkins_installed = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class JenkinsBuild: diff --git a/plugins/modules/web_infrastructure/jenkins_job.py b/plugins/modules/web_infrastructure/jenkins_job.py index 6fb775d22a..9993a996e0 100644 --- a/plugins/modules/web_infrastructure/jenkins_job.py +++ b/plugins/modules/web_infrastructure/jenkins_job.py @@ -167,7 +167,7 @@ except ImportError: python_jenkins_installed = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native class JenkinsJob(object): diff --git 
a/plugins/modules/web_infrastructure/jenkins_job_info.py b/plugins/modules/web_infrastructure/jenkins_job_info.py index c927e5b954..9dcf5776c9 100644 --- a/plugins/modules/web_infrastructure/jenkins_job_info.py +++ b/plugins/modules/web_infrastructure/jenkins_job_info.py @@ -146,7 +146,7 @@ except ImportError: HAS_JENKINS = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def get_jenkins_connection(module): diff --git a/plugins/modules/web_infrastructure/jenkins_plugin.py b/plugins/modules/web_infrastructure/jenkins_plugin.py index be335fcfd3..20fd8554bc 100644 --- a/plugins/modules/web_infrastructure/jenkins_plugin.py +++ b/plugins/modules/web_infrastructure/jenkins_plugin.py @@ -273,7 +273,7 @@ from ansible.module_utils.six.moves import http_cookiejar as cookiejar from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url, url_argument_spec from ansible.module_utils.six import text_type, binary_type -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import base64 import hashlib import io diff --git a/plugins/modules/web_infrastructure/jenkins_script.py b/plugins/modules/web_infrastructure/jenkins_script.py index 68f06c2758..6d3b3d2253 100644 --- a/plugins/modules/web_infrastructure/jenkins_script.py +++ b/plugins/modules/web_infrastructure/jenkins_script.py @@ -107,7 +107,7 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import http_cookiejar as cookiejar from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def is_csrf_protection_enabled(module): diff --git 
a/plugins/modules/web_infrastructure/jira.py b/plugins/modules/web_infrastructure/jira.py index 4c10974126..9cd86341a1 100644 --- a/plugins/modules/web_infrastructure/jira.py +++ b/plugins/modules/web_infrastructure/jira.py @@ -390,7 +390,7 @@ import traceback from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper, cause_changes from ansible.module_utils.six.moves.urllib.request import pathname2url -from ansible.module_utils._text import to_text, to_bytes, to_native +from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native from ansible.module_utils.urls import fetch_url diff --git a/plugins/modules/web_infrastructure/nginx_status_info.py b/plugins/modules/web_infrastructure/nginx_status_info.py index a13a57a637..ada6881714 100644 --- a/plugins/modules/web_infrastructure/nginx_status_info.py +++ b/plugins/modules/web_infrastructure/nginx_status_info.py @@ -94,7 +94,7 @@ data: import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text class NginxStatusInfo(object): diff --git a/plugins/modules/web_infrastructure/rundeck_acl_policy.py b/plugins/modules/web_infrastructure/rundeck_acl_policy.py index 8c2043d22c..6356f5a166 100644 --- a/plugins/modules/web_infrastructure/rundeck_acl_policy.py +++ b/plugins/modules/web_infrastructure/rundeck_acl_policy.py @@ -123,7 +123,7 @@ after: # import module snippets from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url, url_argument_spec -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text import json import re diff --git a/plugins/modules/web_infrastructure/rundeck_project.py b/plugins/modules/web_infrastructure/rundeck_project.py index 5c84648207..ef78299596 100644 --- 
a/plugins/modules/web_infrastructure/rundeck_project.py +++ b/plugins/modules/web_infrastructure/rundeck_project.py @@ -103,7 +103,7 @@ after: # import module snippets from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url, url_argument_spec import json diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py index b4aca155dc..70a0a78fd8 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py @@ -188,7 +188,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py index 6d230c1a71..88356a2e54 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py @@ -101,7 +101,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py index e940f4168e..81dffe223b 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py @@ -132,7 +132,7 @@ result: """ from 
ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py index ad315df9a0..02542532f7 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py @@ -79,7 +79,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py b/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py index 1f080abfa0..76d463ccba 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py @@ -128,7 +128,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py index ecf08871fc..a8b3cc1f2b 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py @@ -108,7 +108,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from 
ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py index c1d0f7d880..3f623d5a86 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py @@ -75,7 +75,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py index caa0085c25..0dd460509a 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py @@ -307,7 +307,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py index ed241af1fe..6d606abf89 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py @@ -204,7 +204,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git 
a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py index 8dba3640db..a738bfab6b 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py @@ -234,7 +234,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py index 450bd16168..263b976045 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py @@ -120,7 +120,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py index 7c4bc8b6cf..99d56030be 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py @@ -178,7 +178,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py index 1125c4fada..afc0f5efcd 100644 --- 
a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py @@ -101,7 +101,7 @@ result: """ from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native def main(): diff --git a/plugins/modules/web_infrastructure/taiga_issue.py b/plugins/modules/web_infrastructure/taiga_issue.py index ae8f31c0ef..f05550276e 100644 --- a/plugins/modules/web_infrastructure/taiga_issue.py +++ b/plugins/modules/web_infrastructure/taiga_issue.py @@ -117,7 +117,7 @@ import traceback from os import getenv from os.path import isfile from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native TAIGA_IMP_ERR = None try: diff --git a/tests/unit/mock/loader.py b/tests/unit/mock/loader.py index 907ec9b928..756d532e68 100644 --- a/tests/unit/mock/loader.py +++ b/tests/unit/mock/loader.py @@ -9,7 +9,7 @@ import os from ansible.errors import AnsibleParserError from ansible.parsing.dataloader import DataLoader -from ansible.module_utils._text import to_bytes, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_text class DictDataLoader(DataLoader): diff --git a/tests/unit/mock/procenv.py b/tests/unit/mock/procenv.py index 616a75bbd3..5673863e16 100644 --- a/tests/unit/mock/procenv.py +++ b/tests/unit/mock/procenv.py @@ -13,7 +13,7 @@ from contextlib import contextmanager from io import BytesIO, StringIO from ansible_collections.community.general.tests.unit.compat import unittest from ansible.module_utils.six import PY3 -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes @contextmanager diff --git a/tests/unit/mock/vault_helper.py 
b/tests/unit/mock/vault_helper.py index b54629da49..6bd2db9c32 100644 --- a/tests/unit/mock/vault_helper.py +++ b/tests/unit/mock/vault_helper.py @@ -3,7 +3,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.parsing.vault import VaultSecret diff --git a/tests/unit/plugins/module_utils/conftest.py b/tests/unit/plugins/module_utils/conftest.py index 8bc13c4d55..61ed0acd27 100644 --- a/tests/unit/plugins/module_utils/conftest.py +++ b/tests/unit/plugins/module_utils/conftest.py @@ -12,7 +12,7 @@ import pytest import ansible.module_utils.basic from ansible.module_utils.six import PY3, string_types -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.common._collections_compat import MutableMapping diff --git a/tests/unit/plugins/modules/conftest.py b/tests/unit/plugins/modules/conftest.py index a7d1e0475f..9d8c52e6c5 100644 --- a/tests/unit/plugins/modules/conftest.py +++ b/tests/unit/plugins/modules/conftest.py @@ -9,7 +9,7 @@ import json import pytest from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.common._collections_compat import MutableMapping diff --git a/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py b/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py index b64a20f8ae..b380e857b1 100644 --- a/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py +++ b/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py @@ -11,7 +11,7 @@ import uuid from urllib3.response import HTTPResponse from ansible_collections.community.general.tests.unit.compat.mock import patch -from ansible.module_utils._text import to_bytes +from 
ansible.module_utils.common.text.converters import to_bytes from ansible_collections.community.general.plugins.modules.monitoring import circonus_annotation from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 8724bd4f60..911ffd1217 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -8,7 +8,7 @@ import json import pytest -from ansible.module_utils._text import to_text +from ansible.module_utils.common.text.converters import to_text from ansible_collections.community.general.plugins.modules.net_tools import nmcli pytestmark = pytest.mark.usefixtures('patch_ansible_module') diff --git a/tests/unit/plugins/modules/packaging/os/test_rhn_register.py b/tests/unit/plugins/modules/packaging/os/test_rhn_register.py index ae2f44aeda..9dde4bae7d 100644 --- a/tests/unit/plugins/modules/packaging/os/test_rhn_register.py +++ b/tests/unit/plugins/modules/packaging/os/test_rhn_register.py @@ -8,7 +8,7 @@ import os from ansible_collections.community.general.tests.unit.compat.mock import mock_open from ansible.module_utils import basic -from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native import ansible.module_utils.six from ansible.module_utils.six.moves import xmlrpc_client from ansible_collections.community.general.plugins.modules.packaging.os import rhn_register diff --git a/tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py b/tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py index 38a6652fb1..418474c578 100644 --- a/tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py +++ 
b/tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py @@ -8,7 +8,7 @@ from ansible_collections.community.general.tests.unit.compat import mock from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible_collections.community.general.tests.unit.compat import unittest from ansible.module_utils import basic -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes import ansible_collections.community.general.plugins.modules.remote_management.lenovoxcc.xcc_redfish_command as module from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json diff --git a/tests/unit/plugins/modules/system/test_ufw.py b/tests/unit/plugins/modules/system/test_ufw.py index 3374c49322..44882e0e93 100644 --- a/tests/unit/plugins/modules/system/test_ufw.py +++ b/tests/unit/plugins/modules/system/test_ufw.py @@ -6,7 +6,7 @@ __metaclass__ = type from ansible_collections.community.general.tests.unit.compat import unittest from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible.module_utils import basic -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes import ansible_collections.community.general.plugins.modules.system.ufw as module import json diff --git a/tests/unit/plugins/modules/utils.py b/tests/unit/plugins/modules/utils.py index 1a28072be1..6a00fd25fc 100644 --- a/tests/unit/plugins/modules/utils.py +++ b/tests/unit/plugins/modules/utils.py @@ -8,7 +8,7 @@ import json from ansible_collections.community.general.tests.unit.compat import unittest from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible.module_utils import basic -from ansible.module_utils._text import 
to_bytes +from ansible.module_utils.common.text.converters import to_bytes def set_module_args(args): diff --git a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py index 3774871329..687ef0f766 100644 --- a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py +++ b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py @@ -6,7 +6,7 @@ __metaclass__ = type from ansible_collections.community.general.tests.unit.compat import unittest from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible.module_utils import basic -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes from ansible_collections.community.general.plugins.modules.web_infrastructure import jenkins_build import json From acf7b106c93dd7a2de3ce609b5fda512c352e528 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 27 Jun 2021 19:39:08 +1200 Subject: [PATCH 0402/3093] _mount module utils - fixed sanity checks (#2883) * updated _mount.py, removed ignore lines * added changelog fragment --- changelogs/fragments/2883-_mount-fixed-sanity-checks.yml | 2 ++ plugins/module_utils/_mount.py | 4 ++++ tests/sanity/ignore-2.10.txt | 2 -- tests/sanity/ignore-2.11.txt | 2 -- tests/sanity/ignore-2.12.txt | 2 -- tests/sanity/ignore-2.9.txt | 2 -- 6 files changed, 6 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2883-_mount-fixed-sanity-checks.yml diff --git a/changelogs/fragments/2883-_mount-fixed-sanity-checks.yml b/changelogs/fragments/2883-_mount-fixed-sanity-checks.yml new file mode 100644 index 0000000000..35496e1233 --- /dev/null +++ b/changelogs/fragments/2883-_mount-fixed-sanity-checks.yml @@ -0,0 +1,2 @@ +bugfixes: + - _mount module utils - fixed the sanity checks (https://github.com/ansible-collections/community.general/pull/2883). 
diff --git a/plugins/module_utils/_mount.py b/plugins/module_utils/_mount.py index 62feb354cc..58be0e8b68 100644 --- a/plugins/module_utils/_mount.py +++ b/plugins/module_utils/_mount.py @@ -48,6 +48,10 @@ # agrees to be bound by the terms and conditions of this License # Agreement. +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + import os diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index c9d750f417..bdb3ca4e9a 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -1,6 +1,4 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time -plugins/module_utils/_mount.py future-import-boilerplate -plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 1311638dbc..34889a2651 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -1,5 +1,3 @@ -plugins/module_utils/_mount.py future-import-boilerplate -plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index f5b7d772fc..6e14759c9d 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -1,5 +1,3 @@ -plugins/module_utils/_mount.py future-import-boilerplate -plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py 
validate-modules:use-run-command-not-popen plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index c8c5ff0d25..33f3d183d4 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -1,6 +1,4 @@ plugins/module_utils/cloud.py pylint:bad-option-value # a pylint test that is disabled was modified over time -plugins/module_utils/_mount.py future-import-boilerplate -plugins/module_utils/_mount.py metaclass-boilerplate plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen plugins/modules/cloud/rackspace/rax.py use-argspec-type-path From 1b94d092099be2b882a83b750c87d7c3438b28ab Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 27 Jun 2021 09:57:51 +0200 Subject: [PATCH 0403/3093] Add option type validation. (#2878) --- .../fragments/2878-validate-certs-bool.yml | 2 ++ plugins/callback/nrdp.py | 33 +++++++++++-------- 2 files changed, 21 insertions(+), 14 deletions(-) create mode 100644 changelogs/fragments/2878-validate-certs-bool.yml diff --git a/changelogs/fragments/2878-validate-certs-bool.yml b/changelogs/fragments/2878-validate-certs-bool.yml new file mode 100644 index 0000000000..e636f4981b --- /dev/null +++ b/changelogs/fragments/2878-validate-certs-bool.yml @@ -0,0 +1,2 @@ +minor_changes: +- "nrdp callback plugin - parameters are now converted to strings, except ``validate_certs`` which is converted to boolean (https://github.com/ansible-collections/community.general/pull/2878)." 
diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py index f17785a92f..744c2d2ed4 100644 --- a/plugins/callback/nrdp.py +++ b/plugins/callback/nrdp.py @@ -10,22 +10,23 @@ DOCUMENTATION = ''' name: nrdp type: notification author: "Remi VERCHERE (@rverchere)" - short_description: post task result to a nagios server through nrdp + short_description: Post task results to a Nagios server through nrdp description: - - this callback send playbook result to nagios - - nagios shall use NRDP to recive passive events - - the passive check is sent to a dedicated host/service for ansible + - This callback send playbook result to Nagios. + - Nagios shall use NRDP to recive passive events. + - The passive check is sent to a dedicated host/service for Ansible. options: url: - description: url of the nrdp server - required: True + description: URL of the nrdp server. + required: true env: - name : NRDP_URL ini: - section: callback_nrdp key: url + type: string validate_certs: - description: (bool) validate the SSL certificate of the nrdp server. (For HTTPS url) + description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs.) env: - name: NRDP_VALIDATE_CERTS ini: @@ -33,32 +34,36 @@ DOCUMENTATION = ''' key: validate_nrdp_certs - section: callback_nrdp key: validate_certs - default: False + type: boolean + default: false aliases: [ validate_nrdp_certs ] token: - description: token to be allowed to push nrdp events - required: True + description: Token to be allowed to push nrdp events. + required: true env: - name: NRDP_TOKEN ini: - section: callback_nrdp key: token + type: string hostname: - description: hostname where the passive check is linked to - required: True + description: Hostname where the passive check is linked to. 
+ required: true env: - name : NRDP_HOSTNAME ini: - section: callback_nrdp key: hostname + type: string servicename: - description: service where the passive check is linked to - required: True + description: Service where the passive check is linked to. + required: true env: - name : NRDP_SERVICENAME ini: - section: callback_nrdp key: servicename + type: string ''' import os From 147425ef936faf2c7c20a0565cdf61781fac08df Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 27 Jun 2021 10:00:01 +0200 Subject: [PATCH 0404/3093] ini_file: fix regression reported in #2578 (#2875) * Add regression test. * Add more Unicode tests. * Add fix. * Add changelog. * Work completely with Unicode. * Update plugins/modules/files/ini_file.py Co-authored-by: quidame Co-authored-by: quidame --- .../fragments/2875-ini_file-unicode.yml | 2 + plugins/modules/files/ini_file.py | 51 +++++++++++-------- .../targets/ini_file/tasks/main.yml | 40 +++++++++++++++ 3 files changed, 73 insertions(+), 20 deletions(-) create mode 100644 changelogs/fragments/2875-ini_file-unicode.yml diff --git a/changelogs/fragments/2875-ini_file-unicode.yml b/changelogs/fragments/2875-ini_file-unicode.yml new file mode 100644 index 0000000000..eaf1ff9ffb --- /dev/null +++ b/changelogs/fragments/2875-ini_file-unicode.yml @@ -0,0 +1,2 @@ +bugfixes: +- "ini_file - fix Unicode processing for Python 2 (https://github.com/ansible-collections/community.general/pull/2875)." 
\ No newline at end of file diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py index d318d04d57..7d6a988e85 100644 --- a/plugins/modules/files/ini_file.py +++ b/plugins/modules/files/ini_file.py @@ -112,6 +112,7 @@ import tempfile import traceback from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_bytes, to_text def match_opt(option, line): @@ -128,6 +129,13 @@ def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False, create=True, allow_no_value=False): + if section is not None: + section = to_text(section) + if option is not None: + option = to_text(option) + if value is not None: + value = to_text(value) + diff = dict( before='', after='', @@ -144,33 +152,33 @@ def do_ini(module, filename, section=None, option=None, value=None, ini_lines = [] else: with io.open(filename, 'r', encoding="utf-8-sig") as ini_file: - ini_lines = ini_file.readlines() + ini_lines = [to_text(line) for line in ini_file.readlines()] if module._diff: - diff['before'] = ''.join(ini_lines) + diff['before'] = u''.join(ini_lines) changed = False # ini file could be empty if not ini_lines: - ini_lines.append('\n') + ini_lines.append(u'\n') # last line of file may not contain a trailing newline - if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n': - ini_lines[-1] += '\n' + if ini_lines[-1] == u"" or ini_lines[-1][-1] != u'\n': + ini_lines[-1] += u'\n' changed = True # append fake section lines to simplify the logic # At top: # Fake random section to do not match any other in the file # Using commit hash as fake section name - fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5" + fake_section_name = u"ad01e11446efb704fcdbdb21f2c43757423d91c5" # Insert it at the beginning - ini_lines.insert(0, '[%s]' % fake_section_name) + ini_lines.insert(0, u'[%s]' % fake_section_name) # At bottom: - ini_lines.append('[') + ini_lines.append(u'[') 
# If no section is defined, fake section is used if not section: @@ -180,21 +188,23 @@ def do_ini(module, filename, section=None, option=None, value=None, section_start = 0 msg = 'OK' if no_extra_spaces: - assignment_format = '%s=%s\n' + assignment_format = u'%s=%s\n' else: - assignment_format = '%s = %s\n' + assignment_format = u'%s = %s\n' + + non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$')) for index, line in enumerate(ini_lines): - if line.startswith('[%s]' % section): + if line.startswith(u'[%s]' % section): within_section = True section_start = index - elif line.startswith('['): + elif line.startswith(u'['): if within_section: if state == 'present': # insert missing option line at the end of the section for i in range(index, 0, -1): # search backwards for previous non-blank or non-comment line - if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]): + if not non_blank_non_comment_pattern.match(ini_lines[i - 1]): if option and value: ini_lines.insert(i, assignment_format % (option, value)) msg = 'option added' @@ -216,7 +226,7 @@ def do_ini(module, filename, section=None, option=None, value=None, # change the existing option line if match_opt(option, line): if not value and allow_no_value: - newline = '%s\n' % option + newline = u'%s\n' % option else: newline = assignment_format % (option, value) option_changed = ini_lines[index] != newline @@ -229,7 +239,7 @@ def do_ini(module, filename, section=None, option=None, value=None, index = index + 1 while index < len(ini_lines): line = ini_lines[index] - if line.startswith('['): + if line.startswith(u'['): break if match_active_opt(option, line): del ini_lines[index] @@ -249,28 +259,29 @@ def do_ini(module, filename, section=None, option=None, value=None, del ini_lines[-1:] if not within_section and state == 'present': - ini_lines.append('[%s]\n' % section) + ini_lines.append(u'[%s]\n' % section) msg = 'section and option added' if option and value is not None: 
ini_lines.append(assignment_format % (option, value)) elif option and value is None and allow_no_value: - ini_lines.append('%s\n' % option) + ini_lines.append(u'%s\n' % option) else: msg = 'only section added' changed = True if module._diff: - diff['after'] = ''.join(ini_lines) + diff['after'] = u''.join(ini_lines) backup_file = None if changed and not module.check_mode: if backup: backup_file = module.backup_local(filename) + encoded_ini_lines = [to_bytes(line) for line in ini_lines] try: tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir) - f = os.fdopen(tmpfd, 'w') - f.writelines(ini_lines) + f = os.fdopen(tmpfd, 'wb') + f.writelines(encoded_ini_lines) f.close() except IOError: module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc()) diff --git a/tests/integration/targets/ini_file/tasks/main.yml b/tests/integration/targets/ini_file/tasks/main.yml index be5835669b..210dafe2ca 100644 --- a/tests/integration/targets/ini_file/tasks/main.yml +++ b/tests/integration/targets/ini_file/tasks/main.yml @@ -514,3 +514,43 @@ assert: that: - content16 == expected16 + +# Regression test for https://github.com/ansible-collections/community.general/pull/2578#issuecomment-868092282 +- name: Create UTF-8 test file + copy: + content: !!binary | + W2FwcDptYWluXQphdmFpbGFibGVfbGFuZ3VhZ2VzID0gZW4gZnIgZXMgZGUgcHQgamEgbHQgemhf + VFcgaWQgZGEgcHRfQlIgcnUgc2wgaXQgbmxfTkwgdWsgdGEgc2kgY3MgbmIgaHUKIyBGdWxsIGxh + bmd1YWdlIG5hbWVzIGluIG5hdGl2ZSBsYW5ndWFnZSAoY29tbWEgc2VwYXJhdGVkKQphdmFpbGFi + bGVfbGFuZ3VhZ2VzX2Z1bGwgPSBFbmdsaXNoLCBGcmFuw6dhaXMsIEVzcGHDsW9sLCBEZXV0c2No + LCBQb3J0dWd1w6pzLCDml6XmnKzoqp4sIExpZXR1dm9zLCDkuK3mlocsIEluZG9uZXNpYSwgRGFu + c2ssIFBvcnR1Z3XDqnMgKEJyYXNpbCksINCg0YPRgdGB0LrQuNC5LCBTbG92ZW7FocSNaW5hLCBJ + dGFsaWFubywgTmVkZXJsYW5kcywg0KPQutGA0LDRl9C90YHRjNC60LAsIOCupOCuruCuv+CutOCv + jSwg4LeD4LeS4LaC4LeE4La9LCDEjGVza3ksIEJva23DpWwsIE1hZ3lhcgo= + dest: '{{ output_file }}' +- name: Add entries + ini_file: + section: "{{ item.section }}" + 
option: "{{ item.option }}" + value: "{{ item.value }}" + path: '{{ output_file }}' + create: true + loop: + - section: app:main + option: sqlalchemy.url + value: postgresql://app:secret@database/app + - section: handler_filelog + option: args + value: (sys.stderr,) + - section: handler_filelog + option: class + value: StreamHandler + - section: handler_exc_handler + option: args + value: (sys.stderr,) + - section: båz + option: fföø + value: ḃâŗ + - section: båz + option: fföø + value: bar From 9dd91f949a9cf8ac5becc12d8b079bbc6ccb9dc7 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 27 Jun 2021 22:38:04 +1200 Subject: [PATCH 0405/3093] terraform - added check_destroy (#2874) * added check_destroy This is based on the work done in PR 398. Authors from that PR: Co-authored-by: effaamponsah Co-authored-by: m-yosefpor Co-authored-by: Felix Fontein Co-authored-by: Andrew Klychkov * added changelog fragment * Update plugins/modules/cloud/misc/terraform.py Co-authored-by: Ajpantuso * Update changelogs/fragments/2874-terraform-check-destroy.yml Co-authored-by: Amin Vakil Co-authored-by: effaamponsah Co-authored-by: Felix Fontein Co-authored-by: Andrew Klychkov Co-authored-by: Ajpantuso Co-authored-by: Amin Vakil --- .../2874-terraform-check-destroy.yml | 2 ++ plugins/modules/cloud/misc/terraform.py | 26 ++++++++++++++----- 2 files changed, 21 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/2874-terraform-check-destroy.yml diff --git a/changelogs/fragments/2874-terraform-check-destroy.yml b/changelogs/fragments/2874-terraform-check-destroy.yml new file mode 100644 index 0000000000..e41d1aebc0 --- /dev/null +++ b/changelogs/fragments/2874-terraform-check-destroy.yml @@ -0,0 +1,2 @@ +minor_changes: + - terraform - add ``check_destroy`` optional parameter to check for deletion of resources before it is applied (https://github.com/ansible-collections/community.general/pull/2874). 
diff --git a/plugins/modules/cloud/misc/terraform.py b/plugins/modules/cloud/misc/terraform.py index 86521ed264..c5619b8eb0 100644 --- a/plugins/modules/cloud/misc/terraform.py +++ b/plugins/modules/cloud/misc/terraform.py @@ -130,6 +130,13 @@ options: default: false type: bool version_added: '1.3.0' + check_destroy: + description: + - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions, + but not "destroy and re-create" actions. This option is ignored when I(state=absent). + type: bool + default: false + version_added: '3.3.0' notes: - To just run a `terraform plan`, use check mode. requirements: [ "terraform" ] @@ -313,7 +320,7 @@ def build_plan(command, project_path, variables_args, state_file, targets, state plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path] - for t in (module.params.get('targets') or []): + for t in targets: plan_command.extend(['-target', t]) plan_command.extend(_state_args(state_file)) @@ -340,21 +347,22 @@ def main(): project_path=dict(required=True, type='path'), binary_path=dict(type='path'), plugin_paths=dict(type='list', elements='path'), - workspace=dict(required=False, type='str', default='default'), + workspace=dict(type='str', default='default'), purge_workspace=dict(type='bool', default=False), state=dict(default='present', choices=['present', 'absent', 'planned']), variables=dict(type='dict'), - variables_files=dict(aliases=['variables_file'], type='list', elements='path', default=None), + variables_files=dict(aliases=['variables_file'], type='list', elements='path'), plan_file=dict(type='path'), state_file=dict(type='path'), targets=dict(type='list', elements='str', default=[]), lock=dict(type='bool', default=True), lock_timeout=dict(type='int',), force_init=dict(type='bool', default=False), - backend_config=dict(type='dict', default=None), - backend_config_files=dict(type='list', elements='path', default=None), - 
init_reconfigure=dict(required=False, type='bool', default=False), + backend_config=dict(type='dict'), + backend_config_files=dict(type='list', elements='path'), + init_reconfigure=dict(type='bool', default=False), overwrite_init=dict(type='bool', default=True), + check_destroy=dict(type='bool', default=False), ), required_if=[('state', 'planned', ['plan_file'])], supports_check_mode=True, @@ -375,6 +383,7 @@ def main(): backend_config_files = module.params.get('backend_config_files') init_reconfigure = module.params.get('init_reconfigure') overwrite_init = module.params.get('overwrite_init') + check_destroy = module.params.get('check_destroy') if bin_path is not None: command = [bin_path] @@ -444,9 +453,12 @@ def main(): else: plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file, module.params.get('targets'), state, plan_file) + if state == 'present' and check_destroy and '- destroy' in out: + module.fail_json(msg="Aborting command because it would destroy some resources. 
" + "Consider switching the 'check_destroy' to false to suppress this error") command.append(plan_file) - if needs_application and not module.check_mode and not state == 'planned': + if needs_application and not module.check_mode and state != 'planned': rc, out, err = module.run_command(command, check_rc=False, cwd=project_path) if rc != 0: if workspace_ctx["current"] != workspace: From 2fb08775775b7ec3b8a4d4cad2781ee3b6a06263 Mon Sep 17 00:00:00 2001 From: Stef Graces Date: Sun, 27 Jun 2021 14:01:06 +0200 Subject: [PATCH 0406/3093] Fix/gitlab project user workspace (#2881) * Add ability to create project under a user * Add changelog * Change minor_changes in changelog As suggested in this comment https://github.com/ansible-collections/community.general/pull/2824#discussion_r653411741 * Fix user's namespace * Delete changelog * Add changelog * Fix changelog Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil * Change user_group_id to namespace_group_id Co-authored-by: Felix Fontein * Change to namespace_id Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- .../2881-gitlab_project-fix_workspace_user.yaml | 3 +++ .../source_control/gitlab/gitlab_project.py | 14 +++++++------- 2 files changed, 10 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/2881-gitlab_project-fix_workspace_user.yaml diff --git a/changelogs/fragments/2881-gitlab_project-fix_workspace_user.yaml b/changelogs/fragments/2881-gitlab_project-fix_workspace_user.yaml new file mode 100644 index 0000000000..0de8368b7f --- /dev/null +++ b/changelogs/fragments/2881-gitlab_project-fix_workspace_user.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - gitlab_project - user projects are created using namespace ID now, instead of user ID (https://github.com/ansible-collections/community.general/pull/2881). 
diff --git a/plugins/modules/source_control/gitlab/gitlab_project.py b/plugins/modules/source_control/gitlab/gitlab_project.py index 73def710c3..61d1ac0cb1 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project.py +++ b/plugins/modules/source_control/gitlab/gitlab_project.py @@ -345,22 +345,22 @@ def main(): gitlab_project = GitLabProject(module, gitlab_instance) namespace = None - user_group_id = None + namespace_id = None if group_identifier: group = findGroup(gitlab_instance, group_identifier) if group is None: module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier) - user_group_id = group.id + namespace_id = group.id else: - user = gitlab_instance.users.list(username=gitlab_instance.user.username)[0] - user_group_id = user.id + namespace = gitlab_instance.namespaces.list(search=gitlab_instance.user.username)[0] + namespace_id = namespace.id - if not user_group_id: - module.fail_json(msg="Failed to find the user/group id which required to find namespace") + if not namespace_id: + module.fail_json(msg="Failed to find the namespace or group ID which is required to look up the namespace") try: - namespace = gitlab_instance.namespaces.get(user_group_id) + namespace = gitlab_instance.namespaces.get(namespace_id) except gitlab.exceptions.GitlabGetError as e: module.fail_json(msg="Failed to find the namespace for the given user: %s" % to_native(e)) From 199ead85d0bfed2242cda1bb086f097603dce9c2 Mon Sep 17 00:00:00 2001 From: quidame Date: Sun, 27 Jun 2021 14:56:43 +0200 Subject: [PATCH 0407/3093] java_keystore: fix keystore type (#2516) * fix keystore type; update unit tests * add changelog fragment * document new param 'keystore_type' * add keystore_type support (backward compatible) * check JKS format with magic bytes * update integration tests * revert first changes in unit tests * update changelog fragment * fix magic bytes for python2/python3 * fix integration tests (irrelevant check_mode) * fix unit test (keystore 
removed before failure => changed=true) * fix typo * fix spelling * shorten a branch * mock is_jks_or_pkcs12 * fix function path in unit tests * Apply suggestions from code review (spelling) Co-authored-by: Ajpantuso * rename a method (module + unit tests) * move ArgumentSpec class content to main() * refactor create() to not loose existing keystore in case of error * update unit tests * add integration test (error handling) * fix keystore backup cleanup Co-authored-by: Ajpantuso --- .../2516_fix_2515_keystore_type_jks.yml | 4 + plugins/modules/system/java_keystore.py | 144 +++++++++++++----- .../targets/java_keystore/tasks/tests.yml | 115 ++++++++++++++ .../modules/system/test_java_keystore.py | 114 ++++++++++---- 4 files changed, 308 insertions(+), 69 deletions(-) create mode 100644 changelogs/fragments/2516_fix_2515_keystore_type_jks.yml diff --git a/changelogs/fragments/2516_fix_2515_keystore_type_jks.yml b/changelogs/fragments/2516_fix_2515_keystore_type_jks.yml new file mode 100644 index 0000000000..767081dac9 --- /dev/null +++ b/changelogs/fragments/2516_fix_2515_keystore_type_jks.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - "java_keystore - add parameter ``keystore_type`` to control output file format and override ``keytool``'s + default, which depends on Java version (https://github.com/ansible-collections/community.general/issues/2515)." 
diff --git a/plugins/modules/system/java_keystore.py b/plugins/modules/system/java_keystore.py index 8293801f1b..5cc2e9258a 100644 --- a/plugins/modules/system/java_keystore.py +++ b/plugins/modules/system/java_keystore.py @@ -1,8 +1,8 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright: (c) 2016, Guillaume Grossetie # Copyright: (c) 2021, quidame +# Copyright: (c) 2016, Guillaume Grossetie # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) @@ -97,6 +97,24 @@ options: - openssl - cryptography version_added: 3.1.0 + keystore_type: + description: + - Type of the Java keystore. + - When this option is omitted and the keystore doesn't already exist, the + behavior follows C(keytool)'s default store type which depends on + Java version; C(pkcs12) since Java 9 and C(jks) prior (may also + be C(pkcs12) if new default has been backported to this version). + - When this option is omitted and the keystore already exists, the current + type is left untouched, unless another option leads to overwrite the + keystore (in that case, this option behaves like for keystore creation). + - When I(keystore_type) is set, the keystore is created with this type if + it doesn't already exist, or is overwritten to match the given type in + case of mismatch. + type: str + choices: + - jks + - pkcs12 + version_added: 3.3.0 requirements: - openssl in PATH (when I(ssl_backend=openssl)) - keytool in PATH @@ -107,12 +125,16 @@ author: extends_documentation_fragment: - files seealso: + - module: community.crypto.openssl_pkcs12 - module: community.general.java_cert notes: - I(certificate) and I(private_key) require that their contents are available on the controller (either inline in a playbook, or with the C(file) lookup), while I(certificate_path) and I(private_key_path) require that the files are available on the target host. 
+ - By design, any change of a value of options I(keystore_type), I(name) or + I(password), as well as changes of key or certificate materials will cause + the existing I(dest) to be overwritten. ''' EXAMPLES = ''' @@ -156,6 +178,12 @@ msg: type: str sample: "Unable to find the current certificate fingerprint in ..." +err: + description: Output from stderr of keytool/openssl command after error of given command. + returned: failure + type: str + sample: "Keystore password is too short - must be at least 6 characters\n" + rc: description: keytool/openssl command execution return value returned: changed and failure @@ -176,7 +204,7 @@ import tempfile from ansible.module_utils.six import PY2 from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text +from ansible.module_utils.common.text.converters import to_bytes, to_native try: from cryptography.hazmat.primitives.serialization.pkcs12 import serialize_key_and_certificates @@ -201,6 +229,7 @@ except ImportError: class JavaKeystore: def __init__(self, module): self.module = module + self.result = dict() self.keytool_bin = module.get_bin_path('keytool', True) @@ -211,6 +240,7 @@ class JavaKeystore: self.password = module.params['password'] self.private_key = module.params['private_key'] self.ssl_backend = module.params['ssl_backend'] + self.keystore_type = module.params['keystore_type'] if self.ssl_backend == 'openssl': self.openssl_bin = module.get_bin_path('openssl', True) @@ -312,6 +342,9 @@ class JavaKeystore: rc=rc ) + if self.keystore_type not in (None, self.current_type()): + return "keystore type mismatch" + stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out) if not stored_certificate_match: return self.module.fail_json( @@ -322,6 +355,14 @@ class JavaKeystore: return stored_certificate_match.group(1) + def current_type(self): + magic_bytes = b'\xfe\xed\xfe\xed' + with 
open(self.keystore_path, 'rb') as fd: + header = fd.read(4) + if header == magic_bytes: + return 'jks' + return 'pkcs12' + def cert_changed(self): current_certificate_fingerprint = self.read_certificate_fingerprint() stored_certificate_fingerprint = self.read_stored_certificate_fingerprint() @@ -389,6 +430,8 @@ class JavaKeystore: with open(keystore_p12_path, 'wb') as p12_file: p12_file.write(pkcs12_bundle) + self.result.update(msg="PKCS#12 bundle created by cryptography backend") + def openssl_create_pkcs12_bundle(self, keystore_p12_path): export_p12_cmd = [self.openssl_bin, "pkcs12", "-export", "-name", self.name, "-in", self.certificate_path, "-inkey", self.private_key_path, "-out", keystore_p12_path, "-passout", "stdin"] @@ -401,19 +444,22 @@ class JavaKeystore: cmd_stdin = "%s\n" % self.keypass cmd_stdin += "%s\n%s" % (self.password, self.password) - (rc, export_p12_out, dummy) = self.module.run_command( + (rc, export_p12_out, export_p12_err) = self.module.run_command( export_p12_cmd, data=cmd_stdin, environ_update=None, check_rc=False ) + self.result = dict(msg=export_p12_out, cmd=export_p12_cmd, rc=rc) if rc != 0: - self.module.fail_json(msg=export_p12_out, cmd=export_p12_cmd, rc=rc) + self.result['err'] = export_p12_err + self.module.fail_json(**self.result) def create(self): + """Create the keystore, or replace it with a rollback in case of + keytool failure. + """ if self.module.check_mode: - return {'changed': True} - - if os.path.exists(self.keystore_path): - os.remove(self.keystore_path) + self.result['changed'] = True + return self.result keystore_p12_path = create_path() self.module.add_cleanup_file(keystore_p12_path) @@ -423,6 +469,13 @@ class JavaKeystore: else: self.openssl_create_pkcs12_bundle(keystore_p12_path) + if self.keystore_type == 'pkcs12': + # Preserve properties of the destination file, if any. 
+ self.module.atomic_move(keystore_p12_path, self.keystore_path) + self.update_permissions() + self.result['changed'] = True + return self.result + import_keystore_cmd = [self.keytool_bin, "-importkeystore", "-destkeystore", self.keystore_path, "-srckeystore", keystore_p12_path, @@ -430,19 +483,38 @@ class JavaKeystore: "-alias", self.name, "-noprompt"] - (rc, import_keystore_out, dummy) = self.module.run_command( + if self.keystore_type == 'jks': + keytool_help = self.module.run_command([self.keytool_bin, '-importkeystore', '-help']) + if '-deststoretype' in keytool_help[1] + keytool_help[2]: + import_keystore_cmd.insert(4, "-deststoretype") + import_keystore_cmd.insert(5, self.keystore_type) + + keystore_backup = None + if self.exists(): + keystore_backup = self.keystore_path + '.tmpbak' + # Preserve properties of the source file + self.module.preserved_copy(self.keystore_path, keystore_backup) + os.remove(self.keystore_path) + + (rc, import_keystore_out, import_keystore_err) = self.module.run_command( import_keystore_cmd, data='%s\n%s\n%s' % (self.password, self.password, self.password), check_rc=False ) - if rc != 0: - return self.module.fail_json(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc) + + self.result = dict(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc) + + # keytool may return 0 whereas the keystore has not been created. 
+ if rc != 0 or not self.exists(): + if keystore_backup is not None: + self.module.preserved_copy(keystore_backup, self.keystore_path) + os.remove(keystore_backup) + self.result['err'] = import_keystore_err + return self.module.fail_json(**self.result) self.update_permissions() - return { - 'changed': True, - 'msg': import_keystore_out, - 'cmd': import_keystore_cmd, - 'rc': rc - } + if keystore_backup is not None: + os.remove(keystore_backup) + self.result['changed'] = True + return self.result def exists(self): return os.path.exists(self.keystore_path) @@ -465,15 +537,15 @@ def create_file(content): def hex_decode(s): if PY2: return s.decode('hex') - else: - return s.hex() + return s.hex() -class ArgumentSpec(object): - def __init__(self): - self.supports_check_mode = True - self.add_file_common_args = True - argument_spec = dict( +def main(): + choose_between = (['certificate', 'certificate_path'], + ['private_key', 'private_key_path']) + + module = AnsibleModule( + argument_spec=dict( name=dict(type='str', required=True), dest=dict(type='path', required=True), certificate=dict(type='str', no_log=True), @@ -483,25 +555,13 @@ class ArgumentSpec(object): private_key_passphrase=dict(type='str', no_log=True), password=dict(type='str', required=True, no_log=True), ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']), + keystore_type=dict(type='str', choices=['jks', 'pkcs12']), force=dict(type='bool', default=False), - ) - choose_between = ( - ['certificate', 'certificate_path'], - ['private_key', 'private_key_path'], - ) - self.argument_spec = argument_spec - self.required_one_of = choose_between - self.mutually_exclusive = choose_between - - -def main(): - spec = ArgumentSpec() - module = AnsibleModule( - argument_spec=spec.argument_spec, - required_one_of=spec.required_one_of, - mutually_exclusive=spec.mutually_exclusive, - supports_check_mode=spec.supports_check_mode, - add_file_common_args=spec.add_file_common_args, + ), + 
required_one_of=choose_between, + mutually_exclusive=choose_between, + supports_check_mode=True, + add_file_common_args=True, ) module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') diff --git a/tests/integration/targets/java_keystore/tasks/tests.yml b/tests/integration/targets/java_keystore/tasks/tests.yml index b892dd1d29..8510a64165 100644 --- a/tests/integration/targets/java_keystore/tasks/tests.yml +++ b/tests/integration/targets/java_keystore/tasks/tests.yml @@ -24,6 +24,7 @@ private_key_passphrase: "{{ item.passphrase | d(omit) }}" password: changeit ssl_backend: "{{ ssl_backend }}" + keystore_type: "{{ item.keystore_type | d(omit) }}" loop: "{{ java_keystore_certs }}" check_mode: yes register: result_check @@ -91,6 +92,98 @@ loop: "{{ java_keystore_new_certs }}" register: result_pw_change + +- name: Create a Java keystore for the given certificates (force keystore type pkcs12, check mode) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + keystore_type: pkcs12 + loop: "{{ java_keystore_new_certs }}" + check_mode: yes + register: result_type_pkcs12_check + +- name: Create a Java keystore for the given certificates (force keystore type jks, check mode) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + keystore_type: jks + loop: "{{ java_keystore_new_certs }}" + check_mode: yes + register: result_type_jks_check + +- name: Create a Java keystore for the given certificates (force keystore type jks) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + keystore_type: jks + loop: "{{ java_keystore_new_certs }}" + register: result_type_jks + + +- name: Stat keystore (before failure) + ansible.builtin.stat: + path: "{{ output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + loop: "{{ java_keystore_new_certs }}" + register: result_stat_before + +- name: Fail to create a Java keystore for the 
given certificates (password too short) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: short + keystore_type: jks + loop: "{{ java_keystore_new_certs }}" + register: result_fail_jks + ignore_errors: true + +- name: Stat keystore (after failure) + ansible.builtin.stat: + path: "{{ output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + loop: "{{ java_keystore_new_certs }}" + register: result_stat_after + + +- name: Create a Java keystore for the given certificates (keystore type changed, check mode) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + keystore_type: pkcs12 + loop: "{{ java_keystore_new_certs }}" + check_mode: yes + register: result_type_change_check + +- name: Create a Java keystore for the given certificates (keystore type changed) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + keystore_type: pkcs12 + loop: "{{ java_keystore_new_certs }}" + register: result_type_change + + +- name: Create a Java keystore for the given certificates (omit keystore type, check mode) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + loop: "{{ java_keystore_new_certs }}" + check_mode: yes + register: result_type_omit_check + +- name: Create a Java keystore for the given certificates (omit keystore type) + community.general.java_keystore: + <<: *java_keystore_params + name: foobar + password: hunter2 + loop: "{{ java_keystore_new_certs }}" + register: result_type_omit + + - name: Check that the remote certificates have not been removed ansible.builtin.file: path: "{{ output_dir ~ '/' ~ item.name ~ '.pem' }}" @@ -118,3 +211,25 @@ - result_alias_change_check is changed - result_pw_change is changed - result_pw_change_check is changed + + # We don't know if we start from jks or pkcs12 format, anyway check mode + # and actual mode must return the same 'changed' state, and 'jks' 
and + # 'pkcs12' must give opposite results on a same host. + - result_type_jks_check.changed != result_type_pkcs12_check.changed + - result_type_jks_check.changed == result_type_jks.changed + + - result_type_change is changed + - result_type_change_check is changed + - result_type_omit is not changed + - result_type_omit_check is not changed + + # keystore properties must remain the same after failure + - result_fail_jks is failed + - result_stat_before.results[0].stat.uid == result_stat_after.results[0].stat.uid + - result_stat_before.results[1].stat.uid == result_stat_after.results[1].stat.uid + - result_stat_before.results[0].stat.gid == result_stat_after.results[0].stat.gid + - result_stat_before.results[1].stat.gid == result_stat_after.results[1].stat.gid + - result_stat_before.results[0].stat.mode == result_stat_after.results[0].stat.mode + - result_stat_before.results[1].stat.mode == result_stat_after.results[1].stat.mode + - result_stat_before.results[0].stat.checksum == result_stat_after.results[0].stat.checksum + - result_stat_before.results[1].stat.checksum == result_stat_after.results[1].stat.checksum diff --git a/tests/unit/plugins/modules/system/test_java_keystore.py b/tests/unit/plugins/modules/system/test_java_keystore.py index 7d582a3e99..7d078ac0f9 100644 --- a/tests/unit/plugins/modules/system/test_java_keystore.py +++ b/tests/unit/plugins/modules/system/test_java_keystore.py @@ -14,7 +14,25 @@ from ansible_collections.community.general.tests.unit.plugins.modules.utils impo from ansible_collections.community.general.tests.unit.compat.mock import patch from ansible_collections.community.general.tests.unit.compat.mock import Mock from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.modules.system.java_keystore import JavaKeystore, ArgumentSpec +from ansible_collections.community.general.plugins.modules.system.java_keystore import JavaKeystore + + +module_argument_spec = dict( + 
name=dict(type='str', required=True), + dest=dict(type='path', required=True), + certificate=dict(type='str', no_log=True), + certificate_path=dict(type='path'), + private_key=dict(type='str', no_log=True), + private_key_path=dict(type='path', no_log=False), + private_key_passphrase=dict(type='str', no_log=True), + password=dict(type='str', required=True, no_log=True), + ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']), + keystore_type=dict(type='str', choices=['jks', 'pkcs12']), + force=dict(type='bool', default=False), +) +module_supports_check_mode = True +module_choose_between = (['certificate', 'certificate_path'], + ['private_key', 'private_key_path']) class TestCreateJavaKeystore(ModuleTestCase): @@ -25,11 +43,13 @@ class TestCreateJavaKeystore(ModuleTestCase): super(TestCreateJavaKeystore, self).setUp() orig_exists = os.path.exists - self.spec = ArgumentSpec() self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file') self.mock_create_path = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_path') + self.mock_current_type = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.JavaKeystore.current_type') self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.mock_preserved_copy = patch('ansible.module_utils.basic.AnsibleModule.preserved_copy') + self.mock_atomic_move = patch('ansible.module_utils.basic.AnsibleModule.atomic_move') self.mock_os_path_exists = patch('os.path.exists', side_effect=lambda path: True if path == '/path/to/keystore.jks' else orig_exists(path)) self.mock_selinux_context = patch('ansible.module_utils.basic.AnsibleModule.selinux_context', @@ -38,8 +58,11 @@ class TestCreateJavaKeystore(ModuleTestCase): side_effect=lambda path: (False, None)) 
self.run_command = self.mock_run_command.start() self.get_bin_path = self.mock_get_bin_path.start() + self.preserved_copy = self.mock_preserved_copy.start() + self.atomic_move = self.mock_atomic_move.start() self.create_file = self.mock_create_file.start() self.create_path = self.mock_create_path.start() + self.current_type = self.mock_current_type.start() self.selinux_context = self.mock_selinux_context.start() self.is_special_selinux_path = self.mock_is_special_selinux_path.start() self.os_path_exists = self.mock_os_path_exists.start() @@ -49,8 +72,11 @@ class TestCreateJavaKeystore(ModuleTestCase): super(TestCreateJavaKeystore, self).tearDown() self.mock_create_file.stop() self.mock_create_path.stop() + self.mock_current_type.stop() self.mock_run_command.stop() self.mock_get_bin_path.stop() + self.mock_preserved_copy.stop() + self.mock_atomic_move.stop() self.mock_selinux_context.stop() self.mock_is_special_selinux_path.stop() self.mock_os_path_exists.stop() @@ -65,8 +91,10 @@ class TestCreateJavaKeystore(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) with patch('os.remove', return_value=True): @@ -96,8 +124,10 @@ class TestCreateJavaKeystore(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) module.exit_json = Mock() @@ -106,7 +136,7 @@ class TestCreateJavaKeystore(ModuleTestCase): with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', 
''] - self.run_command.side_effect = [(1, '', ''), (0, '', '')] + self.run_command.side_effect = [(1, '', 'Oops'), (0, '', '')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] jks = JavaKeystore(module) jks.create() @@ -118,6 +148,7 @@ class TestCreateJavaKeystore(ModuleTestCase): "-passout", "stdin", "-passin", "stdin"], msg='', + err='Oops', rc=1 ) @@ -131,8 +162,10 @@ class TestCreateJavaKeystore(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) module.exit_json = Mock() @@ -141,7 +174,7 @@ class TestCreateJavaKeystore(ModuleTestCase): with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmp1cyp12xa'] self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', ''] - self.run_command.side_effect = [(1, '', ''), (0, '', '')] + self.run_command.side_effect = [(1, '', 'Oops'), (0, '', '')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] jks = JavaKeystore(module) jks.create() @@ -152,6 +185,7 @@ class TestCreateJavaKeystore(ModuleTestCase): "-out", "/tmp/tmp1cyp12xa", "-passout", "stdin"], msg='', + err='Oops', rc=1 ) @@ -165,8 +199,10 @@ class TestCreateJavaKeystore(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) module.exit_json = Mock() @@ -175,7 +211,7 @@ class TestCreateJavaKeystore(ModuleTestCase): with patch('os.remove', return_value=True): self.create_path.side_effect = ['/tmp/tmpgrzm2ah7'] self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp', ''] - 
self.run_command.side_effect = [(0, '', ''), (1, '', '')] + self.run_command.side_effect = [(0, '', ''), (1, '', 'Oops')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] jks = JavaKeystore(module) jks.create() @@ -185,6 +221,7 @@ class TestCreateJavaKeystore(ModuleTestCase): "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test", "-noprompt"], msg='', + err='Oops', rc=1 ) @@ -195,20 +232,28 @@ class TestCertChanged(ModuleTestCase): def setUp(self): """Setup.""" super(TestCertChanged, self).setUp() - self.spec = ArgumentSpec() self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file') + self.mock_current_type = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.JavaKeystore.current_type') self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command') self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path') + self.mock_preserved_copy = patch('ansible.module_utils.basic.AnsibleModule.preserved_copy') + self.mock_atomic_move = patch('ansible.module_utils.basic.AnsibleModule.atomic_move') self.run_command = self.mock_run_command.start() self.create_file = self.mock_create_file.start() self.get_bin_path = self.mock_get_bin_path.start() + self.current_type = self.mock_current_type.start() + self.preserved_copy = self.mock_preserved_copy.start() + self.atomic_move = self.mock_atomic_move.start() def tearDown(self): """Teardown.""" super(TestCertChanged, self).tearDown() self.mock_create_file.stop() + self.mock_current_type.stop() self.mock_run_command.stop() self.mock_get_bin_path.stop() + self.mock_preserved_copy.stop() + self.mock_atomic_move.stop() def test_cert_unchanged_same_fingerprint(self): set_module_args(dict( @@ -220,14 +265,17 @@ class TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - 
supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) with patch('os.remove', return_value=True): self.create_file.side_effect = ['/tmp/placeholder', ''] self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + self.current_type.side_effect = ['jks'] jks = JavaKeystore(module) result = jks.cert_changed() self.assertFalse(result, 'Fingerprint is identical') @@ -242,19 +290,22 @@ class TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) with patch('os.remove', return_value=True): self.create_file.side_effect = ['/tmp/placeholder', ''] self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + self.current_type.side_effect = ['jks'] jks = JavaKeystore(module) result = jks.cert_changed() self.assertTrue(result, 'Fingerprint mismatch') - def test_cert_changed_fail_alias_does_not_exist(self): + def test_cert_changed_alias_does_not_exist(self): set_module_args(dict( certificate='cert-foo', private_key='private-foo', @@ -264,8 +315,10 @@ class TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) with patch('os.remove', return_value=True): @@ -287,8 +340,10 @@ class 
TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) with patch('os.remove', return_value=True): @@ -310,8 +365,10 @@ class TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) module.exit_json = Mock() @@ -321,6 +378,7 @@ class TestCertChanged(ModuleTestCase): self.create_file.side_effect = ['/tmp/tmpdj6bvvme', ''] self.run_command.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')] self.get_bin_path.side_effect = ['keytool', 'openssl', ''] + self.current_type.side_effect = ['jks'] jks = JavaKeystore(module) jks.cert_changed() module.fail_json.assert_called_once_with( @@ -340,8 +398,10 @@ class TestCertChanged(ModuleTestCase): )) module = AnsibleModule( - argument_spec=self.spec.argument_spec, - supports_check_mode=self.spec.supports_check_mode + argument_spec=module_argument_spec, + supports_check_mode=module_supports_check_mode, + mutually_exclusive=module_choose_between, + required_one_of=module_choose_between ) module.exit_json = Mock() From c7cf6f2eb7b3795d6e9401104b9f7b3cbd46359d Mon Sep 17 00:00:00 2001 From: Stef Graces Date: Sun, 27 Jun 2021 16:09:41 +0200 Subject: [PATCH 0408/3093] gitlab_project - Add ability to create project under a user (#2824) * Add ability to create project under a user * Add changelog * Add username option * Update changelogs/fragments/2824-gitlab_project-project-under-user.yml Co-authored-by: Felix Fontein * Make group and username mutually exclusive Co-authored-by: Felix 
Fontein --- .../2824-gitlab_project-project-under-user.yml | 3 +++ .../modules/source_control/gitlab/gitlab_project.py | 13 ++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2824-gitlab_project-project-under-user.yml diff --git a/changelogs/fragments/2824-gitlab_project-project-under-user.yml b/changelogs/fragments/2824-gitlab_project-project-under-user.yml new file mode 100644 index 0000000000..7fa18941a0 --- /dev/null +++ b/changelogs/fragments/2824-gitlab_project-project-under-user.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - gitlab_project - projects can be created under other user's namespaces with the new ``username`` option (https://github.com/ansible-collections/community.general/pull/2824). diff --git a/plugins/modules/source_control/gitlab/gitlab_project.py b/plugins/modules/source_control/gitlab/gitlab_project.py index 61d1ac0cb1..c916246b78 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project.py +++ b/plugins/modules/source_control/gitlab/gitlab_project.py @@ -109,6 +109,11 @@ options: required: false default: false version_added: "2.0.0" + username: + description: + - Used to create a personal project under a user's name. 
+ type: str + version_added: "3.3.0" ''' EXAMPLES = r''' @@ -302,6 +307,7 @@ def main(): import_url=dict(type='str'), state=dict(type='str', default="present", choices=["absent", "present"]), lfs_enabled=dict(default=False, type='bool'), + username=dict(type='str'), )) module = AnsibleModule( @@ -309,6 +315,7 @@ def main(): mutually_exclusive=[ ['api_username', 'api_token'], ['api_password', 'api_token'], + ['group', 'username'], ], required_together=[ ['api_username', 'api_password'], @@ -332,6 +339,7 @@ def main(): import_url = module.params['import_url'] state = module.params['state'] lfs_enabled = module.params['lfs_enabled'] + username = module.params['username'] if not HAS_GITLAB_PACKAGE: module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) @@ -353,7 +361,10 @@ def main(): namespace_id = group.id else: - namespace = gitlab_instance.namespaces.list(search=gitlab_instance.user.username)[0] + if username: + namespace = gitlab_instance.namespaces.list(search=username)[0] + else: + namespace = gitlab_instance.namespaces.list(search=gitlab_instance.user.username)[0] namespace_id = namespace.id if not namespace_id: From 0a9cf3811880bd3e7640b637ffef2978df8f5429 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Sun, 27 Jun 2021 19:10:49 +0430 Subject: [PATCH 0409/3093] yum_versionlock: fix idempotency when using wildcard (asterisk) (#2787) * Check idempotency on yum_versionlock * Lock packages wildcard * fix formatting Co-authored-by: Felix Fontein * Fix formatting in asserts * little closer but not still there * Import fnmatch * Change check_mode logic * Add check_mode for add * Add changelog Co-authored-by: Felix Fontein --- ...ck-fix_idempotency_when_using_wildcard.yml | 3 +++ .../modules/packaging/os/yum_versionlock.py | 21 +++++++++--------- .../targets/yum_versionlock/tasks/main.yml | 22 +++++++++++++++---- 3 files changed, 32 insertions(+), 14 deletions(-) create mode 100644 
changelogs/fragments/2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml diff --git a/changelogs/fragments/2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml b/changelogs/fragments/2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml new file mode 100644 index 0000000000..9fb569ec42 --- /dev/null +++ b/changelogs/fragments/2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - yum_versionlock - fix idempotency when using wildcard (asterisk) in ``name`` option (https://github.com/ansible-collections/community.general/issues/2761). diff --git a/plugins/modules/packaging/os/yum_versionlock.py b/plugins/modules/packaging/os/yum_versionlock.py index 6dfb3d20ba..62110bf00a 100644 --- a/plugins/modules/packaging/os/yum_versionlock.py +++ b/plugins/modules/packaging/os/yum_versionlock.py @@ -76,6 +76,7 @@ state: from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native +from fnmatch import fnmatch class YumVersionLock: @@ -125,23 +126,23 @@ def main(): if state in ('present'): command = 'add' for single_pkg in packages: - if single_pkg not in versionlock_packages: - if module.check_mode: - changed = True - continue + if not any(fnmatch(pkg.split(":", 1)[-1], single_pkg) for pkg in versionlock_packages.split()): packages_list.append(single_pkg) if packages_list: - changed = yum_v.ensure_state(packages_list, command) + if module.check_mode: + changed = True + else: + changed = yum_v.ensure_state(packages_list, command) elif state in ('absent'): command = 'delete' for single_pkg in packages: - if single_pkg in versionlock_packages: - if module.check_mode: - changed = True - continue + if any(fnmatch(pkg, single_pkg) for pkg in versionlock_packages.split()): packages_list.append(single_pkg) if packages_list: - changed = yum_v.ensure_state(packages_list, command) + if module.check_mode: + changed = True + else: + changed = 
yum_v.ensure_state(packages_list, command) module.exit_json( changed=changed, diff --git a/tests/integration/targets/yum_versionlock/tasks/main.yml b/tests/integration/targets/yum_versionlock/tasks/main.yml index d1a1522087..2e551b48ca 100644 --- a/tests/integration/targets/yum_versionlock/tasks/main.yml +++ b/tests/integration/targets/yum_versionlock/tasks/main.yml @@ -29,6 +29,18 @@ state: present register: lock_all_packages + - name: Lock all packages again + community.general.yum_versionlock: + name: "{{ yum_updates.results | map(attribute='name') | list }}" + state: present + register: lock_all_packages_again + + - name: Lock packages wildcard + community.general.yum_versionlock: + name: "nss*" + state: present + register: lock_nss_wildcard + # This should fail when it needs user interaction and missing -y is on purpose. - name: Update all packages (not really) command: yum update --setopt=obsoletes=0 @@ -54,10 +66,12 @@ - name: Assert everything is fine assert: that: - - "{{ lock_all_packages.changed }}" - - "{{ not update_all_locked_packages.changed }}" - - "{{ unlock_all_packages.changed }}" - - "{{ update_all_packages.changed }}" + - lock_all_packages is changed + - lock_all_packages_again is not changed + - lock_nss_wildcard is not changed + - update_all_locked_packages is not changed + - unlock_all_packages is changed + - update_all_packages is changed when: yum_updates.results | length != 0 - name: Remove installed packages in case it was not installed From debb15efbe7b819a5dc01e6053aa3b2b964c0260 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 27 Jun 2021 18:47:11 +0200 Subject: [PATCH 0410/3093] pacman: Descriptive state documentation (#2894) * pacman: Descriptive state documentation * Update plugins/modules/packaging/os/pacman.py Co-authored-by: Felix Fontein Co-authored-by: Martin Rys Co-authored-by: Felix Fontein --- plugins/modules/packaging/os/pacman.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git 
a/plugins/modules/packaging/os/pacman.py b/plugins/modules/packaging/os/pacman.py index 859c90a6c4..372d13cd49 100644 --- a/plugins/modules/packaging/os/pacman.py +++ b/plugins/modules/packaging/os/pacman.py @@ -30,9 +30,12 @@ options: state: description: - - Desired state of the package. + - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package. + - C(present) and C(installed) will simply ensure that a desired package is installed. + - C(latest) will update the specified package if it is not of the latest available version. + - C(absent) and C(removed) will remove the specified package. default: present - choices: [ absent, latest, present, installed, removed ] + choices: [ absent, installed, latest, present, removed ] type: str force: From 2d1527a56423d950ce2f12e66d37253dbfdf02e5 Mon Sep 17 00:00:00 2001 From: omula Date: Mon, 28 Jun 2021 20:46:44 +0200 Subject: [PATCH 0411/3093] [nmcli] add connection.slave-type for teamed devices (#2827) * [nmcli] add connection.slave-type for teamed devices * [nmcli] add fragment with changes for #2827 * [nmcli] add tests for network team * [nmcli] fix testing Co-authored-by: Oriol MULA VALLS --- .../fragments/2827-nmcli_fix_team_slave.yml | 2 + plugins/modules/net_tools/nmcli.py | 4 + .../plugins/modules/net_tools/test_nmcli.py | 145 ++++++++++++++++++ 3 files changed, 151 insertions(+) create mode 100644 changelogs/fragments/2827-nmcli_fix_team_slave.yml diff --git a/changelogs/fragments/2827-nmcli_fix_team_slave.yml b/changelogs/fragments/2827-nmcli_fix_team_slave.yml new file mode 100644 index 0000000000..02f001c4f5 --- /dev/null +++ b/changelogs/fragments/2827-nmcli_fix_team_slave.yml @@ -0,0 +1,2 @@ +bugfixes: + - nmcli - fixes team-slave configuration by adding connection.slave-type (https://github.com/ansible-collections/community.general/issues/766). 
diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 657df3bd2a..7ed515fc75 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -833,6 +833,10 @@ class Nmcli(object): 'bridge-port.hairpin-mode': self.hairpin, 'bridge-port.priority': self.slavepriority, }) + elif self.type == 'team-slave': + options.update({ + 'connection.slave-type': 'team', + }) elif self.tunnel_conn_type: options.update({ 'ip-tunnel.local': self.ip_tunnel_local, diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 911ffd1217..ba526b1d65 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -257,6 +257,50 @@ bridge-port.hairpin-mode: yes bridge-port.priority: 32 """ +TESTCASE_TEAM = [ + { + 'type': 'team', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'team0_non_existant', + 'state': 'present', + '_ansible_check_mode': False, + } +] + +TESTCASE_TEAM_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_device +connection.interface-name: team0_non_existant +connection.autoconnect: yes +connection.type: team +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no +""" + +TESTCASE_TEAM_SLAVE = [ + { + 'type': 'team-slave', + 'conn_name': 'non_existent_nw_slaved_device', + 'ifname': 'generic_slaved_non_existant', + 'master': 'team0_non_existant', + 'state': 'present', + '_ansible_check_mode': False, + } +] + +TESTCASE_TEAM_SLAVE_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_slaved_device +connection.interface-name: generic_slaved_non_existant +connection.autoconnect: yes +connection.master: team0_non_existant +connection.slave-type: team +802-3-ethernet.mtu: auto +""" + TESTCASE_VLAN = [ { 'type': 'vlan', @@ -495,6 +539,20 @@ def 
mocked_bridge_slave_unchanged(mocker): execute_return=(0, TESTCASE_BRIDGE_SLAVE_SHOW_OUTPUT, "")) +@pytest.fixture +def mocked_team_connection_unchanged(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=(0, TESTCASE_TEAM_SHOW_OUTPUT, "")) + + +@pytest.fixture +def mocked_team_slave_connection_unchanged(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=(0, TESTCASE_TEAM_SLAVE_SHOW_OUTPUT, "")) + + @pytest.fixture def mocked_vlan_connection_unchanged(mocker): mocker_set(mocker, @@ -952,6 +1010,93 @@ def test_bridge_slave_unchanged(mocked_bridge_slave_unchanged, capfd): assert not results['changed'] +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM, indirect=['patch_ansible_module']) +def test_team_connection_create(mocked_generic_connection_create, capfd): + """ + Test : Team connection created + """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'add' + assert args[0][3] == 'type' + assert args[0][4] == 'team' + assert args[0][5] == 'con-name' + assert args[0][6] == 'non_existent_nw_device' + + for param in ['connection.autoconnect', 'connection.interface-name', 'team0_non_existant']: + assert param in args[0] + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM, indirect=['patch_ansible_module']) +def test_team_connection_unchanged(mocked_team_connection_unchanged, capfd): + """ + Test : Team connection unchanged + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert not results['changed'] + + 
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_SLAVE, indirect=['patch_ansible_module']) +def test_create_team_slave(mocked_generic_connection_create, capfd): + """ + Test if Team_slave created + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'add' + assert args[0][3] == 'type' + assert args[0][4] == 'team-slave' + assert args[0][5] == 'con-name' + assert args[0][6] == 'non_existent_nw_slaved_device' + + for param in ['connection.autoconnect', 'connection.interface-name', 'connection.master', 'team0_non_existant', 'connection.slave-type']: + assert param in args[0] + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_SLAVE, indirect=['patch_ansible_module']) +def test_team_slave_connection_unchanged(mocked_team_slave_connection_unchanged, capfd): + """ + Test : Team slave connection unchanged + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert not results['changed'] + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module']) def test_create_vlan_con(mocked_generic_connection_create, capfd): """ From 9c7b539ef62b4b18e31f3b8aeb1243fd77c9404b Mon Sep 17 00:00:00 2001 From: NivKeidan <51288016+NivKeidan@users.noreply.github.com> Date: Tue, 29 Jun 2021 08:56:59 +0300 Subject: [PATCH 0412/3093] Add fallback url for jenkins plugin (#1334) * uncoupled updates_url from plugin download urls added new parameters: versioned_plugins_url, latest_plugins_url * parameters updates_url, latest_plugins_url and versioned_plugins_url changed 
type to list of strings to implement fallback URLs usage added type conversion if they are string (backward compatibility) * removed type conversion this is handled by ansible validation fix: dont fail if first url fails * added fallback: if installation from plugin manager fails, try downloading the plugin manually * fixed test failures * PEP8 indent fix * changelog fragment * added debug outputs for new url fallback behavior * added version_added in description for latest_plugins_url Co-authored-by: Felix Fontein * added version_added in description for versioned_plugins_url Co-authored-by: Felix Fontein * Update changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml Co-authored-by: Felix Fontein * improve backwards-compatibility add optional arg to allow custom update-center.json targets * pep8 fixes * fix inconsistency in argument documentation * Apply suggestions from code review Co-authored-by: Amin Vakil * add unit tests * fix pep8 * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Amin Vakil --- .../1334-jenkins-plugin-fallback-urls.yaml | 2 + .../web_infrastructure/jenkins_plugin.py | 211 +++++++++++++----- .../web_infrastructure/test_jenkins_plugin.py | 37 +++ 3 files changed, 188 insertions(+), 62 deletions(-) create mode 100644 changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml diff --git a/changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml b/changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml new file mode 100644 index 0000000000..be0a86fa5b --- /dev/null +++ b/changelogs/fragments/1334-jenkins-plugin-fallback-urls.yaml @@ -0,0 +1,2 @@ +minor_changes: + - jenkins_plugin - add fallback url(s) for failure of plugin installation/download (https://github.com/ansible-collections/community.general/pull/1334). 
diff --git a/plugins/modules/web_infrastructure/jenkins_plugin.py b/plugins/modules/web_infrastructure/jenkins_plugin.py index 20fd8554bc..a280b50aa6 100644 --- a/plugins/modules/web_infrastructure/jenkins_plugin.py +++ b/plugins/modules/web_infrastructure/jenkins_plugin.py @@ -66,12 +66,33 @@ options: C(latest) is specified. default: 86400 updates_url: - type: str + type: list + elements: str description: - - URL of the Update Centre. - - Used as the base URL to download the plugins and the - I(update-center.json) JSON file. - default: https://updates.jenkins.io + - A list of base URL(s) to retrieve I(update-center.json), and direct plugin files from. + - This can be a list since community.general 3.3.0. + default: ['https://updates.jenkins.io', 'http://mirrors.jenkins.io'] + update_json_url_segment: + type: list + elements: str + description: + - A list of URL segment(s) to retrieve the update center json file from. + default: ['update-center.json', 'updates/update-center.json'] + version_added: 3.3.0 + latest_plugins_url_segments: + type: list + elements: str + description: + - Path inside the I(updates_url) to get latest plugins from. + default: ['latest'] + version_added: 3.3.0 + versioned_plugins_url_segments: + type: list + elements: str + description: + - Path inside the I(updates_url) to get specific version of plugins from. + default: ['download/plugins', 'plugins'] + version_added: 3.3.0 url: type: str description: @@ -283,6 +304,10 @@ import tempfile import time +class FailedInstallingWithPluginManager(Exception): + pass + + class JenkinsPlugin(object): def __init__(self, module): # To be able to call fail_json @@ -330,9 +355,42 @@ class JenkinsPlugin(object): return json_data + def _get_urls_data(self, urls, what=None, msg_status=None, msg_exception=None, **kwargs): + # Compose default messages + if msg_status is None: + msg_status = "Cannot get %s" % what + + if msg_exception is None: + msg_exception = "Retrieval of %s failed." 
% what + + errors = {} + for url in urls: + err_msg = None + try: + self.module.debug("fetching url: %s" % url) + response, info = fetch_url( + self.module, url, timeout=self.timeout, cookies=self.cookies, + headers=self.crumb, **kwargs) + + if info['status'] == 200: + return response + else: + err_msg = ("%s. fetching url %s failed. response code: %s" % (msg_status, url, info['status'])) + if info['status'] > 400: # extend error message + err_msg = "%s. response body: %s" % (err_msg, info['body']) + except Exception as e: + err_msg = "%s. fetching url %s failed. error msg: %s" % (msg_status, url, to_native(e)) + finally: + if err_msg is not None: + self.module.debug(err_msg) + errors[url] = err_msg + + # failed on all urls + self.module.fail_json(msg=msg_exception, details=errors) + def _get_url_data( self, url, what=None, msg_status=None, msg_exception=None, - **kwargs): + dont_fail=False, **kwargs): # Compose default messages if msg_status is None: msg_status = "Cannot get %s" % what @@ -347,9 +405,15 @@ class JenkinsPlugin(object): headers=self.crumb, **kwargs) if info['status'] != 200: - self.module.fail_json(msg=msg_status, details=info['msg']) + if dont_fail: + raise FailedInstallingWithPluginManager(info['msg']) + else: + self.module.fail_json(msg=msg_status, details=info['msg']) except Exception as e: - self.module.fail_json(msg=msg_exception, details=to_native(e)) + if dont_fail: + raise FailedInstallingWithPluginManager(e) + else: + self.module.fail_json(msg=msg_exception, details=to_native(e)) return response @@ -394,6 +458,39 @@ class JenkinsPlugin(object): break + def _install_with_plugin_manager(self): + if not self.module.check_mode: + # Install the plugin (with dependencies) + install_script = ( + 'd = Jenkins.instance.updateCenter.getPlugin("%s")' + '.deploy(); d.get();' % self.params['name']) + + if self.params['with_dependencies']: + install_script = ( + 'Jenkins.instance.updateCenter.getPlugin("%s")' + 
'.getNeededDependencies().each{it.deploy()}; %s' % ( + self.params['name'], install_script)) + + script_data = { + 'script': install_script + } + data = urlencode(script_data) + + # Send the installation request + r = self._get_url_data( + "%s/scriptText" % self.url, + msg_status="Cannot install plugin.", + msg_exception="Plugin installation has failed.", + data=data, + dont_fail=True) + + hpi_file = '%s/plugins/%s.hpi' % ( + self.params['jenkins_home'], + self.params['name']) + + if os.path.isfile(hpi_file): + os.remove(hpi_file) + def install(self): changed = False plugin_file = ( @@ -402,39 +499,13 @@ class JenkinsPlugin(object): self.params['name'])) if not self.is_installed and self.params['version'] in [None, 'latest']: - if not self.module.check_mode: - # Install the plugin (with dependencies) - install_script = ( - 'd = Jenkins.instance.updateCenter.getPlugin("%s")' - '.deploy(); d.get();' % self.params['name']) + try: + self._install_with_plugin_manager() + changed = True + except FailedInstallingWithPluginManager: # Fallback to manually downloading the plugin + pass - if self.params['with_dependencies']: - install_script = ( - 'Jenkins.instance.updateCenter.getPlugin("%s")' - '.getNeededDependencies().each{it.deploy()}; %s' % ( - self.params['name'], install_script)) - - script_data = { - 'script': install_script - } - data = urlencode(script_data) - - # Send the installation request - r = self._get_url_data( - "%s/scriptText" % self.url, - msg_status="Cannot install plugin.", - msg_exception="Plugin installation has failed.", - data=data) - - hpi_file = '%s/plugins/%s.hpi' % ( - self.params['jenkins_home'], - self.params['name']) - - if os.path.isfile(hpi_file): - os.remove(hpi_file) - - changed = True - else: + if not changed: # Check if the plugin directory exists if not os.path.isdir(self.params['jenkins_home']): self.module.fail_json( @@ -449,26 +520,17 @@ class JenkinsPlugin(object): if self.params['version'] in [None, 'latest']: # Take latest 
version - plugin_url = ( - "%s/latest/%s.hpi" % ( - self.params['updates_url'], - self.params['name'])) + plugin_urls = self._get_latest_plugin_urls() else: # Take specific version - plugin_url = ( - "{0}/download/plugins/" - "{1}/{2}/{1}.hpi".format( - self.params['updates_url'], - self.params['name'], - self.params['version'])) - + plugin_urls = self._get_versioned_plugin_urls() if ( self.params['updates_expiration'] == 0 or self.params['version'] not in [None, 'latest'] or checksum_old is None): # Download the plugin file directly - r = self._download_plugin(plugin_url) + r = self._download_plugin(plugin_urls) # Write downloaded plugin into file if checksums don't match if checksum_old is None: @@ -498,7 +560,7 @@ class JenkinsPlugin(object): # If the latest version changed, download it if checksum_old != to_bytes(plugin_data['sha1']): if not self.module.check_mode: - r = self._download_plugin(plugin_url) + r = self._download_plugin(plugin_urls) self._write_file(plugin_file, r) changed = True @@ -521,6 +583,27 @@ class JenkinsPlugin(object): return changed + def _get_latest_plugin_urls(self): + urls = [] + for base_url in self.params['updates_url']: + for update_segment in self.params['latest_plugins_url_segments']: + urls.append("{0}/{1}/{2}.hpi".format(base_url, update_segment, self.params['name'])) + return urls + + def _get_versioned_plugin_urls(self): + urls = [] + for base_url in self.params['updates_url']: + for versioned_segment in self.params['versioned_plugins_url_segments']: + urls.append("{0}/{1}/{2}/{3}/{2}.hpi".format(base_url, versioned_segment, self.params['name'], self.params['version'])) + return urls + + def _get_update_center_urls(self): + urls = [] + for base_url in self.params['updates_url']: + for update_json in self.params['update_json_url_segment']: + urls.append("{0}/{1}".format(base_url, update_json)) + return urls + def _download_updates(self): updates_filename = 'jenkins-plugin-cache.json' updates_dir = 
os.path.expanduser('~/.ansible/tmp') @@ -540,11 +623,11 @@ class JenkinsPlugin(object): # Download the updates file if needed if download_updates: - url = "%s/update-center.json" % self.params['updates_url'] + urls = self._get_update_center_urls() # Get the data - r = self._get_url_data( - url, + r = self._get_urls_data( + urls, msg_status="Remote updates not found.", msg_exception="Updates download failed.") @@ -602,15 +685,14 @@ class JenkinsPlugin(object): return data['plugins'][self.params['name']] - def _download_plugin(self, plugin_url): + def _download_plugin(self, plugin_urls): # Download the plugin - r = self._get_url_data( - plugin_url, + + return self._get_urls_data( + plugin_urls, msg_status="Plugin not found.", msg_exception="Plugin download failed.") - return r - def _write_file(self, f, data): # Store the plugin into a temp file and then move it tmp_f_fd, tmp_f = tempfile.mkstemp() @@ -721,7 +803,12 @@ def main(): default='present'), timeout=dict(default=30, type="int"), updates_expiration=dict(default=86400, type="int"), - updates_url=dict(default='https://updates.jenkins.io'), + updates_url=dict(type="list", elements="str", default=['https://updates.jenkins.io', + 'http://mirrors.jenkins.io']), + update_json_url_segment=dict(type="list", elements="str", default=['update-center.json', + 'updates/update-center.json']), + latest_plugins_url_segments=dict(type="list", elements="str", default=['latest']), + versioned_plugins_url_segments=dict(type="list", elements="str", default=['download/plugins', 'plugins']), url=dict(default='http://localhost:8080'), url_password=dict(no_log=True), version=dict(), diff --git a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py index ccfeb24536..b928ad824c 100644 --- a/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py +++ b/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py @@ -151,3 +151,40 
@@ def test__get_json_data(mocker): 'CSRF') assert isinstance(json_data, Mapping) + + +def test__new_fallback_urls(mocker): + "test generation of new fallback URLs" + + params = { + "url": "http://fake.jenkins.server", + "timeout": 30, + "name": "test-plugin", + "version": "1.2.3", + "updates_url": ["https://some.base.url"], + "latest_plugins_url_segments": ["test_latest"], + "versioned_plugins_url_segments": ["ansible", "versioned_plugins"], + "update_json_url_segment": ["unreachable", "updates/update-center.json"], + } + module = mocker.Mock() + module.params = params + + JenkinsPlugin._csrf_enabled = pass_function + JenkinsPlugin._get_installed_plugins = pass_function + + jenkins_plugin = JenkinsPlugin(module) + + latest_urls = jenkins_plugin._get_latest_plugin_urls() + assert isInList(latest_urls, "https://some.base.url/test_latest/test-plugin.hpi") + versioned_urls = jenkins_plugin._get_versioned_plugin_urls() + assert isInList(versioned_urls, "https://some.base.url/versioned_plugins/test-plugin/1.2.3/test-plugin.hpi") + json_urls = jenkins_plugin._get_update_center_urls() + assert isInList(json_urls, "https://some.base.url/updates/update-center.json") + + +def isInList(l, i): + print("checking if %s in %s" % (i, l)) + for item in l: + if item == i: + return True + return False From 677e88b2574b3f859aeb0dd3fbecaa9aa0d04638 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 29 Jun 2021 12:38:45 +0200 Subject: [PATCH 0413/3093] The next release will be 3.4.0. 
--- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index c559415eb2..640f4151d3 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.3.0 +version: 3.4.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 0e829e6a23548ee948bfccc456498554832a56af Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Wed, 30 Jun 2021 15:01:17 +0200 Subject: [PATCH 0414/3093] Fix bug when 2 identical executions in same auth flow (#2904) * Fix bug when 2 identical executions in same auth flow * Add changelog fragment * Fix unit tests * Update changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...when-2-identical-executions-in-same-auth-flow.yml | 3 +++ .../identity/keycloak/keycloak_authentication.py | 12 ++++++------ .../keycloak/test_keycloak_authentication.py | 6 +++--- 3 files changed, 12 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml diff --git a/changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml b/changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml new file mode 100644 index 0000000000..21fde3eb58 --- /dev/null +++ b/changelogs/fragments/2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml @@ -0,0 +1,3 @@ +bugfixes: + - keycloak_authentication - fix bug when two identical executions are in the same authentication flow + (https://github.com/ansible-collections/community.general/pull/2904). 
diff --git a/plugins/modules/identity/keycloak/keycloak_authentication.py b/plugins/modules/identity/keycloak/keycloak_authentication.py index 98b6378dac..9fd04eb70b 100644 --- a/plugins/modules/identity/keycloak/keycloak_authentication.py +++ b/plugins/modules/identity/keycloak/keycloak_authentication.py @@ -200,11 +200,11 @@ def create_or_update_executions(kc, config, realm='master'): try: changed = False if "authenticationExecutions" in config: + # Get existing executions on the Keycloak server for this alias + existing_executions = kc.get_executions_representation(config, realm=realm) for new_exec_index, new_exec in enumerate(config["authenticationExecutions"], start=0): if new_exec["index"] is not None: new_exec_index = new_exec["index"] - # Get existing executions on the Keycloak server for this alias - existing_executions = kc.get_executions_representation(config, realm=realm) exec_found = False # Get flowalias parent if given if new_exec["flowAlias"] is not None: @@ -222,6 +222,9 @@ def create_or_update_executions(kc, config, realm='master'): # Compare the executions to see if it need changes if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index: changed = True + id_to_update = existing_executions[exec_index]["id"] + # Remove exec from list in case 2 exec with same name + existing_executions[exec_index].clear() elif new_exec["providerId"] is not None: kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm) changed = True @@ -229,13 +232,10 @@ def create_or_update_executions(kc, config, realm='master'): kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm) changed = True if changed: - # Get existing executions on the Keycloak server for this alias - existing_executions = kc.get_executions_representation(config, realm=realm) - exec_index = find_exec_in_executions(new_exec, existing_executions) if exec_index != -1: # Update the existing execution updated_exec = { - 
"id": existing_executions[exec_index]["id"] + "id": id_to_update } # add the execution configuration if new_exec["authenticationConfig"] is not None: diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py index 91e34eea7b..db0168aa83 100644 --- a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py @@ -343,7 +343,7 @@ class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected @@ -434,7 +434,7 @@ class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected @@ -611,7 +611,7 @@ class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) + 
self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 1) # Verify that the module's changed status matches what is expected From a97d82be88407384f977f50270bf314b1f76d22f Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Wed, 30 Jun 2021 17:36:56 +0430 Subject: [PATCH 0415/3093] Add integration tests for snap (#2907) * Add integration tests for snap * Also test on fedora and remove snapd if it was not installed * disable test for now --- tests/integration/targets/snap/aliases | 6 ++ tests/integration/targets/snap/tasks/main.yml | 72 +++++++++++++++++++ 2 files changed, 78 insertions(+) create mode 100644 tests/integration/targets/snap/aliases create mode 100644 tests/integration/targets/snap/tasks/main.yml diff --git a/tests/integration/targets/snap/aliases b/tests/integration/targets/snap/aliases new file mode 100644 index 0000000000..d7f5ce60c5 --- /dev/null +++ b/tests/integration/targets/snap/aliases @@ -0,0 +1,6 @@ +shippable/posix/group1 +skip/aix +skip/freebsd +skip/osx +skip/macos +disabled #FIXME 2609 diff --git a/tests/integration/targets/snap/tasks/main.yml b/tests/integration/targets/snap/tasks/main.yml new file mode 100644 index 0000000000..e015122ff2 --- /dev/null +++ b/tests/integration/targets/snap/tasks/main.yml @@ -0,0 +1,72 @@ +--- +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: install snapd + apt: + name: snapd + state: present + register: snapd_install_ubuntu + when: ansible_distribution == 'Ubuntu' + +- name: install snapd + dnf: + name: snapd + state: present + register: snapd_install_fedora + when: ansible_distribution == 'Fedora' + +- block: + - name: install package + community.general.snap: + name: hello-world + state: 
present + register: install + + - name: install package again + community.general.snap: + name: hello-world + state: present + register: install_again + + - name: Assert package has been installed just once + assert: + that: + - install is changed + - install_again is not changed + + - name: check package has been installed correctly + command: hello-world + + - name: remove package + community.general.snap: + name: hello-world + state: absent + register: remove + + - name: remove package again + community.general.snap: + name: hello-world + state: absent + register: remove_again + + - name: Assert package has been removed just once + assert: + that: + - remove is changed + - remove_again is not changed + when: ansible_distribution in ['Ubuntu','Fedora'] + +- name: Remove snapd in case it was not installed + apt: + name: snapd + state: absent + when: snapd_install_ubuntu is changed and ansible_distribution == 'Ubuntu' + +- name: Remove snapd in case it was not installed + dnf: + name: snapd + state: absent + when: snapd_install_fedora is changed and ansible_distribution == 'Fedora' From c63dc624b77a45585b323c67096d2980e687b9c4 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 1 Jul 2021 18:53:48 +0200 Subject: [PATCH 0416/3093] Fix snap module, and module helper behavior on rc != 0 in output (#2912) * Try some snap fixes. * Fix logic. * Try to run tests privileged. * Prevent failure on rc != 0. * Fix formatting. * Revert "Try to run tests privileged." This reverts commit 77ca91f5020233304a8ef6d9f504663de8e3136c. * Try to run tests on RHEL instead. * Make sure that snapd is running. * Add changelog fragment. * str -> to_native. * Make sure that installed binary is actually found. * Add check mode tests. * Mention #2835 in changelog fragment. 
--- .../fragments/2912-snap-module-helper.yml | 3 + plugins/module_utils/mh/base.py | 5 +- plugins/modules/packaging/os/snap.py | 26 +++--- tests/integration/targets/snap/aliases | 2 +- .../targets/snap/defaults/main.yml | 4 + .../targets/snap/handlers/main.yml | 5 ++ tests/integration/targets/snap/meta/main.yml | 3 + .../integration/targets/snap/tasks/Debian.yml | 1 + .../integration/targets/snap/tasks/Fedora.yml | 1 + .../integration/targets/snap/tasks/RedHat.yml | 1 + .../targets/snap/tasks/default.yml | 15 ++++ tests/integration/targets/snap/tasks/main.yml | 88 ++++++++++++------- .../targets/snap/tasks/nothing.yml | 2 + 13 files changed, 111 insertions(+), 45 deletions(-) create mode 100644 changelogs/fragments/2912-snap-module-helper.yml create mode 100644 tests/integration/targets/snap/defaults/main.yml create mode 100644 tests/integration/targets/snap/handlers/main.yml create mode 100644 tests/integration/targets/snap/meta/main.yml create mode 120000 tests/integration/targets/snap/tasks/Debian.yml create mode 120000 tests/integration/targets/snap/tasks/Fedora.yml create mode 120000 tests/integration/targets/snap/tasks/RedHat.yml create mode 100644 tests/integration/targets/snap/tasks/default.yml create mode 100644 tests/integration/targets/snap/tasks/nothing.yml diff --git a/changelogs/fragments/2912-snap-module-helper.yml b/changelogs/fragments/2912-snap-module-helper.yml new file mode 100644 index 0000000000..cb9935a5e4 --- /dev/null +++ b/changelogs/fragments/2912-snap-module-helper.yml @@ -0,0 +1,3 @@ +bugfixes: + - module_helper module utils - avoid failing when non-zero ``rc`` is present on regular exit (https://github.com/ansible-collections/community.general/pull/2912). 
+ - snap - fix various bugs which prevented the module from working at all, and which resulted in ``state=absent`` fail on absent snaps (https://github.com/ansible-collections/community.general/issues/2835, https://github.com/ansible-collections/community.general/issues/2906, https://github.com/ansible-collections/community.general/pull/2912). diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py index e0de7f2fdd..a120c2556e 100644 --- a/plugins/module_utils/mh/base.py +++ b/plugins/module_utils/mh/base.py @@ -59,4 +59,7 @@ class ModuleHelperBase(object): self.__init_module__() self.__run__() self.__quit_module__() - self.module.exit_json(changed=self.has_changed(), **self.output) + output = self.output + if 'failed' not in output: + output['failed'] = False + self.module.exit_json(changed=self.has_changed(), **output) diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py index fab2558ccf..8051b90445 100644 --- a/plugins/modules/packaging/os/snap.py +++ b/plugins/modules/packaging/os/snap.py @@ -107,6 +107,8 @@ snaps_removed: import re +from ansible.module_utils.common.text.converters import to_native + from ansible_collections.community.general.plugins.module_utils.module_helper import ( CmdStateModuleHelper, ArgFormat, ModuleHelperException ) @@ -123,7 +125,7 @@ __state_map = dict( def _state_map(value): - return __state_map[value] + return [__state_map[value]] class Snap(CmdStateModuleHelper): @@ -163,20 +165,20 @@ class Snap(CmdStateModuleHelper): results[i].append(output[i]) return [ - '; '.join(results[0]), + '; '.join([to_native(x) for x in results[0]]), self._first_non_zero(results[1]), '\n'.join(results[2]), '\n'.join(results[3]), ] def snap_exists(self, snap_name): - return 0 == self.run_command(params=[{'state': 'info'}, {'name': [snap_name]}])[0] + return 0 == self.run_command(params=[{'state': 'info'}, {'name': snap_name}])[0] def is_snap_installed(self, snap_name): - return 0 == 
self.run_command(params=[{'state': 'list'}, {'name': [snap_name]}])[0] + return 0 == self.run_command(params=[{'state': 'list'}, {'name': snap_name}])[0] def is_snap_enabled(self, snap_name): - rc, out, err = self.run_command(params=[{'state': 'list'}, {'name': [snap_name]}]) + rc, out, err = self.run_command(params=[{'state': 'list'}, {'name': snap_name}]) if rc != 0: return None result = out.splitlines()[1] @@ -196,7 +198,7 @@ class Snap(CmdStateModuleHelper): self.validate_input_snaps() # if snap doesnt exist, it will explode when trying to install self.vars.meta('classic').set(output=True) self.vars.meta('channel').set(output=True) - actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)] + actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)] if not actionable_snaps: return self.changed = True @@ -207,9 +209,9 @@ class Snap(CmdStateModuleHelper): has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable' has_multiple_snaps = len(actionable_snaps) > 1 if has_one_pkg_params and has_multiple_snaps: - commands = [params + [s] for s in actionable_snaps] + commands = [params + [{'actionable_snaps': [s]}] for s in actionable_snaps] else: - commands = [params + actionable_snaps] + commands = [params + [{'actionable_snaps': actionable_snaps}]] self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) if rc == 0: return @@ -227,7 +229,7 @@ class Snap(CmdStateModuleHelper): def state_absent(self): self.validate_input_snaps() # if snap doesnt exist, it will be absent by definition - actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)] + actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)] if not actionable_snaps: return self.changed = True @@ -235,7 +237,7 @@ class Snap(CmdStateModuleHelper): if self.module.check_mode: return params = ['classic', 'channel', 'state'] # get base cmd parts - commands = [params + actionable_snaps] + commands = [params + 
[{'actionable_snaps': actionable_snaps}]] self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) if rc == 0: return @@ -253,7 +255,7 @@ class Snap(CmdStateModuleHelper): if self.module.check_mode: return params = ['classic', 'channel', 'state'] # get base cmd parts - commands = [params + actionable_snaps] + commands = [params + [{'actionable_snaps': actionable_snaps}]] self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) if rc == 0: return @@ -271,7 +273,7 @@ class Snap(CmdStateModuleHelper): if self.module.check_mode: return params = ['classic', 'channel', 'state'] # get base cmd parts - commands = [params + actionable_snaps] + commands = [params + [{'actionable_snaps': actionable_snaps}]] self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) if rc == 0: return diff --git a/tests/integration/targets/snap/aliases b/tests/integration/targets/snap/aliases index d7f5ce60c5..ee303bf346 100644 --- a/tests/integration/targets/snap/aliases +++ b/tests/integration/targets/snap/aliases @@ -3,4 +3,4 @@ skip/aix skip/freebsd skip/osx skip/macos -disabled #FIXME 2609 +skip/docker diff --git a/tests/integration/targets/snap/defaults/main.yml b/tests/integration/targets/snap/defaults/main.yml new file mode 100644 index 0000000000..2290001f7e --- /dev/null +++ b/tests/integration/targets/snap/defaults/main.yml @@ -0,0 +1,4 @@ +has_snap: false + +snap_packages: + - snapd diff --git a/tests/integration/targets/snap/handlers/main.yml b/tests/integration/targets/snap/handlers/main.yml new file mode 100644 index 0000000000..a80cc98e49 --- /dev/null +++ b/tests/integration/targets/snap/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Remove snapd + package: + name: "{{ snap_packages }}" + state: absent diff --git a/tests/integration/targets/snap/meta/main.yml b/tests/integration/targets/snap/meta/main.yml new file mode 100644 index 0000000000..0e51c36ebd --- /dev/null +++ b/tests/integration/targets/snap/meta/main.yml @@ -0,0 +1,3 @@ 
+dependencies: + - setup_pkg_mgr + - setup_epel diff --git a/tests/integration/targets/snap/tasks/Debian.yml b/tests/integration/targets/snap/tasks/Debian.yml new file mode 120000 index 0000000000..0abaec1677 --- /dev/null +++ b/tests/integration/targets/snap/tasks/Debian.yml @@ -0,0 +1 @@ +default.yml \ No newline at end of file diff --git a/tests/integration/targets/snap/tasks/Fedora.yml b/tests/integration/targets/snap/tasks/Fedora.yml new file mode 120000 index 0000000000..0abaec1677 --- /dev/null +++ b/tests/integration/targets/snap/tasks/Fedora.yml @@ -0,0 +1 @@ +default.yml \ No newline at end of file diff --git a/tests/integration/targets/snap/tasks/RedHat.yml b/tests/integration/targets/snap/tasks/RedHat.yml new file mode 120000 index 0000000000..0abaec1677 --- /dev/null +++ b/tests/integration/targets/snap/tasks/RedHat.yml @@ -0,0 +1 @@ +default.yml \ No newline at end of file diff --git a/tests/integration/targets/snap/tasks/default.yml b/tests/integration/targets/snap/tasks/default.yml new file mode 100644 index 0000000000..4cc38f7bf2 --- /dev/null +++ b/tests/integration/targets/snap/tasks/default.yml @@ -0,0 +1,15 @@ +--- +- name: Install snapd + package: + name: "{{ snap_packages }}" + state: present + notify: Remove snapd + +- name: Make sure that snapd is running + service: + name: snapd + state: started + +- name: Inform that snap is installed + set_fact: + has_snap: true diff --git a/tests/integration/targets/snap/tasks/main.yml b/tests/integration/targets/snap/tasks/main.yml index e015122ff2..73604d3895 100644 --- a/tests/integration/targets/snap/tasks/main.yml +++ b/tests/integration/targets/snap/tasks/main.yml @@ -4,28 +4,46 @@ # and should not be used as examples of how to write Ansible roles # #################################################################### -- name: install snapd - apt: - name: snapd - state: present - register: snapd_install_ubuntu - when: ansible_distribution == 'Ubuntu' - -- name: install snapd - dnf: - name: snapd - 
state: present - register: snapd_install_fedora - when: ansible_distribution == 'Fedora' +- name: Include distribution specific tasks + include_tasks: "{{ lookup('first_found', params) }}" + vars: + params: + files: + - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml" + - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml" + - "{{ ansible_facts.distribution }}.yml" + - "{{ ansible_facts.os_family }}.yml" + - "nothing.yml" + paths: + - "{{ role_path }}/tasks" - block: - - name: install package + - name: Make sure package is not installed + community.general.snap: + name: hello-world + state: absent + + - name: Install package (check mode) + community.general.snap: + name: hello-world + state: present + register: install_check + check_mode: true + + - name: Install package community.general.snap: name: hello-world state: present register: install - - name: install package again + - name: Install package again (check mode) + community.general.snap: + name: hello-world + state: present + register: install_again_check + check_mode: true + + - name: Install package again community.general.snap: name: hello-world state: present @@ -35,18 +53,36 @@ assert: that: - install is changed + - install_check is changed - install_again is not changed + - install_again_check is not changed - - name: check package has been installed correctly + - name: Check package has been installed correctly command: hello-world + environment: + PATH: /var/lib/snapd/snap/bin/ - - name: remove package + - name: Remove package (check mode) + community.general.snap: + name: hello-world + state: absent + register: remove_check + check_mode: true + + - name: Remove package community.general.snap: name: hello-world state: absent register: remove - - name: remove package again + - name: Remove package again (check mode) + community.general.snap: + name: hello-world + state: absent + register: remove_again_check + check_mode: true + + - 
name: Remove package again community.general.snap: name: hello-world state: absent @@ -56,17 +92,7 @@ assert: that: - remove is changed + - remove_check is changed - remove_again is not changed - when: ansible_distribution in ['Ubuntu','Fedora'] - -- name: Remove snapd in case it was not installed - apt: - name: snapd - state: absent - when: snapd_install_ubuntu is changed and ansible_distribution == 'Ubuntu' - -- name: Remove snapd in case it was not installed - dnf: - name: snapd - state: absent - when: snapd_install_fedora is changed and ansible_distribution == 'Fedora' + - remove_again_check is not changed + when: has_snap diff --git a/tests/integration/targets/snap/tasks/nothing.yml b/tests/integration/targets/snap/tasks/nothing.yml new file mode 100644 index 0000000000..11642d1fcd --- /dev/null +++ b/tests/integration/targets/snap/tasks/nothing.yml @@ -0,0 +1,2 @@ +--- +# Do nothing From 00aa1250eec0582953e8a39faab8228b4ffd6cab Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Fri, 2 Jul 2021 15:22:00 +0430 Subject: [PATCH 0417/3093] Add integration test for classic snap (#2920) * Add integration test for classic snap * Add comments and check remove without classic * Comment new tests for now --- tests/integration/targets/snap/tasks/main.yml | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/tests/integration/targets/snap/tasks/main.yml b/tests/integration/targets/snap/tasks/main.yml index 73604d3895..6e877cd0de 100644 --- a/tests/integration/targets/snap/tasks/main.yml +++ b/tests/integration/targets/snap/tasks/main.yml @@ -95,4 +95,51 @@ - remove_check is changed - remove_again is not changed - remove_again_check is not changed + +# - name: Make sure package from classic snap is not installed +# community.general.snap: +# name: nvim +# state: absent +# +# - name: Install package from classic snap +# community.general.snap: +# name: nvim +# state: present +# classic: true +# register: classic_install +# +# # testing classic idempotency +# - 
name: Install package from classic snap again +# community.general.snap: +# name: nvim +# state: present +# classic: true +# register: classic_install_again +# +# - name: Assert package has been installed just once +# assert: +# that: +# - classic_install is changed +# - classic_install_again is not changed +# +# # this is just testing if a package which has been installed +# # with true classic can be removed without setting classic to true +# - name: Remove package from classic snap without setting classic to true +# community.general.snap: +# name: nvim +# state: absent +# register: classic_remove_without_true_classic +# +# - name: Remove package from classic snap with setting classic to true +# community.general.snap: +# name: nvim +# state: absent +# classic: true +# register: classic_remove_with_true_classic +# +# - name: Assert package has been removed without setting classic to true +# assert: +# that: +# - classic_remove_without_ture_classic is changed +# - classic_remove_with_ture_classic is not changed when: has_snap From ffe505a798a774473d7f5a200a8f1153c853fa3e Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Fri, 2 Jul 2021 15:30:40 -0400 Subject: [PATCH 0418/3093] archive - fix removal failures for nested files with tar archives (#2923) * Initial commit * Adding changelog fragment --- .../fragments/2923-archive-remove-bugfix.yml | 4 +++ plugins/modules/files/archive.py | 15 ++++---- .../targets/archive/tasks/remove.yml | 34 ++++++++++++++++++- 3 files changed, 45 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/2923-archive-remove-bugfix.yml diff --git a/changelogs/fragments/2923-archive-remove-bugfix.yml b/changelogs/fragments/2923-archive-remove-bugfix.yml new file mode 100644 index 0000000000..4bef5ef459 --- /dev/null +++ b/changelogs/fragments/2923-archive-remove-bugfix.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - archive - fixed task failure when using the ``remove`` option with a ``path`` containing nested files for + ``format``s 
other than ``zip`` (https://github.com/ansible-collections/community.general/issues/2919). diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 5cdd6630d1..a2d3376613 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -399,13 +399,14 @@ class Archive(object): def remove_targets(self): for path in self.successes: - try: - if os.path.isdir(path): - shutil.rmtree(path) - else: - os.remove(path) - except OSError: - self.errors.append(_to_native(path)) + if os.path.exists(path): + try: + if os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + except OSError: + self.errors.append(_to_native(path)) for path in self.paths: try: if os.path.isdir(path): diff --git a/tests/integration/targets/archive/tasks/remove.yml b/tests/integration/targets/archive/tasks/remove.yml index 9600eb9f6d..9f085e901a 100644 --- a/tests/integration/targets/archive/tasks/remove.yml +++ b/tests/integration/targets/archive/tasks/remove.yml @@ -148,7 +148,39 @@ - name: verify that excluded sub file is still present file: path={{ output_dir }}/tmpdir/sub/subfile.txt state=file -- name: remove temporary directory +- name: prep our files in tmpdir again + copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} + with_items: + - foo.txt + - bar.txt + - empty.txt + - sub + - sub/subfile.txt + +- name: archive using gz and remove src directory + archive: + path: + - "{{ output_dir }}/tmpdir/" + dest: "{{ output_dir }}/archive_remove_05.gz" + format: gz + remove: yes + exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + register: archive_remove_result_05 + +- name: verify that the files archived + file: path={{ output_dir }}/archive_remove_05.gz state=file + +- name: Verify source files were removed file: path: "{{ output_dir }}/tmpdir" state: absent + register: archive_source_file_removal_05 + +- name: Verify that task status is success + assert: + that: + - archive_remove_result_05 is success + - 
archive_source_file_removal_05 is not changed + +- name: remove our gz + file: path="{{ output_dir }}/archive_remove_05.gz" state=absent From a0915036f9e98976061f4b65dfa5dd361e18ff0e Mon Sep 17 00:00:00 2001 From: Shahar Mor Date: Fri, 2 Jul 2021 22:42:50 +0300 Subject: [PATCH 0419/3093] npm - fix installing from package.json (#2924) correctly handle cases where a dependency does not have a `version` property because it is either missing or invalid --- .../fragments/2924-npm-fix-package-json.yml | 3 +++ plugins/modules/packaging/language/npm.py | 5 +++-- .../modules/packaging/language/test_npm.py | 19 +++++++++++++++++++ 3 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2924-npm-fix-package-json.yml diff --git a/changelogs/fragments/2924-npm-fix-package-json.yml b/changelogs/fragments/2924-npm-fix-package-json.yml new file mode 100644 index 0000000000..ce4a416cf7 --- /dev/null +++ b/changelogs/fragments/2924-npm-fix-package-json.yml @@ -0,0 +1,3 @@ +bugfixes: + - npm - correctly handle cases where a dependency does not have a ``version`` property because it is either missing or invalid + (https://github.com/ansible-collections/community.general/issues/2917). 
diff --git a/plugins/modules/packaging/language/npm.py b/plugins/modules/packaging/language/npm.py index 283b8e0be7..de316d397f 100644 --- a/plugins/modules/packaging/language/npm.py +++ b/plugins/modules/packaging/language/npm.py @@ -216,7 +216,6 @@ class Npm(object): self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e)) if 'dependencies' in data: for dep, props in data['dependencies'].items(): - dep_version = dep + '@' + str(props['version']) if 'missing' in props and props['missing']: missing.append(dep) @@ -224,7 +223,9 @@ class Npm(object): missing.append(dep) else: installed.append(dep) - installed.append(dep_version) + if 'version' in props and props['version']: + dep_version = dep + '@' + str(props['version']) + installed.append(dep_version) if self.name_version and self.name_version not in installed: missing.append(self.name) # Named dependency not installed diff --git a/tests/unit/plugins/modules/packaging/language/test_npm.py b/tests/unit/plugins/modules/packaging/language/test_npm.py index abdacc6aef..89de549915 100644 --- a/tests/unit/plugins/modules/packaging/language/test_npm.py +++ b/tests/unit/plugins/modules/packaging/language/test_npm.py @@ -52,6 +52,25 @@ class NPMModuleTestCase(ModuleTestCase): call(['/testbin/npm', 'install', '--global', 'coffee-script'], check_rc=True, cwd=None), ]) + def test_present_missing(self): + set_module_args({ + 'name': 'coffee-script', + 'global': 'true', + 'state': 'present', + }) + self.module_main_command.side_effect = [ + (0, '{"dependencies": {"coffee-script": {"missing" : true}}}', ''), + (0, '{}', ''), + ] + + result = self.module_main(AnsibleExitJson) + + self.assertTrue(result['changed']) + self.module_main_command.assert_has_calls([ + call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None), + call(['/testbin/npm', 'install', '--global', 'coffee-script'], check_rc=True, cwd=None), + ]) + def test_present_version(self): set_module_args({ 'name': 
'coffee-script', From 9b02230477391cd4bda861bb3e6a5d2640407363 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 4 Jul 2021 02:31:30 +1200 Subject: [PATCH 0420/3093] snap - fixed param order (#2918) * fixed param order * added changelog fragment * rebased and uncommented tests per PR * added /snap link in RH * typo in tests * Update tests/integration/targets/snap/tasks/default.yml Co-authored-by: Felix Fontein --- .../fragments/2918-snap-param-order.yml | 2 + plugins/modules/packaging/os/snap.py | 8 +- .../targets/snap/tasks/default.yml | 6 ++ tests/integration/targets/snap/tasks/main.yml | 92 +++++++++---------- 4 files changed, 58 insertions(+), 50 deletions(-) create mode 100644 changelogs/fragments/2918-snap-param-order.yml diff --git a/changelogs/fragments/2918-snap-param-order.yml b/changelogs/fragments/2918-snap-param-order.yml new file mode 100644 index 0000000000..85b907f8b6 --- /dev/null +++ b/changelogs/fragments/2918-snap-param-order.yml @@ -0,0 +1,2 @@ +bugfixes: + - snap - fixed the order of the ``--classic`` parameter in the command line invocation (https://github.com/ansible-collections/community.general/issues/2916). 
diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py index 8051b90445..6da8b0e766 100644 --- a/plugins/modules/packaging/os/snap.py +++ b/plugins/modules/packaging/os/snap.py @@ -133,10 +133,10 @@ class Snap(CmdStateModuleHelper): module = dict( argument_spec={ 'name': dict(type='list', elements='str', required=True), - 'state': dict(type='str', required=False, default='present', + 'state': dict(type='str', default='present', choices=['absent', 'present', 'enabled', 'disabled']), - 'classic': dict(type='bool', required=False, default=False), - 'channel': dict(type='str', required=False, default='stable'), + 'classic': dict(type='bool', default=False), + 'channel': dict(type='str', default='stable'), }, supports_check_mode=True, ) @@ -205,7 +205,7 @@ class Snap(CmdStateModuleHelper): self.vars.snaps_installed = actionable_snaps if self.module.check_mode: return - params = ['classic', 'channel', 'state'] # get base cmd parts + params = ['state', 'classic', 'channel'] # get base cmd parts has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable' has_multiple_snaps = len(actionable_snaps) > 1 if has_one_pkg_params and has_multiple_snaps: diff --git a/tests/integration/targets/snap/tasks/default.yml b/tests/integration/targets/snap/tasks/default.yml index 4cc38f7bf2..938addc33a 100644 --- a/tests/integration/targets/snap/tasks/default.yml +++ b/tests/integration/targets/snap/tasks/default.yml @@ -10,6 +10,12 @@ name: snapd state: started +- name: Create link /snap + file: + src: /var/lib/snapd/snap + dest: /snap + state: link + - name: Inform that snap is installed set_fact: has_snap: true diff --git a/tests/integration/targets/snap/tasks/main.yml b/tests/integration/targets/snap/tasks/main.yml index 6e877cd0de..0f8c9b4c26 100644 --- a/tests/integration/targets/snap/tasks/main.yml +++ b/tests/integration/targets/snap/tasks/main.yml @@ -96,50 +96,50 @@ - remove_again is not changed - remove_again_check is not 
changed -# - name: Make sure package from classic snap is not installed -# community.general.snap: -# name: nvim -# state: absent -# -# - name: Install package from classic snap -# community.general.snap: -# name: nvim -# state: present -# classic: true -# register: classic_install -# -# # testing classic idempotency -# - name: Install package from classic snap again -# community.general.snap: -# name: nvim -# state: present -# classic: true -# register: classic_install_again -# -# - name: Assert package has been installed just once -# assert: -# that: -# - classic_install is changed -# - classic_install_again is not changed -# -# # this is just testing if a package which has been installed -# # with true classic can be removed without setting classic to true -# - name: Remove package from classic snap without setting classic to true -# community.general.snap: -# name: nvim -# state: absent -# register: classic_remove_without_true_classic -# -# - name: Remove package from classic snap with setting classic to true -# community.general.snap: -# name: nvim -# state: absent -# classic: true -# register: classic_remove_with_true_classic -# -# - name: Assert package has been removed without setting classic to true -# assert: -# that: -# - classic_remove_without_ture_classic is changed -# - classic_remove_with_ture_classic is not changed + - name: Make sure package from classic snap is not installed + community.general.snap: + name: nvim + state: absent + + - name: Install package from classic snap + community.general.snap: + name: nvim + state: present + classic: true + register: classic_install + + # testing classic idempotency + - name: Install package from classic snap again + community.general.snap: + name: nvim + state: present + classic: true + register: classic_install_again + + - name: Assert package has been installed just once + assert: + that: + - classic_install is changed + - classic_install_again is not changed + + # this is just testing if a package which 
has been installed + # with true classic can be removed without setting classic to true + - name: Remove package from classic snap without setting classic to true + community.general.snap: + name: nvim + state: absent + register: classic_remove_without_true_classic + + - name: Remove package from classic snap with setting classic to true + community.general.snap: + name: nvim + state: absent + classic: true + register: classic_remove_with_true_classic + + - name: Assert package has been removed without setting classic to true + assert: + that: + - classic_remove_without_true_classic is changed + - classic_remove_with_true_classic is not changed when: has_snap From b2b487753264417b161f9299d7386d71a1f857cf Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Mon, 5 Jul 2021 23:16:19 +0430 Subject: [PATCH 0421/3093] lvol: honor check_mode on thinpool (#2935) * lvol: support check_mode on thinpool * add changelog * Add %s when needed * correct changelog sentence Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/2935-lvol-support_check_mode_thinpool.yml | 3 +++ plugins/modules/system/lvol.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2935-lvol-support_check_mode_thinpool.yml diff --git a/changelogs/fragments/2935-lvol-support_check_mode_thinpool.yml b/changelogs/fragments/2935-lvol-support_check_mode_thinpool.yml new file mode 100644 index 0000000000..3efbe59860 --- /dev/null +++ b/changelogs/fragments/2935-lvol-support_check_mode_thinpool.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - lvol - honor ``check_mode`` on thinpool (https://github.com/ansible-collections/community.general/issues/2934). 
diff --git a/plugins/modules/system/lvol.py b/plugins/modules/system/lvol.py index fafa7db38a..e8b0ab838b 100644 --- a/plugins/modules/system/lvol.py +++ b/plugins/modules/system/lvol.py @@ -471,9 +471,9 @@ def main(): if size_opt == 'l': module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.") size_opt = 'V' - cmd = "%s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool) + cmd = "%s %s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool) elif thinpool and not lv: - cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool) + cmd = "%s %s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, opts, vg, thinpool) else: cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs) rc, dummy, err = module.run_command(cmd) From c0740ca3985d827e309d9bff868374a5fb86bce7 Mon Sep 17 00:00:00 2001 From: Amin Vakil Date: Wed, 7 Jul 2021 00:36:36 +0430 Subject: [PATCH 0422/3093] pacman: fix changed status when ignorepkg has been defined (#2936) * pacman: fix returned code when ignorepkg has been defined * add changelog * make ignored check preciser --- ...status_when_ignorepkg_has_been_defined.yml | 3 +++ plugins/modules/packaging/os/pacman.py | 21 ++++++++++++------- 2 files changed, 17 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml diff --git a/changelogs/fragments/2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml b/changelogs/fragments/2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml new file mode 100644 index 0000000000..815ffa4aee --- /dev/null +++ b/changelogs/fragments/2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml @@ -0,0 +1,3 @@ +--- 
+bugfixes: + - pacman - fix changed status when ignorepkg has been defined (https://github.com/ansible-collections/community.general/issues/1758). diff --git a/plugins/modules/packaging/os/pacman.py b/plugins/modules/packaging/os/pacman.py index 372d13cd49..ea138fa614 100644 --- a/plugins/modules/packaging/os/pacman.py +++ b/plugins/modules/packaging/os/pacman.py @@ -254,16 +254,23 @@ def upgrade(module, pacman_path): # e.g., "ansible 2.7.1-1 -> 2.7.2-1" regex = re.compile(r'([\w+\-.@]+) (\S+-\S+) -> (\S+-\S+)') for p in data: - m = regex.search(p) - packages.append(m.group(1)) - if module._diff: - diff['before'] += "%s-%s\n" % (m.group(1), m.group(2)) - diff['after'] += "%s-%s\n" % (m.group(1), m.group(3)) + if '[ignored]' not in p: + m = regex.search(p) + packages.append(m.group(1)) + if module._diff: + diff['before'] += "%s-%s\n" % (m.group(1), m.group(2)) + diff['after'] += "%s-%s\n" % (m.group(1), m.group(3)) if module.check_mode: - module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff) + if packages: + module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff) + else: + module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages) rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False) if rc == 0: - module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff) + if packages: + module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff) + else: + module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages) else: module.fail_json(msg="Could not upgrade") else: From 56acd4356fcd88186f8f6766ae072258012706ad Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 8 Jul 2021 17:20:01 +1200 Subject: [PATCH 0423/3093] MH - dicts and lists change-tracking is fixed (#2951) * dicts and lists change-tracking is fixed * added 
changelog fragment * fixed sanity check --- .../fragments/2951-mh-vars-deepcopy.yml | 2 ++ plugins/module_utils/mh/mixins/vars.py | 6 ++-- .../module_utils/test_module_helper.py | 28 +++++++++++++++++++ 3 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2951-mh-vars-deepcopy.yml diff --git a/changelogs/fragments/2951-mh-vars-deepcopy.yml b/changelogs/fragments/2951-mh-vars-deepcopy.yml new file mode 100644 index 0000000000..339cca3aa7 --- /dev/null +++ b/changelogs/fragments/2951-mh-vars-deepcopy.yml @@ -0,0 +1,2 @@ +bugfixes: + - module_helper module utils - fixed change-tracking for dictionaries and lists (https://github.com/ansible-collections/community.general/pull/2951). diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py index 7c936e04ac..a11110ed60 100644 --- a/plugins/module_utils/mh/mixins/vars.py +++ b/plugins/module_utils/mh/mixins/vars.py @@ -6,6 +6,8 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type +import copy + class VarMeta(object): NOTHING = object() @@ -30,11 +32,11 @@ class VarMeta(object): if fact is not None: self.fact = fact if initial_value is not self.NOTHING: - self.initial_value = initial_value + self.initial_value = copy.deepcopy(initial_value) def set_value(self, value): if not self.init: - self.initial_value = value + self.initial_value = copy.deepcopy(value) self.init = True self.value = value return self diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index 6452784182..f40a0f10ee 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -151,17 +151,45 @@ def test_vardict(): assert vd.meta('a').diff is False assert vd.meta('a').change is False vd['b'] = 456 + assert vd.meta('b').output is True + assert vd.meta('b').diff is False + assert vd.meta('b').change is False 
vd.set_meta('a', diff=True, change=True) vd.set_meta('b', diff=True, output=False) vd['c'] = 789 + assert vd.has_changed('c') is False vd['a'] = 'new_a' + assert vd.has_changed('a') is True vd['c'] = 'new_c' + assert vd.has_changed('c') is False + vd['b'] = 'new_b' + assert vd.has_changed('b') is False assert vd.a == 'new_a' assert vd.c == 'new_c' assert vd.output() == {'a': 'new_a', 'c': 'new_c'} assert vd.diff() == {'before': {'a': 123}, 'after': {'a': 'new_a'}}, "diff={0}".format(vd.diff()) +def test_variable_meta_change(): + vd = VarDict() + vd.set('a', 123, change=True) + vd.set('b', [4, 5, 6], change=True) + vd.set('c', {'m': 7, 'n': 8, 'o': 9}, change=True) + vd.set('d', {'a1': {'a11': 33, 'a12': 34}}, change=True) + + vd.a = 1234 + assert vd.has_changed('a') is True + vd.b.append(7) + assert vd.b == [4, 5, 6, 7] + assert vd.has_changed('b') + vd.c.update({'p': 10}) + assert vd.c == {'m': 7, 'n': 8, 'o': 9, 'p': 10} + assert vd.has_changed('c') + vd.d['a1'].update({'a13': 35}) + assert vd.d == {'a1': {'a11': 33, 'a12': 34, 'a13': 35}} + assert vd.has_changed('d') + + class MockMH(object): changed = None From 518ace25621ee7ed08debd4d87509b0b2be33f2c Mon Sep 17 00:00:00 2001 From: John R Barker Date: Thu, 8 Jul 2021 13:10:33 +0100 Subject: [PATCH 0424/3093] Update commit-rights.md aminvakil is no longer involved with the Ansible Community due to United States export controls and economic sanctions laws apply to U.S. persons, entities, and controlled software and technology that is of U.S. origin or that enters the U.S., including open source software. 
--- commit-rights.md | 1 - 1 file changed, 1 deletion(-) diff --git a/commit-rights.md b/commit-rights.md index 9b39d47b2c..58743e5048 100644 --- a/commit-rights.md +++ b/commit-rights.md @@ -68,7 +68,6 @@ Individuals who have been asked to become a part of this group have generally be | Name | GitHub ID | IRC Nick | Other | | ------------------- | -------------------- | ------------------ | -------------------- | | Alexei Znamensky | russoz | russoz | | -| Amin Vakil | aminvakil | aminvakil | | | Andrew Klychkov | andersson007 | andersson007_ | | | Felix Fontein | felixfontein | felixfontein | | | John R Barker | gundalow | gundalow | | From d97a9b5961a9008c4d404810590cee7cb3e3703a Mon Sep 17 00:00:00 2001 From: Tong He <68936428+unnecessary-username@users.noreply.github.com> Date: Fri, 9 Jul 2021 02:32:46 -0400 Subject: [PATCH 0425/3093] jenkins_job_info: Remove necessities of password or token. (#2948) * Remove necessities on password or token. * Upper case letter -> Lower case letter Co-authored-by: Amin Vakil * Documentation update. * C -> I Co-authored-by: Amin Vakil --- ..._info-remove_necessities_on_password_or_token.yml | 2 ++ .../modules/web_infrastructure/jenkins_job_info.py | 12 +++++++----- 2 files changed, 9 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2948-jenkins_job_info-remove_necessities_on_password_or_token.yml diff --git a/changelogs/fragments/2948-jenkins_job_info-remove_necessities_on_password_or_token.yml b/changelogs/fragments/2948-jenkins_job_info-remove_necessities_on_password_or_token.yml new file mode 100644 index 0000000000..99259d6301 --- /dev/null +++ b/changelogs/fragments/2948-jenkins_job_info-remove_necessities_on_password_or_token.yml @@ -0,0 +1,2 @@ +minor_changes: + - jenkins_job_info - the ``password`` and ``token`` parameters can also be omitted to retrieve only public information (https://github.com/ansible-collections/community.general/pull/2948). 
diff --git a/plugins/modules/web_infrastructure/jenkins_job_info.py b/plugins/modules/web_infrastructure/jenkins_job_info.py index 9dcf5776c9..fc079857a6 100644 --- a/plugins/modules/web_infrastructure/jenkins_job_info.py +++ b/plugins/modules/web_infrastructure/jenkins_job_info.py @@ -33,12 +33,12 @@ options: type: str description: - Password to authenticate with the Jenkins server. - - This is a required parameter, if C(token) is not provided. + - This is mutually exclusive with I(token). token: type: str description: - API token used to authenticate with the Jenkins server. - - This is a required parameter, if C(password) is not provided. + - This is mutually exclusive with I(password). url: type: str description: @@ -59,6 +59,11 @@ author: ''' EXAMPLES = ''' +# Get all Jenkins jobs anonymously +- community.general.jenkins_job_info: + user: admin + register: my_jenkins_job_info + # Get all Jenkins jobs using basic auth - community.general.jenkins_job_info: user: admin @@ -232,9 +237,6 @@ def main(): ['password', 'token'], ['name', 'glob'], ], - required_one_of=[ - ['password', 'token'], - ], supports_check_mode=True, ) From 1b80a9c5879f343a915b281da0cffaff79c2ca22 Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Fri, 9 Jul 2021 07:33:35 +0100 Subject: [PATCH 0426/3093] Add option to the keycloak_client module (#2949) * Add authentication_flow_binding_overrides option to the keycloak_client module * Add changelog fragment * Update changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml Co-authored-by: Amin Vakil * Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Amin Vakil * Update plugins/modules/identity/keycloak/keycloak_client.py Co-authored-by: Amin Vakil * Add unit test authentication_flow_binding_overrides feature on keycloak_client module Co-authored-by: Amin Vakil --- ...ntication-flow-binding_keycloak-client.yml | 3 + .../identity/keycloak/keycloak_client.py | 
11 ++ .../identity/keycloak/test_keycloak_client.py | 150 ++++++++++++++++++ 3 files changed, 164 insertions(+) create mode 100644 changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml create mode 100644 tests/unit/plugins/modules/identity/keycloak/test_keycloak_client.py diff --git a/changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml b/changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml new file mode 100644 index 0000000000..cdc0d4ae69 --- /dev/null +++ b/changelogs/fragments/2949-add_authentication-flow-binding_keycloak-client.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - keycloak_client - add ``authentication_flow_binding_overrides`` option (https://github.com/ansible-collections/community.general/pull/2949). diff --git a/plugins/modules/identity/keycloak/keycloak_client.py b/plugins/modules/identity/keycloak/keycloak_client.py index e3e39fc173..e37997e752 100644 --- a/plugins/modules/identity/keycloak/keycloak_client.py +++ b/plugins/modules/identity/keycloak/keycloak_client.py @@ -318,6 +318,14 @@ options: aliases: - authorizationSettings + authentication_flow_binding_overrides: + description: + - Override realm authentication flow bindings. + type: dict + aliases: + - authenticationFlowBindingOverrides + version_added: 3.4.0 + protocol_mappers: description: - a list of dicts defining protocol mappers for this client. 
@@ -593,6 +601,8 @@ EXAMPLES = ''' default_roles: - test01 - test02 + authentication_flow_binding_overrides: + browser: 4c90336b-bf1d-4b87-916d-3677ba4e5fbb protocol_mappers: - config: access.token.claim: True @@ -745,6 +755,7 @@ def main(): use_template_config=dict(type='bool', aliases=['useTemplateConfig']), use_template_scope=dict(type='bool', aliases=['useTemplateScope']), use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']), + authentication_flow_binding_overrides=dict(type='dict', aliases=['authenticationFlowBindingOverrides']), protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']), authorization_settings=dict(type='dict', aliases=['authorizationSettings']), ) diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client.py new file mode 100644 index 0000000000..e017a5985c --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, \ + ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_client + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_client_by_clientid=None, get_client_by_id=None, update_client=None, create_client=None, + 
delete_client=None): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + obj = keycloak_client.KeycloakAPI + with patch.object(obj, 'get_client_by_clientid', side_effect=get_client_by_clientid) as mock_get_client_by_clientid: + with patch.object(obj, 'get_client_by_id', side_effect=get_client_by_id) as mock_get_client_by_id: + with patch.object(obj, 'create_client', side_effect=create_client) as mock_create_client: + with patch.object(obj, 'update_client', side_effect=update_client) as mock_update_client: + with patch.object(obj, 'delete_client', side_effect=delete_client) as mock_delete_client: + yield mock_get_client_by_clientid, mock_get_client_by_id, mock_create_client, mock_update_client, mock_delete_client + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. 
+ Without this function, the StringIO is empty for the second call. + """ + + def _create_wrapper(): + return StringIO(text_as_string) + + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper( + '{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakRealm(ModuleTestCase): + def setUp(self): + super(TestKeycloakRealm, self).setUp() + self.module = keycloak_client + + def test_authentication_flow_binding_overrides_feature(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'https: // auth.example.com / auth', + 'token': '{{ access_token }}', + 'state': 'present', + 'realm': 'master', + 'client_id': 'test', + 'authentication_flow_binding_overrides': { + 'browser': '4c90336b-bf1d-4b87-916d-3677ba4e5fbb' + } + } + return_value_get_client_by_clientid = [ + None, + { + "authenticationFlowBindingOverrides": { + "browser": "f9502b6d-d76a-4efe-8331-2ddd853c9f9c" + }, + "clientId": "onboardingid", + "enabled": "true", + "protocol": "openid-connect", + "redirectUris": [ + "*" + ] + } + ] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_client_by_clientid=return_value_get_client_by_clientid) \ + as (mock_get_client_by_clientid, mock_get_client_by_id, mock_create_client, mock_update_client, mock_delete_client): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_client_by_clientid.call_count, 2) + self.assertEqual(mock_get_client_by_id.call_count, 0) + self.assertEqual(mock_create_client.call_count, 1) + self.assertEqual(mock_update_client.call_count, 0) + self.assertEqual(mock_delete_client.call_count, 0) + + 
# Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 288fe1cfc6a64165521840e72edbb9c513d62eb2 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sat, 10 Jul 2021 06:58:30 -0400 Subject: [PATCH 0427/3093] archive - adding dest_state return value and enhancing integration tests. (#2913) * Initial commit * Adding changelog fragment * fixing changelog fragment * Updating documentation * Applying review suggestions --- .../fragments/2913-archive-dest_state.yml | 4 + plugins/modules/files/archive.py | 11 + .../targets/archive/tasks/broken-link.yml | 22 - .../targets/archive/tasks/main.yml | 434 ++---------------- .../targets/archive/tasks/remove.yml | 186 -------- .../targets/archive/tests/broken-link.yml | 31 ++ .../targets/archive/tests/core.yml | 188 ++++++++ .../targets/archive/tests/exclusions.yml | 40 ++ .../targets/archive/tests/remove.yml | 207 +++++++++ 9 files changed, 521 insertions(+), 602 deletions(-) create mode 100644 changelogs/fragments/2913-archive-dest_state.yml delete mode 100644 tests/integration/targets/archive/tasks/broken-link.yml delete mode 100644 tests/integration/targets/archive/tasks/remove.yml create mode 100644 tests/integration/targets/archive/tests/broken-link.yml create mode 100644 tests/integration/targets/archive/tests/core.yml create mode 100644 tests/integration/targets/archive/tests/exclusions.yml create mode 100644 tests/integration/targets/archive/tests/remove.yml diff --git a/changelogs/fragments/2913-archive-dest_state.yml b/changelogs/fragments/2913-archive-dest_state.yml new file mode 100644 index 0000000000..9e9e67434e --- /dev/null +++ b/changelogs/fragments/2913-archive-dest_state.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - archive - added ``dest_state`` return value to describe final state of ``dest`` after successful task execution + 
(https://github.com/ansible-collections/community.general/pull/2913). diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index a2d3376613..822ea1cd9d 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -137,6 +137,16 @@ state: The state of the input C(path). type: str returned: always +dest_state: + description: + - The state of the I(dest) file. + - C(absent) when the file does not exist. + - C(archive) when the file is an archive. + - C(compress) when the file is compressed, but not an archive. + - C(incomplete) when the file is an archive, but some files under I(path) were not found. + type: str + returned: success + version_added: 3.4.0 missing: description: Any files that were missing from the source. type: list @@ -435,6 +445,7 @@ class Archive(object): return { 'archived': [_to_native(p) for p in self.successes], 'dest': _to_native(self.destination), + 'dest_state': self.destination_state, 'changed': self.changed, 'arcroot': _to_native(self.root), 'missing': [_to_native(p) for p in self.not_found], diff --git a/tests/integration/targets/archive/tasks/broken-link.yml b/tests/integration/targets/archive/tasks/broken-link.yml deleted file mode 100644 index b1e0fb752b..0000000000 --- a/tests/integration/targets/archive/tasks/broken-link.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: Create broken link - file: - src: /nowhere - dest: "{{ output_dir }}/nowhere.txt" - state: link - force: yes - -- name: Archive broken link (tar.gz) - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_broken_link.tar.gz" - -- name: Archive broken link (tar.bz2) - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_broken_link.tar.bz2" - -- name: Archive broken link (zip) - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_broken_link.zip" diff --git a/tests/integration/targets/archive/tasks/main.yml 
b/tests/integration/targets/archive/tasks/main.yml index 35a8f1edf3..e0757b0ead 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -22,6 +22,7 @@ # along with Ansible. If not, see . # Make sure we start fresh +# Test setup - name: Ensure zip is present to create test archive (yum) yum: name=zip state=latest when: ansible_facts.pkg_mgr == 'yum' @@ -82,400 +83,45 @@ - sub - sub/subfile.txt -- name: archive using gz - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_01.gz" - format: gz - register: archive_gz_result_01 - -- debug: msg="{{ archive_gz_result_01 }}" - -- name: verify that the files archived - file: path={{output_dir}}/archive_01.gz state=file - -- name: check if gz file exists and includes all text files - assert: - that: - - "{{ archive_gz_result_01.changed }}" - - "{{ 'archived' in archive_gz_result_01 }}" - - "{{ archive_gz_result_01['archived'] | length }} == 3" - -- name: archive using zip - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_01.zip" - format: zip - register: archive_zip_result_01 - -- debug: msg="{{ archive_zip_result_01 }}" - -- name: verify that the files archived - file: path={{output_dir}}/archive_01.zip state=file - -- name: check if zip file exists - assert: - that: - - "{{ archive_zip_result_01.changed }}" - - "{{ 'archived' in archive_zip_result_01 }}" - - "{{ archive_zip_result_01['archived'] | length }} == 3" - -- name: archive using bz2 - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_01.bz2" - format: bz2 - register: archive_bz2_result_01 - -- debug: msg="{{ archive_bz2_result_01 }}" - -- name: verify that the files archived - file: path={{output_dir}}/archive_01.bz2 state=file - -- name: check if bzip file exists - assert: - that: - - "{{ archive_bz2_result_01.changed }}" - - "{{ 'archived' in archive_bz2_result_01 }}" - - "{{ archive_bz2_result_01['archived'] | length }} 
== 3" - -- name: archive using xz - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_01.xz" - format: xz - register: archive_xz_result_01 - -- debug: msg="{{ archive_xz_result_01 }}" - -- name: verify that the files archived - file: path={{output_dir}}/archive_01.xz state=file - -- name: check if xz file exists - assert: - that: - - "{{ archive_xz_result_01.changed }}" - - "{{ 'archived' in archive_xz_result_01 }}" - - "{{ archive_xz_result_01['archived'] | length }} == 3" - -- name: archive and set mode to 0600 - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_02.gz" - format: gz - mode: "u+rwX,g-rwx,o-rwx" - register: archive_bz2_result_02 - -- name: Test that the file modes were changed - stat: - path: "{{ output_dir }}/archive_02.gz" - register: archive_02_gz_stat - -- debug: msg="{{ archive_02_gz_stat}}" - -- name: Test that the file modes were changed - assert: - that: - - archive_02_gz_stat is not changed - - "archive_02_gz_stat.stat.mode == '0600'" - - "'archived' in archive_bz2_result_02" - - "{{ archive_bz2_result_02['archived']| length}} == 3" - -- name: remove our gz - file: path="{{ output_dir }}/archive_02.gz" state=absent - - -- name: archive and set mode to 0600 - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_02.zip" - format: zip - mode: "u+rwX,g-rwx,o-rwx" - register: archive_zip_result_02 - -- name: Test that the file modes were changed - stat: - path: "{{ output_dir }}/archive_02.zip" - register: archive_02_zip_stat - -- name: Test that the file modes were changed - assert: - that: - - archive_02_zip_stat is not changed - - "archive_02_zip_stat.stat.mode == '0600'" - - "'archived' in archive_zip_result_02" - - "{{ archive_zip_result_02['archived']| length}} == 3" - -- name: remove our zip - file: path="{{ output_dir }}/archive_02.zip" state=absent - - -- name: archive and set mode to 0600 - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir 
}}/archive_02.bz2" - format: bz2 - mode: "u+rwX,g-rwx,o-rwx" - register: archive_bz2_result_02 - -- name: Test that the file modes were changed - stat: - path: "{{ output_dir }}/archive_02.bz2" - register: archive_02_bz2_stat - -- name: Test that the file modes were changed - assert: - that: - - archive_02_bz2_stat is not changed - - "archive_02_bz2_stat.stat.mode == '0600'" - - "'archived' in archive_bz2_result_02" - - "{{ archive_bz2_result_02['archived']| length}} == 3" - -- name: remove our bz2 - file: path="{{ output_dir }}/archive_02.bz2" state=absent - -- name: archive and set mode to 0600 - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_02.xz" - format: xz - mode: "u+rwX,g-rwx,o-rwx" - register: archive_xz_result_02 - -- name: Test that the file modes were changed - stat: - path: "{{ output_dir }}/archive_02.xz" - register: archive_02_xz_stat - -- name: Test that the file modes were changed - assert: - that: - - archive_02_xz_stat is not changed - - "archive_02_xz_stat.stat.mode == '0600'" - - "'archived' in archive_xz_result_02" - - "{{ archive_xz_result_02['archived']| length}} == 3" - -- name: remove our xz - file: path="{{ output_dir }}/archive_02.xz" state=absent - -- name: archive multiple files as list - archive: - path: - - "{{ output_dir }}/empty.txt" - - "{{ output_dir }}/foo.txt" - - "{{ output_dir }}/bar.txt" - dest: "{{ output_dir }}/archive_list.gz" - format: gz - register: archive_gz_list_result - -- name: verify that the files archived - file: path={{output_dir}}/archive_list.gz state=file - -- name: check if gz file exists and includes all text files - assert: - that: - - "{{ archive_gz_list_result.changed }}" - - "{{ 'archived' in archive_gz_list_result }}" - - "{{ archive_gz_list_result['archived'] | length }} == 3" - -- name: remove our gz - file: path="{{ output_dir }}/archive_list.gz" state=absent - -- name: test that gz archive that contains non-ascii filenames - archive: - path: "{{ output_dir }}/*.txt" - 
dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz" - format: gz - register: nonascii_result_0 - -- name: Check that file is really there - stat: - path: "{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz" - register: nonascii_stat0 - -- name: Assert that nonascii tests succeeded - assert: - that: - - nonascii_result_0 is changed - - "nonascii_stat0.stat.exists == true" - -- name: remove nonascii test - file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz" state=absent - -- name: test that bz2 archive that contains non-ascii filenames - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.bz2" - format: bz2 - register: nonascii_result_1 - -- name: Check that file is really there - stat: - path: "{{ output_dir }}/test-archive-nonascii-くらとみ.bz2" - register: nonascii_stat_1 - -- name: Assert that nonascii tests succeeded - assert: - that: - - nonascii_result_1 is changed - - "nonascii_stat_1.stat.exists == true" - -- name: remove nonascii test - file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.bz2" state=absent - -- name: test that xz archive that contains non-ascii filenames - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.xz" - format: xz - register: nonascii_result_1 - -- name: Check that file is really there - stat: - path: "{{ output_dir }}/test-archive-nonascii-くらとみ.xz" - register: nonascii_stat_1 - -- name: Assert that nonascii tests succeeded - assert: - that: - - nonascii_result_1 is changed - - "nonascii_stat_1.stat.exists == true" - -- name: remove nonascii test - file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.xz" state=absent - -- name: test that zip archive that contains non-ascii filenames - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.zip" - format: zip - register: nonascii_result_2 - -- name: Check that file is really there - stat: - path: "{{ output_dir 
}}/test-archive-nonascii-くらとみ.zip" - register: nonascii_stat_2 - -- name: Assert that nonascii tests succeeded - assert: - that: - - nonascii_result_2 is changed - - "nonascii_stat_2.stat.exists == true" - -- name: remove nonascii test - file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.zip" state=absent - -- name: Test exclusion_patterns option - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/test-archive-exclusion-patterns.tgz" - exclusion_patterns: b?r.* - register: exclusion_patterns_result - -- name: Assert that exclusion_patterns only archives included files - assert: - that: - - exclusion_patterns_result is changed - - "'bar.txt' not in exclusion_patterns_result.archived" - -- name: Test that excluded paths do not influence archive root - archive: - path: - - "{{ output_dir }}/sub/subfile.txt" - - "{{ output_dir }}" - exclude_path: - - "{{ output_dir }}" - dest: "{{ output_dir }}/test-archive-root.tgz" - register: archive_root_result - -- name: Assert that excluded paths do not influence archive root - assert: - that: - - archive_root_result.arcroot != output_dir - -- name: Remove archive root test - file: - path: "{{ output_dir }}/test-archive-root.tgz" - state: absent - -- name: Test Single Target with format={{ item }} - archive: - path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/test-single-target.{{ item }}" - format: "{{ item }}" - register: "single_target_test" - loop: - - zip - - tar - - gz - - bz2 - - xz - -# Dummy tests until ``dest_state`` result value can be implemented -- name: Assert that single target tests are effective - assert: - that: - - single_target_test.results[0] is changed - - single_target_test.results[1] is changed - - single_target_test.results[2] is changed - - single_target_test.results[3] is changed - - single_target_test.results[4] is changed - -- name: Retrieve contents of single target archives - ansible.builtin.unarchive: - src: "{{ output_dir }}/test-single-target.zip" - dest: . 
- list_files: true - check_mode: true - ignore_errors: true - register: single_target_test_contents - -- name: Assert that file names in single-file zip archives are preserved - assert: - that: - - "'oo.txt' not in single_target_test_contents.files" - - "'foo.txt' in single_target_test_contents.files" - # ``unarchive`` fails for RHEL and FreeBSD on ansible 2.x - when: single_target_test_contents is success and single_target_test_contents is not skipped - -- name: Remove single target test with format={{ item }} - file: - path: "{{ output_dir }}/test-single-target.{{ item }}" - state: absent - loop: - - zip - - tar - - gz - - bz2 - - xz - -- name: Test that missing files result in incomplete state - archive: - path: - - "{{ output_dir }}/*.txt" - - "{{ output_dir }}/dne.txt" - exclude_path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/test-incomplete-archive.tgz" - register: incomplete_archive_result - -- name: Assert that incomplete archive has incomplete state - assert: - that: - - incomplete_archive_result is changed - - "'{{ output_dir }}/dne.txt' in incomplete_archive_result.missing" - - "'{{ output_dir }}/foo.txt' not in incomplete_archive_result.missing" - -- name: Remove incomplete archive - file: - path: "{{ output_dir }}/test-incomplete-archive.tgz" - state: absent - +- name: Define formats to test + set_fact: + formats: + - tar + - zip + - gz + - bz2 + - xz + +# Run tests +- name: Run core tests + include_tasks: + file: ../tests/core.yml + loop: "{{ formats }}" + loop_control: + loop_var: format + +- name: Run exclusions tests + include_tasks: + file: ../tests/exclusions.yml + loop: "{{ formats }}" + loop_control: + loop_var: format + +- name: Run remove tests + include_tasks: + file: ../tests/remove.yml + loop: "{{ formats }}" + loop_control: + loop_var: format + +- name: Run broken link tests + include_tasks: + file: ../tests/broken-link.yml + loop: "{{ formats }}" + loop_control: + loop_var: format + +# Test cleanup - name: Remove 
backports.lzma if previously installed (pip) pip: name=backports.lzma state=absent when: backports_lzma_pip is changed - -- name: import remove tests - import_tasks: remove.yml - -- name: import broken-link tests - import_tasks: broken-link.yml diff --git a/tests/integration/targets/archive/tasks/remove.yml b/tests/integration/targets/archive/tasks/remove.yml deleted file mode 100644 index 9f085e901a..0000000000 --- a/tests/integration/targets/archive/tasks/remove.yml +++ /dev/null @@ -1,186 +0,0 @@ ---- -- name: archive using gz and remove src files - archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_remove_01.gz" - format: gz - remove: yes - register: archive_remove_result_01 - -- debug: msg="{{ archive_remove_result_01 }}" - -- name: verify that the files archived - file: path={{ output_dir }}/archive_remove_01.gz state=file - -- name: check if gz file exists and includes all text files and src files has been removed - assert: - that: - - "{{ archive_remove_result_01.changed }}" - - "{{ 'archived' in archive_remove_result_01 }}" - - "{{ archive_remove_result_01['archived'] | length }} == 3" - -- name: remove our gz - file: path="{{ output_dir }}/archive_remove_01.gz" state=absent - -- name: check if src files has been removed - assert: - that: - - "'{{ output_dir }}/{{ item }}' is not exists" - with_items: - - foo.txt - - bar.txt - - empty.txt - -- name: prep our files again - copy: src={{ item }} dest={{ output_dir }}/{{ item }} - with_items: - - foo.txt - - bar.txt - - empty.txt - -- name: create a temporary directory to be check if it will be removed - file: - path: "{{ output_dir }}/tmpdir" - state: directory - -- name: prep our files in tmpdir - copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} - with_items: - - foo.txt - - bar.txt - - empty.txt - -- name: archive using gz and remove src directory - archive: - path: "{{ output_dir }}/tmpdir" - dest: "{{ output_dir }}/archive_remove_02.gz" - format: gz - remove: yes - 
register: archive_remove_result_02 - -- debug: msg="{{ archive_remove_result_02 }}" - -- name: verify that the files archived - file: path={{ output_dir }}/archive_remove_02.gz state=file - -- name: check if gz file exists and includes all text files - assert: - that: - - "{{ archive_remove_result_02.changed }}" - - "{{ 'archived' in archive_remove_result_02 }}" - - "{{ archive_remove_result_02['archived'] | length }} == 3" - -- name: remove our gz - file: path="{{ output_dir }}/archive_remove_02.gz" state=absent - -- name: check if src folder has been removed - assert: - that: - - "'{{ output_dir }}/tmpdir' is not exists" - -- name: create temporary directory again - file: - path: "{{ output_dir }}/tmpdir" - state: directory - -- name: prep our files in tmpdir again - copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} - with_items: - - foo.txt - - bar.txt - - empty.txt - -- name: archive using gz and remove src directory excluding one file - archive: - path: "{{ output_dir }}/tmpdir/*" - dest: "{{ output_dir }}/archive_remove_03.gz" - format: gz - remove: yes - exclude_path: "{{ output_dir }}/tmpdir/empty.txt" - register: archive_remove_result_03 - -- debug: msg="{{ archive_remove_result_03 }}" - -- name: verify that the files archived - file: path={{ output_dir }}/archive_remove_03.gz state=file - -- name: check if gz file exists and includes all text files - assert: - that: - - "{{ archive_remove_result_03.changed }}" - - "{{ 'archived' in archive_remove_result_03 }}" - - "{{ archive_remove_result_03['archived'] | length }} == 2" - -- name: remove our gz - file: path="{{ output_dir }}/archive_remove_03.gz" state=absent - -- name: verify that excluded file is still present - file: path={{ output_dir }}/tmpdir/empty.txt state=file - -- name: prep our files in tmpdir again - copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} - with_items: - - foo.txt - - bar.txt - - empty.txt - - sub - - sub/subfile.txt - -- name: archive using gz and remove src 
directory - archive: - path: - - "{{ output_dir }}/tmpdir/*.txt" - - "{{ output_dir }}/tmpdir/sub/*" - dest: "{{ output_dir }}/archive_remove_04.gz" - format: gz - remove: yes - exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" - register: archive_remove_result_04 - -- debug: msg="{{ archive_remove_result_04 }}" - -- name: verify that the files archived - file: path={{ output_dir }}/archive_remove_04.gz state=file - -- name: remove our gz - file: path="{{ output_dir }}/archive_remove_04.gz" state=absent - -- name: verify that excluded sub file is still present - file: path={{ output_dir }}/tmpdir/sub/subfile.txt state=file - -- name: prep our files in tmpdir again - copy: src={{ item }} dest={{ output_dir }}/tmpdir/{{ item }} - with_items: - - foo.txt - - bar.txt - - empty.txt - - sub - - sub/subfile.txt - -- name: archive using gz and remove src directory - archive: - path: - - "{{ output_dir }}/tmpdir/" - dest: "{{ output_dir }}/archive_remove_05.gz" - format: gz - remove: yes - exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" - register: archive_remove_result_05 - -- name: verify that the files archived - file: path={{ output_dir }}/archive_remove_05.gz state=file - -- name: Verify source files were removed - file: - path: "{{ output_dir }}/tmpdir" - state: absent - register: archive_source_file_removal_05 - -- name: Verify that task status is success - assert: - that: - - archive_remove_result_05 is success - - archive_source_file_removal_05 is not changed - -- name: remove our gz - file: path="{{ output_dir }}/archive_remove_05.gz" state=absent diff --git a/tests/integration/targets/archive/tests/broken-link.yml b/tests/integration/targets/archive/tests/broken-link.yml new file mode 100644 index 0000000000..cc1e07aaf1 --- /dev/null +++ b/tests/integration/targets/archive/tests/broken-link.yml @@ -0,0 +1,31 @@ +--- +- block: + - name: Create link - broken link ({{ format }}) + file: + src: /nowhere + dest: "{{ output_dir }}/nowhere.txt" + state: 
link + force: yes + + - name: Archive - broken link ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_broken_link.{{ format }}" + format: "{{ format }}" + + - name: Verify archive exists - broken link ({{ format }}) + file: + path: "{{ output_dir }}/archive_broken_link.{{ format }}" + state: file + + - name: Remove archive - broken link ({{ format }}) + file: + path: "{{ output_dir }}/archive_broken_link.{{ format }}" + state: absent + + - name: Remove link - broken link ({{ format }}) + file: + path: "{{ output_dir }}/nowhere.txt" + state: absent + # 'zip' does not support symlink's + when: format != 'zip' diff --git a/tests/integration/targets/archive/tests/core.yml b/tests/integration/targets/archive/tests/core.yml new file mode 100644 index 0000000000..f12e5083cc --- /dev/null +++ b/tests/integration/targets/archive/tests/core.yml @@ -0,0 +1,188 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +# Test code for the archive module. +# (c) 2017, Abhijeet Kasurde + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# Make sure we start fresh + +# Core functionality tests +- name: Archive - no options ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_no_opts.{{ format }}" + format: "{{ format }}" + register: archive_no_options + +- name: Verify that archive exists - no options ({{ format }}) + file: + path: "{{output_dir}}/archive_no_opts.{{ format }}" + state: file + +- name: Verify that archive result is changed and includes all files - no options ({{ format }}) + assert: + that: + - archive_no_options is changed + - "archive_no_options.dest_state == 'archive'" + - "{{ archive_no_options.archived | length }} == 3" + - +- name: Remove the archive - no options ({{ format }}) + file: + path: "{{ output_dir }}/archive_no_options.{{ format }}" + state: absent + +- name: Archive - file options ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_file_options.{{ format }}" + format: "{{ format }}" + mode: "u+rwX,g-rwx,o-rwx" + register: archive_file_options + +- name: Retrieve archive file information - file options ({{ format }}) + stat: + path: "{{ output_dir }}/archive_file_options.{{ format }}" + register: archive_file_options_stat + +- name: Test that the file modes were changed + assert: + that: + - archive_file_options_stat is not changed + - "archive_file_options.mode == '0600'" + - "{{ archive_file_options.archived | length }} == 3" + +- name: Remove the archive - file options ({{ format }}) + file: + path: "{{ output_dir }}/archive_file_options.{{ format }}" + state: absent + +- name: Archive - non-ascii ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + format: "{{ format }}" + register: archive_nonascii + +- name: Retrieve archive file information - non-ascii ({{ format }}) + stat: + path: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + register: archive_nonascii_stat + +- name: Test that archive exists - 
non-ascii ({{ format }}) + assert: + that: + - archive_nonascii is changed + - archive_nonascii_stat.stat.exists == true + +- name: Remove the archive - non-ascii ({{ format }}) + file: + path: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + state: absent + +- name: Archive - single target ({{ format }}) + archive: + path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/archive_single_target.{{ format }}" + format: "{{ format }}" + register: archive_single_target + +- name: Assert archive has correct state - single target ({{ format }}) + assert: + that: + - archive_single_target.dest_state == state_map[format] + vars: + state_map: + tar: archive + zip: archive + gz: compress + bz2: compress + xz: compress + +- block: + - name: Retrieve contents of archive - single target ({{ format }}) + ansible.builtin.unarchive: + src: "{{ output_dir }}/archive_single_target.{{ format }}" + dest: . + list_files: true + check_mode: true + ignore_errors: true + register: archive_single_target_contents + + - name: Assert that file names are preserved - single target ({{ format }}) + assert: + that: + - "'oo.txt' not in archive_single_target_contents.files" + - "'foo.txt' in archive_single_target_contents.files" + # ``unarchive`` fails for RHEL and FreeBSD on ansible 2.x + when: archive_single_target_contents is success and archive_single_target_contents is not skipped + when: "format == 'zip'" + +- name: Remove archive - single target ({{ format }}) + file: + path: "{{ output_dir }}/archive_single_target.{{ format }}" + state: absent + +- name: Archive - path list ({{ format }}) + archive: + path: + - "{{ output_dir }}/empty.txt" + - "{{ output_dir }}/foo.txt" + - "{{ output_dir }}/bar.txt" + dest: "{{ output_dir }}/archive_path_list.{{ format }}" + format: "{{ format }}" + register: archive_path_list + +- name: Verify that archive exists - path list ({{ format }}) + file: + path: "{{output_dir}}/archive_path_list.{{ format }}" + state: file + +- name: Assert that 
archive contains all files - path list ({{ format }}) + assert: + that: + - archive_path_list is changed + - "{{ archive_path_list.archived | length }} == 3" + +- name: Remove archive - path list ({{ format }}) + file: + path: "{{ output_dir }}/archive_path_list.{{ format }}" + state: absent + +- name: Archive - missing paths ({{ format }}) + archive: + path: + - "{{ output_dir }}/*.txt" + - "{{ output_dir }}/dne.txt" + exclude_path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/archive_missing_paths.{{ format }}" + format: "{{ format }}" + register: archive_missing_paths + +- name: Assert that incomplete archive has incomplete state - missing paths ({{ format }}) + assert: + that: + - archive_missing_paths is changed + - "archive_missing_paths.dest_state == 'incomplete'" + - "'{{ output_dir }}/dne.txt' in archive_missing_paths.missing" + - "'{{ output_dir }}/foo.txt' not in archive_missing_paths.missing" + +- name: Remove archive - missing paths ({{ format }}) + file: + path: "{{ output_dir }}/archive_missing_paths.{{ format }}" + state: absent diff --git a/tests/integration/targets/archive/tests/exclusions.yml b/tests/integration/targets/archive/tests/exclusions.yml new file mode 100644 index 0000000000..0b65f85851 --- /dev/null +++ b/tests/integration/targets/archive/tests/exclusions.yml @@ -0,0 +1,40 @@ +--- +- name: Archive - exclusion patterns ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_exclusion_patterns.{{ format }}" + format: "{{ format }}" + exclusion_patterns: b?r.* + register: archive_exclusion_patterns + +- name: Assert that only included files are archived - exclusion patterns ({{ format }}) + assert: + that: + - archive_exclusion_patterns is changed + - "'bar.txt' not in archive_exclusion_patterns.archived" + +- name: Remove archive - exclusion patterns ({{ format }}) + file: + path: "{{ output_dir }}/archive_exclusion_patterns.{{ format }}" + state: absent + +- name: Archive - exclude path ({{ 
format }}) + archive: + path: + - "{{ output_dir }}/sub/subfile.txt" + - "{{ output_dir }}" + exclude_path: + - "{{ output_dir }}" + dest: "{{ output_dir }}/archive_exclude_paths.{{ format }}" + format: "{{ format }}" + register: archive_excluded_paths + +- name: Assert that excluded paths do not influence archive root - exclude path ({{ format }}) + assert: + that: + - archive_excluded_paths.arcroot != output_dir + +- name: Remove archive - exclude path ({{ format }}) + file: + path: "{{ output_dir }}/archive_exclude_paths.{{ format }}" + state: absent diff --git a/tests/integration/targets/archive/tests/remove.yml b/tests/integration/targets/archive/tests/remove.yml new file mode 100644 index 0000000000..26849ac850 --- /dev/null +++ b/tests/integration/targets/archive/tests/remove.yml @@ -0,0 +1,207 @@ +--- +- name: Archive - remove source files ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + format: "{{ format }}" + remove: yes + register: archive_remove_source_files + +- name: Verify archive exists - remove source files ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + state: file + +- name: Verify all files were archived - remove source files ({{ format }}) + assert: + that: + - archive_remove_source_files is changed + - "{{ archive_remove_source_files.archived | length }} == 3" + +- name: Remove Archive - remove source files ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + state: absent + +- name: Assert that source files were removed - remove source files ({{ format }}) + assert: + that: + - "'{{ output_dir }}/{{ item }}' is not exists" + with_items: + - foo.txt + - bar.txt + - empty.txt + +- name: Copy source files - remove source directory ({{ format }}) + copy: + src: "{{ item }}" + dest: "{{ output_dir }}/{{ item }}" + with_items: + - foo.txt + - bar.txt + - empty.txt + +- name: Create 
temporary directory - remove source directory ({{ format }}) + file: + path: "{{ output_dir }}/tmpdir" + state: directory + +- name: Copy source files to temporary directory - remove source directory ({{ format }}) + copy: + src: "{{ item }}" + dest: "{{ output_dir }}/tmpdir/{{ item }}" + with_items: + - foo.txt + - bar.txt + - empty.txt + +- name: Archive - remove source directory ({{ format }}) + archive: + path: "{{ output_dir }}/tmpdir" + dest: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + format: "{{ format }}" + remove: yes + register: archive_remove_source_directory + +- name: Verify archive exists - remove source directory ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + state: file + +- name: Verify archive contains all files - remove source directory ({{ format }}) + assert: + that: + - archive_remove_source_directory is changed + - "{{ archive_remove_source_directory.archived | length }} == 3" + +- name: Remove archive - remove source directory ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + state: absent + +- name: Verify source directory was removed - remove source directory ({{ format }}) + assert: + that: + - "'{{ output_dir }}/tmpdir' is not exists" + +- name: Create temporary directory - remove source excluding path ({{ format }}) + file: + path: "{{ output_dir }}/tmpdir" + state: directory + +- name: Copy source files to temporary directory - remove source excluding path ({{ format }}) + copy: + src: "{{ item }}" + dest: "{{ output_dir }}/tmpdir/{{ item }}" + with_items: + - foo.txt + - bar.txt + - empty.txt + +- name: Archive - remove source excluding path ({{ format }}) + archive: + path: "{{ output_dir }}/tmpdir/*" + dest: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + format: "{{ format }}" + remove: yes + exclude_path: "{{ output_dir }}/tmpdir/empty.txt" + register: archive_remove_source_excluding_path + 
+- name: Verify archive exists - remove source excluding path ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + state: file + +- name: Verify all files except excluded are archived - remove source excluding path ({{ format }}) + assert: + that: + - archive_remove_source_excluding_path is changed + - "{{ archive_remove_source_excluding_path.archived | length }} == 2" + +- name: Remove archive - remove source excluding path ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + state: absent + +- name: Verify that excluded file still exists - remove source excluding path ({{ format }}) + file: + path: "{{ output_dir }}/tmpdir/empty.txt" + state: file + +- name: Copy source files to temporary directory - remove source excluding sub path ({{ format }}) + copy: + src: "{{ item }}" + dest: "{{ output_dir }}/tmpdir/{{ item }}" + with_items: + - foo.txt + - bar.txt + - empty.txt + - sub + - sub/subfile.txt + +- name: Archive - remove source excluding sub path ({{ format }}) + archive: + path: + - "{{ output_dir }}/tmpdir/*.txt" + - "{{ output_dir }}/tmpdir/sub/*" + dest: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + format: "{{ format }}" + remove: yes + exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + register: archive_remove_source_excluding_sub_path + +- name: Verify archive exists - remove source excluding sub path ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + state: file + +- name: Remove archive - remove source excluding sub path ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + state: absent + +- name: Verify that sub path still exists - remove source excluding sub path ({{ format }}) + file: + path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + state: file + +- name: Copy source files to temporary directory - remove 
source with nested paths ({{ format }}) + copy: + src: "{{ item }}" + dest: "{{ output_dir }}/tmpdir/{{ item }}" + with_items: + - foo.txt + - bar.txt + - empty.txt + - sub + - sub/subfile.txt + +- name: Archive - remove source with nested paths ({{ format }}) + archive: + path: "{{ output_dir }}/tmpdir/" + dest: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + format: "{{ format }}" + remove: yes + register: archive_remove_nested_paths + +- name: Verify archive exists - remove source with nested paths ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + state: file + +- name: Verify source files were removed - remove source with nested paths ({{ format }}) + file: + path: "{{ output_dir }}/tmpdir" + state: absent + register: archive_remove_nested_paths_status + +- name: Assert tasks status - remove source with nested paths ({{ format }}) + assert: + that: + - archive_remove_nested_paths is success + - archive_remove_nested_paths_status is not changed + +- name: Remove archive - remove source with nested paths ({{ format }}) + file: + path: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + state: absent From ad8c4e4de6cbbf6d2f6a366cb70451682e5a2684 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 10 Jul 2021 23:01:16 +1200 Subject: [PATCH 0428/3093] added comments to the ignore files (#2972) --- tests/sanity/ignore-2.10.txt | 20 ++++++++++---------- tests/sanity/ignore-2.11.txt | 20 ++++++++++---------- tests/sanity/ignore-2.12.txt | 20 ++++++++++---------- tests/sanity/ignore-2.9.txt | 18 +++++++++--------- 4 files changed, 39 insertions(+), 39 deletions(-) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index bdb3ca4e9a..6c60a4c6f8 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -18,17 +18,17 @@ plugins/modules/cloud/univention/udm_user.py 
validate-modules:parameter-list-no- plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/notification/grove.py validate-modules:invalid-argument-name +plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid -plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid -plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid +plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error -plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid -plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid +plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/urpmi.py 
validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc @@ -47,7 +47,7 @@ plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc +plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 34889a2651..e3785767b1 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -17,17 +17,17 @@ plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no- plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/notification/grove.py validate-modules:invalid-argument-name +plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 
plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid -plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid -plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid +plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error -plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid -plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid +plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc @@ -46,7 +46,7 @@ 
plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc +plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 6e14759c9d..197868474b 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -17,17 +17,17 @@ plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no- plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice -plugins/modules/notification/grove.py validate-modules:invalid-argument-name +plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid -plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid -plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid 
+plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error -plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid -plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid +plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc @@ -46,7 +46,7 @@ plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc +plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed 
in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice plugins/modules/system/xfconf.py validate-modules:return-syntax-error diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 33f3d183d4..e21faf2ce3 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -12,15 +12,15 @@ plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undo plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid -plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid -plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid -plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid +plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error -plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid -plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid 
-plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid +plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 +plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc @@ -65,7 +65,7 @@ plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc -plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc +plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # deprecated param - removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path From 1990f79d8a69ae6fef7457d853c54d9348aad2fa Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 10 Jul 2021 23:03:41 +1200 Subject: [PATCH 0429/3093] launchd - fixed validation check (#2960) * replaced use of expanduser() with value from HOME var * fixed sanity check * added changelog fragment --- changelogs/fragments/2960-launchd-validation-check.yaml | 2 ++ plugins/modules/system/launchd.py | 2 +- tests/sanity/ignore-2.10.txt | 1 - tests/sanity/ignore-2.11.txt | 1 - tests/sanity/ignore-2.12.txt | 
1 - tests/sanity/ignore-2.9.txt | 1 - 6 files changed, 3 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2960-launchd-validation-check.yaml diff --git a/changelogs/fragments/2960-launchd-validation-check.yaml b/changelogs/fragments/2960-launchd-validation-check.yaml new file mode 100644 index 0000000000..15cb3c3fa5 --- /dev/null +++ b/changelogs/fragments/2960-launchd-validation-check.yaml @@ -0,0 +1,2 @@ +bugfixes: + - launchd - fixed sanity check in the module's code (https://github.com/ansible-collections/community.general/pull/2960). diff --git a/plugins/modules/system/launchd.py b/plugins/modules/system/launchd.py index 30a5ed02b2..e8d82ff318 100644 --- a/plugins/modules/system/launchd.py +++ b/plugins/modules/system/launchd.py @@ -159,7 +159,7 @@ class Plist: """Finds the plist file associated with a service""" launchd_paths = [ - os.path.expanduser('~/Library/LaunchAgents'), + os.path.join(os.getenv('HOME'), 'Library/LaunchAgents'), '/Library/LaunchAgents', '/Library/LaunchDaemons', '/System/Library/LaunchAgents', diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 6c60a4c6f8..f313df3620 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -41,7 +41,6 @@ plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:par plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter -plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index e3785767b1..6858d92104 
100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -40,7 +40,6 @@ plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:par plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter -plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 197868474b..9b0e047d57 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -40,7 +40,6 @@ plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:par plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter -plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice plugins/modules/system/puppet.py use-argspec-type-path diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index e21faf2ce3..65611001b1 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -62,7 +62,6 @@ plugins/modules/net_tools/nios/nios_zone.py validate-modules:deprecation-mismatc plugins/modules/net_tools/nios/nios_zone.py validate-modules:invalid-documentation plugins/modules/source_control/github/github_deploy_key.py 
validate-modules:parameter-invalid plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter -plugins/modules/system/launchd.py use-argspec-type-path # False positive plugins/modules/system/puppet.py use-argspec-type-path plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # deprecated param - removed in 4.0.0 From 0e90ff48b5e0d3c3b51542fc843d3d873d04a2d5 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 10 Jul 2021 23:05:20 +1200 Subject: [PATCH 0430/3093] rax_mon_notification_plan - fixed validation check (#2955) * fixed validation-modules for plugins/modules/cloud/rackspace/rax_mon_notification_plan.py * fixed sanity check * added changelog fragment --- ..._notification_plan-added-elements-to-list-params.yaml | 2 ++ .../modules/cloud/rackspace/rax_mon_notification_plan.py | 9 ++++++--- tests/sanity/ignore-2.10.txt | 1 - tests/sanity/ignore-2.11.txt | 1 - tests/sanity/ignore-2.12.txt | 1 - 5 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/2955-rax_mon_notification_plan-added-elements-to-list-params.yaml diff --git a/changelogs/fragments/2955-rax_mon_notification_plan-added-elements-to-list-params.yaml b/changelogs/fragments/2955-rax_mon_notification_plan-added-elements-to-list-params.yaml new file mode 100644 index 0000000000..9ff6f01f7d --- /dev/null +++ b/changelogs/fragments/2955-rax_mon_notification_plan-added-elements-to-list-params.yaml @@ -0,0 +1,2 @@ +minor_changes: + - rax_mon_notification_plan - fixed validation checks by specifying type ``str`` as the ``elements`` of parameters ``ok_state``, ``warning_state`` and ``critical_state`` (https://github.com/ansible-collections/community.general/pull/2955). 
diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py index a4b8920dc7..d5294cd509 100644 --- a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py +++ b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py @@ -32,16 +32,19 @@ options: required: true critical_state: type: list + elements: str description: - Notification list to use when the alarm state is CRITICAL. Must be an array of valid rax_mon_notification ids. warning_state: type: list + elements: str description: - Notification list to use when the alarm state is WARNING. Must be an array of valid rax_mon_notification ids. ok_state: type: list + elements: str description: - Notification list to use when the alarm state is OK. Must be an array of valid rax_mon_notification ids. @@ -150,9 +153,9 @@ def main(): dict( state=dict(default='present', choices=['present', 'absent']), label=dict(required=True), - critical_state=dict(type='list'), - warning_state=dict(type='list'), - ok_state=dict(type='list') + critical_state=dict(type='list', elements='str'), + warning_state=dict(type='list', elements='str'), + ok_state=dict(type='list', elements='str'), ) ) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index f313df3620..d01c3762dc 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -5,7 +5,6 @@ plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-cho plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path -plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values 
plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 6858d92104..2106c993d3 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -4,7 +4,6 @@ plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-cho plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path -plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 9b0e047d57..a30ff2e4ed 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -4,7 +4,6 @@ plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-cho plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path -plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed, expanduser() applied to dict values plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error plugins/modules/cloud/smartos/vmadm.py 
validate-modules:parameter-type-not-in-doc From 4ae392e5de059fcb2cd4a5d6a6127d23d4417f6c Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sat, 10 Jul 2021 13:31:54 +0200 Subject: [PATCH 0431/3093] Temporarily disable passwordstore lookup tests on macOS and OSX. (#2979) --- tests/integration/targets/lookup_passwordstore/aliases | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/targets/lookup_passwordstore/aliases b/tests/integration/targets/lookup_passwordstore/aliases index 8b108917a0..7cc72b73d4 100644 --- a/tests/integration/targets/lookup_passwordstore/aliases +++ b/tests/integration/targets/lookup_passwordstore/aliases @@ -3,3 +3,5 @@ destructive skip/aix skip/rhel skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller +skip/osx # FIXME https://github.com/ansible-collections/community.general/issues/2978 +skip/macos # FIXME https://github.com/ansible-collections/community.general/issues/2978 From 9023d4dba1c635e9839448e975a8c0c0fdf1fdff Mon Sep 17 00:00:00 2001 From: quidame Date: Sat, 10 Jul 2021 16:37:31 +0200 Subject: [PATCH 0432/3093] filesystem: extend support for FreeBSD (#2902) * extend support for FreeBSD * Check if FS exists with `fstyp` if `blkid` fails to find FS signature (fix a potential data loss) * Add support for FreeBSD special devices (character devices). * Add support for FreeBSD native fstype (UFS). * Update DOCUMENTATION accordingly. * add/update integration tests * Add tests for `fstype=ufs` on FreeBSD. * Run `remove_fs` tests (`state=absent`) on FreeBSD. * Run `overwrite_another_fs` tests on FreeBSD. 
* add a changelog fragment * fix indentation * restrict new tests to regular files * fix typo * fix searching of providersize (block count) * add '-y' option to growfs command * remove references to versions older than the collection itself * bump version adding new feats to 3.4.0 * reformat *collection* and *version added* for better DOCUMENTATION parsing * skip tests for FreeBSD < 12.2 * run tests for FreeBSD >= 12.2 * re-enable tests for FreeBSD < 12.2 and give it a try with group1 * util-linux not available on FreeBSD < 12.2 --- ...2902-filesystem_extend_freebsd_support.yml | 6 ++ plugins/modules/system/filesystem.py | 93 ++++++++++++++----- tests/integration/targets/filesystem/aliases | 2 +- .../targets/filesystem/defaults/main.yml | 6 ++ .../filesystem/tasks/create_device.yml | 21 ++++- .../targets/filesystem/tasks/create_fs.yml | 21 +++-- .../filesystem/tasks/freebsd_setup.yml | 10 ++ .../targets/filesystem/tasks/main.yml | 25 +++++ .../filesystem/tasks/overwrite_another_fs.yml | 12 +-- .../targets/filesystem/tasks/remove_fs.yml | 12 +-- 10 files changed, 162 insertions(+), 46 deletions(-) create mode 100644 changelogs/fragments/2902-filesystem_extend_freebsd_support.yml create mode 100644 tests/integration/targets/filesystem/tasks/freebsd_setup.yml diff --git a/changelogs/fragments/2902-filesystem_extend_freebsd_support.yml b/changelogs/fragments/2902-filesystem_extend_freebsd_support.yml new file mode 100644 index 0000000000..1518d0190f --- /dev/null +++ b/changelogs/fragments/2902-filesystem_extend_freebsd_support.yml @@ -0,0 +1,6 @@ +--- +minor_changes: + - filesystem - extend support for FreeBSD. Avoid potential data loss by checking + existence of a filesystem with ``fstyp`` (native command) if ``blkid`` (foreign + command) doesn't find one. Add support for character devices and ``ufs`` filesystem + type (https://github.com/ansible-collections/community.general/pull/2902). 
diff --git a/plugins/modules/system/filesystem.py b/plugins/modules/system/filesystem.py index cbb0e5e95e..4f1d6ee0d1 100644 --- a/plugins/modules/system/filesystem.py +++ b/plugins/modules/system/filesystem.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright: (c) 2021, quidame # Copyright: (c) 2013, Alexander Bulimov # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -12,6 +13,7 @@ DOCUMENTATION = ''' --- author: - Alexander Bulimov (@abulimov) + - quidame (@quidame) module: filesystem short_description: Makes a filesystem description: @@ -30,25 +32,22 @@ options: default: present version_added: 1.3.0 fstype: - choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ] + choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ] description: - Filesystem type to be created. This option is required with C(state=present) (or if I(state) is omitted). - - reiserfs support was added in 2.2. - - lvm support was added in 2.5. - - since 2.5, I(dev) can be an image file. - - vfat support was added in 2.5 - - ocfs2 support was added in 2.6 - - f2fs support was added in 2.7 - - swap support was added in 2.8 + - ufs support has been added in community.general 3.4.0. type: str aliases: [type] dev: description: - - Target path to block device or regular file. - - On systems not using block devices but character devices instead (as - FreeBSD), this module only works when applying to regular files, aka - disk images. + - Target path to block device (Linux) or character device (FreeBSD) or + regular file (both). + - When setting Linux-specific filesystem types on FreeBSD, this module + only works when applying to regular files, aka disk images. + - Currently C(lvm) (Linux-only) and C(ufs) (FreeBSD-only) don't support + a regular file as their target I(dev). + - Support for character devices on FreeBSD has been added in community.general 3.4.0. 
type: path required: yes aliases: [device] @@ -60,7 +59,7 @@ options: resizefs: description: - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space. - - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs) and C(vfat) filesystems. + - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems. Attempts to resize other filesystem types will fail. - XFS Will only grow if mounted. Currently, the module is based on commands from C(util-linux) package to perform operations, so resizing of XFS is @@ -73,16 +72,24 @@ options: - List of options to be passed to mkfs command. type: str requirements: - - Uses tools related to the I(fstype) (C(mkfs)) and the C(blkid) command. - - When I(resizefs) is enabled, C(blockdev) command is required too. + - Uses specific tools related to the I(fstype) for creating or resizing a + filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on). + - Uses generic tools mostly related to the Operating System (Linux or + FreeBSD) or available on both, as C(blkid). + - On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required. notes: - - Potential filesystem on I(dev) are checked using C(blkid). In case C(blkid) - isn't able to detect an existing filesystem, this filesystem is overwritten - even if I(force) is C(no). - - On FreeBSD systems, either C(e2fsprogs) or C(util-linux) packages provide - a C(blkid) command that is compatible with this module, when applied to - regular files. + - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid) + is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also + unable to detect a filesystem), this filesystem is overwritten even if + I(force) is C(no). + - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide + a C(blkid) command that is compatible with this module. 
However, these + packages conflict with each other, and only the C(util-linux) package + provides the command required to not fail when I(state=absent). - This module supports I(check_mode). +seealso: + - module: community.general.filesize + - module: ansible.posix.mount ''' EXAMPLES = ''' @@ -101,6 +108,11 @@ EXAMPLES = ''' community.general.filesystem: dev: /dev/sdb1 state: absent + +- name: Create a filesystem on top of a regular file + community.general.filesystem: + dev: /path/to/disk.img + fstype: vfat ''' from distutils.version import LooseVersion @@ -125,6 +137,10 @@ class Device(object): blockdev_cmd = self.module.get_bin_path("blockdev", required=True) dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True) devsize_in_bytes = int(out) + elif stat.S_ISCHR(statinfo.st_mode) and platform.system() == 'FreeBSD': + diskinfo_cmd = self.module.get_bin_path("diskinfo", required=True) + dummy, out, dummy = self.module.run_command([diskinfo_cmd, self.path], check_rc=True) + devsize_in_bytes = int(out.split()[2]) elif os.path.isfile(self.path): devsize_in_bytes = os.path.getsize(self.path) else: @@ -423,6 +439,31 @@ class Swap(Filesystem): MKFS_FORCE_FLAGS = ['-f'] +class UFS(Filesystem): + MKFS = 'newfs' + INFO = 'dumpfs' + GROW = 'growfs' + GROW_MAX_SPACE_FLAGS = ['-y'] + + def get_fs_size(self, dev): + """Get providersize and fragment size and return their product.""" + cmd = self.module.get_bin_path(self.INFO, required=True) + dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV) + + fragmentsize = providersize = None + for line in out.splitlines(): + if line.startswith('fsize'): + fragmentsize = int(line.split()[1]) + elif 'providersize' in line: + providersize = int(line.split()[-1]) + if None not in (fragmentsize, providersize): + break + else: + raise ValueError(out) + + return fragmentsize * providersize + + FILESYSTEMS = { 'ext2': Ext2, 'ext3': Ext3, @@ -436,6 
+477,7 @@ FILESYSTEMS = { 'ocfs2': Ocfs2, 'LVM2_member': LVM, 'swap': Swap, + 'ufs': UFS, } @@ -484,11 +526,16 @@ def main(): dev = Device(module, dev) + # In case blkid/fstyp isn't able to identify an existing filesystem, device + # is considered as empty, then this existing filesystem would be overwritten + # even if force isn't enabled. cmd = module.get_bin_path('blkid', required=True) rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)]) - # In case blkid isn't able to identify an existing filesystem, device is considered as empty, - # then this existing filesystem would be overwritten even if force isn't enabled. fs = raw_fs.strip() + if not fs and platform.system() == 'FreeBSD': + cmd = module.get_bin_path('fstyp', required=True) + rc, raw_fs, err = module.run_command([cmd, str(dev)]) + fs = raw_fs.strip() if state == "present": if fstype in friendly_names: diff --git a/tests/integration/targets/filesystem/aliases b/tests/integration/targets/filesystem/aliases index 1c80472f94..1ef4c3619a 100644 --- a/tests/integration/targets/filesystem/aliases +++ b/tests/integration/targets/filesystem/aliases @@ -1,5 +1,5 @@ destructive -shippable/posix/group3 +shippable/posix/group1 skip/aix skip/osx skip/macos diff --git a/tests/integration/targets/filesystem/defaults/main.yml b/tests/integration/targets/filesystem/defaults/main.yml index 15ef85aa0e..27672bbea6 100644 --- a/tests/integration/targets/filesystem/defaults/main.yml +++ b/tests/integration/targets/filesystem/defaults/main.yml @@ -23,3 +23,9 @@ tested_filesystems: f2fs: {fssize: '{{ f2fs_fssize|default(60) }}', grow: 'f2fs_version is version("1.10.0", ">=")'} lvm: {fssize: 20, grow: True} swap: {fssize: 10, grow: False} # grow not implemented + ufs: {fssize: 10, grow: True} + + +get_uuid_any: "blkid -c /dev/null -o value -s UUID {{ dev }}" +get_uuid_ufs: "dumpfs {{ dev }} | awk -v sb=superblock -v id=id '$1 == sb && $4 == id {print $6$7}'" +get_uuid_cmd: "{{ 
get_uuid_ufs if fstype == 'ufs' else get_uuid_any }}" diff --git a/tests/integration/targets/filesystem/tasks/create_device.yml b/tests/integration/targets/filesystem/tasks/create_device.yml index 30fd62e33a..ae314221a5 100644 --- a/tests/integration/targets/filesystem/tasks/create_device.yml +++ b/tests/integration/targets/filesystem/tasks/create_device.yml @@ -19,6 +19,17 @@ ansible.builtin.set_fact: dev: "{{ loop_device_cmd.stdout }}" + - when: fstype == 'ufs' + block: + - name: 'Create a memory disk for UFS' + ansible.builtin.command: + cmd: 'mdconfig -a -f {{ dev }}' + register: memory_disk_cmd + + - name: 'Switch to memory disk target for further tasks' + ansible.builtin.set_fact: + dev: "/dev/{{ memory_disk_cmd.stdout }}" + - include_tasks: '{{ action }}.yml' always: @@ -28,10 +39,16 @@ removes: '{{ dev }}' when: fstype == 'lvm' - - name: 'Clean correct device for LVM' + - name: 'Detach memory disk used for UFS' + ansible.builtin.command: + cmd: 'mdconfig -d -u {{ dev }}' + removes: '{{ dev }}' + when: fstype == 'ufs' + + - name: 'Clean correct device for LVM and UFS' ansible.builtin.set_fact: dev: '{{ image_file }}' - when: fstype == 'lvm' + when: fstype in ['lvm', 'ufs'] - name: 'Remove disk image file' ansible.builtin.file: diff --git a/tests/integration/targets/filesystem/tasks/create_fs.yml b/tests/integration/targets/filesystem/tasks/create_fs.yml index de1a9f18a0..3c92197c0a 100644 --- a/tests/integration/targets/filesystem/tasks/create_fs.yml +++ b/tests/integration/targets/filesystem/tasks/create_fs.yml @@ -12,8 +12,8 @@ - 'fs_result is success' - name: "Get UUID of created filesystem" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: uuid @@ -24,8 +24,8 @@ register: fs2_result - name: "Get UUID of the filesystem" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ 
get_uuid_cmd }}" changed_when: false register: uuid2 @@ -44,8 +44,8 @@ register: fs3_result - name: "Get UUID of the new filesystem" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: uuid3 @@ -71,6 +71,11 @@ cmd: 'losetup -c {{ dev }}' when: fstype == 'lvm' + - name: "Resize memory disk for UFS" + ansible.builtin.command: + cmd: 'mdconfig -r -u {{ dev }} -s {{ fssize | int + 1 }}M' + when: fstype == 'ufs' + - name: "Expand filesystem" community.general.filesystem: dev: '{{ dev }}' @@ -79,8 +84,8 @@ register: fs4_result - name: "Get UUID of the filesystem" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: uuid4 diff --git a/tests/integration/targets/filesystem/tasks/freebsd_setup.yml b/tests/integration/targets/filesystem/tasks/freebsd_setup.yml new file mode 100644 index 0000000000..e08beca4a8 --- /dev/null +++ b/tests/integration/targets/filesystem/tasks/freebsd_setup.yml @@ -0,0 +1,10 @@ +--- +- name: "Uninstall e2fsprogs" + ansible.builtin.package: + name: e2fsprogs + state: absent + +- name: "Install util-linux" + ansible.builtin.package: + name: util-linux + state: present diff --git a/tests/integration/targets/filesystem/tasks/main.yml b/tests/integration/targets/filesystem/tasks/main.yml index d836c8a15d..4b2c5bdc2a 100644 --- a/tests/integration/targets/filesystem/tasks/main.yml +++ b/tests/integration/targets/filesystem/tasks/main.yml @@ -35,6 +35,10 @@ # Available on FreeBSD but not on testbed (util-linux conflicts with e2fsprogs): wipefs, mkfs.minix - 'not (ansible_system == "FreeBSD" and item.1 in ["overwrite_another_fs", "remove_fs"])' + # Linux limited support + # Not available: ufs (this is FreeBSD's native fs) + - 'not (ansible_system == "Linux" and item.0.key == "ufs")' + # Other limitations and corner cases # f2fs-tools 
and reiserfs-utils packages not available with RHEL/CentOS on CI @@ -59,3 +63,24 @@ item.0.key == "xfs" and ansible_python.version.major == 2)' loop: "{{ query('dict', tested_filesystems)|product(['create_fs', 'overwrite_another_fs', 'remove_fs'])|list }}" + + +# With FreeBSD extended support (util-linux is not available before 12.2) + +- include_tasks: freebsd_setup.yml + when: + - 'ansible_system == "FreeBSD"' + - 'ansible_distribution_version is version("12.2", ">=")' + +- include_tasks: create_device.yml + vars: + image_file: '{{ remote_tmp_dir }}/img' + fstype: '{{ item.0.key }}' + fssize: '{{ item.0.value.fssize }}' + grow: '{{ item.0.value.grow }}' + action: '{{ item.1 }}' + when: + - 'ansible_system == "FreeBSD"' + - 'ansible_distribution_version is version("12.2", ">=")' + - 'item.0.key in ["xfs", "vfat"]' + loop: "{{ query('dict', tested_filesystems)|product(['create_fs', 'overwrite_another_fs', 'remove_fs'])|list }}" diff --git a/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml b/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml index 4bf92836bb..83a623fa75 100644 --- a/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml +++ b/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml @@ -10,8 +10,8 @@ cmd: 'mkfs.minix {{ dev }}' - name: 'Get UUID of the new filesystem' - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: uuid @@ -23,8 +23,8 @@ ignore_errors: True - name: 'Get UUID of the filesystem' - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: uuid2 @@ -42,8 +42,8 @@ register: fs_result2 - name: 'Get UUID of the new filesystem' - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" 
changed_when: false register: uuid3 diff --git a/tests/integration/targets/filesystem/tasks/remove_fs.yml b/tests/integration/targets/filesystem/tasks/remove_fs.yml index 338d439d60..3127dce559 100644 --- a/tests/integration/targets/filesystem/tasks/remove_fs.yml +++ b/tests/integration/targets/filesystem/tasks/remove_fs.yml @@ -7,8 +7,8 @@ fstype: '{{ fstype }}' - name: "Get filesystem UUID with 'blkid'" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: blkid_ref @@ -27,8 +27,8 @@ check_mode: yes - name: "Get filesystem UUID with 'blkid' (should remain the same)" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false register: blkid @@ -46,8 +46,8 @@ register: wipefs - name: "Get filesystem UUID with 'blkid' (should be empty)" - ansible.builtin.command: - cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}' + ansible.builtin.shell: + cmd: "{{ get_uuid_cmd }}" changed_when: false failed_when: false register: blkid From 111c5de55006cf3af43599eea60edb15b9f66954 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sat, 10 Jul 2021 10:39:51 -0400 Subject: [PATCH 0433/3093] proxmox inventory - fix parsing for offline nodes (#2967) * Initial commit * Adding changelog fragment * Applying initial review suggestions --- .../2967-proxmox_inventory-offline-node-fix.yml | 3 +++ plugins/inventory/proxmox.py | 3 +++ tests/unit/plugins/inventory/test_proxmox.py | 12 +++++++++--- 3 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/2967-proxmox_inventory-offline-node-fix.yml diff --git a/changelogs/fragments/2967-proxmox_inventory-offline-node-fix.yml b/changelogs/fragments/2967-proxmox_inventory-offline-node-fix.yml new file mode 100644 index 0000000000..d52fef4d8a --- /dev/null +++ 
b/changelogs/fragments/2967-proxmox_inventory-offline-node-fix.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - proxmox inventory plugin - fixed parsing failures when some cluster nodes are offline (https://github.com/ansible-collections/community.general/issues/2931). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index be3ecd4365..c99962bcdd 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -369,6 +369,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if node['type'] == 'node': self.inventory.add_child(nodes_group, node['node']) + if node['status'] == 'offline': + continue + # get node IP address if self.get_option("want_proxmox_nodes_ansible_host"): ip = self._get_node_ip(node['node']) diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index c2b0408138..87d47a3cff 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ b/tests/unit/plugins/inventory/test_proxmox.py @@ -9,7 +9,6 @@ __metaclass__ = type import pytest -from ansible.errors import AnsibleError, AnsibleParserError from ansible.inventory.data import InventoryData from ansible_collections.community.general.plugins.inventory.proxmox import InventoryModule @@ -52,7 +51,12 @@ def get_json(url): "disk": 1000, "maxmem": 1000, "uptime": 10000, - "level": ""}] + "level": ""}, + {"type": "node", + "node": "testnode2", + "id": "node/testnode2", + "status": "offline", + "ssl_fingerprint": "yy"}] elif url == "https://localhost:8006/api2/json/pools": # _get_pools return [{"poolid": "test"}] @@ -554,7 +558,6 @@ def test_populate(inventory, mocker): host_qemu_multi_nic = inventory.inventory.get_host('test-qemu-multi-nic') host_qemu_template = inventory.inventory.get_host('test-qemu-template') host_lxc = inventory.inventory.get_host('test-lxc') - host_node = inventory.inventory.get_host('testnode') # check if qemu-test is in the proxmox_pool_test group assert 'proxmox_pool_test' in 
inventory.inventory.groups @@ -584,3 +587,6 @@ def test_populate(inventory, mocker): # check if qemu template is not present assert host_qemu_template is None + + # check that offline node is in inventory + assert inventory.inventory.get_host('testnode2') From 7a41833e599e04d1f24ad90c17843ad5aec8a958 Mon Sep 17 00:00:00 2001 From: Tyler Schwend Date: Sat, 10 Jul 2021 13:24:09 -0400 Subject: [PATCH 0434/3093] feat: support datadog_monitor composite type (#2958) * feat: support datadog_monitor composite type * docs: note support for composite types * lint * lint: line lengths * doc: changelog frag --- .../2958-datadog_monitor_support_composites.yml | 3 +++ .../modules/monitoring/datadog/datadog_monitor.py | 15 +++++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2958-datadog_monitor_support_composites.yml diff --git a/changelogs/fragments/2958-datadog_monitor_support_composites.yml b/changelogs/fragments/2958-datadog_monitor_support_composites.yml new file mode 100644 index 0000000000..394a589994 --- /dev/null +++ b/changelogs/fragments/2958-datadog_monitor_support_composites.yml @@ -0,0 +1,3 @@ +minor_changes: + - datadog_monitor - allow creation of composite datadog monitors + (https://github.com/ansible-collections/community.general/issues/2956). diff --git a/plugins/modules/monitoring/datadog/datadog_monitor.py b/plugins/modules/monitoring/datadog/datadog_monitor.py index 6c0f8cdb02..ab25777ecd 100644 --- a/plugins/modules/monitoring/datadog/datadog_monitor.py +++ b/plugins/modules/monitoring/datadog/datadog_monitor.py @@ -51,7 +51,17 @@ options: description: - The type of the monitor. - The types C(query alert), C(trace-analytics alert) and C(rum alert) were added in community.general 2.1.0. - choices: ['metric alert', 'service check', 'event alert', 'process alert', 'log alert', 'query alert', 'trace-analytics alert', 'rum alert'] + - The type C(composite) was added in community.general 3.4.0. 
+ choices: + - metric alert + - service check + - event alert + - process alert + - log alert + - query alert + - trace-analytics alert + - rum alert + - composite type: str query: description: @@ -209,7 +219,8 @@ def main(): app_key=dict(required=True, no_log=True), state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']), type=dict(choices=['metric alert', 'service check', 'event alert', 'process alert', - 'log alert', 'query alert', 'trace-analytics alert', 'rum alert']), + 'log alert', 'query alert', 'trace-analytics alert', + 'rum alert', 'composite']), name=dict(required=True), query=dict(), notification_message=dict(no_log=True), From c5cbe2943be0665ba1297c588b51d4d275c73ef4 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 11 Jul 2021 11:43:40 +1200 Subject: [PATCH 0435/3093] =?UTF-8?q?module=5Fhelper=20cmd=20-=20added=20f?= =?UTF-8?q?eature=20flag=20to=20control=20whether=20CmdMixin=20adds=20rc,?= =?UTF-8?q?=20out=20and=20err=20t=E2=80=A6=20(#2922)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * added feature flag to control whether CmdMixin adds rc, out and err to the result of the module * added changelog fragment * changed from a global flag to parameters in run_command * updated changelog * fixed brainless copy-paste of yours truly --- .../2922-mh-cmd-output-feature-flag.yml | 2 ++ plugins/module_utils/mh/mixins/cmd.py | 16 ++++++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/2922-mh-cmd-output-feature-flag.yml diff --git a/changelogs/fragments/2922-mh-cmd-output-feature-flag.yml b/changelogs/fragments/2922-mh-cmd-output-feature-flag.yml new file mode 100644 index 0000000000..e071e3413b --- /dev/null +++ b/changelogs/fragments/2922-mh-cmd-output-feature-flag.yml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper module utils - added feature flag parameters to ``CmdMixin`` to control 
whether ``rc``, ``out`` and ``err`` are automatically added to the module output (https://github.com/ansible-collections/community.general/pull/2922). diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py index 0367b6173c..aed4174c4f 100644 --- a/plugins/module_utils/mh/mixins/cmd.py +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -152,7 +152,14 @@ class CmdMixin(object): def process_command_output(self, rc, out, err): return rc, out, err - def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs): + def run_command(self, + extra_params=None, + params=None, + process_output=None, + publish_rc=True, + publish_out=True, + publish_err=True, + *args, **kwargs): self.vars.cmd_args = self._calculate_args(extra_params, params) options = dict(self.run_command_fixed_options) options['check_rc'] = options.get('check_rc', self.check_rc) @@ -166,7 +173,12 @@ class CmdMixin(object): self.update_output(force_lang=self.force_lang) options['environ_update'] = env_update rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options) - self.update_output(rc=rc, stdout=out, stderr=err) + if publish_rc: + self.update_output(rc=rc) + if publish_out: + self.update_output(stdout=out) + if publish_err: + self.update_output(stderr=err) if process_output is None: _process = self.process_command_output else: From d56d34bce6249e4b3cbb7ddd9eb13602b2557fec Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 12 Jul 2021 02:34:59 +1200 Subject: [PATCH 0436/3093] added missing copyright notes to MH integration tests (#2990) --- tests/integration/targets/module_helper/tasks/main.yml | 3 +++ tests/integration/targets/module_helper/tasks/mdepfail.yml | 3 +++ tests/integration/targets/module_helper/tasks/msimple.yml | 3 +++ tests/integration/targets/module_helper/tasks/mstate.yml | 3 +++ 4 files changed, 12 insertions(+) diff --git 
a/tests/integration/targets/module_helper/tasks/main.yml b/tests/integration/targets/module_helper/tasks/main.yml index 05c41c2a38..8ac7c8ae60 100644 --- a/tests/integration/targets/module_helper/tasks/main.yml +++ b/tests/integration/targets/module_helper/tasks/main.yml @@ -1,3 +1,6 @@ +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + - include_tasks: msimple.yml - include_tasks: mdepfail.yml - include_tasks: mstate.yml diff --git a/tests/integration/targets/module_helper/tasks/mdepfail.yml b/tests/integration/targets/module_helper/tasks/mdepfail.yml index d22738a778..ad8fc5d57d 100644 --- a/tests/integration/targets/module_helper/tasks/mdepfail.yml +++ b/tests/integration/targets/module_helper/tasks/mdepfail.yml @@ -1,3 +1,6 @@ +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + - name: test failing dependency mdepfail: a: 123 diff --git a/tests/integration/targets/module_helper/tasks/msimple.yml b/tests/integration/targets/module_helper/tasks/msimple.yml index deb386f2b5..4f032fd177 100644 --- a/tests/integration/targets/module_helper/tasks/msimple.yml +++ b/tests/integration/targets/module_helper/tasks/msimple.yml @@ -1,3 +1,6 @@ +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + - name: test msimple 1 msimple: a: 80 diff --git a/tests/integration/targets/module_helper/tasks/mstate.yml b/tests/integration/targets/module_helper/tasks/mstate.yml index 53329a3c70..c4dfdb9a0e 100644 --- a/tests/integration/targets/module_helper/tasks/mstate.yml +++ b/tests/integration/targets/module_helper/tasks/mstate.yml @@ -1,3 +1,6 @@ +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + - name: test mstate 1 mstate: a: 80 From 5079ef0e82c1fb7acfd23e0dbb82b1ecdfee858d Mon Sep 17 
00:00:00 2001 From: Mark Chappell Date: Sun, 11 Jul 2021 16:38:58 +0200 Subject: [PATCH 0437/3093] feature request template - replace ansible-core with community.general - looks like a C&P error (#2992) --- .github/ISSUE_TEMPLATE/feature_request.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 5f89dec77a..e676ff25ef 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -21,7 +21,7 @@ body: placeholder: >- I am trying to do X with the collection from the main branch on GitHub and I think that implementing a feature Y would be very helpful for me and - every other user of ansible-core because of Z. + every other user of community.general because of Z. validations: required: true From 3fc97bf80aae8c695288e8920df3ea48b151a369 Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Tue, 13 Jul 2021 05:57:16 +0100 Subject: [PATCH 0438/3093] Keycloak: Improve diff mode on keycloak_authentication module (#2963) * Fix diff mode when updating authentication flow with keycloak_authentication module * Add changelog fragment * Fix unit test * Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Ajpantuso * Update changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml Co-authored-by: Ajpantuso * Update documentation of create_or_update_executions function (return tuple instead of dict) * Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Ajpantuso * Update plugins/modules/identity/keycloak/keycloak_authentication.py Co-authored-by: Ajpantuso Co-authored-by: Ajpantuso --- ...e-diff-mode-on-keycloak_authentication.yml | 3 +++ .../keycloak/keycloak_authentication.py | 25 ++++++++++++++----- .../keycloak/test_keycloak_authentication.py | 6 ++--- 3 files changed, 25 insertions(+), 9 deletions(-) 
create mode 100644 changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml diff --git a/changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml b/changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml new file mode 100644 index 0000000000..fa5f133d7d --- /dev/null +++ b/changelogs/fragments/2963-improve-diff-mode-on-keycloak_authentication.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- keycloak_authentication - enhanced diff mode to also return before and after state when the authentication flow is updated (https://github.com/ansible-collections/community.general/pull/2963). diff --git a/plugins/modules/identity/keycloak/keycloak_authentication.py b/plugins/modules/identity/keycloak/keycloak_authentication.py index 9fd04eb70b..8a33409b58 100644 --- a/plugins/modules/identity/keycloak/keycloak_authentication.py +++ b/plugins/modules/identity/keycloak/keycloak_authentication.py @@ -196,9 +196,15 @@ def create_or_update_executions(kc, config, realm='master'): :param config: Representation of the authentication flow including it's executions. :param realm: Realm :return: True if executions have been modified. False otherwise. 
+ :return: tuple (changed, dict(before, after) + WHERE + bool changed indicates if changes have been made + dict(str, str) shows state before and after creation/update """ try: changed = False + after = "" + before = "" if "authenticationExecutions" in config: # Get existing executions on the Keycloak server for this alias existing_executions = kc.get_executions_representation(config, realm=realm) @@ -221,17 +227,21 @@ def create_or_update_executions(kc, config, realm='master'): exclude_key.append(key) # Compare the executions to see if it need changes if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index: - changed = True + exec_found = True + before += str(existing_executions[exec_index]) + '\n' id_to_update = existing_executions[exec_index]["id"] # Remove exec from list in case 2 exec with same name existing_executions[exec_index].clear() elif new_exec["providerId"] is not None: kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm) - changed = True + exec_found = True + after += str(new_exec) + '\n' elif new_exec["displayName"] is not None: kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm) + exec_found = True + after += str(new_exec) + '\n' + if exec_found: changed = True - if changed: if exec_index != -1: # Update the existing execution updated_exec = { @@ -248,7 +258,8 @@ def create_or_update_executions(kc, config, realm='master'): kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm) diff = exec_index - new_exec_index kc.change_execution_priority(updated_exec["id"], diff, realm=realm) - return changed + after += str(kc.get_executions_representation(config, realm=realm)[new_exec_index]) + '\n' + return changed, dict(before=before, after=after) except Exception as e: kc.module.fail_json(msg='Could not create or update executions for authentication flow %s in realm %s: %s' % (config["alias"], realm, str(e))) @@ -358,8 +369,10 @@ def 
main(): # Configure the executions for the flow if module.check_mode: module.exit_json(**result) - if create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm): - result['changed'] = True + changed, diff = create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm) + result['changed'] |= changed + if module._diff: + result['diff'] = diff # Get executions created exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm) if exec_repr is not None: diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py index db0168aa83..91e34eea7b 100644 --- a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py @@ -343,7 +343,7 @@ class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected @@ -434,7 +434,7 @@ class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0) # Verify that the module's changed status matches what is expected @@ -611,7 +611,7 @@ 
class TestKeycloakAuthentication(ModuleTestCase): self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1) self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0) self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1) - self.assertEqual(len(mock_get_executions_representation.mock_calls), 2) + self.assertEqual(len(mock_get_executions_representation.mock_calls), 3) self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 1) # Verify that the module's changed status matches what is expected From 9ffc1ef393b56d35e65f8ea48723b903da3d1fe8 Mon Sep 17 00:00:00 2001 From: omula Date: Wed, 14 Jul 2021 08:24:27 +0200 Subject: [PATCH 0439/3093] [nmcli] add runner and runner-hwaddr-policy for network teaming (#2901) * [nmcli] add runner and runner-hwaddr-policy for network teaming * [nmcli] delete extra space * Update plugins/modules/net_tools/nmcli.py * Update plugins/modules/net_tools/nmcli.py * [nmcli] add changelog fragment * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Amin Vakil Co-authored-by: Oriol MULA VALLS Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Amin Vakil --- changelogs/fragments/2901-nmcli_teaming.yml | 2 ++ plugins/modules/net_tools/nmcli.py | 32 ++++++++++++++++++- .../plugins/modules/net_tools/test_nmcli.py | 26 +++++++++++++++ 3 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/2901-nmcli_teaming.yml diff --git a/changelogs/fragments/2901-nmcli_teaming.yml b/changelogs/fragments/2901-nmcli_teaming.yml new file mode 100644 index 0000000000..4178b9c6f5 --- /dev/null +++ b/changelogs/fragments/2901-nmcli_teaming.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - add ``runner`` and ``runner_hwaddr_policy`` options (https://github.com/ansible-collections/community.general/issues/2901). 
diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 7ed515fc75..1750f9f99f 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -57,7 +57,7 @@ options: choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] mode: description: - - This is the type of device or network connection that you wish to create for a bond, team or bridge. + - This is the type of device or network connection that you wish to create for a bond or bridge. type: str choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ] default: balance-rr @@ -265,6 +265,20 @@ options: frame was received on. type: bool default: yes + runner: + description: + - This is the type of device or network connection that you wish to create for a team. + type: str + choices: [ broadcast, roundrobin, activebackup, loadbalance, lacp ] + default: roundrobin + version_added: 3.4.0 + runner_hwaddr_policy: + description: + - This defines the policy of how hardware addresses of team device and port devices + should be set during the team lifetime. + type: str + choices: [ same_all, by_active, only_active ] + version_added: 3.4.0 vlanid: description: - This is only used with VLAN - VLAN ID in range <0-4095>. 
@@ -719,6 +733,8 @@ class Nmcli(object): self.hairpin = module.params['hairpin'] self.path_cost = module.params['path_cost'] self.mac = module.params['mac'] + self.runner = module.params['runner'] + self.runner_hwaddr_policy = module.params['runner_hwaddr_policy'] self.vlanid = module.params['vlanid'] self.vlandev = module.params['vlandev'] self.flags = module.params['flags'] @@ -826,6 +842,11 @@ class Nmcli(object): 'bridge.priority': self.priority, 'bridge.stp': self.stp, }) + elif self.type == 'team': + options.update({ + 'team.runner': self.runner, + 'team.runner-hwaddr-policy': self.runner_hwaddr_policy, + }) elif self.type == 'bridge-slave': options.update({ 'connection.slave-type': 'bridge', @@ -1214,6 +1235,11 @@ def main(): ageingtime=dict(type='int', default=300), hairpin=dict(type='bool', default=True), path_cost=dict(type='int', default=100), + # team specific vars + runner=dict(type='str', default='roundrobin', + choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']), + # team active-backup runner specific options + runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']), # vlan specific vars vlanid=dict(type='int'), vlandev=dict(type='str'), @@ -1245,6 +1271,10 @@ def main(): # check for issues if nmcli.conn_name is None: nmcli.module.fail_json(msg="Please specify a name for the connection") + # team checks + if nmcli.type == "team": + if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup": + nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup") # team-slave checks if nmcli.type == 'team-slave': if nmcli.master is None: diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index ba526b1d65..63ec60537c 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -279,8 +279,20 @@ ipv4.may-fail: yes ipv6.method: auto 
ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no +team.runner: roundrobin """ +TESTCASE_TEAM_HWADDR_POLICY_FAILS = [ + { + 'type': 'team', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'team0_non_existant', + 'runner_hwaddr_policy': 'by_active', + 'state': 'present', + '_ansible_check_mode': False, + } +] + TESTCASE_TEAM_SLAVE = [ { 'type': 'team-slave', @@ -1053,6 +1065,20 @@ def test_team_connection_unchanged(mocked_team_connection_unchanged, capfd): assert not results['changed'] +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_HWADDR_POLICY_FAILS, indirect=['patch_ansible_module']) +def test_team_connection_create_hwaddr_policy_fails(mocked_generic_connection_create, capfd): + """ + Test : Team connection created + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert results.get('failed') + assert results['msg'] == "Runner-hwaddr-policy is only allowed for runner activebackup" + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_SLAVE, indirect=['patch_ansible_module']) def test_create_team_slave(mocked_generic_connection_create, capfd): """ From 28193b699ba0fc207fc4352b5cf99e46e5d3f707 Mon Sep 17 00:00:00 2001 From: Andrew Klychkov Date: Wed, 14 Jul 2021 09:26:12 +0300 Subject: [PATCH 0440/3093] Update README.md (#3003) --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 6f13fe150c..26a63ab4b2 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,9 @@ See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_gui ## Contributing to this collection -The content of this collection is made by good people like you, a community of individuals collaborating on making the world better through developing automation software. 
+The content of this collection is made by good people just like you, a community of individuals collaborating on making the world better through developing automation software. + +We are actively accepting new contributors. All types of contributions are very welcome. From a3a40f6de316716acc1c61f94683a546202aede1 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 14 Jul 2021 23:04:35 +1200 Subject: [PATCH 0441/3093] pamd - fixed single line issue (#2989) * fixed pamd single line issue * added changelog fragment * supported case for 0 lines, improved test --- .../fragments/2989-pamd-single-line.yaml | 2 + plugins/modules/system/pamd.py | 13 +++-- tests/integration/targets/pamd/aliases | 5 ++ tests/integration/targets/pamd/tasks/main.yml | 56 +++++++++++++++++++ 4 files changed, 72 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/2989-pamd-single-line.yaml create mode 100644 tests/integration/targets/pamd/aliases create mode 100644 tests/integration/targets/pamd/tasks/main.yml diff --git a/changelogs/fragments/2989-pamd-single-line.yaml b/changelogs/fragments/2989-pamd-single-line.yaml new file mode 100644 index 0000000000..359e160785 --- /dev/null +++ b/changelogs/fragments/2989-pamd-single-line.yaml @@ -0,0 +1,2 @@ +bugfixes: + - pamd - fixed problem with files containing only one or two lines (https://github.com/ansible-collections/community.general/issues/2925). 
diff --git a/plugins/modules/system/pamd.py b/plugins/modules/system/pamd.py index 39b3f32e44..738a23ee43 100644 --- a/plugins/modules/system/pamd.py +++ b/plugins/modules/system/pamd.py @@ -733,14 +733,19 @@ class PamdService(object): lines = [] current_line = self._head + mark = "# Updated by Ansible - %s" % datetime.now().isoformat() while current_line is not None: lines.append(str(current_line)) current_line = current_line.next - if lines[1].startswith("# Updated by Ansible"): - lines.pop(1) - - lines.insert(1, "# Updated by Ansible - " + datetime.now().isoformat()) + if len(lines) <= 1: + lines.insert(0, "") + lines.insert(1, mark) + else: + if lines[1].startswith("# Updated by Ansible"): + lines[1] = mark + else: + lines.insert(1, mark) return '\n'.join(lines) + '\n' diff --git a/tests/integration/targets/pamd/aliases b/tests/integration/targets/pamd/aliases new file mode 100644 index 0000000000..abe0a21e22 --- /dev/null +++ b/tests/integration/targets/pamd/aliases @@ -0,0 +1,5 @@ +shippable/posix/group1 +skip/aix +skip/freebsd +skip/osx +skip/macos diff --git a/tests/integration/targets/pamd/tasks/main.yml b/tests/integration/targets/pamd/tasks/main.yml new file mode 100644 index 0000000000..3e0fb4ee32 --- /dev/null +++ b/tests/integration/targets/pamd/tasks/main.yml @@ -0,0 +1,56 @@ +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +- name: Set value for temp limit configuration + set_fact: + test_pamd_file: "/tmp/pamd_file" + +- name: Copy temporary pam.d file + copy: + content: "session required pam_lastlog.so silent showfailed" + dest: "{{ test_pamd_file }}" + +- name: Test working on a single-line file works (2925) + community.general.pamd: + path: /tmp + name: pamd_file + type: session + control: required + module_path: pam_lastlog.so + module_arguments: silent + state: args_absent + register: pamd_file_output + +- name: Check if changes made + assert: + that: + - 
pamd_file_output is changed + +- name: Copy temporary pam.d file + copy: + content: "" + dest: "{{ test_pamd_file }}" + +# This test merely demonstrates that, as-is, module will not perform any changes on an empty file +# All the existing values for "state" will first search for a rule matching type, control, module_path +# and will not perform any change whatsoever if no existing rules match. +- name: Test working on a empty file works (2925) + community.general.pamd: + path: /tmp + name: pamd_file + type: session + control: required + module_path: pam_lastlog.so + module_arguments: silent + register: pamd_file_output_empty + +- name: Read back the file + slurp: + src: "{{ test_pamd_file }}" + register: pamd_file_slurp + +- name: Check if changes made + assert: + that: + - pamd_file_output_empty is not changed + - pamd_file_slurp.content|b64decode == '' From ea822c7bdd9cbeccc4541c2f95280442c6f213ab Mon Sep 17 00:00:00 2001 From: Scott Seekamp Date: Fri, 16 Jul 2021 11:02:34 -0600 Subject: [PATCH 0442/3093] Redfish Bootoverride Disable behaves incorrectly (#3006) * https://github.com/ansible-collections/community.general/issues/3005 Bypass the boot device argument check when the command is: DisableBootOverride as it isn't needed to perform this operation. 
* Add changelog fragment --- ...dfish_command-bootoverride-argument-check.yaml | 3 +++ plugins/module_utils/redfish_utils.py | 15 ++++++++------- 2 files changed, 11 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/3006-redfish_command-bootoverride-argument-check.yaml diff --git a/changelogs/fragments/3006-redfish_command-bootoverride-argument-check.yaml b/changelogs/fragments/3006-redfish_command-bootoverride-argument-check.yaml new file mode 100644 index 0000000000..680d3dea83 --- /dev/null +++ b/changelogs/fragments/3006-redfish_command-bootoverride-argument-check.yaml @@ -0,0 +1,3 @@ +bugfixes: + - redfish_command - fix extraneous error caused by missing ``bootdevice`` argument + when using the ``DisableBootOverride`` sub-command (https://github.com/ansible-collections/community.general/issues/3005). diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index c39c02a42e..8d293f0056 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1582,13 +1582,14 @@ class RedfishUtils(object): boot = data[key] - annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues' - if annotation in boot: - allowable_values = boot[annotation] - if isinstance(allowable_values, list) and bootdevice not in allowable_values: - return {'ret': False, - 'msg': "Boot device %s not in list of allowable values (%s)" % - (bootdevice, allowable_values)} + if override_enabled != 'Disabled': + annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues' + if annotation in boot: + allowable_values = boot[annotation] + if isinstance(allowable_values, list) and bootdevice not in allowable_values: + return {'ret': False, + 'msg': "Boot device %s not in list of allowable values (%s)" % + (bootdevice, allowable_values)} # read existing values cur_enabled = boot.get('BootSourceOverrideEnabled') From 9b1c6f0743f87264ce658b5d548e358dfe9af740 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 
16 Jul 2021 19:29:00 +0200 Subject: [PATCH 0443/3093] Enable tests (#3015) * Enable tests. * Fix error message check. * Fix boolean tests. * Adjust to Python version. --- tests/integration/targets/filter_groupby/aliases | 2 +- .../integration/targets/filter_groupby/tasks/main.yml | 2 +- tests/integration/targets/module_helper/aliases | 2 +- .../targets/module_helper/tasks/mdepfail.yml | 4 ++-- .../targets/module_helper/tasks/msimple.yml | 10 +++++----- .../integration/targets/module_helper/tasks/mstate.yml | 10 +++++----- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/tests/integration/targets/filter_groupby/aliases b/tests/integration/targets/filter_groupby/aliases index 6e79abdd02..3e81d77f98 100644 --- a/tests/integration/targets/filter_groupby/aliases +++ b/tests/integration/targets/filter_groupby/aliases @@ -1,2 +1,2 @@ -shippable/posix/group4 +shippable/posix/group3 skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/filter_groupby/tasks/main.yml b/tests/integration/targets/filter_groupby/tasks/main.yml index 29036a3bc5..219e047d4d 100644 --- a/tests/integration/targets/filter_groupby/tasks/main.yml +++ b/tests/integration/targets/filter_groupby/tasks/main.yml @@ -42,4 +42,4 @@ - assert: that: - - result.msg == "Multiple sequence entries have attribute value 'a'" + - result.msg == "Multiple sequence entries have attribute value 'a'" or result.msg == "Multiple sequence entries have attribute value u'a'" diff --git a/tests/integration/targets/module_helper/aliases b/tests/integration/targets/module_helper/aliases index 3005e4b26d..765b70da79 100644 --- a/tests/integration/targets/module_helper/aliases +++ b/tests/integration/targets/module_helper/aliases @@ -1 +1 @@ -shippable/posix/group4 +shippable/posix/group2 diff --git a/tests/integration/targets/module_helper/tasks/mdepfail.yml b/tests/integration/targets/module_helper/tasks/mdepfail.yml index 
ad8fc5d57d..01523513a3 100644 --- a/tests/integration/targets/module_helper/tasks/mdepfail.yml +++ b/tests/integration/targets/module_helper/tasks/mdepfail.yml @@ -10,8 +10,8 @@ - name: assert failing dependency assert: that: - - result.failed is true + - result is failed - '"Failed to import" in result.msg' - '"nopackagewiththisname" in result.msg' - - '"ModuleNotFoundError:" in result.exception' + - '"ModuleNotFoundError:" in result.exception or "ImportError:" in result.exception' - '"nopackagewiththisname" in result.exception' diff --git a/tests/integration/targets/module_helper/tasks/msimple.yml b/tests/integration/targets/module_helper/tasks/msimple.yml index 4f032fd177..4d2ff9b798 100644 --- a/tests/integration/targets/module_helper/tasks/msimple.yml +++ b/tests/integration/targets/module_helper/tasks/msimple.yml @@ -11,7 +11,7 @@ that: - simple1.a == 80 - simple1.abc == "abc" - - simple1.changed is false + - simple1 is not changed - simple1.value is none - name: test msimple 2 @@ -26,8 +26,8 @@ - simple2.a == 101 - 'simple2.msg == "Module failed with exception: a >= 100"' - simple2.abc == "abc" - - simple2.failed is true - - simple2.changed is false + - simple2 is failed + - simple2 is not changed - simple2.value is none - name: test msimple 3 @@ -42,7 +42,7 @@ - simple3.a == 2 - simple3.b == "potatoespotatoes" - simple3.c == "NoneNone" - - simple3.changed is false + - simple3 is not changed - name: test msimple 4 msimple: @@ -54,4 +54,4 @@ that: - simple4.c == "abc change" - simple4.abc == "changed abc" - - simple4.changed is true + - simple4 is changed diff --git a/tests/integration/targets/module_helper/tasks/mstate.yml b/tests/integration/targets/module_helper/tasks/mstate.yml index c4dfdb9a0e..6476f76429 100644 --- a/tests/integration/targets/module_helper/tasks/mstate.yml +++ b/tests/integration/targets/module_helper/tasks/mstate.yml @@ -16,7 +16,7 @@ - state1.b == "banana" - state1.c == "cashew" - state1.result == "abc" - - state1.changed is false + - 
state1 is not changed - name: test mstate 2 mstate: @@ -32,7 +32,7 @@ - state2.b == "banana" - state2.c == "cashew" - state2.result == "80bananacashew" - - state2.changed is true + - state2 is changed - name: test mstate 3 mstate: @@ -47,7 +47,7 @@ - state3.a == 3 - state3.b == "banana" - state3.result == "bananabananabanana" - - state3.changed is true + - state3 is changed - name: test mstate 4 mstate: @@ -62,7 +62,7 @@ - state4.a == 4 - state4.c == "cashew" - state4.result == "cashewcashewcashewcashew" - - state4.changed is true + - state4 is changed - name: test mstate 5 mstate: @@ -79,4 +79,4 @@ - state5.b == "foo" - state5.c == "bar" - state5.result == "foobarfoobarfoobarfoobarfoobar" - - state5.changed is true + - state5 is changed From 27ba98a68eafe5f1563cafb7b720f02d3f7c1f12 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Fri, 16 Jul 2021 19:52:09 +0200 Subject: [PATCH 0444/3093] Check targets (#3019) * Add extra sanity test to check aliases files. * Remove invalid target name. 
--- tests/integration/targets/mqtt/aliases | 1 - tests/sanity/extra/aliases.json | 11 +++++ tests/sanity/extra/aliases.py | 63 ++++++++++++++++++++++++++ 3 files changed, 74 insertions(+), 1 deletion(-) create mode 100644 tests/sanity/extra/aliases.json create mode 100755 tests/sanity/extra/aliases.py diff --git a/tests/integration/targets/mqtt/aliases b/tests/integration/targets/mqtt/aliases index 0a4db0379e..9a30a5a281 100644 --- a/tests/integration/targets/mqtt/aliases +++ b/tests/integration/targets/mqtt/aliases @@ -1,4 +1,3 @@ -notification/mqtt shippable/posix/group1 skip/aix skip/osx diff --git a/tests/sanity/extra/aliases.json b/tests/sanity/extra/aliases.json new file mode 100644 index 0000000000..dabdcd6a1d --- /dev/null +++ b/tests/sanity/extra/aliases.json @@ -0,0 +1,11 @@ +{ + "include_symlinks": false, + "prefixes": [ + ".azure-pipelines/azure-pipelines.yml", + "tests/integration/targets/" + ], + "output": "path-message", + "requirements": [ + "PyYAML" + ] +} diff --git a/tests/sanity/extra/aliases.py b/tests/sanity/extra/aliases.py new file mode 100755 index 0000000000..8791238f5f --- /dev/null +++ b/tests/sanity/extra/aliases.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Check extra collection docs with antsibull-lint.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import sys + +import yaml + + +def main(): + """Main entry point.""" + paths = sys.argv[1:] or sys.stdin.read().splitlines() + paths = [path for path in paths if path.endswith('/aliases')] + + with open('.azure-pipelines/azure-pipelines.yml', 'rb') as f: + azp = yaml.safe_load(f) + + allowed_targets = set(['shippable/cloud/group1']) + for stage in azp['stages']: + if stage['stage'].startswith(('Sanity', 'Unit', 'Cloud', 'Summary')): + continue + for job in stage['jobs']: + for group in 
job['parameters']['groups']: + allowed_targets.add('shippable/posix/group{0}'.format(group)) + + for path in paths: + targets = [] + skip = False + with open(path, 'r') as f: + for line in f: + if '#' in line: + line = line[:line.find('#')] + line = line.strip() + if line.startswith('needs/'): + continue + if line.startswith('skip/'): + continue + if line.startswith('cloud/'): + continue + if line in ('unsupported', 'disabled', 'hidden'): + skip = True + if line in ('destructive', ): + continue + if '/' not in line: + continue + targets.append(line) + if skip: + continue + if not targets: + if 'targets/setup_' in path: + continue + print('%s: %s' % (path, 'found no targets')) + for target in targets: + if target not in allowed_targets: + print('%s: %s' % (path, 'found invalid target "{0}"'.format(target))) + + +if __name__ == '__main__': + main() From 7734430f23a8c2472583543d4e4919aa37bf632f Mon Sep 17 00:00:00 2001 From: Werner Dijkerman Date: Sat, 17 Jul 2021 08:49:09 +0200 Subject: [PATCH 0445/3093] Added module for creating protected branches (#2781) * Added module for creating protected branches * Applied some changes due to comments and added a test that currently fails * Changing no_access to nobody due to comment on PR * Changing the description to clarify it a bit more * Added working tests for module 'gitlab_protected_branch' * Fixing lint issues * Added doc that minimum of v2.3.0 is needed to work correctly * Fixed the requirements notation * Check the version of the module * Hopefully fixed the tests by skipping it when lower version of 2.3.0 is installed * Fix lint issues * Applying changes due to comments in PR * Remove commented code * Removing the trailing dot ... 
Co-authored-by: jenkins-x-bot Co-authored-by: Werner Dijkerman --- plugins/modules/gitlab_protected_branch.py | 1 + .../gitlab/gitlab_protected_branch.py | 201 ++++++++++++++++++ .../modules/source_control/gitlab/gitlab.py | 38 +++- .../gitlab/test_gitlab_protected_branch.py | 81 +++++++ 4 files changed, 319 insertions(+), 2 deletions(-) create mode 120000 plugins/modules/gitlab_protected_branch.py create mode 100644 plugins/modules/source_control/gitlab/gitlab_protected_branch.py create mode 100644 tests/unit/plugins/modules/source_control/gitlab/test_gitlab_protected_branch.py diff --git a/plugins/modules/gitlab_protected_branch.py b/plugins/modules/gitlab_protected_branch.py new file mode 120000 index 0000000000..7af5b500ce --- /dev/null +++ b/plugins/modules/gitlab_protected_branch.py @@ -0,0 +1 @@ +source_control/gitlab/gitlab_protected_branch.py \ No newline at end of file diff --git a/plugins/modules/source_control/gitlab/gitlab_protected_branch.py b/plugins/modules/source_control/gitlab/gitlab_protected_branch.py new file mode 100644 index 0000000000..f61f2b9fa1 --- /dev/null +++ b/plugins/modules/source_control/gitlab/gitlab_protected_branch.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: gitlab_protected_branch +short_description: (un)Marking existing branches for protection +version_added: 3.4.0 +description: + - (un)Marking existing branches for protection. +author: + - "Werner Dijkerman (@dj-wasabi)" +requirements: + - python >= 2.7 + - python-gitlab >= 2.3.0 +extends_documentation_fragment: +- community.general.auth_basic + +options: + state: + description: + - Create or delete proteced branch. 
+ default: present + type: str + choices: ["present", "absent"] + api_token: + description: + - GitLab access token with API permissions. + required: true + type: str + project: + description: + - The path and name of the project. + required: true + type: str + name: + description: + - The name of the branch that needs to be protected. + - Can make use a wildcard charachter for like C(production/*) or just have C(main) or C(develop) as value. + required: true + type: str + merge_access_levels: + description: + - Access levels allowed to merge. + default: maintainer + type: str + choices: ["maintainer", "developer", "nobody"] + push_access_level: + description: + - Access levels allowed to push. + default: maintainer + type: str + choices: ["maintainer", "developer", "nobody"] +''' + + +EXAMPLES = ''' +- name: Create protected branch on main + community.general.gitlab_protected_branch: + api_url: https://gitlab.com + api_token: secret_access_token + project: "dj-wasabi/collection.general" + name: main + merge_access_levels: maintainer + push_access_level: nobody + +''' + +RETURN = ''' +''' + +import traceback + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.api import basic_auth_argument_spec +from distutils.version import LooseVersion + +GITLAB_IMP_ERR = None +try: + import gitlab + HAS_GITLAB_PACKAGE = True +except Exception: + GITLAB_IMP_ERR = traceback.format_exc() + HAS_GITLAB_PACKAGE = False + +from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication + + +class GitlabProtectedBranch(object): + + def __init__(self, module, project, gitlab_instance): + self.repo = gitlab_instance + self._module = module + self.project = self.get_project(project) + self.ACCESS_LEVEL = { + 'nobody': gitlab.NO_ACCESS, + 'developer': gitlab.DEVELOPER_ACCESS, + 'maintainer': gitlab.MAINTAINER_ACCESS + } + + def get_project(self, project_name): + return 
self.repo.projects.get(project_name) + + def protected_branch_exist(self, name): + try: + return self.project.protectedbranches.get(name) + except Exception as e: + return False + + def create_protected_branch(self, name, merge_access_levels, push_access_level): + if self._module.check_mode: + return True + merge = self.ACCESS_LEVEL[merge_access_levels] + push = self.ACCESS_LEVEL[push_access_level] + self.project.protectedbranches.create({ + 'name': name, + 'merge_access_level': merge, + 'push_access_level': push + }) + + def compare_protected_branch(self, name, merge_access_levels, push_access_level): + configured_merge = self.ACCESS_LEVEL[merge_access_levels] + configured_push = self.ACCESS_LEVEL[push_access_level] + current = self.protected_branch_exist(name=name) + current_merge = current.merge_access_levels[0]['access_level'] + current_push = current.push_access_levels[0]['access_level'] + if current: + if current.name == name and current_merge == configured_merge and current_push == configured_push: + return True + return False + + def delete_protected_branch(self, name): + if self._module.check_mode: + return True + return self.project.protectedbranches.delete(name) + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update( + api_token=dict(type='str', required=True, no_log=True), + project=dict(type='str', required=True), + name=dict(type='str', required=True), + merge_access_levels=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), + push_access_level=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_password', 'api_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token'] + ], + 
supports_check_mode=True + ) + + project = module.params['project'] + name = module.params['name'] + merge_access_levels = module.params['merge_access_levels'] + push_access_level = module.params['push_access_level'] + state = module.params['state'] + + if not HAS_GITLAB_PACKAGE: + module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + + gitlab_version = gitlab.__version__ + if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): + module.fail_json(msg="community.general.gitlab_proteched_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." + " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version) + + gitlab_instance = gitlabAuthentication(module) + this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance) + + p_branch = this_gitlab.protected_branch_exist(name=name) + if not p_branch and state == "present": + this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level) + module.exit_json(changed=True, msg="Created the proteched branch.") + elif p_branch and state == "present": + if not this_gitlab.compare_protected_branch(name, merge_access_levels, push_access_level): + this_gitlab.delete_protected_branch(name=name) + this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level) + module.exit_json(changed=True, msg="Recreated the proteched branch.") + elif p_branch and state == "absent": + this_gitlab.delete_protected_branch(name=name) + module.exit_json(changed=True, msg="Deleted the proteched branch.") + module.exit_json(changed=False, msg="No changes are needed.") + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/source_control/gitlab/gitlab.py b/tests/unit/plugins/modules/source_control/gitlab/gitlab.py index eb8099d37b..5feff78b43 100644 --- 
a/tests/unit/plugins/modules/source_control/gitlab/gitlab.py +++ b/tests/unit/plugins/modules/source_control/gitlab/gitlab.py @@ -13,7 +13,7 @@ from httmock import urlmatch # noqa from ansible_collections.community.general.tests.unit.compat import unittest -from gitlab import Gitlab +import gitlab class FakeAnsibleModule(object): @@ -33,7 +33,7 @@ class GitlabModuleTestCase(unittest.TestCase): self.mock_module = FakeAnsibleModule() - self.gitlab_instance = Gitlab("http://localhost", private_token="private_token", api_version=4) + self.gitlab_instance = gitlab.Gitlab("http://localhost", private_token="private_token", api_version=4) # Python 2.7+ is needed for python-gitlab @@ -45,6 +45,14 @@ def python_version_match_requirement(): return sys.version_info >= GITLAB_MINIMUM_PYTHON_VERSION +def python_gitlab_module_version(): + return gitlab.__version__ + + +def python_gitlab_version_match_requirement(): + return "2.3.0" + + # Skip unittest test case if python version don't match requirement def unitest_python_version_check_requirement(unittest_testcase): if not python_version_match_requirement(): @@ -467,6 +475,32 @@ def resp_delete_project(url, request): return response(204, content, headers, None, 5, request) +@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="get") +def resp_get_protected_branch(url, request): + headers = {'content-type': 'application/json'} + content = ('{"id": 1, "name": "master", "push_access_levels": [{"access_level": 40, "access_level_description": "Maintainers"}],' + '"merge_access_levels": [{"access_level": 40, "access_level_description": "Maintainers"}],' + '"allow_force_push":false, "code_owner_approval_required": false}') + content = content.encode("utf-8") + return response(200, content, headers, None, 5, request) + + +@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="get") +def resp_get_protected_branch_not_exist(url, request): 
+ headers = {'content-type': 'application/json'} + content = ('') + content = content.encode("utf-8") + return response(404, content, headers, None, 5, request) + + +@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="delete") +def resp_delete_protected_branch(url, request): + headers = {'content-type': 'application/json'} + content = ('') + content = content.encode("utf-8") + return response(204, content, headers, None, 5, request) + + ''' HOOK API ''' diff --git a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_protected_branch.py b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_protected_branch.py new file mode 100644 index 0000000000..026efb19d8 --- /dev/null +++ b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_protected_branch.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest +from distutils.version import LooseVersion + +from ansible_collections.community.general.plugins.modules.source_control.gitlab.gitlab_protected_branch import GitlabProtectedBranch + + +def _dummy(x): + """Dummy function. 
Only used as a placeholder for toplevel definitions when the test is going + to be skipped anyway""" + return x + + +pytestmark = [] +try: + from .gitlab import (GitlabModuleTestCase, + python_version_match_requirement, python_gitlab_module_version, + python_gitlab_version_match_requirement, + resp_get_protected_branch, resp_get_project_by_name, + resp_get_protected_branch_not_exist, + resp_delete_protected_branch, resp_get_user) + + # GitLab module requirements + if python_version_match_requirement(): + from gitlab.v4.objects import Project + gitlab_req_version = python_gitlab_version_match_requirement() + gitlab_module_version = python_gitlab_module_version() + if LooseVersion(gitlab_module_version) < LooseVersion(gitlab_req_version): + pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing (Wrong version)")) +except ImportError: + pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing")) + +# Unit tests requirements +try: + from httmock import with_httmock # noqa +except ImportError: + pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing")) + with_httmock = _dummy + + +class TestGitlabProtectedBranch(GitlabModuleTestCase): + @with_httmock(resp_get_project_by_name) + @with_httmock(resp_get_user) + def setUp(self): + super(TestGitlabProtectedBranch, self).setUp() + + self.gitlab_instance.user = self.gitlab_instance.users.get(1) + self.moduleUtil = GitlabProtectedBranch(module=self.mock_module, project="foo-bar/diaspora-client", gitlab_instance=self.gitlab_instance) + + @with_httmock(resp_get_protected_branch) + def test_protected_branch_exist(self): + rvalue = self.moduleUtil.protected_branch_exist(name="master") + self.assertEqual(rvalue.name, "master") + + @with_httmock(resp_get_protected_branch_not_exist) + def test_protected_branch_exist_not_exist(self): + rvalue = self.moduleUtil.protected_branch_exist(name="master") + self.assertEqual(rvalue, False) + + 
@with_httmock(resp_get_protected_branch) + def test_compare_protected_branch(self): + rvalue = self.moduleUtil.compare_protected_branch(name="master", merge_access_levels="maintainer", push_access_level="maintainer") + self.assertEqual(rvalue, True) + + @with_httmock(resp_get_protected_branch) + def test_compare_protected_branch_different_settings(self): + rvalue = self.moduleUtil.compare_protected_branch(name="master", merge_access_levels="developer", push_access_level="maintainer") + self.assertEqual(rvalue, False) + + @with_httmock(resp_get_protected_branch) + @with_httmock(resp_delete_protected_branch) + def test_delete_protected_branch(self): + rvalue = self.moduleUtil.delete_protected_branch(name="master") + self.assertEqual(rvalue, None) From 7b9687f75885f168de52f935dc36f1d35b8b7ed7 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 19 Jul 2021 03:36:59 +0200 Subject: [PATCH 0446/3093] Fix snap's channel option. (#3028) --- changelogs/fragments/3028-snap-channel.yml | 2 ++ plugins/modules/packaging/os/snap.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3028-snap-channel.yml diff --git a/changelogs/fragments/3028-snap-channel.yml b/changelogs/fragments/3028-snap-channel.yml new file mode 100644 index 0000000000..c3aea4b5a0 --- /dev/null +++ b/changelogs/fragments/3028-snap-channel.yml @@ -0,0 +1,2 @@ +bugfixes: +- "snap - fix formatting of ``--channel`` argument when the ``channel`` option is used (https://github.com/ansible-collections/community.general/pull/3028)." 
diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py index 6da8b0e766..de6fedccdc 100644 --- a/plugins/modules/packaging/os/snap.py +++ b/plugins/modules/packaging/os/snap.py @@ -145,7 +145,7 @@ class Snap(CmdStateModuleHelper): actionable_snaps=dict(fmt=lambda v: v), state=dict(fmt=_state_map), classic=dict(fmt="--classic", style=ArgFormat.BOOLEAN), - channel=dict(fmt=lambda v: [] if v == 'stable' else ['--channel', '{0}']), + channel=dict(fmt=lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)]), ) check_rc = False From 9fd2ba60df27b3ae2b194f4e0372c438957be572 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Mon, 19 Jul 2021 02:14:23 -0400 Subject: [PATCH 0447/3093] archive - staging idempotency fix (#2987) * Initial Commit * Fixing PY26 filter * Adding changelog fragment * Removing checksum related code * Removing list comparisons due to Jinja errors * Applying review suggestions * Applying review suggestions - typos --- .../2987-archive-stage-idempotency-fix.yml | 4 + plugins/modules/files/archive.py | 44 +++--- .../targets/archive/tasks/main.yml | 7 + .../targets/archive/tests/core.yml | 2 +- .../targets/archive/tests/idempotency.yml | 141 ++++++++++++++++++ 5 files changed, 179 insertions(+), 19 deletions(-) create mode 100644 changelogs/fragments/2987-archive-stage-idempotency-fix.yml create mode 100644 tests/integration/targets/archive/tests/idempotency.yml diff --git a/changelogs/fragments/2987-archive-stage-idempotency-fix.yml b/changelogs/fragments/2987-archive-stage-idempotency-fix.yml new file mode 100644 index 0000000000..5c9e980935 --- /dev/null +++ b/changelogs/fragments/2987-archive-stage-idempotency-fix.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - archive - refactoring prior to fix for idempotency checks. The fix will be a breaking change and only appear + in community.general 4.0.0 (https://github.com/ansible-collections/community.general/pull/2987). 
diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 822ea1cd9d..91a8f688f5 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -298,6 +298,8 @@ class Archive(object): msg='Error, must specify "dest" when archiving multiple files or trees' ) + self.original_size = self.destination_size() + def add(self, path, archive_name): try: self._add(_to_native_ascii(path), _to_native(archive_name)) @@ -315,7 +317,7 @@ class Archive(object): self.destination_state = STATE_ARCHIVED else: try: - f_out = self._open_compressed_file(_to_native_ascii(self.destination)) + f_out = self._open_compressed_file(_to_native_ascii(self.destination), 'wb') with open(path, 'rb') as f_in: shutil.copyfileobj(f_in, f_out) f_out.close() @@ -368,9 +370,15 @@ class Archive(object): msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors)) ) + def compare_with_original(self): + self.changed |= self.original_size != self.destination_size() + def destination_exists(self): return self.destination and os.path.exists(self.destination) + def destination_readable(self): + return self.destination and os.access(self.destination, os.R_OK) + def destination_size(self): return os.path.getsize(self.destination) if self.destination_exists() else 0 @@ -407,6 +415,15 @@ class Archive(object): def has_unfound_targets(self): return bool(self.not_found) + def remove_single_target(self, path): + try: + os.remove(path) + except OSError as e: + self.module.fail_json( + path=_to_native(path), + msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc() + ) + def remove_targets(self): for path in self.successes: if os.path.exists(path): @@ -453,14 +470,14 @@ class Archive(object): 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths], } - def _open_compressed_file(self, path): + def _open_compressed_file(self, path, mode): f = None if self.format == 'gz': - f = 
gzip.open(path, 'wb') + f = gzip.open(path, mode) elif self.format == 'bz2': - f = bz2.BZ2File(path, 'wb') + f = bz2.BZ2File(path, mode) elif self.format == 'xz': - f = lzma.LZMAFile(path, 'wb') + f = lzma.LZMAFile(path, mode) else: self.module.fail_json(msg="%s is not a valid format" % self.format) @@ -542,7 +559,7 @@ class TarArchive(Archive): return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo def py26_filter(path): - return matches_exclusion_patterns(path, self.exclusion_patterns) + return legacy_filter(path, self.exclusion_patterns) if PY27: self.file.add(path, archive_name, recursive=False, filter=py27_filter) @@ -580,7 +597,6 @@ def main(): check_mode = module.check_mode archive = get_archive(module) - size = archive.destination_size() archive.find_targets() if not archive.has_targets(): @@ -592,10 +608,9 @@ def main(): else: archive.add_targets() archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED + archive.compare_with_original() if archive.remove: archive.remove_targets() - if archive.destination_size() != size: - archive.changed = True else: if check_mode: if not archive.destination_exists(): @@ -603,16 +618,9 @@ def main(): else: path = archive.paths[0] archive.add_single_target(path) - if archive.destination_size() != size: - archive.changed = True + archive.compare_with_original() if archive.remove: - try: - os.remove(path) - except OSError as e: - module.fail_json( - path=_to_native(path), - msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc() - ) + archive.remove_single_target(path) if archive.destination_exists(): archive.update_permissions() diff --git a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml index e0757b0ead..1e2c9f9c27 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -121,6 +121,13 @@ loop_control: 
loop_var: format +- name: Run Idempotency tests + include_tasks: + file: ../tests/idempotency.yml + loop: "{{ formats }}" + loop_control: + loop_var: format + # Test cleanup - name: Remove backports.lzma if previously installed (pip) pip: name=backports.lzma state=absent diff --git a/tests/integration/targets/archive/tests/core.yml b/tests/integration/targets/archive/tests/core.yml index f12e5083cc..d008e9c122 100644 --- a/tests/integration/targets/archive/tests/core.yml +++ b/tests/integration/targets/archive/tests/core.yml @@ -41,7 +41,7 @@ - archive_no_options is changed - "archive_no_options.dest_state == 'archive'" - "{{ archive_no_options.archived | length }} == 3" - - + - name: Remove the archive - no options ({{ format }}) file: path: "{{ output_dir }}/archive_no_options.{{ format }}" diff --git a/tests/integration/targets/archive/tests/idempotency.yml b/tests/integration/targets/archive/tests/idempotency.yml new file mode 100644 index 0000000000..f53f768164 --- /dev/null +++ b/tests/integration/targets/archive/tests/idempotency.yml @@ -0,0 +1,141 @@ +--- +- name: Archive - file content idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + format: "{{ format }}" + register: file_content_idempotency_before + +- name: Modify file - file content idempotency ({{ format }}) + lineinfile: + line: bar.txt + regexp: "^foo.txt$" + path: "{{ output_dir }}/foo.txt" + +- name: Archive second time - file content idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + format: "{{ format }}" + register: file_content_idempotency_after + +# After idempotency fix result will be reliably changed for all formats +- name: Assert task status is changed - file content idempotency ({{ format }}) + assert: + that: + - file_content_idempotency_after is not changed + when: "format in ('tar', 'zip')" + +- 
name: Remove archive - file content idempotency ({{ format }}) + file: + path: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + state: absent + +- name: Modify file back - file content idempotency ({{ format }}) + lineinfile: + line: foo.txt + regexp: "^bar.txt$" + path: "{{ output_dir }}/foo.txt" + +- name: Archive - file name idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + format: "{{ format }}" + register: file_name_idempotency_before + +- name: Rename file - file name idempotency ({{ format }}) + command: "mv {{ output_dir}}/foo.txt {{ output_dir }}/fii.txt" + +- name: Archive again - file name idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/*.txt" + dest: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + format: "{{ format }}" + register: file_name_idempotency_after + +# After idempotency fix result will be reliably changed for all formats +- name: Check task status - file name idempotency ({{ format }}) + assert: + that: + - file_name_idempotency_after is not changed + when: "format in ('tar', 'zip')" + +- name: Remove archive - file name idempotency ({{ format }}) + file: + path: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + state: absent + +- name: Rename file back - file name idempotency ({{ format }}) + command: "mv {{ output_dir }}/fii.txt {{ output_dir }}/foo.txt" + +- name: Archive - single file content idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + format: "{{ format }}" + register: single_file_content_idempotency_before + +- name: Modify file - single file content idempotency ({{ format }}) + lineinfile: + line: bar.txt + regexp: "^foo.txt$" + path: "{{ output_dir }}/foo.txt" + +- name: Archive second time - single file content idempotency ({{ format }}) + archive: + path: "{{ 
output_dir }}/foo.txt" + dest: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + format: "{{ format }}" + register: single_file_content_idempotency_after + +# After idempotency fix result will be reliably changed for all formats +- name: Assert task status is changed - single file content idempotency ({{ format }}) + assert: + that: + - single_file_content_idempotency_after is not changed + when: "format in ('tar', 'zip')" + +- name: Remove archive - single file content idempotency ({{ format }}) + file: + path: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + state: absent + +- name: Modify file back - single file content idempotency ({{ format }}) + lineinfile: + line: foo.txt + regexp: "^bar.txt$" + path: "{{ output_dir }}/foo.txt" + +- name: Archive - single file name idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/foo.txt" + dest: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + format: "{{ format }}" + register: single_file_name_idempotency_before + +- name: Rename file - single file name idempotency ({{ format }}) + command: "mv {{ output_dir}}/foo.txt {{ output_dir }}/fii.txt" + +- name: Archive again - single file name idempotency ({{ format }}) + archive: + path: "{{ output_dir }}/fii.txt" + dest: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + format: "{{ format }}" + register: single_file_name_idempotency_after + + +# After idempotency fix result will be reliably changed for all formats +- name: Check task status - single file name idempotency ({{ format }}) + assert: + that: + - single_file_name_idempotency_after is not changed + when: "format in ('tar', 'zip')" + +- name: Remove archive - single file name idempotency ({{ format }}) + file: + path: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + state: absent + +- name: Rename file back - single file name idempotency ({{ format }}) + command: "mv {{ output_dir }}/fii.txt 
{{ output_dir }}/foo.txt" From a3607a745e4856117173b9115de65336d4175a4b Mon Sep 17 00:00:00 2001 From: suukit Date: Mon, 19 Jul 2021 11:52:32 +0200 Subject: [PATCH 0448/3093] Feature/gitlab project configuration (#3002) * added - only_allow_merge_if_all_discussions_are_resolved - only_allow_merge_if_all_discussions_are_resolved - only_allow_merge_if_pipeline_succeeds - only_allow_merge_if_pipeline_succeeds - packages_enabled - remove_source_branch_after_merge - squash_option * minor fix * added changelog * Fixedlinter findings * changed version_added to 3.4 -> check requires to do so * Update changelogs/fragments/3001-enhance_gitlab_module.yml Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project.py Co-authored-by: Felix Fontein * rework due to review of felixfontein: - changed option description to full sentences - change default behaviour of new properties * Requested changes Co-authored-by: Max Bidlingmaier Co-authored-by: Felix Fontein --- .../fragments/3001-enhance_gitlab_module.yml | 2 + .../source_control/gitlab/gitlab_project.py | 72 ++++++++++++++++++- 2 files changed, 73 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3001-enhance_gitlab_module.yml diff --git a/changelogs/fragments/3001-enhance_gitlab_module.yml b/changelogs/fragments/3001-enhance_gitlab_module.yml new file mode 100644 index 0000000000..e39985530e --- /dev/null +++ b/changelogs/fragments/3001-enhance_gitlab_module.yml @@ -0,0 +1,2 @@ +minor_changes: + - 
gitlab_project - add new options ``allow_merge_on_skipped_pipeline``, ``only_allow_merge_if_all_discussions_are_resolved``, ``only_allow_merge_if_pipeline_succeeds``, ``packages_enabled``, ``remove_source_branch_after_merge``, ``squash_option`` (https://github.com/ansible-collections/community.general/pull/3002). diff --git a/plugins/modules/source_control/gitlab/gitlab_project.py b/plugins/modules/source_control/gitlab/gitlab_project.py index c916246b78..b3a6ca2064 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project.py +++ b/plugins/modules/source_control/gitlab/gitlab_project.py @@ -114,6 +114,38 @@ options: - Used to create a personal project under a user's name. type: str version_added: "3.3.0" + allow_merge_on_skipped_pipeline: + description: + - Allow merge when skipped pipelines exist. + type: bool + version_added: "3.4.0" + only_allow_merge_if_all_discussions_are_resolved: + description: + - All discussions on a merge request (MR) have to be resolved. + type: bool + version_added: "3.4.0" + only_allow_merge_if_pipeline_succeeds: + description: + - Only allow merges if pipeline succeeded. + type: bool + version_added: "3.4.0" + packages_enabled: + description: + - Enable GitLab package repository. + type: bool + version_added: "3.4.0" + remove_source_branch_after_merge: + description: + - Remove the source branch after merge. + type: bool + version_added: "3.4.0" + squash_option: + description: + - Squash commits when merging. 
+ type: str + choices: ["never", "always", "default_off", "default_on"] + version_added: "3.4.0" + ''' EXAMPLES = r''' @@ -214,6 +246,12 @@ class GitLabProject(object): 'snippets_enabled': options['snippets_enabled'], 'visibility': options['visibility'], 'lfs_enabled': options['lfs_enabled'], + 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'], + 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'], + 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'], + 'packages_enabled': options['packages_enabled'], + 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], + 'squash_option': options['squash_option'], } # Because we have already call userExists in main() if self.projectObject is None: @@ -221,6 +259,7 @@ class GitLabProject(object): 'path': options['path'], 'import_url': options['import_url'], }) + project_options = self.getOptionsWithValue(project_options) project = self.createProject(namespace, project_options) changed = True else: @@ -254,6 +293,17 @@ class GitLabProject(object): return project + ''' + @param arguments Attributes of the project + ''' + def getOptionsWithValue(self, arguments): + ret_arguments = dict() + for arg_key, arg_value in arguments.items(): + if arguments[arg_key] is not None: + ret_arguments[arg_key] = arg_value + + return ret_arguments + ''' @param project Project Object @param arguments Attributes of the project @@ -308,6 +358,12 @@ def main(): state=dict(type='str', default="present", choices=["absent", "present"]), lfs_enabled=dict(default=False, type='bool'), username=dict(type='str'), + allow_merge_on_skipped_pipeline=dict(type='bool'), + only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), + only_allow_merge_if_pipeline_succeeds=dict(type='bool'), + packages_enabled=dict(type='bool'), + remove_source_branch_after_merge=dict(type='bool'), + 
squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), )) module = AnsibleModule( @@ -340,6 +396,12 @@ def main(): state = module.params['state'] lfs_enabled = module.params['lfs_enabled'] username = module.params['username'] + allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline'] + only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved'] + only_allow_merge_if_pipeline_succeeds = module.params['only_allow_merge_if_pipeline_succeeds'] + packages_enabled = module.params['packages_enabled'] + remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] + squash_option = module.params['squash_option'] if not HAS_GITLAB_PACKAGE: module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) @@ -386,6 +448,7 @@ def main(): module.exit_json(changed=False, msg="Project deleted or does not exists") if state == 'present': + if gitlab_project.createOrUpdateProject(project_name, namespace, { "path": project_path, "description": project_description, @@ -396,7 +459,14 @@ def main(): "snippets_enabled": snippets_enabled, "visibility": visibility, "import_url": import_url, - "lfs_enabled": lfs_enabled}): + "lfs_enabled": lfs_enabled, + "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, + "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved, + "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds, + "packages_enabled": packages_enabled, + "remove_source_branch_after_merge": remove_source_branch_after_merge, + "squash_option": squash_option, + }): module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs) module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.projectObject._attrs) From 
d7c6ba89f89cf7bf56a2a798d30f1c450d5a6611 Mon Sep 17 00:00:00 2001 From: Laurent Paumier <30328363+laurpaum@users.noreply.github.com> Date: Mon, 19 Jul 2021 23:17:39 +0200 Subject: [PATCH 0449/3093] Add Keycloak roles module (#2930) * implement simple realm and client role * fix documentation * code cleanup * separate realm and client roles functions * remove blank lines * add tests * fix linefeeds * fix indentation * fix error message * fix documentation * fix documentation * keycloak_role integration tests * keycloak_role integration tests * remove extra blank line * add version_added tag Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../identity/keycloak/keycloak.py | 195 +++++++++- .../identity/keycloak/keycloak_role.py | 363 ++++++++++++++++++ plugins/modules/keycloak_role.py | 1 + .../integration/targets/keycloak_role/aliases | 1 + .../targets/keycloak_role/tasks/main.yml | 246 ++++++++++++ .../targets/keycloak_role/vars/main.yml | 10 + .../identity/keycloak/test_keycloak_role.py | 326 ++++++++++++++++ 7 files changed, 1141 insertions(+), 1 deletion(-) create mode 100644 plugins/modules/identity/keycloak/keycloak_role.py create mode 120000 plugins/modules/keycloak_role.py create mode 100644 tests/integration/targets/keycloak_role/aliases create mode 100644 tests/integration/targets/keycloak_role/tasks/main.yml create mode 100644 tests/integration/targets/keycloak_role/vars/main.yml create mode 100644 tests/unit/plugins/modules/identity/keycloak/test_keycloak_role.py diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index b11289a634..8521650f16 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -43,8 +43,14 @@ URL_REALM = "{url}/admin/realms/{realm}" URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token" URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}" URL_CLIENTS = 
"{url}/admin/realms/{realm}/clients" + URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles" +URL_CLIENT_ROLE = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}" +URL_CLIENT_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}/composites" + URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles" +URL_REALM_ROLE = "{url}/admin/realms/{realm}/roles/{name}" +URL_REALM_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/roles/{name}/composites" URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}" URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates" @@ -632,10 +638,197 @@ class KeycloakAPI(object): try: return open_url(group_url, method='DELETE', headers=self.restheaders, validate_certs=self.validate_certs) - except Exception as e: self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e))) + def get_realm_roles(self, realm='master'): + """ Obtains role representations for roles in a realm + + :param realm: realm to be queried + :return: list of dicts of role representations + """ + rolelist_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) + try: + return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for realm %s: %s' + % (realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain list of roles for realm %s: %s' + % (realm, str(e))) + + def get_realm_role(self, name, realm='master'): + """ Fetch a keycloak role from the provided realm using the role's name. + + If the role does not exist, None is returned. + :param name: Name of the role to fetch. + :param realm: Realm in which the role resides; default 'master'. 
+ """ + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=name) + try: + return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' + % (name, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' + % (name, realm, str(e))) + + def create_realm_role(self, rolerep, realm='master'): + """ Create a Keycloak realm role. + + :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name. + :return: HTTPResponse object on success + """ + roles_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) + try: + return open_url(roles_url, method='POST', headers=self.restheaders, + data=json.dumps(rolerep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not create role %s in realm %s: %s' + % (rolerep['name'], realm, str(e))) + + def update_realm_role(self, rolerep, realm='master'): + """ Update an existing realm role. + + :param rolerep: A RoleRepresentation of the updated role. + :return HTTPResponse object on success + """ + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=rolerep['name']) + try: + return open_url(role_url, method='PUT', headers=self.restheaders, + data=json.dumps(rolerep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not update role %s in realm %s: %s' + % (rolerep['name'], realm, str(e))) + + def delete_realm_role(self, name, realm='master'): + """ Delete a realm role. + + :param name: The name of the role. + :param realm: The realm in which this role resides, default "master". 
+ """ + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=name) + try: + return open_url(role_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Unable to delete role %s in realm %s: %s' + % (name, realm, str(e))) + + def get_client_roles(self, clientid, realm='master'): + """ Obtains role representations for client roles in a specific client + + :param clientid: Client id to be queried + :param realm: Realm to be queried + :return: List of dicts of role representations + """ + cid = self.get_client_id(clientid, realm=realm) + if cid is None: + self.module.fail_json(msg='Could not find client %s in realm %s' + % (clientid, realm)) + rolelist_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) + try: + return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for client %s in realm %s: %s' + % (clientid, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain list of roles for client %s in realm %s: %s' + % (clientid, realm, str(e))) + + def get_client_role(self, name, clientid, realm='master'): + """ Fetch a keycloak client role from the provided realm using the role's name. + + :param name: Name of the role to fetch. + :param clientid: Client id for the client role + :param realm: Realm in which the role resides + :return: Dict of role representation + If the role does not exist, None is returned. 
+ """ + cid = self.get_client_id(clientid, realm=realm) + if cid is None: + self.module.fail_json(msg='Could not find client %s in realm %s' + % (clientid, realm)) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=name) + try: + return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg='Could not fetch role %s in client %s of realm %s: %s' + % (name, clientid, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not fetch role %s for client %s in realm %s: %s' + % (name, clientid, realm, str(e))) + + def create_client_role(self, rolerep, clientid, realm='master'): + """ Create a Keycloak client role. + + :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name. + :param clientid: Client id for the client role + :param realm: Realm in which the role resides + :return: HTTPResponse object on success + """ + cid = self.get_client_id(clientid, realm=realm) + if cid is None: + self.module.fail_json(msg='Could not find client %s in realm %s' + % (clientid, realm)) + roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) + try: + return open_url(roles_url, method='POST', headers=self.restheaders, + data=json.dumps(rolerep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not create role %s for client %s in realm %s: %s' + % (rolerep['name'], clientid, realm, str(e))) + + def update_client_role(self, rolerep, clientid, realm="master"): + """ Update an existing client role. + + :param rolerep: A RoleRepresentation of the updated role. 
+ :param clientid: Client id for the client role + :param realm: Realm in which the role resides + :return HTTPResponse object on success + """ + cid = self.get_client_id(clientid, realm=realm) + if cid is None: + self.module.fail_json(msg='Could not find client %s in realm %s' + % (clientid, realm)) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=rolerep['name']) + try: + return open_url(role_url, method='PUT', headers=self.restheaders, + data=json.dumps(rolerep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not update role %s for client %s in realm %s: %s' + % (rolerep['name'], clientid, realm, str(e))) + + def delete_client_role(self, name, clientid, realm="master"): + """ Delete a role. One of name or roleid must be provided. + + :param name: The name of the role. + :param clientid: Client id for the client role + :param realm: Realm in which the role resides + """ + cid = self.get_client_id(clientid, realm=realm) + if cid is None: + self.module.fail_json(msg='Could not find client %s in realm %s' + % (clientid, realm)) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=name) + try: + return open_url(role_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Unable to delete role %s for client %s in realm %s: %s' + % (name, clientid, realm, str(e))) + def get_authentication_flow_by_alias(self, alias, realm='master'): """ Get an authentication flow by it's alias diff --git a/plugins/modules/identity/keycloak/keycloak_role.py b/plugins/modules/identity/keycloak/keycloak_role.py new file mode 100644 index 0000000000..23ed7cfeed --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_role.py @@ -0,0 +1,363 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2019, Adam Goossens +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_role + +short_description: Allows administration of Keycloak roles via Keycloak API + +version_added: 3.4.0 + +description: + - This module allows you to add, remove or modify Keycloak roles via the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + + - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will + be returned that way by this module. You may pass single values for attributes when calling the module, + and this will be translated into a list suitable for the API. + + +options: + state: + description: + - State of the role. + - On C(present), the role will be created if it does not yet exist, or updated with the parameters you provide. + - On C(absent), the role will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + name: + type: str + required: true + description: + - Name of the role. + - This parameter is required. + + description: + type: str + description: + - The role description. + + realm: + type: str + description: + - The Keycloak realm under which this role resides. + default: 'master' + + client_id: + type: str + description: + - If the role is a client role, the client id under which it resides. + - If this parameter is absent, the role is considered a realm role. 
+ + attributes: + type: dict + description: + - A dict of key/value pairs to set as custom attributes for the role. + - Values may be single values (e.g. a string) or a list of strings. + +extends_documentation_fragment: +- community.general.keycloak + + +author: + - Laurent Paumier (@laurpaum) +''' + +EXAMPLES = ''' +- name: Create a Keycloak realm role, authentication with credentials + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a Keycloak realm role, authentication with token + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + delegate_to: localhost + +- name: Create a Keycloak client role + community.general.keycloak_role: + name: my-new-kc-role + realm: MyCustomRealm + client_id: MyClient + state: present + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Delete a Keycloak role + community.general.keycloak_role: + name: my-role-for-deletion + state: absent + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + delegate_to: localhost + +- name: Create a keycloak role with some custom attributes + community.general.keycloak_role: + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + name: my-new-role + attributes: + attrib1: value1 + attrib2: value2 + attrib3: + - with + - numerous + - individual + - list + - items + delegate_to: localhost +''' + 
+RETURN = ''' +msg: + description: Message as to what action was taken + returned: always + type: str + sample: "Role myrole has been updated" + +proposed: + description: Role representation of proposed changes to role + returned: always + type: dict + sample: { + "description": "My updated test description" + } +existing: + description: Role representation of existing role + returned: always + type: dict + sample: { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" + } +end_state: + description: Role representation of role after module execution (sample is truncated) + returned: always + type: dict + sample: { + "attributes": {}, + "clientRole": true, + "composite": false, + "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a", + "description": "My updated client test role", + "id": "561703dd-0f38-45ff-9a5a-0c978f794547", + "name": "myrole" + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + description=dict(type='str'), + realm=dict(type='str', default='master'), + client_id=dict(type='str'), + attributes=dict(type='dict'), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, 
existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + clientid = module.params.get('client_id') + name = module.params.get('name') + state = module.params.get('state') + + # attributes in Keycloak have their values returned as lists + # via the API. attributes is a dict, so we'll transparently convert + # the values to lists. + if module.params.get('attributes') is not None: + for key, val in module.params['attributes'].items(): + module.params['attributes'][key] = [val] if not isinstance(val, list) else val + + # convert module parameters to client representation parameters (if they belong in there) + role_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id', 'composites'] and + module.params.get(x) is not None] + + # does the role already exist? + if clientid is None: + before_role = kc.get_realm_role(name, realm) + else: + before_role = kc.get_client_role(name, clientid, realm) + + if before_role is None: + before_role = dict() + + # build a changeset + changeset = dict() + + for param in role_params: + new_param_value = module.params.get(param) + old_value = before_role[param] if param in before_role else None + if new_param_value != old_value: + changeset[camel(param)] = new_param_value + + # prepare the new role + updated_role = before_role.copy() + updated_role.update(changeset) + + result['proposed'] = changeset + result['existing'] = before_role + + # if before_role is none, the role doesn't exist. + if before_role == dict(): + if state == 'absent': + # nothing to do. + if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = dict() + result['msg'] = 'Role does not exist; doing nothing.' 
+ module.exit_json(**result) + + # for 'present', create a new role. + result['changed'] = True + + if name is None: + module.fail_json(msg='name must be specified when creating a new role') + + if module._diff: + result['diff'] = dict(before='', after=updated_role) + + if module.check_mode: + module.exit_json(**result) + + # do it for real! + if clientid is None: + kc.create_realm_role(updated_role, realm) + after_role = kc.get_realm_role(name, realm) + else: + kc.create_client_role(updated_role, clientid, realm) + after_role = kc.get_client_role(name, clientid, realm) + + result['end_state'] = after_role + + result['msg'] = 'Role {name} has been created'.format(name=name) + module.exit_json(**result) + + else: + if state == 'present': + # no changes + if updated_role == before_role: + result['changed'] = False + result['end_state'] = updated_role + result['msg'] = "No changes required to role {name}.".format(name=name) + module.exit_json(**result) + + # update the existing role + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_role, after=updated_role) + + if module.check_mode: + module.exit_json(**result) + + # do the update + if clientid is None: + kc.update_realm_role(updated_role, realm) + after_role = kc.get_realm_role(name, realm) + else: + kc.update_client_role(updated_role, clientid, realm) + after_role = kc.get_client_role(name, clientid, realm) + + result['end_state'] = after_role + + result['msg'] = "Role {name} has been updated".format(name=name) + module.exit_json(**result) + + elif state == 'absent': + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=before_role, after='') + + if module.check_mode: + module.exit_json(**result) + + # delete for real + if clientid is None: + kc.delete_realm_role(name, realm) + else: + kc.delete_client_role(name, clientid, realm) + + result['end_state'] = dict() + + result['msg'] = "Role {name} has been deleted".format(name=name) + 
module.exit_json(**result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_role.py b/plugins/modules/keycloak_role.py new file mode 120000 index 0000000000..48554b3a5f --- /dev/null +++ b/plugins/modules/keycloak_role.py @@ -0,0 +1 @@ +./identity/keycloak/keycloak_role.py \ No newline at end of file diff --git a/tests/integration/targets/keycloak_role/aliases b/tests/integration/targets/keycloak_role/aliases new file mode 100644 index 0000000000..ad7ccf7ada --- /dev/null +++ b/tests/integration/targets/keycloak_role/aliases @@ -0,0 +1 @@ +unsupported diff --git a/tests/integration/targets/keycloak_role/tasks/main.yml b/tests/integration/targets/keycloak_role/tasks/main.yml new file mode 100644 index 0000000000..683cfc8677 --- /dev/null +++ b/tests/integration/targets/keycloak_role/tasks/main.yml @@ -0,0 +1,246 @@ +--- +- name: Create realm + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + id: "{{ realm }}" + realm: "{{ realm }}" + state: present + +- name: Create client + community.general.keycloak_client: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + state: present + register: client + +- name: Create new realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ description_1 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert realm role created + assert: + that: + - result is changed + - result.existing == {} + - result.end_state.name == "{{ role }}" + - result.end_state.containerId == 
"{{ realm }}" + +- name: Create existing realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ description_1 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert realm role unchanged + assert: + that: + - result is not changed + +- name: Update realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + description: "{{ description_2 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert realm role updated + assert: + that: + - result is changed + - result.existing.description == "{{ description_1 }}" + - result.end_state.description == "{{ description_2 }}" + +- name: Delete existing realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert realm role deleted + assert: + that: + - result is changed + - result.end_state == {} + +- name: Delete absent realm role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert realm role unchanged + assert: + that: + - result is not changed + - result.end_state == {} + +- name: Create new client role + community.general.keycloak_role: + auth_keycloak_url: 
"{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + name: "{{ role }}" + description: "{{ description_1 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert client role created + assert: + that: + - result is changed + - result.existing == {} + - result.end_state.name == "{{ role }}" + - result.end_state.containerId == "{{ client.end_state.id }}" + +- name: Create existing client role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + name: "{{ role }}" + description: "{{ description_1 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert client role unchanged + assert: + that: + - result is not changed + +- name: Update client role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + name: "{{ role }}" + description: "{{ description_2 }}" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert client role updated + assert: + that: + - result is changed + - result.existing.description == "{{ description_1 }}" + - result.end_state.description == "{{ description_2 }}" + +- name: Delete existing client role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert client role 
deleted + assert: + that: + - result is changed + - result.end_state == {} + +- name: Delete absent client role + community.general.keycloak_role: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + client_id: "{{ client_id }}" + name: "{{ role }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert client role unchanged + assert: + that: + - result is not changed + - result.end_state == {} diff --git a/tests/integration/targets/keycloak_role/vars/main.yml b/tests/integration/targets/keycloak_role/vars/main.yml new file mode 100644 index 0000000000..0a725dc4a6 --- /dev/null +++ b/tests/integration/targets/keycloak_role/vars/main.yml @@ -0,0 +1,10 @@ +--- +url: http://localhost:8080/auth +admin_realm: master +admin_user: admin +admin_password: password +realm: myrealm +client_id: myclient +role: myrole +description_1: desc 1 +description_2: desc 2 diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_role.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_role.py new file mode 100644 index 0000000000..cffae17807 --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_role.py @@ -0,0 +1,326 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +from 
ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_role

+from itertools import count
+
+from ansible.module_utils.six import StringIO
+
+
+@contextmanager
+def patch_keycloak_api(get_realm_role, create_realm_role=None, update_realm_role=None, delete_realm_role=None):
+    """Mock context manager for patching the KeycloakAPI methods that contact the Keycloak server
+
+    Patches the `get_realm_role`, `create_realm_role`, `update_realm_role` and `delete_realm_role` methods
+
+    Each argument is used as the `side_effect` of the mock object that patches the method of the same name
+
+    Only `get_realm_role` is required; the remaining methods default to mocks with no side effect
+
+    Example::
+
+        with patch_keycloak_api(get_realm_role=[None]) as (mock_get, mock_create, mock_update, mock_delete):
+            ...
+    """
+
+    obj = keycloak_role.KeycloakAPI
+    with patch.object(obj, 'get_realm_role', side_effect=get_realm_role) as mock_get_realm_role:
+        with patch.object(obj, 'create_realm_role', side_effect=create_realm_role) as mock_create_realm_role:
+            with patch.object(obj, 'update_realm_role', side_effect=update_realm_role) as mock_update_realm_role:
+                with patch.object(obj, 'delete_realm_role', side_effect=delete_realm_role) as mock_delete_realm_role:
+                    yield mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role
+
+
+def get_response(object_with_future_response, method, get_id_call_count):
+    if callable(object_with_future_response):
+        return object_with_future_response()
+    if isinstance(object_with_future_response, dict):
+        return get_response(
+            object_with_future_response[method], method, get_id_call_count)
+    if isinstance(object_with_future_response, list):
+        call_number = next(get_id_call_count)
+        return get_response(
+            object_with_future_response[call_number], method, get_id_call_count)
+    return object_with_future_response
+
+
+def build_mocked_request(get_id_user_count, response_dict):
+    def _mocked_requests(*args, **kwargs):
+        url = args[0]
+        method = kwargs['method']
+        future_response = response_dict.get(url, None)
+        return
get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakRealmRole(ModuleTestCase): + def setUp(self): + super(TestKeycloakRealmRole, self).setUp() + self.module = keycloak_role + + def test_create_when_absent(self): + """Add a new realm role""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'name': 'role-name', + 'description': 'role-description', + } + return_value_absent = [ + None, + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + } + ] + return_value_created = [None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_absent, create_realm_role=return_value_created) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_role.mock_calls), 2) + 
self.assertEqual(len(mock_create_realm_role.mock_calls), 1) + self.assertEqual(len(mock_update_realm_role.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present_with_change(self): + """Update with change a realm role""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'name': 'role-name', + 'description': 'new-role-description', + } + return_value_present = [ + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + }, + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "new-role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + } + ] + return_value_updated = [None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_role.mock_calls), 2) + self.assertEqual(len(mock_create_realm_role.mock_calls), 0) + self.assertEqual(len(mock_update_realm_role.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present_no_change(self): + """Update without change a realm role""" + + module_args = { + 'auth_keycloak_url': 
'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'name': 'role-name', + 'description': 'role-description', + } + return_value_present = [ + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + }, + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + } + ] + return_value_updated = [None] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_role.mock_calls), 1) + self.assertEqual(len(mock_create_realm_role.mock_calls), 0) + self.assertEqual(len(mock_update_realm_role.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_absent(self): + """Remove an absent realm role""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'name': 'role-name', + 'state': 'absent' + } + return_value_absent = [None] + return_value_deleted = [None] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with 
patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_role.mock_calls), 1) + self.assertEqual(len(mock_delete_realm_role.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_present(self): + """Remove a present realm role""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'name': 'role-name', + 'state': 'absent' + } + return_value_absent = [ + { + "attributes": {}, + "clientRole": False, + "composite": False, + "containerId": "realm-name", + "description": "role-description", + "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966", + "name": "role-name", + } + ] + return_value_deleted = [None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) \ + as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_realm_role.mock_calls), 1) + self.assertEqual(len(mock_delete_realm_role.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 4a392372a873c349bcba020f4ba1eb6d28e27a4e Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> 
Date: Mon, 19 Jul 2021 22:39:02 +0100 Subject: [PATCH 0450/3093] Keycloak: add clientscope management (#2905) * Add new keycloak_clienscope module * Add description and protocol parameter + Indentation Fix * Add protocolMappers parameter * Add documentation and Fix updatating of protocolMappers * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_clientscope.py Co-authored-by: Felix Fontein * Add sanitize_cr(clientscoperep) function to sanitize the clientscope representation * Add unit tests for clientscope Keycloak module * Apply suggestions from code review Co-authored-by: Felix Fontein --- .../identity/keycloak/keycloak.py | 238 +++++++ .../identity/keycloak/keycloak_clientscope.py | 492 ++++++++++++++ plugins/modules/keycloak_clientscope.py | 1 + .../keycloak/test_keycloak_clientscope.py | 614 ++++++++++++++++++ 4 files changed, 1345 insertions(+) create mode 100644 plugins/modules/identity/keycloak/keycloak_clientscope.py create mode 120000 plugins/modules/keycloak_clientscope.py create mode 100644 tests/unit/plugins/modules/identity/keycloak/test_keycloak_clientscope.py diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 8521650f16..75ef2bba02 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -57,6 +57,11 @@ URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates" URL_GROUPS = "{url}/admin/realms/{realm}/groups" URL_GROUP = 
"{url}/admin/realms/{realm}/groups/{groupid}" +URL_CLIENTSCOPES = "{url}/admin/realms/{realm}/client-scopes" +URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}" +URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models" +URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}" + URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows" URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}" URL_AUTHENTICATION_FLOW_COPY = "{url}/admin/realms/{realm}/authentication/flows/{copyfrom}/copy" @@ -511,6 +516,239 @@ class KeycloakAPI(object): self.module.fail_json(msg='Could not delete client template %s in realm %s: %s' % (id, realm, str(e))) + def get_clientscopes(self, realm="master"): + """ Fetch the name and ID of all clientscopes on the Keycloak server. + + To fetch the full data of the group, make a subsequent call to + get_clientscope_by_clientscopeid, passing in the ID of the group you wish to return. + + :param realm: Realm in which the clientscope resides; default 'master'. + :return The clientscopes of this realm (default "master") + """ + clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm) + try: + return json.loads(to_native(open_url(clientscopes_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.module.fail_json(msg="Could not fetch list of clientscopes in realm %s: %s" + % (realm, str(e))) + + def get_clientscope_by_clientscopeid(self, cid, realm="master"): + """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID. + + If the clientscope does not exist, None is returned. + + gid is a UUID provided by the Keycloak API + :param cid: UUID of the clientscope to be returned + :param realm: Realm in which the clientscope resides; default 'master'. 
+ """ + clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=cid) + try: + return json.loads(to_native(open_url(clientscope_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s" + % (cid, realm, str(e))) + except Exception as e: + self.module.fail_json(msg="Could not clientscope group %s in realm %s: %s" + % (cid, realm, str(e))) + + def get_clientscope_by_name(self, name, realm="master"): + """ Fetch a keycloak clientscope within a realm based on its name. + + The Keycloak API does not allow filtering of the clientscopes resource by name. + As a result, this method first retrieves the entire list of clientscopes - name and ID - + then performs a second query to fetch the group. + + If the clientscope does not exist, None is returned. + :param name: Name of the clientscope to fetch. + :param realm: Realm in which the clientscope resides; default 'master' + """ + try: + all_clientscopes = self.get_clientscopes(realm=realm) + + for clientscope in all_clientscopes: + if clientscope['name'] == name: + return self.get_clientscope_by_clientscopeid(clientscope['id'], realm=realm) + + return None + + except Exception as e: + self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s" + % (name, realm, str(e))) + + def create_clientscope(self, clientscoperep, realm="master"): + """ Create a Keycloak clientscope. + + :param clientscoperep: a ClientScopeRepresentation of the clientscope to be created. Must contain at minimum the field name. 
+ :return: HTTPResponse object on success + """ + clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm) + try: + return open_url(clientscopes_url, method='POST', headers=self.restheaders, + data=json.dumps(clientscoperep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg="Could not create clientscope %s in realm %s: %s" + % (clientscoperep['name'], realm, str(e))) + + def update_clientscope(self, clientscoperep, realm="master"): + """ Update an existing clientscope. + + :param grouprep: A GroupRepresentation of the updated group. + :return HTTPResponse object on success + """ + clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep['id']) + + try: + return open_url(clientscope_url, method='PUT', headers=self.restheaders, + data=json.dumps(clientscoperep), validate_certs=self.validate_certs) + + except Exception as e: + self.module.fail_json(msg='Could not update clientscope %s in realm %s: %s' + % (clientscoperep['name'], realm, str(e))) + + def delete_clientscope(self, name=None, cid=None, realm="master"): + """ Delete a clientscope. One of name or cid must be provided. + + Providing the clientscope ID is preferred as it avoids a second lookup to + convert a clientscope name to an ID. + + :param name: The name of the clientscope. A lookup will be performed to retrieve the clientscope ID. + :param cid: The ID of the clientscope (preferred to name). + :param realm: The realm in which this group resides, default "master". + """ + + if cid is None and name is None: + # prefer an exception since this is almost certainly a programming error in the module itself. + raise Exception("Unable to delete group - one of group ID or name must be provided.") + + # only lookup the name if cid isn't provided. + # in the case that both are provided, prefer the ID, since it's one + # less lookup. 
+ if cid is None and name is not None: + for clientscope in self.get_clientscopes(realm=realm): + if clientscope['name'] == name: + cid = clientscope['id'] + break + + # if the group doesn't exist - no problem, nothing to delete. + if cid is None: + return None + + # should have a good cid by here. + clientscope_url = URL_CLIENTSCOPE.format(realm=realm, id=cid, url=self.baseurl) + try: + return open_url(clientscope_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + + except Exception as e: + self.module.fail_json(msg="Unable to delete clientscope %s: %s" % (cid, str(e))) + + def get_clientscope_protocolmappers(self, cid, realm="master"): + """ Fetch the name and ID of all clientscopes on the Keycloak server. + + To fetch the full data of the group, make a subsequent call to + get_clientscope_by_clientscopeid, passing in the ID of the group you wish to return. + + :param cid: id of clientscope (not name). + :param realm: Realm in which the clientscope resides; default 'master'. + :return The protocolmappers of this realm (default "master") + """ + protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(id=cid, url=self.baseurl, realm=realm) + try: + return json.loads(to_native(open_url(protocolmappers_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.module.fail_json(msg="Could not fetch list of protocolmappers in realm %s: %s" + % (realm, str(e))) + + def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"): + """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID. + + If the clientscope does not exist, None is returned. + + gid is a UUID provided by the Keycloak API + + :param cid: UUID of the protocolmapper to be returned + :param cid: UUID of the clientscope to be returned + :param realm: Realm in which the clientscope resides; default 'master'. 
+ """ + protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=pid) + try: + return json.loads(to_native(open_url(protocolmapper_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" + % (pid, realm, str(e))) + except Exception as e: + self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" + % (cid, realm, str(e))) + + def get_clientscope_protocolmapper_by_name(self, cid, name, realm="master"): + """ Fetch a keycloak clientscope within a realm based on its name. + + The Keycloak API does not allow filtering of the clientscopes resource by name. + As a result, this method first retrieves the entire list of clientscopes - name and ID - + then performs a second query to fetch the group. + + If the clientscope does not exist, None is returned. + :param cid: Id of the clientscope (not name). + :param name: Name of the protocolmapper to fetch. + :param realm: Realm in which the clientscope resides; default 'master' + """ + try: + all_protocolmappers = self.get_clientscope_protocolmappers(cid, realm=realm) + + for protocolmapper in all_protocolmappers: + if protocolmapper['name'] == name: + return self.get_clientscope_protocolmapper_by_protocolmapperid(protocolmapper['id'], cid, realm=realm) + + return None + + except Exception as e: + self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" + % (name, realm, str(e))) + + def create_clientscope_protocolmapper(self, cid, mapper_rep, realm="master"): + """ Create a Keycloak clientscope protocolmapper. + + :param cid: Id of the clientscope. + :param mapper_rep: a ProtocolMapperRepresentation of the protocolmapper to be created. Must contain at minimum the field name. 
+ :return: HTTPResponse object on success + """ + protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(url=self.baseurl, id=cid, realm=realm) + try: + return open_url(protocolmappers_url, method='POST', headers=self.restheaders, + data=json.dumps(mapper_rep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg="Could not create protocolmapper %s in realm %s: %s" + % (mapper_rep['name'], realm, str(e))) + + def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"): + """ Update an existing clientscope. + + :param cid: Id of the clientscope. + :param mapper_rep: A ProtocolMapperRepresentation of the updated protocolmapper. + :return HTTPResponse object on success + """ + protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep['id']) + + try: + return open_url(protocolmapper_url, method='PUT', headers=self.restheaders, + data=json.dumps(mapper_rep), validate_certs=self.validate_certs) + + except Exception as e: + self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s' + % (mapper_rep, realm, str(e))) + def get_groups(self, realm="master"): """ Fetch the name and ID of all groups on the Keycloak server. 
#!/usr/bin/python
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: keycloak_clientscope

short_description: Allows administration of Keycloak client_scopes via Keycloak API

version_added: 3.4.0

description:
    - This module allows you to add, remove or modify Keycloak client_scopes via the Keycloak REST API.
      It requires access to the REST API via OpenID Connect; the user connecting and the client being
      used must have the requisite access rights. In a default Keycloak installation, admin-cli
      and an admin user would work, as would a separate client definition with the scope tailored
      to your needs and a user having the expected roles.

    - The names of module options are snake_cased versions of the camelCase ones found in the
      Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).

    - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
      be returned that way by this module. You may pass single values for attributes when calling the module,
      and this will be translated into a list suitable for the API.

    - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup
      to the API to translate the name into the client_scope ID.


options:
    state:
        description:
            - State of the client_scope.
            - On C(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide.
            - On C(absent), the client_scope will be removed if it exists.
        default: 'present'
        type: str
        choices:
            - present
            - absent

    name:
        type: str
        description:
            - Name of the client_scope.
            - This parameter is required only when creating or updating the client_scope.

    realm:
        type: str
        description:
            - The Keycloak realm under which this client_scope resides.
        default: 'master'

    id:
        type: str
        description:
            - The unique identifier for this client_scope.
            - This parameter is not required for updating or deleting a client_scope but
              providing it will reduce the number of API calls required.

    description:
        type: str
        description:
            - Description for this client_scope.
            - This parameter is not required for updating or deleting a client_scope.

    protocol:
        description:
            - Type of client.
        choices: ['openid-connect', 'saml', 'wsfed']
        type: str

    protocol_mappers:
        description:
            - A list of dicts defining protocol mappers for this client.
            - This is 'protocolMappers' in the Keycloak REST API.
        aliases:
            - protocolMappers
        type: list
        elements: dict
        suboptions:
            protocol:
                description:
                    - This specifies for which protocol this protocol mapper
                    - is active.
                choices: ['openid-connect', 'saml', 'wsfed']
                type: str

            protocolMapper:
                description:
                    - "The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
                      impossible to provide since this may be extended through SPIs by the user of Keycloak,
                      by default Keycloak as of 3.4 ships with at least:"
                    - C(docker-v2-allow-all-mapper)
                    - C(oidc-address-mapper)
                    - C(oidc-full-name-mapper)
                    - C(oidc-group-membership-mapper)
                    - C(oidc-hardcoded-claim-mapper)
                    - C(oidc-hardcoded-role-mapper)
                    - C(oidc-role-name-mapper)
                    - C(oidc-script-based-protocol-mapper)
                    - C(oidc-sha256-pairwise-sub-mapper)
                    - C(oidc-usermodel-attribute-mapper)
                    - C(oidc-usermodel-client-role-mapper)
                    - C(oidc-usermodel-property-mapper)
                    - C(oidc-usermodel-realm-role-mapper)
                    - C(oidc-usersessionmodel-note-mapper)
                    - C(saml-group-membership-mapper)
                    - C(saml-hardcode-attribute-mapper)
                    - C(saml-hardcode-role-mapper)
                    - C(saml-role-list-mapper)
                    - C(saml-role-name-mapper)
                    - C(saml-user-attribute-mapper)
                    - C(saml-user-property-mapper)
                    - C(saml-user-session-note-mapper)
                    - An exhaustive list of available mappers on your installation can be obtained on
                      the admin console by going to Server Info -> Providers and looking under
                      'protocol-mapper'.
                type: str

            name:
                description:
                    - The name of this protocol mapper.
                type: str

            id:
                description:
                    - Usually a UUID specifying the internal ID of this protocol mapper instance.
                type: str

            config:
                description:
                    - Dict specifying the configuration options for the protocol mapper; the
                      contents differ depending on the value of I(protocolMapper) and are not documented
                      other than by the source of the mappers and its parent class(es). An example is given
                      below. It is easiest to obtain valid config values by dumping an already-existing
                      protocol mapper configuration through check-mode in the C(existing) return value.
                type: dict

    attributes:
        type: dict
        description:
            - A dict of key/value pairs to set as custom attributes for the client_scope.
            - Values may be single values (for example a string) or a list of strings.

extends_documentation_fragment:
- community.general.keycloak


author:
    - Gaëtan Daubresse (@Gaetan2907)
'''

EXAMPLES = '''
- name: Create a Keycloak client_scopes, authentication with credentials
  community.general.keycloak_clientscope:
    name: my-new-kc-clientscope
    realm: MyCustomRealm
    state: present
    auth_client_id: admin-cli
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: USERNAME
    auth_password: PASSWORD
  delegate_to: localhost

- name: Create a Keycloak client_scopes, authentication with token
  community.general.keycloak_clientscope:
    name: my-new-kc-clientscope
    realm: MyCustomRealm
    state: present
    auth_client_id: admin-cli
    auth_keycloak_url: https://auth.example.com/auth
    token: TOKEN
  delegate_to: localhost

- name: Delete a keycloak client_scopes
  community.general.keycloak_clientscope:
    id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
    state: absent
    realm: MyCustomRealm
    auth_client_id: admin-cli
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: USERNAME
    auth_password: PASSWORD
  delegate_to: localhost

- name: Delete a Keycloak client_scope based on name
  community.general.keycloak_clientscope:
    name: my-clientscope-for-deletion
    state: absent
    auth_client_id: admin-cli
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: USERNAME
    auth_password: PASSWORD
  delegate_to: localhost

- name: Update the name of a Keycloak client_scope
  community.general.keycloak_clientscope:
    id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
    name: an-updated-kc-clientscope-name
    state: present
    auth_client_id: admin-cli
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: USERNAME
    auth_password: PASSWORD
  delegate_to: localhost

- name: Create a Keycloak client_scope with some custom attributes
  community.general.keycloak_clientscope:
    auth_client_id: admin-cli
    auth_keycloak_url: https://auth.example.com/auth
    auth_realm: master
    auth_username: USERNAME
    auth_password: PASSWORD
    name: my-new_clientscope
    description: description-of-clientscope
    protocol: openid-connect
    protocol_mappers:
      - config:
          access.token.claim: True
          claim.name: "family_name"
          id.token.claim: True
          jsonType.label: String
          user.attribute: lastName
          userinfo.token.claim: True
        name: family name
        protocol: openid-connect
        protocolMapper: oidc-usermodel-property-mapper
      - config:
          attribute.name: Role
          attribute.nameformat: Basic
          single: false
        name: role list
        protocol: saml
        protocolMapper: saml-role-list-mapper
    attributes:
        attrib1: value1
        attrib2: value2
        attrib3:
            - with
            - numerous
            - individual
            - list
            - items
  delegate_to: localhost
'''

RETURN = '''
msg:
    description: Message as to what action was taken
    returned: always
    type: str
    sample: "Client_scope testclientscope has been updated"

proposed:
    description: client_scope representation of proposed changes to client_scope
    returned: always
    type: dict
    sample: {
      clientId: "test"
    }
existing:
    description: client_scope representation of existing client_scope (sample is truncated)
    returned: always
    type: dict
    sample: {
        "adminUrl": "http://www.example.com/admin_url",
        "attributes": {
            "request.object.signature.alg": "RS256",
        }
    }
end_state:
    description: client_scope representation of client_scope after module execution (sample is truncated)
    returned: always
    type: dict
    sample: {
        "adminUrl": "http://www.example.com/admin_url",
        "attributes": {
            "request.object.signature.alg": "RS256",
        }
    }
'''

from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
    keycloak_argument_spec, get_token, KeycloakError, is_struct_included
from ansible.module_utils.basic import AnsibleModule


def sanitize_cr(clientscoperep):
    """ Removes probably sensitive details from a clientscoperep representation

    :param clientscoperep: the clientscoperep dict to be sanitized
    :return: sanitized clientscoperep dict
    """
    result = clientscoperep.copy()
    if 'secret' in result:
        result['secret'] = 'no_log'
    if 'attributes' in result:
        if 'saml.signing.private.key' in result['attributes']:
            result['attributes']['saml.signing.private.key'] = 'no_log'
    return result


def main():
    """
    Module execution

    :return:
    """
    argument_spec = keycloak_argument_spec()

    protmapper_spec = dict(
        id=dict(type='str'),
        name=dict(type='str'),
        protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']),
        protocolMapper=dict(type='str'),
        config=dict(type='dict'),
    )

    meta_args = dict(
        state=dict(default='present', choices=['present', 'absent']),
        realm=dict(default='master'),
        id=dict(type='str'),
        name=dict(type='str'),
        description=dict(type='str'),
        protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']),
        attributes=dict(type='dict'),
        protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
    )

    argument_spec.update(meta_args)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=([['id', 'name'],
                                             ['token', 'auth_realm', 'auth_username', 'auth_password']]),
                           required_together=([['auth_realm', 'auth_username', 'auth_password']]))

    result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})

    # Obtain access token, initialize API
    try:
        connection_header = get_token(module.params)
    except KeycloakError as e:
        module.fail_json(msg=str(e))

    kc = KeycloakAPI(module, connection_header)

    realm = module.params.get('realm')
    state = module.params.get('state')
    cid = module.params.get('id')
    name = module.params.get('name')
    protocol_mappers = module.params.get('protocol_mappers')

    before_clientscope = None  # current state of the clientscope, for merging.

    # does the clientscope already exist?
    if cid is None:
        before_clientscope = kc.get_clientscope_by_name(name, realm=realm)
    else:
        before_clientscope = kc.get_clientscope_by_clientscopeid(cid, realm=realm)

    before_clientscope = {} if before_clientscope is None else before_clientscope

    clientscope_params = [x for x in module.params
                          if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
                          module.params.get(x) is not None]

    # Build a proposed changeset from parameters given to this module
    changeset = dict()

    for clientscope_param in clientscope_params:
        new_param_value = module.params.get(clientscope_param)

        # some lists in the Keycloak API are sorted, some are not.
        if isinstance(new_param_value, list):
            if clientscope_param in ['attributes']:
                try:
                    new_param_value = sorted(new_param_value)
                except TypeError:
                    pass
        # Unfortunately, the ansible argument spec checker introduces variables with null values when
        # they are not specified
        if clientscope_param == 'protocol_mappers':
            new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
        changeset[camel(clientscope_param)] = new_param_value

    # prepare the new clientscope
    updated_clientscope = before_clientscope.copy()
    updated_clientscope.update(changeset)

    # if before_clientscope is none, the clientscope doesn't exist.
    if before_clientscope == {}:
        if state == 'absent':
            # nothing to do.
            if module._diff:
                result['diff'] = dict(before='', after='')
            result['msg'] = 'Clientscope does not exist; doing nothing.'
            result['end_state'] = dict()
            module.exit_json(**result)

        # for 'present', create a new clientscope.
        result['changed'] = True
        if name is None:
            module.fail_json(msg='name must be specified when creating a new clientscope')

        if module._diff:
            result['diff'] = dict(before='', after=sanitize_cr(updated_clientscope))

        if module.check_mode:
            module.exit_json(**result)

        # do it for real!
        kc.create_clientscope(updated_clientscope, realm=realm)
        after_clientscope = kc.get_clientscope_by_name(name, realm)

        result['end_state'] = sanitize_cr(after_clientscope)
        result['msg'] = 'Clientscope {name} has been created with ID {id}'.format(name=after_clientscope['name'],
                                                                                  id=after_clientscope['id'])

    else:
        if state == 'present':
            # no changes
            if updated_clientscope == before_clientscope:
                result['changed'] = False
                result['end_state'] = sanitize_cr(updated_clientscope)
                result['msg'] = "No changes required to clientscope {name}.".format(name=before_clientscope['name'])
                module.exit_json(**result)

            # update the existing clientscope
            result['changed'] = True

            if module._diff:
                result['diff'] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(updated_clientscope))

            if module.check_mode:
                module.exit_json(**result)

            # do the clientscope update
            kc.update_clientscope(updated_clientscope, realm=realm)

            # do the protocolmappers update
            if protocol_mappers is not None:
                for protocol_mapper in protocol_mappers:
                    # update if protocolmapper exist
                    current_protocolmapper = kc.get_clientscope_protocolmapper_by_name(updated_clientscope['id'], protocol_mapper['name'], realm=realm)
                    if current_protocolmapper is not None:
                        protocol_mapper['id'] = current_protocolmapper['id']
                        kc.update_clientscope_protocolmappers(updated_clientscope['id'], protocol_mapper, realm=realm)
                    # create otherwise
                    else:
                        kc.create_clientscope_protocolmapper(updated_clientscope['id'], protocol_mapper, realm=realm)

            after_clientscope = kc.get_clientscope_by_clientscopeid(updated_clientscope['id'], realm=realm)

            # BUGFIX: sanitize end_state here too, for consistency with the create
            # and no-change paths (avoids leaking secrets into module output).
            result['end_state'] = sanitize_cr(after_clientscope)
            result['msg'] = "Clientscope {id} has been updated".format(id=after_clientscope['id'])

            module.exit_json(**result)

        elif state == 'absent':
            result['end_state'] = dict()

            if module._diff:
                result['diff'] = dict(before=sanitize_cr(before_clientscope), after='')

            if module.check_mode:
                module.exit_json(**result)

            # delete for real
            cid = before_clientscope['id']
            kc.delete_clientscope(cid=cid, realm=realm)

            result['changed'] = True
            result['msg'] = "Clientscope {name} has been deleted".format(name=before_clientscope['name'])

            module.exit_json(**result)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
keycloak_clientscope + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_clientscope_by_name=None, get_clientscope_by_clientscopeid=None, create_clientscope=None, + update_clientscope=None, get_clientscope_protocolmapper_by_name=None, + update_clientscope_protocolmappers=None, create_clientscope_protocolmapper=None, + delete_clientscope=None): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + """ + get_clientscope_by_clientscopeid + delete_clientscope + """ + + obj = keycloak_clientscope.KeycloakAPI + with patch.object(obj, 'get_clientscope_by_name', side_effect=get_clientscope_by_name) \ + as mock_get_clientscope_by_name: + with patch.object(obj, 'get_clientscope_by_clientscopeid', side_effect=get_clientscope_by_clientscopeid) \ + as mock_get_clientscope_by_clientscopeid: + with patch.object(obj, 'create_clientscope', side_effect=create_clientscope) \ + as mock_create_clientscope: + with patch.object(obj, 'update_clientscope', return_value=update_clientscope) \ + as mock_update_clientscope: + with patch.object(obj, 'get_clientscope_protocolmapper_by_name', + side_effect=get_clientscope_protocolmapper_by_name) \ + as mock_get_clientscope_protocolmapper_by_name: + with patch.object(obj, 'update_clientscope_protocolmappers', + side_effect=update_clientscope_protocolmappers) \ + as mock_update_clientscope_protocolmappers: + with patch.object(obj, 'create_clientscope_protocolmapper', + side_effect=create_clientscope_protocolmapper) \ + as mock_create_clientscope_protocolmapper: + with patch.object(obj, 'delete_clientscope', 
side_effect=delete_clientscope) \ + as mock_delete_clientscope: + yield mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, \ + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, mock_update_clientscope_protocolmappers, \ + mock_create_clientscope_protocolmapper, mock_delete_clientscope + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. 
+ """ + + def _create_wrapper(): + return StringIO(text_as_string) + + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper( + '{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakAuthentication(ModuleTestCase): + def setUp(self): + super(TestKeycloakAuthentication, self).setUp() + self.module = keycloak_clientscope + + def test_create_clientscope(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'present', + 'name': 'my-new-kc-clientscope' + } + return_value_get_clientscope_by_name = [ + None, + { + "attributes": {}, + "id": "73fec1d2-f032-410c-8177-583104d01305", + "name": "my-new-kc-clientscope" + }] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 2) + self.assertEqual(mock_create_clientscope.call_count, 1) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0) + self.assertEqual(mock_update_clientscope.call_count, 0) + 
self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + self.assertEqual(mock_delete_clientscope.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_clientscope_idempotency(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'present', + 'name': 'my-new-kc-clientscope' + } + return_value_get_clientscope_by_name = [{ + "attributes": {}, + "id": "73fec1d2-f032-410c-8177-583104d01305", + "name": "my-new-kc-clientscope" + }] + + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 1) + self.assertEqual(mock_create_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0) + self.assertEqual(mock_update_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + 
self.assertEqual(mock_delete_clientscope.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_clientscope(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'absent', + 'name': 'my-new-kc-clientscope' + } + return_value_get_clientscope_by_name = [{ + "attributes": {}, + "id": "73fec1d2-f032-410c-8177-583104d01305", + "name": "my-new-kc-clientscope" + }] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 1) + self.assertEqual(mock_create_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0) + self.assertEqual(mock_update_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + self.assertEqual(mock_delete_clientscope.call_count, 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def 
test_delete_clientscope_idempotency(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'absent', + 'name': 'my-new-kc-clientscope' + } + return_value_get_clientscope_by_name = [None] + + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 1) + self.assertEqual(mock_create_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0) + self.assertEqual(mock_update_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + self.assertEqual(mock_delete_clientscope.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_clientscope_with_protocolmappers(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'present', + 'name': 
'my-new-kc-clientscope', + 'protocolMappers': [ + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'true', + 'id.token.claim': 'true', + 'access.token.claim': 'true', + 'userinfo.token.claim': 'true', + 'claim.name': 'protocol1', + }, + 'name': 'protocol1', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'false', + 'id.token.claim': 'false', + 'access.token.claim': 'false', + 'userinfo.token.claim': 'false', + 'claim.name': 'protocol2', + }, + 'name': 'protocol2', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'true', + 'id.token.claim': 'false', + 'access.token.claim': 'true', + 'userinfo.token.claim': 'false', + 'claim.name': 'protocol3', + }, + 'name': 'protocol3', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + ] + } + return_value_get_clientscope_by_name = [ + None, + { + "attributes": {}, + "id": "890ec72e-fe1d-4308-9f27-485ef7eaa182", + "name": "my-new-kc-clientscope", + "protocolMappers": [ + { + "config": { + "access.token.claim": "false", + "claim.name": "protocol2", + "full.path": "false", + "id.token.claim": "false", + "userinfo.token.claim": "false" + }, + "consentRequired": "false", + "id": "a7f19adb-cc58-41b1-94ce-782dc255139b", + "name": "protocol2", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "protocol3", + "full.path": "true", + "id.token.claim": "false", + "userinfo.token.claim": "false" + }, + "consentRequired": "false", + "id": "2103a559-185a-40f4-84ae-9ab311d5b812", + "name": "protocol3", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "protocol1", + "full.path": "true", + "id.token.claim": "true", + "userinfo.token.claim": "true" + }, + "consentRequired": 
"false", + "id": "bbf6390f-e95f-4c20-882b-9dad328363b9", + "name": "protocol1", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }] + }] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 2) + self.assertEqual(mock_create_clientscope.call_count, 1) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0) + self.assertEqual(mock_update_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + self.assertEqual(mock_delete_clientscope.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_update_clientscope_with_protocolmappers(self): + """Add a new authentication flow from copy of an other flow""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_username': 'admin', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'realm': 'realm-name', + 'state': 'present', + 'name': 'my-new-kc-clientscope', + 'protocolMappers': [ + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'false', + 'id.token.claim': 'false', + 'access.token.claim': 'false', + 'userinfo.token.claim': 'false', + 
'claim.name': 'protocol1_updated', + }, + 'name': 'protocol1', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'true', + 'id.token.claim': 'false', + 'access.token.claim': 'false', + 'userinfo.token.claim': 'false', + 'claim.name': 'protocol2_updated', + }, + 'name': 'protocol2', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + { + 'protocol': 'openid-connect', + 'config': { + 'full.path': 'true', + 'id.token.claim': 'true', + 'access.token.claim': 'true', + 'userinfo.token.claim': 'true', + 'claim.name': 'protocol3_updated', + }, + 'name': 'protocol3', + 'protocolMapper': 'oidc-group-membership-mapper', + }, + ] + } + return_value_get_clientscope_by_name = [{ + "attributes": {}, + "id": "890ec72e-fe1d-4308-9f27-485ef7eaa182", + "name": "my-new-kc-clientscope", + "protocolMappers": [ + { + "config": { + "access.token.claim": "true", + "claim.name": "groups", + "full.path": "true", + "id.token.claim": "true", + "userinfo.token.claim": "true" + }, + "consentRequired": "false", + "id": "e077007a-367a-444f-91ef-70277a1d868d", + "name": "groups", + "protocol": "saml", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "groups", + "full.path": "true", + "id.token.claim": "true", + "userinfo.token.claim": "true" + }, + "consentRequired": "false", + "id": "06c518aa-c627-43cc-9a82-d8467b508d34", + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "groups", + "full.path": "true", + "id.token.claim": "true", + "userinfo.token.claim": "true" + }, + "consentRequired": "false", + "id": "1d03c557-d97e-40f4-ac35-6cecd74ea70d", + "name": "groups", + "protocol": "wsfed", + "protocolMapper": "oidc-group-membership-mapper" + } + ] + }] + return_value_get_clientscope_by_clientscopeid = [{ + "attributes": {}, + 
"id": "2286032f-451e-44d5-8be6-e45aac7983a1", + "name": "my-new-kc-clientscope", + "protocolMappers": [ + { + "config": { + "access.token.claim": "true", + "claim.name": "protocol1_updated", + "full.path": "true", + "id.token.claim": "false", + "userinfo.token.claim": "false" + }, + "consentRequired": "false", + "id": "a7f19adb-cc58-41b1-94ce-782dc255139b", + "name": "protocol2", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "true", + "claim.name": "protocol1_updated", + "full.path": "true", + "id.token.claim": "false", + "userinfo.token.claim": "false" + }, + "consentRequired": "false", + "id": "2103a559-185a-40f4-84ae-9ab311d5b812", + "name": "protocol3", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + }, + { + "config": { + "access.token.claim": "false", + "claim.name": "protocol1_updated", + "full.path": "false", + "id.token.claim": "false", + "userinfo.token.claim": "false" + }, + "consentRequired": "false", + "id": "bbf6390f-e95f-4c20-882b-9dad328363b9", + "name": "protocol1", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper" + } + ] + }] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name, + get_clientscope_by_clientscopeid=return_value_get_clientscope_by_clientscopeid) \ + as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, + mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, + mock_update_clientscope_protocolmappers, + mock_create_clientscope_protocolmapper, mock_delete_clientscope): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + # Verify number of call on each mock + self.assertEqual(mock_get_clientscope_by_name.call_count, 1) + 
self.assertEqual(mock_create_clientscope.call_count, 0) + self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 1) + self.assertEqual(mock_update_clientscope.call_count, 1) + self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 3) + self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 3) + self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0) + self.assertEqual(mock_delete_clientscope.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 11cdb1b661b302aca06edb62b17f0f42a4daa609 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 19 Jul 2021 23:39:57 +0200 Subject: [PATCH 0451/3093] Next expected release is 3.5.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 640f4151d3..0f19d8d443 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.4.0 +version: 3.5.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 38e70ae0e4c6086c5470583ee4fc5c7e7684284d Mon Sep 17 00:00:00 2001 From: Max Bidlingmaier Date: Thu, 22 Jul 2021 16:55:09 +0200 Subject: [PATCH 0452/3093] github_repo: support GitHub on premise installations (#3039) * added possibility to manage on prem github * added changelog * fixed module tests * Update changelogs/fragments/3038-enhance_github_repo_api_url.yml Co-authored-by: Felix Fontein * Update plugins/modules/source_control/github/github_repo.py Co-authored-by: Felix Fontein Co-authored-by: Max Bidlingmaier Co-authored-by: Felix Fontein --- .../3038-enhance_github_repo_api_url.yml | 2 ++ .../source_control/github/github_repo.py | 19 +++++++++++++++---- .../source_control/github/test_github_repo.py | 18 ++++++++++++------ 3 files changed, 29 insertions(+), 10 deletions(-) create mode 100644 
changelogs/fragments/3038-enhance_github_repo_api_url.yml diff --git a/changelogs/fragments/3038-enhance_github_repo_api_url.yml b/changelogs/fragments/3038-enhance_github_repo_api_url.yml new file mode 100644 index 0000000000..19eda0f66d --- /dev/null +++ b/changelogs/fragments/3038-enhance_github_repo_api_url.yml @@ -0,0 +1,2 @@ +minor_changes: + - github_repo - add new option ``api_url`` to allow working with on premises installations (https://github.com/ansible-collections/community.general/pull/3038). diff --git a/plugins/modules/source_control/github/github_repo.py b/plugins/modules/source_control/github/github_repo.py index 587111fe5a..b5403c6a8d 100644 --- a/plugins/modules/source_control/github/github_repo.py +++ b/plugins/modules/source_control/github/github_repo.py @@ -66,6 +66,12 @@ options: - When I(state) is C(present), the repository will be created in the current user profile. type: str required: false + api_url: + description: + - URL to the GitHub API if not using github.com but you own instance. 
+ type: str + default: 'https://api.github.com' + version_added: "3.5.0" requirements: - PyGithub>=1.54 notes: @@ -119,11 +125,14 @@ except Exception: HAS_GITHUB_PACKAGE = False -def authenticate(username=None, password=None, access_token=None): +def authenticate(username=None, password=None, access_token=None, api_url=None): + if not api_url: + return None + if access_token: - return Github(base_url="https://api.github.com", login_or_token=access_token) + return Github(base_url=api_url, login_or_token=access_token) else: - return Github(base_url="https://api.github.com", login_or_token=username, password=password) + return Github(base_url=api_url, login_or_token=username, password=password) def create_repo(gh, name, organization=None, private=False, description='', check_mode=False): @@ -185,7 +194,8 @@ def delete_repo(gh, name, organization=None, check_mode=False): def run_module(params, check_mode=False): gh = authenticate( - username=params['username'], password=params['password'], access_token=params['access_token']) + username=params['username'], password=params['password'], access_token=params['access_token'], + api_url=params['api_url']) if params['state'] == "absent": return delete_repo( gh=gh, @@ -216,6 +226,7 @@ def main(): organization=dict(type='str', required=False, default=None), private=dict(type='bool', required=False, default=False), description=dict(type='str', required=False, default=''), + api_url=dict(type='str', required=False, default='https://api.github.com'), ) module = AnsibleModule( argument_spec=module_args, diff --git a/tests/unit/plugins/modules/source_control/github/test_github_repo.py b/tests/unit/plugins/modules/source_control/github/test_github_repo.py index 56ec9b7ec7..b3e4f9027f 100644 --- a/tests/unit/plugins/modules/source_control/github/test_github_repo.py +++ b/tests/unit/plugins/modules/source_control/github/test_github_repo.py @@ -159,7 +159,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": 
"Just for fun", "private": False, - "state": "present" + "state": "present", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], True) @@ -177,7 +178,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": "Just for fun", "private": True, - "state": "present" + "state": "present", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], True) self.assertEqual(result['repo']['private'], True) @@ -194,7 +196,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": "Just for fun", "private": True, - "state": "present" + "state": "present", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], True) self.assertEqual(result['repo']['private'], True) @@ -211,7 +214,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": "Just for fun", "private": False, - "state": "absent" + "state": "absent", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], True) @@ -227,7 +231,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": "Just for fun", "private": False, - "state": "absent" + "state": "absent", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], True) @@ -243,7 +248,8 @@ class TestGithubRepo(unittest.TestCase): "name": "myrepo", "description": "Just for fun", "private": True, - "state": "absent" + "state": "absent", + "api_url": "https://api.github.com" }) self.assertEqual(result['changed'], False) From 32e9a0c25066099887dbd21679d1c83ac85c9f45 Mon Sep 17 00:00:00 2001 From: Jeffrey van Pelt Date: Thu, 22 Jul 2021 22:55:07 +0200 Subject: [PATCH 0453/3093] Proxmox inventory: Added snapshots fact (#3044) * Added snapshots fact * Added changelog * Made linter happy again * Processed feedback * Fix changelog type * Punctuation ;-) * Punctuation ;-), take 2 --- .../3044-proxmox-inventory-snapshots.yml | 2 ++ plugins/inventory/proxmox.py | 15 +++++++++++++-- 
tests/unit/plugins/inventory/test_proxmox.py | 16 ++++++++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3044-proxmox-inventory-snapshots.yml diff --git a/changelogs/fragments/3044-proxmox-inventory-snapshots.yml b/changelogs/fragments/3044-proxmox-inventory-snapshots.yml new file mode 100644 index 0000000000..d6a324ea30 --- /dev/null +++ b/changelogs/fragments/3044-proxmox-inventory-snapshots.yml @@ -0,0 +1,2 @@ +minor_changes: + - proxmox inventory plugin - added snapshots to host facts (https://github.com/ansible-collections/community.general/pull/3044). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index c99962bcdd..f0f5a4e418 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -325,6 +325,15 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): status_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), status_key.lower())) self.inventory.set_variable(name, status_key, status) + def _get_vm_snapshots(self, node, vmid, vmtype, name): + ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid)) + + snapshots_key = 'snapshots' + snapshots_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), snapshots_key.lower())) + + snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current'] + self.inventory.set_variable(name, snapshots_key, snapshots) + def to_safe(self, word): '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups #> ProxmoxInventory.to_safe("foo-bar baz") @@ -393,9 +402,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): elif lxc['status'] == 'running': self.inventory.add_child(running_group, lxc['name']) - # get LXC config for facts + # get LXC config and snapshots for facts if self.get_option('want_facts'): self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name']) + 
self._get_vm_snapshots(node['node'], lxc['vmid'], 'lxc', lxc['name']) self._apply_constructable(lxc["name"], self.inventory.get_host(lxc['name']).get_vars()) @@ -417,9 +427,10 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): elif qemu['status'] == 'running': self.inventory.add_child(running_group, qemu['name']) - # get QEMU config for facts + # get QEMU config and snapshots for facts if self.get_option('want_facts'): self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name']) + self._get_vm_snapshots(node['node'], qemu['vmid'], 'qemu', qemu['name']) self._apply_constructable(qemu["name"], self.inventory.get_host(qemu['name']).get_vars()) diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py index 87d47a3cff..12927551f8 100644 --- a/tests/unit/plugins/inventory/test_proxmox.py +++ b/tests/unit/plugins/inventory/test_proxmox.py @@ -522,6 +522,21 @@ def get_json(url): } +def get_vm_snapshots(node, vmtype, vmid, name): + return [ + {"description": "", + "name": "clean", + "snaptime": 1000, + "vmstate": 0 + }, + {"name": "current", + "digest": "1234689abcdf", + "running": 0, + "description": "You are here!", + "parent": "clean" + }] + + def get_vm_status(node, vmtype, vmid, name): return True @@ -549,6 +564,7 @@ def test_populate(inventory, mocker): inventory._get_auth = mocker.MagicMock(side_effect=get_auth) inventory._get_json = mocker.MagicMock(side_effect=get_json) inventory._get_vm_status = mocker.MagicMock(side_effect=get_vm_status) + inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots) inventory.get_option = mocker.MagicMock(side_effect=get_option) inventory._populate() From 35e0a612179b3e96be456bb2b200de8c449c30f5 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 23 Jul 2021 18:16:14 +1200 Subject: [PATCH 0454/3093] missed composer when created commments in ignore files (#3051) --- 
tests/sanity/ignore-2.10.txt | 2 +- tests/sanity/ignore-2.11.txt | 2 +- tests/sanity/ignore-2.12.txt | 2 +- tests/sanity/ignore-2.9.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index d01c3762dc..6060d0f2d7 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -18,7 +18,7 @@ plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 -plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid +plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 2106c993d3..7313abf061 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -17,7 +17,7 @@ plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 -plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid 
+plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index a30ff2e4ed..2ef7ced11e 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -17,7 +17,7 @@ plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice plugins/modules/notification/grove.py validate-modules:invalid-argument-name # invalid alias - removed in 4.0.0 -plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid +plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt index 65611001b1..b2846cc863 100644 --- a/tests/sanity/ignore-2.9.txt +++ b/tests/sanity/ignore-2.9.txt @@ -11,7 +11,7 @@ plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:para plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter plugins/modules/clustering/consul/consul.py 
validate-modules:doc-missing-type plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter -plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid +plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid # invalid alias - removed in 5.0.0 From 9631de49396ab0706df4a38c1cbab6a779a125dc Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 23 Jul 2021 18:17:14 +1200 Subject: [PATCH 0455/3093] fixed doc in xfconf (#3050) --- plugins/modules/system/xfconf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index dc560e7775..e8aed0a759 100644 --- a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -14,7 +14,7 @@ author: - "Alexei Znamensky (@russoz)" short_description: Edit XFCE4 Configurations description: - - This module allows for the manipulation of Xfce 4 Configuration via + - This module allows for the manipulation of Xfce 4 Configuration with the help of xfconf-query. Please see the xfconf-query(1) man pages for more details. options: channel: From 99c28313e4fdfb4665cbaf2c6d35396c7fa1394d Mon Sep 17 00:00:00 2001 From: The Right Honourable Reverend Date: Sat, 24 Jul 2021 13:40:08 -0500 Subject: [PATCH 0456/3093] proxmox inventory plugin: Easy fix (#3052) * Don't know why this works but it does. Plugin was crashing on this line on Python 3.9.2 deployed on qemu image with debian bullseye. It doesn't crash anymore. 
* Create 3052_proxmox_inventory_plugin.yml * Update changelogs/fragments/3052_proxmox_inventory_plugin.yml Co-authored-by: Ajpantuso Co-authored-by: Ajpantuso --- changelogs/fragments/3052_proxmox_inventory_plugin.yml | 2 ++ plugins/inventory/proxmox.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3052_proxmox_inventory_plugin.yml diff --git a/changelogs/fragments/3052_proxmox_inventory_plugin.yml b/changelogs/fragments/3052_proxmox_inventory_plugin.yml new file mode 100644 index 0000000000..dfd4dddea9 --- /dev/null +++ b/changelogs/fragments/3052_proxmox_inventory_plugin.yml @@ -0,0 +1,2 @@ +bugfixes: + - proxmox inventory plugin - fixed plugin failure when a ``qemu`` guest has no ``template`` key (https://github.com/ansible-collections/community.general/pull/3052). diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index f0f5a4e418..33a564f333 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -413,7 +413,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower())) self.inventory.add_group(node_qemu_group) for qemu in self._get_qemu_per_node(node['node']): - if qemu['template']: + if qemu.get('template'): continue self.inventory.add_host(qemu['name']) From d057b2e3b262af437b0115622069a60580919173 Mon Sep 17 00:00:00 2001 From: Max Bidlingmaier Date: Sat, 24 Jul 2021 21:13:09 +0200 Subject: [PATCH 0457/3093] gitlab_group_members/gitlab_project_members - fix pagination issue (#3054) * Fix * fixed linter stuff * typo in section name of changlog fragment Co-authored-by: Max Bidlingmaier --- .../3041-fix_gitlab_group_members_gitlab_project_mambers.yml | 3 +++ plugins/modules/source_control/gitlab/gitlab_group_members.py | 2 +- .../modules/source_control/gitlab/gitlab_project_members.py | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) 
create mode 100644 changelogs/fragments/3041-fix_gitlab_group_members_gitlab_project_mambers.yml diff --git a/changelogs/fragments/3041-fix_gitlab_group_members_gitlab_project_mambers.yml b/changelogs/fragments/3041-fix_gitlab_group_members_gitlab_project_mambers.yml new file mode 100644 index 0000000000..d1be8b78d3 --- /dev/null +++ b/changelogs/fragments/3041-fix_gitlab_group_members_gitlab_project_mambers.yml @@ -0,0 +1,3 @@ +bugfixes: + - gitlab_group_members - fixes issue when gitlab group has more then 20 members, pagination problem (https://github.com/ansible-collections/community.general/issues/3041). + - gitlab_project_members - fixes issue when gitlab group has more then 20 members, pagination problem (https://github.com/ansible-collections/community.general/issues/3041). diff --git a/plugins/modules/source_control/gitlab/gitlab_group_members.py b/plugins/modules/source_control/gitlab/gitlab_group_members.py index 8a3da2a41b..50779e6445 100644 --- a/plugins/modules/source_control/gitlab/gitlab_group_members.py +++ b/plugins/modules/source_control/gitlab/gitlab_group_members.py @@ -109,7 +109,7 @@ class GitLabGroup(object): # get all members in a group def get_members_in_a_group(self, gitlab_group_id): group = self._gitlab.groups.get(gitlab_group_id) - return group.members.list() + return group.members.list(all=True) # check if the user is a member of the group def is_user_a_member(self, members, gitlab_user_id): diff --git a/plugins/modules/source_control/gitlab/gitlab_project_members.py b/plugins/modules/source_control/gitlab/gitlab_project_members.py index 8e23dca426..0ae8f4b25c 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project_members.py +++ b/plugins/modules/source_control/gitlab/gitlab_project_members.py @@ -130,7 +130,7 @@ class GitLabProjectMembers(object): # get all members in a project def get_members_in_a_project(self, gitlab_project_id): project = self._gitlab.projects.get(gitlab_project_id) - return project.members.list() + 
return project.members.list(all=True) # check if the user is a member of the project def is_user_a_member(self, members, gitlab_user_id): From 31189e96458f199d1cd0a1c384057a2a85a5ff8d Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sat, 24 Jul 2021 16:10:56 -0400 Subject: [PATCH 0458/3093] archive - fixing determination of archive root when root is '/' (#3036) * Initial commit * Fixing units and path joins * Ensuring paths are consistently ordered * Adding changelog fragment * Using os.path.join to ensure trailing slashes are present * optimizing use of root in add_targets * Applying initial review suggestions --- .../fragments/3036-archive-root-path-fix.yml | 4 + plugins/modules/files/archive.py | 95 +++++++++---------- .../plugins/modules/files/test_archive.py | 73 ++++++++++++++ 3 files changed, 122 insertions(+), 50 deletions(-) create mode 100644 changelogs/fragments/3036-archive-root-path-fix.yml create mode 100644 tests/unit/plugins/modules/files/test_archive.py diff --git a/changelogs/fragments/3036-archive-root-path-fix.yml b/changelogs/fragments/3036-archive-root-path-fix.yml new file mode 100644 index 0000000000..fa460f82b9 --- /dev/null +++ b/changelogs/fragments/3036-archive-root-path-fix.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - archive - fixing archive root determination when longest common root is ``/`` + (https://github.com/ansible-collections/community.general/pull/3036). 
diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 91a8f688f5..30c4de5aa8 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -204,7 +204,6 @@ else: LZMA_IMP_ERR = format_exc() HAS_LZMA = False -PATH_SEP = to_bytes(os.sep) PY27 = version_info[0:2] >= (2, 7) STATE_ABSENT = 'absent' @@ -213,16 +212,12 @@ STATE_COMPRESSED = 'compress' STATE_INCOMPLETE = 'incomplete' -def _to_bytes(s): - return to_bytes(s, errors='surrogate_or_strict') +def common_path(paths): + empty = b'' if paths and isinstance(paths[0], six.binary_type) else '' - -def _to_native(s): - return to_native(s, errors='surrogate_or_strict') - - -def _to_native_ascii(s): - return to_native(s, errors='surrogate_or_strict', encoding='ascii') + return os.path.join( + os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty + ) def expand_paths(paths): @@ -239,10 +234,6 @@ def expand_paths(paths): return expanded_path, is_globby -def is_archive(path): - return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE) - - def legacy_filter(path, exclusion_patterns): return matches_exclusion_patterns(path, exclusion_patterns) @@ -251,6 +242,26 @@ def matches_exclusion_patterns(path, exclusion_patterns): return any(fnmatch(path, p) for p in exclusion_patterns) +def is_archive(path): + return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE) + + +def strip_prefix(prefix, string): + return string[len(prefix):] if string.startswith(prefix) else string + + +def _to_bytes(s): + return to_bytes(s, errors='surrogate_or_strict') + + +def _to_native(s): + return to_native(s, errors='surrogate_or_strict') + + +def _to_native_ascii(s): + return to_native(s, errors='surrogate_or_strict', encoding='ascii') + + @six.add_metaclass(abc.ABCMeta) class Archive(object): def __init__(self, module): @@ -266,7 +277,6 @@ class Archive(object): 
self.destination_state = STATE_ABSENT self.errors = [] self.file = None - self.root = b'' self.successes = [] self.targets = [] self.not_found = [] @@ -275,7 +285,7 @@ class Archive(object): self.expanded_paths, has_globs = expand_paths(paths) self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0] - self.paths = list(set(self.expanded_paths) - set(self.expanded_exclude_paths)) + self.paths = sorted(set(self.expanded_paths) - set(self.expanded_exclude_paths)) if not self.paths: module.fail_json( @@ -285,6 +295,8 @@ class Archive(object): msg='Error, no source paths were found' ) + self.root = common_path(self.paths) + if not self.must_archive: self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1]) @@ -298,6 +310,9 @@ class Archive(object): msg='Error, must specify "dest" when archiving multiple files or trees' ) + if self.remove: + self._check_removal_safety() + self.original_size = self.destination_size() def add(self, path, archive_name): @@ -310,9 +325,8 @@ class Archive(object): def add_single_target(self, path): if self.format in ('zip', 'tar'): - archive_name = re.sub(br'^%s' % re.escape(self.root), b'', path) self.open() - self.add(path, archive_name) + self.add(path, strip_prefix(self.root, path)) self.close() self.destination_state = STATE_ARCHIVED else: @@ -333,25 +347,18 @@ class Archive(object): def add_targets(self): self.open() try: - match_root = re.compile(br'^%s' % re.escape(self.root)) for target in self.targets: if os.path.isdir(target): for directory_path, directory_names, file_names in os.walk(target, topdown=True): - if not directory_path.endswith(PATH_SEP): - directory_path += PATH_SEP - for directory_name in directory_names: - full_path = directory_path + directory_name - archive_name = match_root.sub(b'', full_path) - self.add(full_path, archive_name) + full_path = os.path.join(directory_path, directory_name) + self.add(full_path, strip_prefix(self.root, full_path)) for file_name in 
file_names: - full_path = directory_path + file_name - archive_name = match_root.sub(b'', full_path) - self.add(full_path, archive_name) + full_path = os.path.join(directory_path, file_name) + self.add(full_path, strip_prefix(self.root, full_path)) else: - archive_name = match_root.sub(b'', target) - self.add(target, archive_name) + self.add(target, strip_prefix(self.root, target)) except Exception as e: if self.format in ('zip', 'tar'): archive_format = self.format @@ -384,26 +391,6 @@ class Archive(object): def find_targets(self): for path in self.paths: - # Use the longest common directory name among all the files as the archive root path - if self.root == b'': - self.root = os.path.dirname(path) + PATH_SEP - else: - for i in range(len(self.root)): - if path[i] != self.root[i]: - break - - if i < len(self.root): - self.root = os.path.dirname(self.root[0:i + 1]) - - self.root += PATH_SEP - # Don't allow archives to be created anywhere within paths to be removed - if self.remove and os.path.isdir(path): - prefix = path if path.endswith(PATH_SEP) else path + PATH_SEP - if self.destination.startswith(prefix): - self.module.fail_json( - path=', '.join(self.paths), - msg='Error, created archive can not be contained in source paths when remove=true' - ) if not os.path.lexists(path): self.not_found.append(path) else: @@ -470,6 +457,14 @@ class Archive(object): 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths], } + def _check_removal_safety(self): + for path in self.paths: + if os.path.isdir(path) and self.destination.startswith(os.path.join(path, b'')): + self.module.fail_json( + path=b', '.join(self.paths), + msg='Error, created archive can not be contained in source paths when remove=true' + ) + def _open_compressed_file(self, path, mode): f = None if self.format == 'gz': diff --git a/tests/unit/plugins/modules/files/test_archive.py b/tests/unit/plugins/modules/files/test_archive.py new file mode 100644 index 0000000000..9fae51e7b7 --- 
/dev/null +++ b/tests/unit/plugins/modules/files/test_archive.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.tests.unit.compat.mock import Mock, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args +from ansible_collections.community.general.plugins.modules.files.archive import get_archive, common_path + + +class TestArchive(ModuleTestCase): + def setUp(self): + super(TestArchive, self).setUp() + + self.mock_os_path_isdir = patch('os.path.isdir') + self.os_path_isdir = self.mock_os_path_isdir.start() + + def tearDown(self): + self.os_path_isdir = self.mock_os_path_isdir.stop() + + def test_archive_removal_safety(self): + set_module_args( + dict( + path=['/foo', '/bar', '/baz'], + dest='/foo/destination.tgz', + remove=True + ) + ) + + module = AnsibleModule( + argument_spec=dict( + path=dict(type='list', elements='path', required=True), + format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']), + dest=dict(type='path'), + exclude_path=dict(type='list', elements='path', default=[]), + exclusion_patterns=dict(type='list', elements='path'), + force_archive=dict(type='bool', default=False), + remove=dict(type='bool', default=False), + ), + add_file_common_args=True, + supports_check_mode=True, + ) + + self.os_path_isdir.side_effect = [True, False, False, True] + + module.fail_json = Mock() + + archive = get_archive(module) + + module.fail_json.assert_called_once_with( + path=b', '.join(archive.paths), + msg='Error, created archive can not be contained in source paths when remove=true' + ) + + +PATHS = ( + ([], ''), + (['/'], '/'), + ([b'/'], b'/'), + (['/foo', '/bar', '/baz', 
'/foobar', '/barbaz', '/foo/bar'], '/'), + ([b'/foo', b'/bar', b'/baz', b'/foobar', b'/barbaz', b'/foo/bar'], b'/'), + (['/foo/bar/baz', '/foo/bar'], '/foo/'), + (['/foo/bar/baz', '/foo/bar/'], '/foo/bar/'), +) + + +@pytest.mark.parametrize("paths,root", PATHS) +def test_common_path(paths, root): + assert common_path(paths) == root From 20f46f76697d96fa2752d46e240f477069abd35c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 25 Jul 2021 08:30:46 +1200 Subject: [PATCH 0459/3093] xfconf_info - new module (#3045) * xfconf_info initial commit * Update plugins/modules/system/xfconf_info.py Co-authored-by: Felix Fontein * Update plugins/modules/system/xfconf_info.py Co-authored-by: Felix Fontein * Update plugins/modules/system/xfconf_info.py Co-authored-by: Felix Fontein * added register to all examples * Update plugins/modules/system/xfconf_info.py Co-authored-by: Felix Fontein --- plugins/modules/system/xfconf_info.py | 190 ++++++++++++++++++ plugins/modules/xfconf_info.py | 1 + .../modules/system/test_xfconf_info.py | 171 ++++++++++++++++ 3 files changed, 362 insertions(+) create mode 100644 plugins/modules/system/xfconf_info.py create mode 120000 plugins/modules/xfconf_info.py create mode 100644 tests/unit/plugins/modules/system/test_xfconf_info.py diff --git a/plugins/modules/system/xfconf_info.py b/plugins/modules/system/xfconf_info.py new file mode 100644 index 0000000000..9cef821071 --- /dev/null +++ b/plugins/modules/system/xfconf_info.py @@ -0,0 +1,190 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2021, Alexei Znamensky +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: xfconf_info +author: + - "Alexei Znamensky (@russoz)" +short_description: Retrieve XFCE4 configurations +version_added: 3.5.0 +description: + - This module allows retrieving 
Xfce 4 configurations with the help of C(xfconf-query). +options: + channel: + description: + - > + A Xfconf preference channel is a top-level tree key, inside of the + Xfconf repository that corresponds to the location for which all + application properties/keys are stored. + - If not provided, the module will list all available channels. + type: str + property: + description: + - > + A Xfce preference key is an element in the Xfconf repository + that corresponds to an application preference. + - If provided, then I(channel) is required. + - If not provided and a I(channel) is provided, then the module will list all available properties in that I(channel). + type: str +notes: + - See man xfconf-query(1) for more details. +''' + +EXAMPLES = """ +- name: Get list of all available channels + community.general.xfconf_info: {} + register: result + +- name: Get list of all properties in a specific channel + community.general.xfconf_info: + channel: xsettings + register: result + +- name: Retrieve the DPI value + community.general.xfconf_info: + channel: xsettings + property: /Xft/DPI + register: result + +- name: Get workspace names (4) + community.general.xfconf_info: + channel: xfwm4 + property: /general/workspace_names + register: result +""" + +RETURN = ''' + channels: + description: + - List of available channels. + - Returned when the module receives no parameter at all. + returned: success + type: list + elements: str + sample: + - xfce4-desktop + - displays + - xsettings + - xfwm4 + properties: + description: + - List of available properties for a specific channel. + - Returned by passed only the I(channel) parameter to the module. 
+ returned: success + type: list + elements: str + sample: + - /Gdk/WindowScalingFactor + - /Gtk/ButtonImages + - /Gtk/CursorThemeSize + - /Gtk/DecorationLayout + - /Gtk/FontName + - /Gtk/MenuImages + - /Gtk/MonospaceFontName + - /Net/DoubleClickTime + - /Net/IconThemeName + - /Net/ThemeName + - /Xft/Antialias + - /Xft/Hinting + - /Xft/HintStyle + - /Xft/RGBA + is_array: + description: + - Flag indicating whether the property is an array or not. + returned: success + type: bool + value: + description: + - The value of the property. Empty if the property is of array type. + returned: success + type: str + sample: Monospace 10 + value_array: + description: + - The array value of the property. Empty if the property is not of array type. + returned: success + type: list + elements: str + sample: + - Main + - Work + - Tmp +''' + +from ansible_collections.community.general.plugins.module_utils.module_helper import CmdModuleHelper, ArgFormat + + +class XFConfException(Exception): + pass + + +class XFConfInfo(CmdModuleHelper): + module = dict( + argument_spec=dict( + channel=dict(type='str'), + property=dict(type='str'), + ), + required_by=dict( + property=['channel'] + ), + ) + + command = 'xfconf-query' + command_args_formats = dict( + channel=dict(fmt=['--channel', '{0}']), + property=dict(fmt=['--property', '{0}']), + _list_arg=dict(fmt="--list", style=ArgFormat.BOOLEAN), + ) + check_rc = True + + def __init_module__(self): + self.vars.set("_list_arg", False, output=False) + self.vars.set("is_array", False) + + def process_command_output(self, rc, out, err): + result = out.rstrip() + if "Value is an array with" in result: + result = result.split("\n") + result.pop(0) + result.pop(0) + self.vars.is_array = True + + return result + + def _process_list_properties(self, rc, out, err): + return out.splitlines() + + def _process_list_channels(self, rc, out, err): + lines = out.splitlines() + lines.pop(0) + lines = [s.lstrip() for s in lines] + return lines + + def 
__run__(self): + self.vars._list_arg = not (bool(self.vars.channel) and bool(self.vars.property)) + output = 'value' + proc = self.process_command_output + if self.vars.channel is None: + output = 'channels' + proc = self._process_list_channels + elif self.vars.property is None: + output = 'properties' + proc = self._process_list_properties + result = self.run_command(params=('_list_arg', 'channel', 'property'), process_output=proc) + if not self.vars._list_arg and self.vars.is_array: + output = "value_array" + self.vars.set(output, result) + + +def main(): + xfconf = XFConfInfo() + xfconf.run() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/xfconf_info.py b/plugins/modules/xfconf_info.py new file mode 120000 index 0000000000..5bf95b50b5 --- /dev/null +++ b/plugins/modules/xfconf_info.py @@ -0,0 +1 @@ +system/xfconf_info.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/system/test_xfconf_info.py b/tests/unit/plugins/modules/system/test_xfconf_info.py new file mode 100644 index 0000000000..528622d0ee --- /dev/null +++ b/tests/unit/plugins/modules/system/test_xfconf_info.py @@ -0,0 +1,171 @@ +# Author: Alexei Znamensky (russoz@gmail.com) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from ansible_collections.community.general.plugins.modules.system import xfconf_info + +import pytest + +TESTED_MODULE = xfconf_info.__name__ + + +@pytest.fixture +def patch_xfconf_info(mocker): + """ + Function used for mocking some parts of redhat_subscribtion module + """ + mocker.patch('ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.get_bin_path', + return_value='/testbin/xfconf-query') + + +TEST_CASES = [ + [ + {'channel': 'xfwm4', 'property': '/general/inactive_opacity'}, + { + 'id': 'test_simple_property_get', + 'run_command.calls': [ + ( + 
# Calling of following command will be asserted + ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'], + # Was return code checked? + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, + # Mock of returned code, stdout and stderr + (0, '100\n', '',), + ), + ], + 'is_array': False, + 'value': '100', + } + ], + [ + {'channel': 'xfwm4', 'property': '/general/i_dont_exist'}, + { + 'id': 'test_simple_property_get_nonexistent', + 'run_command.calls': [ + ( + # Calling of following command will be asserted + ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/i_dont_exist'], + # Was return code checked? + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, + # Mock of returned code, stdout and stderr + (1, '', 'Property "/general/i_dont_exist" does not exist on channel "xfwm4".\n',), + ), + ], + 'is_array': False, + } + ], + [ + {'property': '/general/i_dont_exist'}, + { + 'id': 'test_property_no_channel', + 'run_command.calls': [], + } + ], + [ + {'channel': 'xfwm4', 'property': '/general/workspace_names'}, + { + 'id': 'test_property_get_array', + 'run_command.calls': [ + ( + # Calling of following command will be asserted + ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'], + # Was return code checked? + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, + # Mock of returned code, stdout and stderr + (0, 'Value is an array with 3 items:\n\nMain\nWork\nTmp\n', '',), + ), + ], + 'is_array': True, + 'value_array': ['Main', 'Work', 'Tmp'], + }, + ], + [ + {}, + { + 'id': 'get_channels', + 'run_command.calls': [ + ( + # Calling of following command will be asserted + ['/testbin/xfconf-query', '--list'], + # Was return code checked? 
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, + # Mock of returned code, stdout and stderr + (0, 'Channels:\n a\n b\n c\n', '',), + ), + ], + 'is_array': False, + 'channels': ['a', 'b', 'c'], + }, + ], + [ + {'channel': 'xfwm4'}, + { + 'id': 'get_properties', + 'run_command.calls': [ + ( + # Calling of following command will be asserted + ['/testbin/xfconf-query', '--list', '--channel', 'xfwm4'], + # Was return code checked? + {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True}, + # Mock of returned code, stdout and stderr + (0, '/general/wrap_cycle\n/general/wrap_layout\n/general/wrap_resistance\n/general/wrap_windows\n' + '/general/wrap_workspaces\n/general/zoom_desktop\n', '',), + ), + ], + 'is_array': False, + 'properties': [ + '/general/wrap_cycle', + '/general/wrap_layout', + '/general/wrap_resistance', + '/general/wrap_windows', + '/general/wrap_workspaces', + '/general/zoom_desktop', + ], + }, + ], +] +TEST_CASES_IDS = [item[1]['id'] for item in TEST_CASES] + + +@pytest.mark.parametrize('patch_ansible_module, testcase', + TEST_CASES, + ids=TEST_CASES_IDS, + indirect=['patch_ansible_module']) +@pytest.mark.usefixtures('patch_ansible_module') +def test_xfconf_info(mocker, capfd, patch_xfconf_info, testcase): + """ + Run unit tests for test cases listen in TEST_CASES + """ + + # Mock function used for running commands first + call_results = [item[2] for item in testcase['run_command.calls']] + mock_run_command = mocker.patch( + 'ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.run_command', + side_effect=call_results) + + # Try to run test case + with pytest.raises(SystemExit): + xfconf_info.main() + + out, err = capfd.readouterr() + results = json.loads(out) + print("testcase =\n%s" % testcase) + print("results =\n%s" % results) + + for conditional_test_result in ('value_array', 'value', 'is_array', 'properties', 'channels'): + if conditional_test_result in testcase: + 
assert conditional_test_result in results, "'{0}' not found in {1}".format(conditional_test_result, results) + assert results[conditional_test_result] == testcase[conditional_test_result], \ + "'{0}': '{1}' != '{2}'".format(conditional_test_result, results[conditional_test_result], testcase[conditional_test_result]) + + assert mock_run_command.call_count == len(testcase['run_command.calls']) + if mock_run_command.call_count: + call_args_list = [(item[0][0], item[1]) for item in mock_run_command.call_args_list] + expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']] + print("call args list =\n%s" % call_args_list) + print("expected args list =\n%s" % expected_call_args_list) + assert call_args_list == expected_call_args_list From dc3e16113d09e32fb750632b79ef224fccc8feaf Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 25 Jul 2021 10:00:10 +0200 Subject: [PATCH 0460/3093] Add BOTMETA extra sanity test (#3064) * Add BOTMETA sanity test. * Make compile with Python 2.6. 
--- .github/BOTMETA.yml | 7 +- tests/sanity/extra/botmeta.json | 11 ++ tests/sanity/extra/botmeta.py | 184 ++++++++++++++++++++++++++++++++ 3 files changed, 199 insertions(+), 3 deletions(-) create mode 100644 tests/sanity/extra/botmeta.json create mode 100755 tests/sanity/extra/botmeta.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 8df7297720..55f34d3041 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -159,11 +159,13 @@ files: $module_utils/redfish_utils.py: maintainers: $team_redfish labels: redfish_utils - $module_utils/remote_management/lxca/common.py: navalkp prabhosa + $module_utils/remote_management/lxca/common.py: + maintainers: navalkp prabhosa $module_utils/scaleway.py: maintainers: $team_scaleway labels: cloud scaleway - $module_utils/storage/hpe3par/hpe3par.py: farhan7500 gautamphegde + $module_utils/storage/hpe3par/hpe3par.py: + maintainers: farhan7500 gautamphegde $module_utils/utm_utils.py: maintainers: $team_e_spirit labels: utm_utils @@ -508,7 +510,6 @@ files: $modules/notification/osx_say.py: maintainers: ansible mpdehaan labels: _osx_say - deprecated: true $modules/notification/bearychat.py: maintainers: tonyseek $modules/notification/campfire.py: diff --git a/tests/sanity/extra/botmeta.json b/tests/sanity/extra/botmeta.json new file mode 100644 index 0000000000..cba49c90cd --- /dev/null +++ b/tests/sanity/extra/botmeta.json @@ -0,0 +1,11 @@ +{ + "include_symlinks": false, + "prefixes": [ + ".github/BOTMETA.yml" + ], + "output": "path-line-column-message", + "requirements": [ + "PyYAML", + "voluptuous==0.12.1" + ] +} diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py new file mode 100755 index 0000000000..e8ea819394 --- /dev/null +++ b/tests/sanity/extra/botmeta.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +"""Check BOTMETA file.""" +from __future__ import 
(absolute_import, division, print_function) +__metaclass__ = type + +import ast +import os +import re +import sys + +import yaml + +from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA +from voluptuous import Required, Schema, Invalid +from voluptuous.humanize import humanize_error + + +REPORT_MISSING_MAINTAINERS = False + +FILENAME = '.github/BOTMETA.yml' + +LIST_ENTRIES = frozenset(('supershipit', 'maintainers', 'labels', 'keywords', 'notify', 'ignore')) + +AUTHOR_REGEX = re.compile(r'^\w.*\(@([\w-]+)\)(?![\w.])$') + + +def read_authors(filename): + data = {} + try: + with open(filename, 'rb') as b_module_data: + M = ast.parse(b_module_data.read()) + + for child in M.body: + if isinstance(child, ast.Assign): + for t in child.targets: + try: + theid = t.id + except AttributeError: + # skip errors can happen when trying to use the normal code + continue + + if theid == 'DOCUMENTATION': + if isinstance(child.value, ast.Dict): + data = ast.literal_eval(child.value) + else: + data = yaml.safe_load(child.value.s) + + except Exception as e: + print('%s:%d:%d: Cannot load DOCUMENTATION: %s' % (filename, 0, 0, e)) + return [] + + author = data.get('author') or [] + if isinstance(author, str): + author = [author] + return author + + +def validate(filename, filedata): + if filename.startswith('plugins/doc_fragments/'): + return + # Compile lis tof all active and inactive maintainers + all_maintainers = filedata['maintainers'] + filedata['ignore'] + if not all_maintainers: + if REPORT_MISSING_MAINTAINERS: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename)) + return + if filename.startswith('plugins/filter/'): + return + maintainers = read_authors(filename) + for maintainer in maintainers: + m = AUTHOR_REGEX.match(maintainer) + if m: + maintainer = m.group(1) + if maintainer not in all_maintainers: + msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( + maintainer, 
filename, ', '.join(all_maintainers)) + if REPORT_MISSING_MAINTAINERS: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) + + +def main(): + """Main entry point.""" + paths = sys.argv[1:] or sys.stdin.read().splitlines() + paths = [path for path in paths if path.endswith('/aliases')] + + try: + with open(FILENAME, 'rb') as f: + botmeta = yaml.safe_load(f) + except yaml.error.MarkedYAMLError as ex: + print('%s:%d:%d: YAML load failed: %s' % (FILENAME, ex.context_mark.line + + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex)))) + return + except Exception as ex: # pylint: disable=broad-except + print('%s:%d:%d: YAML load failed: %s' % + (FILENAME, 0, 0, re.sub(r'\s+', ' ', str(ex)))) + return + + # Validate schema + + MacroSchema = Schema({ + (str): str, + }, extra=PREVENT_EXTRA) + + FilesSchema = Schema({ + (str): { + ('supershipit'): str, + ('support'): Any('community'), + ('maintainers'): str, + ('labels'): str, + ('keywords'): str, + ('notify'): str, + ('ignore'): str, + }, + }, extra=PREVENT_EXTRA) + + schema = Schema({ + ('automerge'): bool, + ('macros'): MacroSchema, + ('files'): FilesSchema, + }, extra=PREVENT_EXTRA) + + try: + schema(botmeta) + except MultipleInvalid as ex: + for error in ex.errors: + # No way to get line/column numbers + print('%s:%d:%d: %s' % (FILENAME, 0, 0, humanize_error(botmeta, error))) + return + + # Preprocess (substitute macros, convert to lists) + macros = botmeta.get('macros') or {} + macro_re = re.compile(r'\$([a-zA-Z_]+)') + + def convert_macros(text, macros): + def f(m): + return macros[m.group(1)] + + return macro_re.sub(f, text) + + files = {} + try: + for file, filedata in (botmeta.get('files') or {}).items(): + file = convert_macros(file, macros) + filedata = dict((k, convert_macros(v, macros)) for k, v in filedata.items()) + files[file] = filedata + for k, v in filedata.items(): + if k in LIST_ENTRIES: + filedata[k] = v.split() + except KeyError as e: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Found unknown macro 
%s' % e)) + return + + # Scan all files + for dirpath, dirnames, filenames in os.walk('plugins/'): + for file in filenames: + if file.endswith('.pyc'): + continue + filename = os.path.join(dirpath, file) + if os.path.islink(filename): + continue + if os.path.isfile(filename): + matching_files = [] + for file, filedata in files.items(): + if filename.startswith(file): + matching_files.append((file, filedata)) + if not matching_files: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Did not find any entry for %s' % filename)) + + matching_files.sort(key=lambda kv: kv[0]) + filedata = dict() + for k in LIST_ENTRIES: + filedata[k] = [] + for dummy, data in matching_files: + for k, v in data.items(): + if k in LIST_ENTRIES: + v = filedata[k] + v + filedata[k] = v + validate(filename, filedata) + + +if __name__ == '__main__': + main() From d54d2fa4a6ae305b452246b6255dd0d1aaf6bb12 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 25 Jul 2021 21:14:30 +1200 Subject: [PATCH 0461/3093] xfconf - deprecate get state in favour of the xfconf_info module (#3049) * Deprecate get state in favour of the xfconf_info module * added changelog fragment * added comments in ignore files * Update changelogs/fragments/3049-xfconf-deprecate-get.yaml bummer, forgot that Co-authored-by: Felix Fontein * Update plugins/modules/system/xfconf.py Co-authored-by: Felix Fontein * Update plugins/modules/system/xfconf.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/3049-xfconf-deprecate-get.yaml | 2 ++ plugins/modules/system/xfconf.py | 5 +++++ tests/sanity/ignore-2.10.txt | 2 +- tests/sanity/ignore-2.11.txt | 2 +- tests/sanity/ignore-2.12.txt | 2 +- 5 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/3049-xfconf-deprecate-get.yaml diff --git a/changelogs/fragments/3049-xfconf-deprecate-get.yaml b/changelogs/fragments/3049-xfconf-deprecate-get.yaml new file mode 100644 index 
0000000000..359b39301e --- /dev/null +++ b/changelogs/fragments/3049-xfconf-deprecate-get.yaml @@ -0,0 +1,2 @@ +deprecated_features: + - xfconf - deprecate the ``get`` state. The new module ``xfconf_info`` should be used instead (https://github.com/ansible-collections/community.general/pull/3049). diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index e8aed0a759..001613fc23 100644 --- a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -48,6 +48,7 @@ options: type: str description: - The action to take upon the property/value. + - State C(get) is deprecated and will be removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead. choices: [ get, present, absent ] default: "present" force_array: @@ -225,6 +226,10 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): def state_get(self): self.vars.value = self.vars.previous_value self.vars.previous_value = None + self.module.deprecate( + msg="State 'get' is deprecated. 
Please use the module community.general.xfconf_info instead", + version="5.0.0", collection_name="community.general" + ) def state_absent(self): if not self.module.check_mode: diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt index 6060d0f2d7..9e6c483071 100644 --- a/tests/sanity/ignore-2.10.txt +++ b/tests/sanity/ignore-2.10.txt @@ -47,7 +47,7 @@ plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spe plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path -plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice +plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice # state get removed in 5.0.0 plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 7313abf061..79a90853f2 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -46,7 +46,7 @@ plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spe plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path -plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice +plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice # state get removed in 5.0.0 
plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 2ef7ced11e..4d1d5a783c 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -46,6 +46,6 @@ plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spe plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc # param removed in 4.0.0 plugins/modules/system/ssh_config.py use-argspec-type-path # Required since module uses other methods to specify path -plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice +plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice # state get removed in 5.0.0 plugins/modules/system/xfconf.py validate-modules:return-syntax-error plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path From 20db4fc5604472f9014d670974e33e7e9d704276 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 25 Jul 2021 23:53:38 +1200 Subject: [PATCH 0462/3093] replace NBSP (non-blocking space) character with a regular SPACE char (#3071) --- plugins/modules/monitoring/stackdriver.py | 6 +++--- .../targets/alternatives/tasks/main.yml | 4 ++-- .../targets/alternatives/tasks/setup_test.yml | 4 ++-- .../targets/alternatives/tasks/test.yml | 8 ++++---- .../alternatives/tasks/tests_set_priority.yml | 6 +++--- .../integration/targets/filesystem/tasks/main.yml | 4 ++-- .../integration/targets/filesystem/tasks/setup.yml | 2 +- .../integration/targets/npm/tasks/no_bin_links.yml | 10 +++++----- tests/integration/targets/npm/tasks/test.yml | 10 +++++----- 
.../targets/setup_openldap/tasks/main.yml | 4 ++-- .../targets/setup_postgresql_db/tasks/main.yml | 2 +- .../targets/supervisorctl/tasks/install_Linux.yml | 2 +- .../targets/supervisorctl/tasks/main.yml | 4 ++-- .../supervisorctl/tasks/start_supervisord.yml | 2 +- .../supervisorctl/tasks/stop_supervisord.yml | 2 +- .../targets/supervisorctl/tasks/test.yml | 2 +- .../targets/supervisorctl/tasks/test_start.yml | 14 +++++++------- .../targets/supervisorctl/tasks/test_stop.yml | 2 +- .../supervisorctl/templates/supervisord.conf | 2 +- 19 files changed, 45 insertions(+), 45 deletions(-) diff --git a/plugins/modules/monitoring/stackdriver.py b/plugins/modules/monitoring/stackdriver.py index 8eacdbfe49..fa6bacb951 100644 --- a/plugins/modules/monitoring/stackdriver.py +++ b/plugins/modules/monitoring/stackdriver.py @@ -46,16 +46,16 @@ options: msg: type: str description: - - The contents of the annotation message, in plain text.  Limited to 256 characters. Required for annotation. + - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation. annotated_by: type: str description: - - The person or robot who the annotation should be attributed to. + - The person or robot who the annotation should be attributed to. default: "Ansible" level: type: str description: - - one of INFO/WARN/ERROR, defaults to INFO if not supplied.  May affect display. + - one of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display. 
choices: ['INFO', 'WARN', 'ERROR'] default: 'INFO' instance_id: diff --git a/tests/integration/targets/alternatives/tasks/main.yml b/tests/integration/targets/alternatives/tasks/main.yml index 3dc799df3e..3503afe1b3 100644 --- a/tests/integration/targets/alternatives/tasks/main.yml +++ b/tests/integration/targets/alternatives/tasks/main.yml @@ -12,7 +12,7 @@ ############## # Test parameters: - # link parameter present / absent ('with_link' variable) + # link parameter present / absent ('with_link' variable) # with / without alternatives defined in alternatives file ('with_alternatives' variable) # auto / manual ('mode' variable) @@ -56,7 +56,7 @@ path: '{{ item }}' state: absent with_items: - - '{{ alternatives_dir }}/dummy' + - '{{ alternatives_dir }}/dummy' - file: path: '/usr/bin/dummy{{ item }}' diff --git a/tests/integration/targets/alternatives/tasks/setup_test.yml b/tests/integration/targets/alternatives/tasks/setup_test.yml index 6a55c6ba7e..4475514745 100644 --- a/tests/integration/targets/alternatives/tasks/setup_test.yml +++ b/tests/integration/targets/alternatives/tasks/setup_test.yml @@ -1,11 +1,11 @@ - template: src: dummy_alternative - dest: '{{ alternatives_dir }}/dummy' + dest: '{{ alternatives_dir }}/dummy' owner: root group: root mode: '0644' when: with_alternatives or ansible_os_family != 'RedHat' - file: - path: '{{ alternatives_dir }}/dummy' + path: '{{ alternatives_dir }}/dummy' state: absent when: not with_alternatives and ansible_os_family == 'RedHat' diff --git a/tests/integration/targets/alternatives/tasks/test.yml b/tests/integration/targets/alternatives/tasks/test.yml index e5cf2d99cc..92721a995d 100644 --- a/tests/integration/targets/alternatives/tasks/test.yml +++ b/tests/integration/targets/alternatives/tasks/test.yml @@ -5,7 +5,7 @@ - name: set alternative (using link parameter) alternatives: name: dummy - path: '/usr/bin/dummy{{ item }}' + path: '/usr/bin/dummy{{ item }}' link: '/usr/bin/dummy' register: alternative @@ -20,7 +20,7 
@@ - name: set alternative (without link parameter) alternatives: name: dummy - path: '/usr/bin/dummy{{ item }}' + path: '/usr/bin/dummy{{ item }}' register: alternative - name: check expected command was executed @@ -40,11 +40,11 @@ - 'cmd.stdout == "dummy" ~ item' - name: 'check mode (manual: alternatives file existed, it has been updated)' - shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"' + shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"' when: ansible_os_family != 'RedHat' or with_alternatives or item != 1 - name: 'check mode (auto: alternatives file didn''t exist, it has been created)' - shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^auto$"' + shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^auto$"' when: ansible_os_family == 'RedHat' and not with_alternatives and item == 1 - name: check that alternative has been updated diff --git a/tests/integration/targets/alternatives/tasks/tests_set_priority.yml b/tests/integration/targets/alternatives/tasks/tests_set_priority.yml index 7e27817583..ab79f62a3c 100644 --- a/tests/integration/targets/alternatives/tasks/tests_set_priority.yml +++ b/tests/integration/targets/alternatives/tasks/tests_set_priority.yml @@ -3,7 +3,7 @@ name: dummy path: '/usr/bin/dummy{{ item }}' link: /usr/bin/dummy - priority: '{{ 60 + item|int }}' + priority: '{{ 60 + item|int }}' register: alternative - name: execute dummy command @@ -11,13 +11,13 @@ register: cmd - name: check if link group is in manual mode - shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"' + shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"' - name: check expected command was executed assert: that: - 'alternative is changed' - - 'cmd.stdout == "dummy{{ item }}"' + - 'cmd.stdout == "dummy{{ item }}"' - name: check that alternative has been updated command: "grep -Pzq '/bin/dummy{{ item }}\\n{{ 60 + item|int }}' '{{ alternatives_dir }}/dummy'" diff --git 
a/tests/integration/targets/filesystem/tasks/main.yml b/tests/integration/targets/filesystem/tasks/main.yml index 4b2c5bdc2a..24259107fd 100644 --- a/tests/integration/targets/filesystem/tasks/main.yml +++ b/tests/integration/targets/filesystem/tasks/main.yml @@ -5,9 +5,9 @@ #################################################################### - ansible.builtin.debug: - msg: '{{ role_name }}' + msg: '{{ role_name }}' - ansible.builtin.debug: - msg: '{{ role_path|basename }}' + msg: '{{ role_path|basename }}' - import_tasks: setup.yml - include_vars: "{{ lookup('first_found', search) }}" diff --git a/tests/integration/targets/filesystem/tasks/setup.yml b/tests/integration/targets/filesystem/tasks/setup.yml index 9ca4b983d0..597692e25a 100644 --- a/tests/integration/targets/filesystem/tasks/setup.yml +++ b/tests/integration/targets/filesystem/tasks/setup.yml @@ -100,7 +100,7 @@ - name: "Install dosfstools and lvm2 (Linux)" ansible.builtin.package: - name: '{{ item }}' + name: '{{ item }}' with_items: - dosfstools - lvm2 diff --git a/tests/integration/targets/npm/tasks/no_bin_links.yml b/tests/integration/targets/npm/tasks/no_bin_links.yml index fdbc88c4eb..5c89f70517 100644 --- a/tests/integration/targets/npm/tasks/no_bin_links.yml +++ b/tests/integration/targets/npm/tasks/no_bin_links.yml @@ -6,7 +6,7 @@ - vars: # sample: node-v8.2.0-linux-x64.tar.xz - node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin' + node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin' package: 'ncp' block: - shell: npm --version @@ -20,12 +20,12 @@ - name: 'Install simple package with no_bin_links disabled' npm: path: '{{ remote_dir }}' - executable: '{{ node_path }}/npm' + executable: '{{ node_path }}/npm' state: present name: '{{ package }}' no_bin_links: false environment: - PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' register: npm_install_no_bin_links_disabled - name: 'Make sure .bin folder has been created' @@ -41,12 +41,12 @@ - name: 
'Install simple package with no_bin_links enabled' npm: path: '{{ remote_dir }}' - executable: '{{ node_path }}/npm' + executable: '{{ node_path }}/npm' state: present name: '{{ package }}' no_bin_links: true environment: - PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' register: npm_install_no_bin_links_enabled - name: 'Make sure .bin folder has not been created' diff --git a/tests/integration/targets/npm/tasks/test.yml b/tests/integration/targets/npm/tasks/test.yml index ea2dd5b9f9..d254710f0b 100644 --- a/tests/integration/targets/npm/tasks/test.yml +++ b/tests/integration/targets/npm/tasks/test.yml @@ -5,7 +5,7 @@ - vars: # sample: node-v8.2.0-linux-x64.tar.xz - node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin' + node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin' package: 'iconv-lite' block: - shell: npm --version @@ -19,11 +19,11 @@ - name: 'Install simple package without dependency' npm: path: '{{ remote_dir }}' - executable: '{{ node_path }}/npm' + executable: '{{ node_path }}/npm' state: present name: '{{ package }}' environment: - PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' register: npm_install - assert: @@ -38,7 +38,7 @@ state: present name: '{{ package }}' environment: - PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' register: npm_reinstall - name: Check there is no change @@ -59,7 +59,7 @@ state: present name: '{{ package }}' environment: - PATH: '{{ node_path }}:{{ ansible_env.PATH }}' + PATH: '{{ node_path }}:{{ ansible_env.PATH }}' register: npm_fix_install - name: Check result is changed and successful diff --git a/tests/integration/targets/setup_openldap/tasks/main.yml b/tests/integration/targets/setup_openldap/tasks/main.yml index 4fd27058a6..dcf2cc7834 100644 --- a/tests/integration/targets/setup_openldap/tasks/main.yml +++ b/tests/integration/targets/setup_openldap/tasks/main.yml @@ -50,8 +50,8 
@@ - name: Copy initial config ldif file become: True copy: - src: 'files/{{ item }}' - dest: '/tmp/{{ item }}' + src: 'files/{{ item }}' + dest: '/tmp/{{ item }}' owner: root group: root mode: '0644' diff --git a/tests/integration/targets/setup_postgresql_db/tasks/main.yml b/tests/integration/targets/setup_postgresql_db/tasks/main.yml index f535ecdcf9..33e9024ba1 100644 --- a/tests/integration/targets/setup_postgresql_db/tasks/main.yml +++ b/tests/integration/targets/setup_postgresql_db/tasks/main.yml @@ -100,7 +100,7 @@ when: ansible_os_family == "RedHat" and ansible_service_mgr != "systemd" - name: Initialize postgres (Debian) - shell: . /usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main + shell: . /usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main args: creates: /etc/postgresql/{{ pg_ver }}/ when: ansible_os_family == 'Debian' diff --git a/tests/integration/targets/supervisorctl/tasks/install_Linux.yml b/tests/integration/targets/supervisorctl/tasks/install_Linux.yml index af1790ccad..ef2dab5eae 100644 --- a/tests/integration/targets/supervisorctl/tasks/install_Linux.yml +++ b/tests/integration/targets/supervisorctl/tasks/install_Linux.yml @@ -5,6 +5,6 @@ - name: disable supervisord system service service: - name: '{{ supervisor_service_name }}' + name: '{{ supervisor_service_name }}' state: stopped enabled: no diff --git a/tests/integration/targets/supervisorctl/tasks/main.yml b/tests/integration/targets/supervisorctl/tasks/main.yml index a6ad10bdad..2a7ecdcfc0 100644 --- a/tests/integration/targets/supervisorctl/tasks/main.yml +++ b/tests/integration/targets/supervisorctl/tasks/main.yml @@ -21,7 +21,7 @@ - '{{ ansible_os_family }}.yml' - 'defaults.yml' - - include_tasks: '{{ item }}' + - include_tasks: '{{ item }}' with_first_found: - files: - 'install_{{ ansible_distribution }}.yml' # CentOS @@ 
-39,7 +39,7 @@ when: ansible_os_family != 'RedHat' or ansible_distribution_major_version|int > 6 always: - - include_tasks: '{{ item }}' + - include_tasks: '{{ item }}' when: ansible_os_family != 'RedHat' or ansible_distribution_major_version|int > 6 with_first_found: - files: diff --git a/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml b/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml index 9067a27322..1354bc8632 100644 --- a/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml +++ b/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml @@ -1,5 +1,5 @@ - name: start supervisord - command: 'supervisord -c {{ remote_dir }}/supervisord.conf' + command: 'supervisord -c {{ remote_dir }}/supervisord.conf' - name: wait_for supervisord ansible.builtin.wait_for: diff --git a/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml b/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml index 1bf48f2139..4da09da222 100644 --- a/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml +++ b/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml @@ -1,2 +1,2 @@ - name: stop supervisord - command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} shutdown" + command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} shutdown" diff --git a/tests/integration/targets/supervisorctl/tasks/test.yml b/tests/integration/targets/supervisorctl/tasks/test.yml index bfd2a06e17..9b43c21dec 100644 --- a/tests/integration/targets/supervisorctl/tasks/test.yml +++ b/tests/integration/targets/supervisorctl/tasks/test.yml @@ -1,7 +1,7 @@ - name: generate supervisor configuration template: src: supervisord.conf - dest: '{{ remote_dir }}/supervisord.conf' + dest: '{{ remote_dir 
}}/supervisord.conf' - block: - import_tasks: start_supervisord.yml diff --git a/tests/integration/targets/supervisorctl/tasks/test_start.yml b/tests/integration/targets/supervisorctl/tasks/test_start.yml index cc56ac5a99..c05a7dd400 100644 --- a/tests/integration/targets/supervisorctl/tasks/test_start.yml +++ b/tests/integration/targets/supervisorctl/tasks/test_start.yml @@ -2,7 +2,7 @@ supervisorctl: name: 'pys:py1' state: started - config: '{{ remote_dir }}/supervisord.conf' + config: '{{ remote_dir }}/supervisord.conf' register: result when: credentials.username == '' @@ -16,7 +16,7 @@ register: result_with_auth when: credentials.username != '' -- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status" +- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status" - name: check that service is started assert: @@ -32,7 +32,7 @@ supervisorctl: name: pys:py1 state: started - config: '{{ remote_dir }}/supervisord.conf' + config: '{{ remote_dir }}/supervisord.conf' register: result when: credentials.username == '' @@ -65,7 +65,7 @@ supervisorctl: name: 'pys:py1' state: started - config: '{{ remote_dir }}/supervisord.conf' + config: '{{ remote_dir }}/supervisord.conf' register: result when: credentials.username == '' @@ -110,7 +110,7 @@ supervisorctl: name: 'pys:py1' state: started - config: '{{ remote_dir }}/supervisord_not_here.conf' + config: '{{ remote_dir }}/supervisord_not_here.conf' register: result failed_when: result is success or result is not failed @@ -118,7 +118,7 @@ supervisorctl: name: 'invalid' state: started - config: '{{ remote_dir }}/supervisord.conf' + config: '{{ remote_dir }}/supervisord.conf' register: result failed_when: result is skip or (result is success or result is not failed) when: credentials.username == '' @@ -127,7 
+127,7 @@ supervisorctl: name: 'invalid' state: started - config: '{{ remote_dir }}/supervisord.conf' + config: '{{ remote_dir }}/supervisord.conf' username: '{{ credentials.username }}wrong_creds' password: '{{ credentials.password }}same_here' register: result diff --git a/tests/integration/targets/supervisorctl/tasks/test_stop.yml b/tests/integration/targets/supervisorctl/tasks/test_stop.yml index 5c76a6813c..729f0ebd42 100644 --- a/tests/integration/targets/supervisorctl/tasks/test_stop.yml +++ b/tests/integration/targets/supervisorctl/tasks/test_stop.yml @@ -18,7 +18,7 @@ register: result_with_auth when: credentials.username != '' -- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status" +- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status" - name: check that service is stopped assert: diff --git a/tests/integration/targets/supervisorctl/templates/supervisord.conf b/tests/integration/targets/supervisorctl/templates/supervisord.conf index 2f80e02b72..28b6ac09f9 100644 --- a/tests/integration/targets/supervisorctl/templates/supervisord.conf +++ b/tests/integration/targets/supervisorctl/templates/supervisord.conf @@ -36,7 +36,7 @@ password = {{ credentials.password }} {% endif %} [supervisorctl] -serverurl=unix://{{ supervisord_sock_path.path }}/supervisord.sock +serverurl=unix://{{ supervisord_sock_path.path }}/supervisord.sock [rpcinterface:supervisor] supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface From 95ceb53676b5359d03a1173fe5b92759c54cc9c3 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 26 Jul 2021 08:03:45 +1200 Subject: [PATCH 0463/3093] taiga_issue - bugfix + pythonification (#3067) * taiga_issue - bugfix + pythonification * added changelog 
fragment --- changelogs/fragments/3067-taiga-bugfix.yaml | 2 + .../modules/web_infrastructure/taiga_issue.py | 50 +++++++++---------- 2 files changed, 26 insertions(+), 26 deletions(-) create mode 100644 changelogs/fragments/3067-taiga-bugfix.yaml diff --git a/changelogs/fragments/3067-taiga-bugfix.yaml b/changelogs/fragments/3067-taiga-bugfix.yaml new file mode 100644 index 0000000000..dfd3b531b0 --- /dev/null +++ b/changelogs/fragments/3067-taiga-bugfix.yaml @@ -0,0 +1,2 @@ +bugfixes: + - taiga - some constructs in the module fixed to work also in Python 3 (https://github.com/ansible-collections/community.general/pull/3067). diff --git a/plugins/modules/web_infrastructure/taiga_issue.py b/plugins/modules/web_infrastructure/taiga_issue.py index f05550276e..729757590d 100644 --- a/plugins/modules/web_infrastructure/taiga_issue.py +++ b/plugins/modules/web_infrastructure/taiga_issue.py @@ -129,7 +129,7 @@ except ImportError: TAIGA_MODULE_IMPORTED = False -def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority, +def manage_issue(taiga_host, project_name, issue_subject, issue_priority, issue_status, issue_type, issue_severity, issue_description, issue_attachment, issue_attachment_description, issue_tags, state, check_mode=False): @@ -157,34 +157,34 @@ def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority username = getenv('TAIGA_USERNAME') password = getenv('TAIGA_PASSWORD') if not any([username, password]): - return (False, changed, "Missing credentials", {}) + return False, changed, "Missing credentials", {} api.auth(username=username, password=password) user_id = api.me().id - project_list = filter(lambda x: x.name == project_name, api.projects.list(member=user_id)) + project_list = list(filter(lambda x: x.name == project_name, api.projects.list(member=user_id))) if len(project_list) != 1: - return (False, changed, "Unable to find project %s" % project_name, {}) + return False, changed, "Unable to find project 
%s" % project_name, {} project = project_list[0] project_id = project.id - priority_list = filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id)) + priority_list = list(filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id))) if len(priority_list) != 1: - return (False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {}) + return False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {} priority_id = priority_list[0].id - status_list = filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id)) + status_list = list(filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id))) if len(status_list) != 1: - return (False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {}) + return False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {} status_id = status_list[0].id - type_list = filter(lambda x: x.name == issue_type, project.list_issue_types()) + type_list = list(filter(lambda x: x.name == issue_type, project.list_issue_types())) if len(type_list) != 1: - return (False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {}) + return False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {} type_id = type_list[0].id - severity_list = filter(lambda x: x.name == issue_severity, project.list_severities()) + severity_list = list(filter(lambda x: x.name == issue_severity, project.list_severities())) if len(severity_list) != 1: - return (False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {}) + return False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {} severity_id = severity_list[0].id issue = { @@ -199,7 +199,7 @@ def manage_issue(module, 
taiga_host, project_name, issue_subject, issue_priority } # An issue is identified by the project_name, the issue_subject and the issue_type - matching_issue_list = filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues()) + matching_issue_list = list(filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues())) matching_issue_list_len = len(matching_issue_list) if matching_issue_list_len == 0: @@ -209,16 +209,17 @@ def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority changed = True if not check_mode: # Create the issue - new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, description=issue_description) + new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, + description=issue_description) if issue_attachment: new_issue.attach(issue_attachment, description=issue_attachment_description) issue["attachment"] = issue_attachment issue["attachment_description"] = issue_attachment_description - return (True, changed, "Issue created", issue) + return True, changed, "Issue created", issue else: # If does not exist, do nothing - return (True, changed, "Issue does not exist", {}) + return True, changed, "Issue does not exist", {} elif matching_issue_list_len == 1: # The issue exists in the project @@ -228,19 +229,19 @@ def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority if not check_mode: # Delete the issue matching_issue_list[0].delete() - return (True, changed, "Issue deleted", {}) + return True, changed, "Issue deleted", {} else: # Do nothing - return (True, changed, "Issue already exists", {}) + return True, changed, "Issue already exists", {} else: # More than 1 matching issue - return (False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {}) + return False, changed, "More than one issue with 
subject %s in project %s" % (issue_subject, project_name), {} except TaigaException as exc: msg = "An exception happened: %s" % to_native(exc) - return (False, changed, msg, {}) + return False, changed, msg, {} def main(): @@ -257,15 +258,13 @@ def main(): attachment=dict(type='path', required=False, default=None), attachment_description=dict(type='str', required=False, default=""), tags=dict(required=False, default=[], type='list', elements='str'), - state=dict(type='str', required=False, choices=['present', 'absent'], - default='present'), + state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), ), supports_check_mode=True ) if not TAIGA_MODULE_IMPORTED: - module.fail_json(msg=missing_required_lib("python-taiga"), - exception=TAIGA_IMP_ERR) + module.fail_json(msg=missing_required_lib("python-taiga"), exception=TAIGA_IMP_ERR) taiga_host = module.params['taiga_host'] project_name = module.params['project'] @@ -285,7 +284,6 @@ def main(): state = module.params['state'] return_status, changed, msg, issue_attr_dict = manage_issue( - module, taiga_host, project_name, issue_subject, @@ -301,7 +299,7 @@ def main(): check_mode=module.check_mode ) if return_status: - if len(issue_attr_dict) > 0: + if issue_attr_dict: module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict) else: module.exit_json(changed=changed, msg=msg) From c8b2d7c1e5682dc64f538953cbafa70d72b7e5c2 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 26 Jul 2021 08:04:23 +1200 Subject: [PATCH 0464/3093] supervisorctl - bugfix + using ansible validation + pythonification (#3068) * supervisorctl - bugfix + pythonification * added changelog fragment * rollback check on the binpath --- .../fragments/3068-supervisorctl-bugfix.yaml | 4 +++ .../web_infrastructure/supervisorctl.py | 35 +++++++++---------- 2 files changed, 21 insertions(+), 18 deletions(-) create mode 100644 
changelogs/fragments/3068-supervisorctl-bugfix.yaml diff --git a/changelogs/fragments/3068-supervisorctl-bugfix.yaml b/changelogs/fragments/3068-supervisorctl-bugfix.yaml new file mode 100644 index 0000000000..6571e211b6 --- /dev/null +++ b/changelogs/fragments/3068-supervisorctl-bugfix.yaml @@ -0,0 +1,4 @@ +bugfixes: + - supervisorctl - state ``signalled`` was not working (https://github.com/ansible-collections/community.general/pull/3068). +minor_changes: + - supervisorctl - using standard Ansible mechanism to validate ``signalled`` state required parameter (https://github.com/ansible-collections/community.general/pull/3068). diff --git a/plugins/modules/web_infrastructure/supervisorctl.py b/plugins/modules/web_infrastructure/supervisorctl.py index 5524beea98..f44af0befe 100644 --- a/plugins/modules/web_infrastructure/supervisorctl.py +++ b/plugins/modules/web_infrastructure/supervisorctl.py @@ -101,16 +101,20 @@ from ansible.module_utils.basic import AnsibleModule, is_executable def main(): arg_spec = dict( name=dict(type='str', required=True), - config=dict(required=False, type='path'), - server_url=dict(type='str', required=False), - username=dict(type='str', required=False), - password=dict(type='str', required=False, no_log=True), - supervisorctl_path=dict(required=False, type='path'), + config=dict(type='path'), + server_url=dict(type='str'), + username=dict(type='str'), + password=dict(type='str', no_log=True), + supervisorctl_path=dict(type='path'), state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']), - signal=dict(type='str', required=False) + signal=dict(type='str'), ) - module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True, + required_if=[('state', 'signalled', ['signal'])], + ) name = module.params['name'] is_group = False @@ -146,9 +150,6 @@ def main(): if password: 
supervisorctl_args.extend(['-p', password]) - if state == 'signalled' and not signal: - module.fail_json(msg="State 'signalled' requires a 'signal' value") - def run_supervisorctl(cmd, name=None, **kwargs): args = list(supervisorctl_args) # copy the master args args.append(cmd) @@ -231,26 +232,24 @@ def main(): if module.check_mode: module.exit_json(changed=True) run_supervisorctl('reread', check_rc=True) - rc, out, err = run_supervisorctl('add', name) + dummy, out, dummy = run_supervisorctl('add', name) if '%s: added process group' % name in out: module.exit_json(changed=True, name=name, state=state) else: module.fail_json(msg=out, name=name, state=state) + # from this point onwards, if there are no matching processes, module cannot go on. + if len(processes) == 0: + module.fail_json(name=name, msg="ERROR (no such process)") + if state == 'started': - if len(processes) == 0: - module.fail_json(name=name, msg="ERROR (no such process)") take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started') if state == 'stopped': - if len(processes) == 0: - module.fail_json(name=name, msg="ERROR (no such process)") take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped') if state == 'signalled': - if len(processes) == 0: - module.fail_json(name=name, msg="ERROR (no such process)") - take_action_on_processes(processes, lambda s: s in ('RUNNING'), "signal %s" % signal, 'signalled') + take_action_on_processes(processes, lambda s: s in ('RUNNING',), "signal %s" % signal, 'signalled') if __name__ == '__main__': From ac0388100259e6cfb054454658e78351bad6fa65 Mon Sep 17 00:00:00 2001 From: Yvan Watchman Date: Mon, 26 Jul 2021 06:33:01 +0200 Subject: [PATCH 0465/3093] Succesful clone from proxmox_kvm should return new vm id, not id from cloned vm. (#3034) * Clone sucess should return new vm id, not id from cloned vm. 
* add changelog fragment * Update changelogs/fragments/3034-promox-kvm-return-new-id.yaml Co-authored-by: Felix Fontein Co-authored-by: Yvan E. Watchman Co-authored-by: Felix Fontein --- changelogs/fragments/3034-promox-kvm-return-new-id.yaml | 3 +++ plugins/modules/cloud/misc/proxmox_kvm.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3034-promox-kvm-return-new-id.yaml diff --git a/changelogs/fragments/3034-promox-kvm-return-new-id.yaml b/changelogs/fragments/3034-promox-kvm-return-new-id.yaml new file mode 100644 index 0000000000..8cbd769a04 --- /dev/null +++ b/changelogs/fragments/3034-promox-kvm-return-new-id.yaml @@ -0,0 +1,3 @@ +--- +bugfixes: + - proxmox_kvm - fix result of clone, now returns ``newid`` instead of ``vmid`` (https://github.com/ansible-collections/community.general/pull/3034). diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 939c72a126..159968ce6e 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -1303,7 +1303,7 @@ def main(): if update: module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid)) elif clone is not None: - module.exit_json(changed=True, vmid=vmid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) + module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid)) else: module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) except Exception as e: From 21d5668c97306225e046e9da5ce8a0e623161eec Mon Sep 17 00:00:00 2001 From: quidame Date: Mon, 26 Jul 2021 11:42:13 +0200 Subject: [PATCH 0466/3093] java_cert: import certificate+key bundle from pkcs12 (#3080) * import certificate+key bundle from pkcs12 * fix typo/syntax * fix variable name * fix passwords order and improve error handling * add changelog fragment * enter keystore 
pass only once if keystore already exists, and twice at creation * nomalize tests - Replace `command` tasks by dedicated (community.crypto) modules. - Add spaces around jinja2 variable names. - Call modules by their FQCNs. * Add tests to check keystore has a private key fix tests for RedHat/CentOS < 8 (run openssl command as an alternative to `openssl_pkcs12` module) --- ...3080-java_cert-2460-import_private_key.yml | 4 + plugins/modules/system/java_cert.py | 61 ++++++- .../targets/java_cert/defaults/main.yml | 4 +- .../targets/java_cert/tasks/main.yml | 48 +++--- .../targets/java_cert/tasks/state_change.yml | 161 +++++++++++++----- 5 files changed, 203 insertions(+), 75 deletions(-) create mode 100644 changelogs/fragments/3080-java_cert-2460-import_private_key.yml diff --git a/changelogs/fragments/3080-java_cert-2460-import_private_key.yml b/changelogs/fragments/3080-java_cert-2460-import_private_key.yml new file mode 100644 index 0000000000..465c484673 --- /dev/null +++ b/changelogs/fragments/3080-java_cert-2460-import_private_key.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - java_cert - import private key as well as public certificate from PKCS#12 + (https://github.com/ansible-collections/community.general/issues/2460). diff --git a/plugins/modules/system/java_cert.py b/plugins/modules/system/java_cert.py index 1c507f9277..515d5269c9 100644 --- a/plugins/modules/system/java_cert.py +++ b/plugins/modules/system/java_cert.py @@ -11,15 +11,15 @@ DOCUMENTATION = r''' --- module: java_cert -short_description: Uses keytool to import/remove key from java keystore (cacerts) +short_description: Uses keytool to import/remove certificate to/from java keystore (cacerts) description: - - This is a wrapper module around keytool, which can be used to import/remove - certificates from a given java keystore. + - This is a wrapper module around keytool, which can be used to import certificates + and optionally private keys to a given java keystore, or remove them from it. 
options: cert_url: description: - Basic URL to fetch SSL certificate from. - - One of C(cert_url) or C(cert_path) is required to load certificate. + - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. type: str cert_port: description: @@ -30,7 +30,7 @@ options: cert_path: description: - Local path to load certificate from. - - One of C(cert_url) or C(cert_path) is required to load certificate. + - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. type: path cert_alias: description: @@ -46,6 +46,10 @@ options: pkcs12_path: description: - Local path to load PKCS12 keystore from. + - Unlike C(cert_url) and C(cert_path), the PKCS12 keystore embeds the private key matching + the certificate, and is used to import both the certificate and its private key into the + java keystore. + - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate. type: path pkcs12_password: description: @@ -267,6 +271,7 @@ def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, passwo export_cmd = [ executable, "-list", + "-noprompt", "-keystore", pkcs_file, "-alias", @@ -336,6 +341,44 @@ def _download_cert_url(module, executable, url, port): return fetch_out +def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, + keystore_path, keystore_pass, keystore_alias, keystore_type): + ''' Import pkcs12 from path into keystore located on + keystore_path as alias ''' + import_cmd = [ + executable, + "-importkeystore", + "-noprompt", + "-srcstoretype", + "pkcs12", + "-srckeystore", + pkcs12_path, + "-srcalias", + pkcs12_alias, + "-destkeystore", + keystore_path, + "-destalias", + keystore_alias + ] + import_cmd += _get_keystore_type_keytool_parameters(keystore_type) + + secret_data = "%s\n%s" % (keystore_pass, pkcs12_pass) + # Password of a new keystore must be entered twice, for confirmation + if not os.path.exists(keystore_path): + 
secret_data = "%s\n%s" % (keystore_pass, secret_data) + + # Use local certificate from local path and import it to a java keystore + (import_rc, import_out, import_err) = module.run_command(import_cmd, data=secret_data, check_rc=False) + + diff = {'before': '\n', 'after': '%s\n' % keystore_alias} + if import_rc == 0 and os.path.exists(keystore_path): + module.exit_json(changed=True, msg=import_out, + rc=import_rc, cmd=import_cmd, stdout=import_out, + error=import_err, diff=diff) + else: + module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err) + + def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert): ''' Import certificate from path into keystore located on keystore_path as alias ''' @@ -522,8 +565,12 @@ def main(): # The existing certificate must first be deleted before we insert the correct one delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type, exit_after=False) - import_cert_path(module, executable, new_certificate, keystore_path, - keystore_pass, cert_alias, keystore_type, trust_cacert) + if pkcs12_path: + import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias, + keystore_path, keystore_pass, cert_alias, keystore_type) + else: + import_cert_path(module, executable, new_certificate, keystore_path, + keystore_pass, cert_alias, keystore_type, trust_cacert) module.exit_json(changed=False) diff --git a/tests/integration/targets/java_cert/defaults/main.yml b/tests/integration/targets/java_cert/defaults/main.yml index 6416f306af..8e63493600 100644 --- a/tests/integration/targets/java_cert/defaults/main.yml +++ b/tests/integration/targets/java_cert/defaults/main.yml @@ -5,9 +5,11 @@ test_keystore2_path: "{{ output_dir }}/keystore2.jks" test_keystore2_password: changeit test_cert_path: "{{ output_dir }}/cert.pem" test_key_path: "{{ output_dir }}/key.pem" +test_csr_path: "{{ output_dir }}/req.csr" test_cert2_path: "{{ 
output_dir }}/cert2.pem" test_key2_path: "{{ output_dir }}/key2.pem" +test_csr2_path: "{{ output_dir }}/req2.csr" test_pkcs_path: "{{ output_dir }}/cert.p12" test_pkcs2_path: "{{ output_dir }}/cert2.p12" test_ssl: setupSSLServer.py -test_ssl_port: 21500 \ No newline at end of file +test_ssl_port: 21500 diff --git a/tests/integration/targets/java_cert/tasks/main.yml b/tests/integration/targets/java_cert/tasks/main.yml index 8172db5c15..20550740da 100644 --- a/tests/integration/targets/java_cert/tasks/main.yml +++ b/tests/integration/targets/java_cert/tasks/main.yml @@ -7,32 +7,34 @@ block: - name: prep pkcs12 file - copy: src="{{ test_pkcs12_path }}" dest="{{output_dir}}/{{ test_pkcs12_path }}" + ansible.builtin.copy: + src: "{{ test_pkcs12_path }}" + dest: "{{ output_dir }}/{{ test_pkcs12_path }}" - name: import pkcs12 - java_cert: - pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" + community.general.java_cert: + pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" pkcs12_password: changeit pkcs12_alias: default cert_alias: default - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present register: result_success - name: verify success - assert: + ansible.builtin.assert: that: - result_success is successful - name: import pkcs12 with wrong password - java_cert: - pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" + community.general.java_cert: + pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" pkcs12_password: wrong_pass pkcs12_alias: default cert_alias: default_new - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present @@ -40,16 +42,16 @@ register: result_wrong_pass - name: verify fail with wrong import password - assert: + ansible.builtin.assert: that: - result_wrong_pass is failed - 
name: test fail on mutually exclusive params - java_cert: + community.general.java_cert: cert_path: ca.crt - pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}" + pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" cert_alias: default - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present @@ -57,26 +59,26 @@ register: result_excl_params - name: verify failed exclusive params - assert: + ansible.builtin.assert: that: - result_excl_params is failed - name: test fail on missing required params - java_cert: - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + community.general.java_cert: + keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore state: absent ignore_errors: true register: result_missing_required_param - name: verify failed missing required params - assert: + ansible.builtin.assert: that: - result_missing_required_param is failed - name: delete object based on cert_alias parameter - java_cert: - keystore_path: "{{output_dir}}/{{ test_keystore_path }}" + community.general.java_cert: + keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore cert_alias: default state: absent @@ -84,15 +86,15 @@ register: result_alias_deleted - name: verify object successfully deleted - assert: + ansible.builtin.assert: that: - result_alias_deleted is successful - - name: include extended test suite + - name: include extended test suite import_tasks: state_change.yml - name: cleanup environment - file: + ansible.builtin.file: path: "{{ item }}" state: absent loop: @@ -101,7 +103,9 @@ - "{{ test_keystore2_path }}" - "{{ test_cert_path }}" - "{{ test_key_path }}" + - "{{ test_csr_path }}" - "{{ test_cert2_path }}" - "{{ test_key2_path }}" + - "{{ test_csr2_path }}" - "{{ test_pkcs_path }}" - - "{{ test_pkcs2_path }}" \ No newline at end of file + - "{{ 
test_pkcs2_path }}" diff --git a/tests/integration/targets/java_cert/tasks/state_change.yml b/tests/integration/targets/java_cert/tasks/state_change.yml index 8cee41106f..38ef62cd0f 100644 --- a/tests/integration/targets/java_cert/tasks/state_change.yml +++ b/tests/integration/targets/java_cert/tasks/state_change.yml @@ -1,36 +1,96 @@ --- -- name: Generate the self signed cert used as a place holder to create the java keystore - command: openssl req -x509 -newkey rsa:4096 -keyout {{ test_key_path }} -out {{ test_cert_path }} -days 365 -nodes -subj '/CN=localhost' - args: - creates: "{{ test_key_path }}" +# +# Prepare X509 and PKCS#12 materials +# + +- name: Create private keys + community.crypto.openssl_privatekey: + path: "{{ item }}" + mode: "u=rw,go=" + loop: + - "{{ test_key_path }}" + - "{{ test_key2_path }}" + +- name: Generate CSR for self-signed certificate used as a placeholder to create the java keystore + community.crypto.openssl_csr: + path: "{{ test_csr_path }}" + privatekey_path: "{{ test_key_path }}" + commonName: "localhost" + +- name: Generate CSR for self-signed certificate used for testing + community.crypto.openssl_csr: + path: "{{ test_csr2_path }}" + privatekey_path: "{{ test_key2_path }}" + commonName: "localhost" + +- name: Generate the self-signed cert used as a placeholder to create the java keystore + community.crypto.x509_certificate: + path: "{{ test_cert_path }}" + csr_path: "{{ test_csr_path }}" + privatekey_path: "{{ test_key_path }}" + provider: selfsigned - name: Generate the self signed cert we will use for testing - command: openssl req -x509 -newkey rsa:4096 -keyout '{{ test_key2_path }}' -out '{{ test_cert2_path }}' -days 365 -nodes -subj '/CN=localhost' - args: - creates: "{{ test_key2_path }}" + community.crypto.x509_certificate: + path: "{{ test_cert2_path }}" + csr_path: "{{ test_csr2_path }}" + privatekey_path: "{{ test_key2_path }}" + provider: selfsigned - name: Create the pkcs12 archive from the test x509 cert - 
command: > - openssl pkcs12 - -in {{ test_cert_path }} - -inkey {{ test_key_path }} - -export - -name test_pkcs12_cert - -out {{ test_pkcs_path }} - -passout pass:"{{ test_keystore2_password }}" + community.crypto.openssl_pkcs12: + name: "test_pkcs12_cert" + path: "{{ test_pkcs_path }}" + passphrase: "{{ test_keystore2_password }}" + certificate_path: "{{ test_cert_path }}" + privatekey_path: "{{ test_key_path }}" + when: + - "not (ansible_os_family == 'RedHat' and ansible_distribution_version is version('8.0', '<'))" + +- name: Create the pkcs12 archive from the test x509 cert (command) + ansible.builtin.command: + cmd: > + openssl pkcs12 -export + -in {{ test_cert_path }} + -inkey {{ test_key_path }} + -name test_pkcs12_cert + -out {{ test_pkcs_path }} + -passout stdin + stdin: "{{ test_keystore2_password }}" + when: + - "ansible_os_family == 'RedHat'" + - "ansible_distribution_version is version('8.0', '<')" - name: Create the pkcs12 archive from the certificate we will be trying to add to the keystore - command: > - openssl pkcs12 - -in {{ test_cert2_path }} - -inkey {{ test_key2_path }} - -export - -name test_pkcs12_cert - -out {{ test_pkcs2_path }} - -passout pass:"{{ test_keystore2_password }}" + community.crypto.openssl_pkcs12: + name: "test_pkcs12_cert" + path: "{{ test_pkcs2_path }}" + passphrase: "{{ test_keystore2_password }}" + certificate_path: "{{ test_cert2_path }}" + privatekey_path: "{{ test_key2_path }}" + when: + - "not (ansible_os_family == 'RedHat' and ansible_distribution_version is version('8.0', '<'))" + +- name: Create the pkcs12 archive from the certificate we will be trying to add to the keystore (command) + ansible.builtin.command: + cmd: > + openssl pkcs12 -export + -in {{ test_cert2_path }} + -inkey {{ test_key2_path }} + -name test_pkcs12_cert + -out {{ test_pkcs2_path }} + -passout stdin + stdin: "{{ test_keystore2_password }}" + when: + - "ansible_os_family == 'RedHat'" + - "ansible_distribution_version is version('8.0', '<')" + +# 
+# Run tests +# - name: try to create the test keystore based on the just created pkcs12, keystore_create flag not enabled - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert pkcs12_path: "{{ test_pkcs_path }}" @@ -41,12 +101,12 @@ register: result_x509_changed - name: Verify the x509 status is failed - assert: + ansible.builtin.assert: that: - result_x509_changed is failed - name: Create the test keystore based on the just created pkcs12 - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert pkcs12_path: "{{ test_pkcs_path }}" @@ -55,8 +115,19 @@ keystore_pass: "{{ test_keystore2_password }}" keystore_create: yes +- name: List newly created keystore content + ansible.builtin.command: + cmd: "keytool -list -keystore {{ test_keystore2_path }}" + stdin: "{{ test_keystore2_password }}" + register: keytool_list_keystore + +- name: Assert that the keystore has a private key entry + ansible.builtin.assert: + that: + - "keytool_list_keystore.stdout_lines[5] is match('test_pkcs12_cert,.*, PrivateKeyEntry, $')" + - name: try to import from pkcs12 a non existing alias - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: non_existing_alias pkcs12_path: "{{ test_pkcs_path }}" @@ -68,12 +139,12 @@ register: result_x509_changed - name: Verify the x509 status is failed - assert: + ansible.builtin.assert: that: - result_x509_changed is failed - name: import initial test certificate from file path - java_cert: + community.general.java_cert: cert_alias: test_cert cert_path: "{{ test_cert_path }}" keystore_path: "{{ test_keystore2_path }}" @@ -83,7 +154,7 @@ register: result_x509_changed - name: Verify the x509 status is changed - assert: + ansible.builtin.assert: that: - result_x509_changed is changed @@ -92,7 +163,7 @@ If the java_cert has been updated properly, then this task will report changed each time since the module will be comparing the hash 
of the certificate instead of validating that the alias simply exists - java_cert: + community.general.java_cert: cert_alias: test_cert cert_path: "{{ test_cert2_path }}" keystore_path: "{{ test_keystore2_path }}" @@ -101,13 +172,13 @@ register: result_x509_changed - name: Verify the x509 status is changed - assert: + ansible.builtin.assert: that: - result_x509_changed is changed - name: | We also want to make sure that the status doesnt change if we import the same cert - java_cert: + community.general.java_cert: cert_alias: test_cert cert_path: "{{ test_cert2_path }}" keystore_path: "{{ test_keystore2_path }}" @@ -116,13 +187,13 @@ register: result_x509_succeeded - name: Verify the x509 status is ok - assert: + ansible.builtin.assert: that: - result_x509_succeeded is succeeded - name: > Ensure the original pkcs12 cert is in the keystore - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert pkcs12_path: "{{ test_pkcs_path }}" @@ -134,7 +205,7 @@ - name: | Perform the same test, but we will now be testing the pkcs12 functionality If we add a different pkcs12 cert with the same alias, we should have a changed result, NOT the same - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert pkcs12_path: "{{ test_pkcs2_path }}" @@ -145,13 +216,13 @@ register: result_pkcs12_changed - name: Verify the pkcs12 status is changed - assert: + ansible.builtin.assert: that: - result_pkcs12_changed is changed - name: | We are requesting the same cert now, so the status should show OK - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert pkcs12_alias: test_pkcs12_cert pkcs12_path: "{{ test_pkcs2_path }}" @@ -161,7 +232,7 @@ register: result_pkcs12_succeeded - name: Verify the pkcs12 status is ok - assert: + ansible.builtin.assert: that: - result_pkcs12_succeeded is succeeded @@ -178,7 +249,7 @@ - name: | Download the original cert.pem from our temporary server. 
The current cert should contain cert2.pem. Importing this cert should return a status of changed - java_cert: + community.general.java_cert: cert_alias: test_cert_localhost cert_url: localhost cert_port: "{{ test_ssl_port }}" @@ -188,12 +259,12 @@ register: result_url_changed - name: Verify that the url status is changed - assert: + ansible.builtin.assert: that: - result_url_changed is changed - name: Ensure we can remove the x509 cert - java_cert: + community.general.java_cert: cert_alias: test_cert keystore_path: "{{ test_keystore2_path }}" keystore_pass: "{{ test_keystore2_password }}" @@ -201,12 +272,12 @@ register: result_x509_absent - name: Verify the x509 cert is absent - assert: + ansible.builtin.assert: that: - result_x509_absent is changed - name: Ensure we can remove the certificate imported from pkcs12 archive - java_cert: + community.general.java_cert: cert_alias: test_pkcs12_cert keystore_path: "{{ test_keystore2_path }}" keystore_pass: "{{ test_keystore2_password }}" @@ -214,6 +285,6 @@ register: result_pkcs12_absent - name: Verify the pkcs12 archive is absent - assert: + ansible.builtin.assert: that: - result_pkcs12_absent is changed From 4982eaf935f9a41e054dbe8e8f2f1b1af8334d6b Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 26 Jul 2021 11:44:41 +0200 Subject: [PATCH 0467/3093] Update BOTMETA, fix some plugin authors, improve BOTMETA extra sanity test (#3069) * Update BOTMETA, fix some plugin authors, improve BOTMETA extra sanity test. * Linting. 
--- .github/BOTMETA.yml | 269 +++++++++++++++++++++++++------- plugins/become/doas.py | 2 +- plugins/become/dzdo.py | 2 +- plugins/become/ksu.py | 2 +- plugins/become/machinectl.py | 2 +- plugins/become/pbrun.py | 2 +- plugins/become/pfexec.py | 2 +- plugins/become/pmrun.py | 2 +- plugins/connection/funcd.py | 2 +- plugins/lookup/dependent.py | 1 + tests/sanity/extra/botmeta.json | 3 - tests/sanity/extra/botmeta.py | 93 ++++++----- 12 files changed, 276 insertions(+), 106 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 55f34d3041..b91d01d44e 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -2,6 +2,7 @@ automerge: true files: plugins/: supershipit: quidame Ajpantuso + changelogs/: {} changelogs/fragments/: support: community $actions: @@ -12,17 +13,63 @@ files: maintainers: nitzmahone samdoran aminvakil $becomes/: labels: become + $becomes/doas.py: + maintainers: $team_ansible_core + $becomes/dzdo.py: + maintainers: $team_ansible_core + $becomes/ksu.py: + maintainers: $team_ansible_core + $becomes/machinectl.py: + maintainers: $team_ansible_core + $becomes/pbrun.py: + maintainers: $team_ansible_core + $becomes/pfexec.py: + maintainers: $team_ansible_core + $becomes/pmrun.py: + maintainers: $team_ansible_core + $becomes/sesu.py: + maintainers: nekonyuu + $becomes/sudosu.py: + maintainers: dagwieers + $caches/: + labels: cache + $caches/memcached.py: {} + $caches/pickle.py: + maintainers: bcoca + $caches/redis.py: {} + $caches/yaml.py: + maintainers: bcoca $callbacks/: labels: callbacks + $callbacks/cgroup_memory_recap.py: {} + $callbacks/context_demo.py: {} + $callbacks/counter_enabled.py: {} + $callbacks/dense.py: + maintainers: dagwieers + $callbacks/diy.py: + maintainers: theque5t + $callbacks/hipchat.py: {} + $callbacks/jabber.py: {} $callbacks/loganalytics.py: maintainers: zhcli + $callbacks/logdna.py: {} + $callbacks/logentries.py: {} + $callbacks/log_plays.py: {} $callbacks/logstash.py: maintainers: ujenmr + 
$callbacks/mail.py: + maintainers: dagwieers + $callbacks/nrdp.py: + maintainers: rverchere + $callbacks/null.py: {} $callbacks/say.py: notify: chris-short maintainers: $team_macos labels: macos say keywords: brew cask darwin homebrew macosx macports osx + $callbacks/selective.py: {} + $callbacks/slack.py: {} + $callbacks/splunk.py: {} $callbacks/sumologic.py: maintainers: ryancurrah labels: sumologic @@ -31,16 +78,26 @@ files: $callbacks/unixy.py: maintainers: akatch labels: unixy + $callbacks/yaml.py: {} $connections/: labels: connections - $connections/kubectl.py: - maintainers: chouseknecht fabianvf flaper87 maxamillion - labels: k8s kubectl + $connections/chroot.py: {} + $connections/funcd.py: + maintainers: mscherer + $connections/iocage.py: {} + $connections/jail.py: + maintainers: $team_ansible_core + $connections/lxc.py: {} $connections/lxd.py: maintainers: mattclay labels: lxd + $connections/qubes.py: + maintainers: kushaldas $connections/saltstack.py: + maintainers: mscherer labels: saltstack + $connections/zone.py: + maintainers: $team_ansible_core $doc_fragments/: labels: docs_fragments $doc_fragments/hpe3par.py: @@ -60,6 +117,8 @@ files: maintainers: giner $filters/from_csv.py: maintainers: Ajpantuso + $filters/groupby: + maintainers: felixfontein $filters/hashids: maintainers: Ajpantuso $filters/jc.py: @@ -72,53 +131,83 @@ files: maintainers: resmo $filters/version_sort.py: maintainers: ericzolf - $httpapis/: - maintainers: $team_networking - labels: networking - $httpapis/ftd.py: - maintainers: $team_networking annikulin - labels: cisco ftd networking - keywords: firepower ftd $inventories/: labels: inventories + $inventories/cobbler.py: + maintainers: opoplawski + $inventories/gitlab_runners.py: + maintainers: morph027 $inventories/linode.py: maintainers: $team_linode labels: cloud linode keywords: linode dynamic inventory script $inventories/lxd.py: maintainers: conloos + $inventories/nmap.py: {} + $inventories/online.py: + maintainers: sieben 
$inventories/proxmox.py: maintainers: $team_virt ilijamt $inventories/scaleway.py: maintainers: $team_scaleway labels: cloud scaleway + $inventories/stackpath_compute.py: + maintainers: shayrybak + $inventories/virtualbox.py: {} $lookups/: labels: lookups - $lookups/onepass: - maintainers: samdoran - labels: onepassword - $lookups/conjur_variable.py: - notify: cyberark-bizdev - maintainers: $team_cyberark_conjur - labels: conjur_variable + $lookups/cartesian.py: {} + $lookups/chef_databag.py: {} + $lookups/consul_kv.py: {} + $lookups/credstash.py: {} $lookups/cyberarkpassword.py: notify: cyberark-bizdev labels: cyberarkpassword + $lookups/dependent.py: + maintainers: felixfontein $lookups/dig.py: maintainers: jpmens labels: dig - $lookups/tss.py: - maintainers: amigus + $lookups/dnstxt.py: + maintainers: jpmens $lookups/dsv.py: maintainers: amigus + $lookups/etcd3.py: + maintainers: eric-belhomme + $lookups/etcd.py: + maintainers: jpmens + $lookups/filetree.py: + maintainers: dagwieers + $lookups/flattened.py: {} + $lookups/hiera.py: + maintainers: jparrill + $lookups/keyring.py: {} + $lookups/lastpass.py: {} + $lookups/lmdb_kv.py: + maintainers: jpmens $lookups/manifold.py: maintainers: galanoff labels: manifold $lookups/nios: maintainers: $team_networking sganesh-infoblox labels: infoblox networking + $lookups/onepass: + maintainers: samdoran + labels: onepassword + $lookups/onepassword.py: + maintainers: azenk scottsb + $lookups/onepassword_raw.py: + maintainers: azenk scottsb + $lookups/passwordstore.py: {} + $lookups/random_pet.py: + maintainers: Akasurde $lookups/random_string.py: maintainers: Akasurde + $lookups/redis.py: + maintainers: jpmens + $lookups/shelvefile.py: {} + $lookups/tss.py: + maintainers: amigus $module_utils/: labels: module_utils $module_utils/gitlab.py: @@ -196,33 +285,27 @@ files: maintainers: zbal $modules/cloud/lxc/lxc_container.py: maintainers: cloudnull - $modules/cloud/lxc/lxc_profile.py: - maintainers: conloos $modules/cloud/lxd/: 
ignore: hnakamur + $modules/cloud/lxd/lxd_profile.py: + maintainers: conloos $modules/cloud/memset/: maintainers: glitchcrab $modules/cloud/misc/cloud_init_data_facts.py: maintainers: resmo - $modules/cloud/misc/proxmox.py: - maintainers: $team_virt UnderGreen - labels: proxmox virt - ignore: skvidal - keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/proxmox_kvm.py: - maintainers: $team_virt helldorado - labels: proxmox_kvm virt - ignore: skvidal - keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/proxmox_snap.py: + $modules/cloud/misc/proxmox: maintainers: $team_virt labels: proxmox virt keywords: kvm libvirt proxmox qemu - $modules/cloud/misc/proxmox_template.py: - maintainers: $team_virt UnderGreen - labels: proxmox_template virt + $modules/cloud/misc/proxmox.py: + maintainers: UnderGreen + ignore: skvidal + $modules/cloud/misc/proxmox_kvm.py: + maintainers: helldorado + ignore: skvidal + $modules/cloud/misc/proxmox_template.py: + maintainers: UnderGreen ignore: skvidal - keywords: kvm libvirt proxmox qemu $modules/cloud/misc/rhevm.py: maintainers: $team_virt TimothyVandenbrande labels: rhevm virt @@ -264,16 +347,40 @@ files: maintainers: omgjlk sivel $modules/cloud/rackspace/: ignore: ryansb sivel + $modules/cloud/rackspace/rax_cbs.py: + maintainers: claco + $modules/cloud/rackspace/rax_cbs_attachments.py: + maintainers: claco + $modules/cloud/rackspace/rax_cdb.py: + maintainers: jails + $modules/cloud/rackspace/rax_cdb_user.py: + maintainers: jails + $modules/cloud/rackspace/rax_cdb_database.py: + maintainers: jails $modules/cloud/rackspace/rax_clb.py: maintainers: claco $modules/cloud/rackspace/rax_clb_nodes.py: maintainers: neuroid $modules/cloud/rackspace/rax_clb_ssl.py: maintainers: smashwilson + $modules/cloud/rackspace/rax_files.py: + maintainers: angstwad + $modules/cloud/rackspace/rax_files_objects.py: + maintainers: angstwad $modules/cloud/rackspace/rax_identity.py: maintainers: claco $modules/cloud/rackspace/rax_network.py: 
maintainers: claco omgjlk + $modules/cloud/rackspace/rax_mon_alarm.py: + maintainers: smashwilson + $modules/cloud/rackspace/rax_mon_check.py: + maintainers: smashwilson + $modules/cloud/rackspace/rax_mon_entity.py: + maintainers: smashwilson + $modules/cloud/rackspace/rax_mon_notification.py: + maintainers: smashwilson + $modules/cloud/rackspace/rax_mon_notification_plan.py: + maintainers: smashwilson $modules/cloud/rackspace/rax_queue.py: maintainers: claco $modules/cloud/scaleway/: @@ -285,13 +392,17 @@ files: $modules/cloud/scaleway/scaleway_ip_info.py: maintainers: Spredzy $modules/cloud/scaleway/scaleway_organization_info.py: - maintainers: sieben + maintainers: sieben Spredzy $modules/cloud/scaleway/scaleway_security_group.py: maintainers: DenBeke $modules/cloud/scaleway/scaleway_security_group_info.py: - maintainers: sieben + maintainers: sieben Spredzy $modules/cloud/scaleway/scaleway_security_group_rule.py: maintainers: DenBeke + $modules/cloud/scaleway/scaleway_server_info.py: + maintainers: Spredzy + $modules/cloud/scaleway/scaleway_snapshot_info.py: + maintainers: Spredzy $modules/cloud/scaleway/scaleway_volume.py: labels: scaleway_volume ignore: hekonsek @@ -343,6 +454,8 @@ files: maintainers: john-westcott-iv $modules/database/misc/redis.py: maintainers: slok + $modules/database/misc/redis_info.py: + maintainers: levonet $modules/database/misc/riak.py: maintainers: drewkerrigan jsmartin $modules/database/mssql/mssql_db.py: @@ -358,10 +471,14 @@ files: maintainers: quidame $modules/files/ini_file.py: maintainers: jpmens noseka1 + $modules/files/iso_create.py: + maintainers: Tomorrow9 $modules/files/iso_extract.py: maintainers: dagwieers jhoekx ribbons $modules/files/read_csv.py: maintainers: dagwieers + $modules/files/sapcar_extract.py: + maintainers: RainerLeber $modules/files/xattr.py: maintainers: bcoca labels: xattr @@ -379,15 +496,22 @@ files: maintainers: jparrill $modules/identity/keycloak/: maintainers: $team_keycloak + 
$modules/identity/keycloak/keycloak_authentication.py: + maintainers: elfelip Gaetan2907 + $modules/identity/keycloak/keycloak_clientscope.py: + maintainers: Gaetan2907 $modules/identity/keycloak/keycloak_group.py: maintainers: adamgoossens $modules/identity/keycloak/keycloak_realm.py: maintainers: kris2kris + $modules/identity/keycloak/keycloak_role.py: + maintainers: laurpaum $modules/identity/onepassword_info.py: maintainers: Rylon $modules/identity/opendj/opendj_backendprop.py: maintainers: dj-wasabi $modules/monitoring/airbrake_deployment.py: + maintainers: phumpal labels: airbrake_deployment ignore: bpennypacker $modules/monitoring/bigpanda.py: @@ -398,6 +522,8 @@ files: maintainers: n0ts labels: datadog_event ignore: arturaz + $modules/monitoring/datadog/datadog_downtime.py: + maintainers: Datadog $modules/monitoring/datadog/datadog_monitor.py: maintainers: skornehl $modules/monitoring/honeybadger_deployment.py: @@ -461,6 +587,8 @@ files: maintainers: drcapulet $modules/net_tools/dnsmadeeasy.py: maintainers: briceburg + $modules/net_tools/gandi_livedns.py: + maintainers: gthiemonge $modules/net_tools/haproxy.py: maintainers: ravibhure Normo $modules/net_tools/: @@ -490,11 +618,25 @@ files: maintainers: nbuchwitz $modules/net_tools/omapi_host.py: maintainers: amasolov + $modules/net_tools/pritunl/: + maintainers: Lowess $modules/net_tools/nios/: maintainers: $team_networking labels: infoblox networking + $modules/net_tools/nios/nios_a_record.py: + maintainers: brampling + $modules/net_tools/nios/nios_aaaa_record.py: + maintainers: brampling + $modules/net_tools/nios/nios_cname_record.py: + maintainers: brampling $modules/net_tools/nios/nios_fixed_address.py: maintainers: sjaiswal + $modules/net_tools/nios/nios_member.py: + maintainers: krisvasudevan + $modules/net_tools/nios/nios_mx_record.py: + maintainers: brampling + $modules/net_tools/nios/nios_naptr_record.py: + maintainers: brampling $modules/net_tools/nios/nios_nsgroup.py: maintainers: ebirn sjaiswal 
$modules/net_tools/nios/nios_ptr_record.py: @@ -507,17 +649,16 @@ files: maintainers: alcamie101 $modules/net_tools/snmp_facts.py: maintainers: ogenstad ujwalkomarla - $modules/notification/osx_say.py: - maintainers: ansible mpdehaan - labels: _osx_say $modules/notification/bearychat.py: maintainers: tonyseek $modules/notification/campfire.py: maintainers: fabulops $modules/notification/catapult.py: maintainers: Jmainguy - $modules/notification/cisco_spark.py: + $modules/notification/cisco_webex.py: maintainers: drew-russell + $modules/notification/discord.py: + maintainers: cwollinger $modules/notification/flowdock.py: maintainers: mcodd $modules/notification/grove.py: @@ -545,7 +686,7 @@ files: $modules/notification/pushbullet.py: maintainers: willybarro $modules/notification/pushover.py: - maintainers: weaselkeeper + maintainers: weaselkeeper wopfel $modules/notification/rocketchat.py: maintainers: Deepakkothandan labels: rocketchat @@ -559,7 +700,7 @@ files: $modules/notification/syslogger.py: maintainers: garbled1 $modules/notification/telegram.py: - maintainers: tyouxa loms + maintainers: tyouxa loms lomserman $modules/notification/twilio.py: maintainers: makaimc $modules/notification/typetalk.py: @@ -597,6 +738,8 @@ files: maintainers: tdtrask labels: apk ignore: kbrebanov + $modules/packaging/os/apt_repo.py: + maintainers: obirvalger $modules/packaging/os/apt_rpm.py: maintainers: evgkrsk $modules/packaging/os/copr.py: @@ -788,6 +931,8 @@ files: maintainers: markuman $modules/source_control/gitlab/gitlab_runner.py: maintainers: SamyCoenen + $modules/source_control/gitlab/gitlab_user.py: + maintainers: LennertMertens stgrace $modules/source_control/hg.py: maintainers: yeukhon $modules/storage/emc/emc_vnx_sg_member.py: @@ -796,13 +941,6 @@ files: maintainers: farhan7500 gautamphegde $modules/storage/ibm/: maintainers: tzure - $modules/storage/infinidat/: - maintainers: vmalloc GR360RY - $modules/storage/netapp/: - maintainers: $team_netapp - 
$modules/storage/purestorage/: - maintainers: $team_purestorage - labels: pure_storage $modules/storage/vexata/: maintainers: vexata $modules/storage/zfs/: @@ -821,6 +959,8 @@ files: maintainers: mulby labels: alternatives ignore: DavidWittman + $modules/system/aix_lvol.py: + maintainers: adejoux $modules/system/awall.py: maintainers: tdtrask $modules/system/beadm.py: @@ -856,7 +996,7 @@ files: $modules/system/java_cert.py: maintainers: haad absynth76 $modules/system/java_keystore.py: - maintainers: Mogztter + maintainers: Mogztter quidame $modules/system/kernel_blacklist.py: maintainers: matze $modules/system/launchd.py: @@ -870,7 +1010,7 @@ files: $modules/system/lvg.py: maintainers: abulimov $modules/system/lvol.py: - maintainers: abulimov jhoekx + maintainers: abulimov jhoekx zigaSRC unkaputtbar112 $modules/system/make.py: maintainers: LinusU $modules/system/mksysb.py: @@ -924,6 +1064,8 @@ files: maintainers: $team_solaris pmarkham labels: solaris keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool + $modules/system/ssh_config.py: + maintainers: gaqzi Akasurde $modules/system/svc.py: maintainers: bcoca $modules/system/syspatch.py: @@ -939,10 +1081,13 @@ files: maintainers: ahtik ovcharenko pyykkis labels: ufw $modules/system/vdo.py: - maintainers: rhawalsh + maintainers: rhawalsh bgurney-rh $modules/system/xfconf.py: maintainers: russoz jbenden labels: xfconf + $modules/system/xfconf_info.py: + maintainers: russoz + labels: xfconf $modules/system/xfs_quota.py: maintainers: bushvin $modules/web_infrastructure/apache2_mod_proxy.py: @@ -964,6 +1109,8 @@ files: $modules/web_infrastructure/jboss.py: maintainers: $team_jboss jhoekx labels: jboss + $modules/web_infrastructure/jenkins_build.py: + maintainers: brettmilford unnecessary-username $modules/web_infrastructure/jenkins_job.py: maintainers: sermilrod $modules/web_infrastructure/jenkins_job_info.py: @@ -973,7 +1120,7 @@ files: 
$modules/web_infrastructure/jenkins_script.py: maintainers: hogarthj $modules/web_infrastructure/jira.py: - maintainers: Slezhuk tarka + maintainers: Slezhuk tarka pertoft DWSR labels: jira $modules/web_infrastructure/nginx_status_info.py: maintainers: resmo @@ -988,6 +1135,14 @@ files: $modules/web_infrastructure/sophos_utm/utm_proxy_exception.py: maintainers: $team_e_spirit RickS-C137 keywords: sophos utm + $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py: + maintainers: stearz + $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py: + maintainers: stearz + $modules/web_infrastructure/sophos_utm/utm_network_interface_address.py: + maintainers: steamx + $modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py: + maintainers: steamx $modules/web_infrastructure/supervisorctl.py: maintainers: inetfuture mattupstate $modules/web_infrastructure/taiga_issue.py: @@ -1007,17 +1162,18 @@ files: macros: actions: plugins/action becomes: plugins/become + caches: plugins/cache callbacks: plugins/callback cliconfs: plugins/cliconf connections: plugins/connection doc_fragments: plugins/doc_fragments filters: plugins/filter - httpapis: plugins/httpapi inventories: plugins/inventory lookups: plugins/lookup module_utils: plugins/module_utils modules: plugins/modules terminals: plugins/terminal + team_ansible_core: team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo team_consul: sgargan @@ -1033,7 +1189,6 @@ macros: team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder - team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1 team_networking: NilashishC Qalthos danielmellado ganeshrn justjais 
trishnaguha sganesh-infoblox privateip team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding team_oracle: manojmeda mross22 nalsaber diff --git a/plugins/become/doas.py b/plugins/become/doas.py index 431e33cd6d..7cf4a79c7b 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: Do As user description: - This become plugins allows your remote/login user to execute commands as another user via the doas utility. - author: ansible (@core) + author: Ansible Core Team options: become_user: description: User you 'become' to execute the task diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py index 05fcb6192d..1aef8edb69 100644 --- a/plugins/become/dzdo.py +++ b/plugins/become/dzdo.py @@ -8,7 +8,7 @@ DOCUMENTATION = ''' short_description: Centrify's Direct Authorize description: - This become plugins allows your remote/login user to execute commands as another user via the dzdo utility. - author: ansible (@core) + author: Ansible Core Team options: become_user: description: User you 'become' to execute the task diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py index f5600c1d70..1ee47b0fa3 100644 --- a/plugins/become/ksu.py +++ b/plugins/become/ksu.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: Kerberos substitute user description: - This become plugins allows your remote/login user to execute commands as another user via the ksu utility. - author: ansible (@core) + author: Ansible Core Team options: become_user: description: User you 'become' to execute the task diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py index f9a2873f63..aebb0891b0 100644 --- a/plugins/become/machinectl.py +++ b/plugins/become/machinectl.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: Systemd's machinectl privilege escalation description: - This become plugins allows your remote/login user to execute commands as another user via the machinectl utility. 
- author: ansible (@core) + author: Ansible Core Team options: become_user: description: User you 'become' to execute the task diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py index a464309c0d..fe28e61c2b 100644 --- a/plugins/become/pbrun.py +++ b/plugins/become/pbrun.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: PowerBroker run description: - This become plugins allows your remote/login user to execute commands as another user via the pbrun utility. - author: ansible (@core) + author: Ansible Core Team options: become_user: description: User you 'become' to execute the task diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py index 256275dca2..2b37044c93 100644 --- a/plugins/become/pfexec.py +++ b/plugins/become/pfexec.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: profile based execution description: - This become plugins allows your remote/login user to execute commands as another user via the pfexec utility. - author: ansible (@core) + author: Ansible Core Team options: become_user: description: diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py index 597ea69d2f..8cb24fa937 100644 --- a/plugins/become/pmrun.py +++ b/plugins/become/pmrun.py @@ -9,7 +9,7 @@ DOCUMENTATION = ''' short_description: Privilege Manager run description: - This become plugins allows your remote/login user to execute commands as another user via the pmrun utility. 
- author: ansible (@core) + author: Ansible Core Team options: become_exe: description: Sudo executable diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 109e251146..afea840ee8 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -8,7 +8,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' - author: Michael Scherer (@msherer) + author: Michael Scherer (@mscherer) name: funcd short_description: Use funcd to connect to target description: diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index a22a98476c..c9ce58567d 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -7,6 +7,7 @@ __metaclass__ = type DOCUMENTATION = """ name: dependent short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables +author: Felix Fontein (@felixfontein) version_added: 3.1.0 description: - "Takes the input lists and returns a list with elements that are lists, dictionaries, diff --git a/tests/sanity/extra/botmeta.json b/tests/sanity/extra/botmeta.json index cba49c90cd..c546ab5fd7 100644 --- a/tests/sanity/extra/botmeta.json +++ b/tests/sanity/extra/botmeta.json @@ -1,8 +1,5 @@ { "include_symlinks": false, - "prefixes": [ - ".github/BOTMETA.yml" - ], "output": "path-line-column-message", "requirements": [ "PyYAML", diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py index e8ea819394..43bd087aa5 100755 --- a/tests/sanity/extra/botmeta.py +++ b/tests/sanity/extra/botmeta.py @@ -57,8 +57,19 @@ def read_authors(filename): return author +def extract_author_name(author): + m = AUTHOR_REGEX.match(author) + if m: + return m.group(1) + if author == 'Ansible Core Team': + return '$team_ansible_core' + return None + + def validate(filename, filedata): - if filename.startswith('plugins/doc_fragments/'): + if not filename.startswith('plugins/'): + return + if 
filename.startswith(('plugins/doc_fragments/', 'plugins/module_utils/')): return # Compile lis tof all active and inactive maintainers all_maintainers = filedata['maintainers'] + filedata['ignore'] @@ -70,21 +81,16 @@ def validate(filename, filedata): return maintainers = read_authors(filename) for maintainer in maintainers: - m = AUTHOR_REGEX.match(maintainer) - if m: - maintainer = m.group(1) - if maintainer not in all_maintainers: - msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( - maintainer, filename, ', '.join(all_maintainers)) - if REPORT_MISSING_MAINTAINERS: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) + maintainer = extract_author_name(maintainer) + if maintainer is not None and maintainer not in all_maintainers: + msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( + maintainer, filename, ', '.join(all_maintainers)) + if REPORT_MISSING_MAINTAINERS: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) def main(): """Main entry point.""" - paths = sys.argv[1:] or sys.stdin.read().splitlines() - paths = [path for path in paths if path.endswith('/aliases')] - try: with open(FILENAME, 'rb') as f: botmeta = yaml.safe_load(f) @@ -100,7 +106,7 @@ def main(): # Validate schema MacroSchema = Schema({ - (str): str, + (str): Any(str, None), }, extra=PREVENT_EXTRA) FilesSchema = Schema({ @@ -135,7 +141,11 @@ def main(): def convert_macros(text, macros): def f(m): - return macros[m.group(1)] + macro = m.group(1) + replacement = (macros[macro] or '') + if macro == 'team_ansible_core': + return '$team_ansible_core %s' % replacement + return replacement return macro_re.sub(f, text) @@ -153,31 +163,38 @@ def main(): return # Scan all files - for dirpath, dirnames, filenames in os.walk('plugins/'): - for file in filenames: - if file.endswith('.pyc'): - continue - filename = os.path.join(dirpath, file) - if os.path.islink(filename): - continue - if os.path.isfile(filename): - 
matching_files = [] - for file, filedata in files.items(): - if filename.startswith(file): - matching_files.append((file, filedata)) - if not matching_files: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Did not find any entry for %s' % filename)) + unmatched = set(files) + for dirs in ('plugins', 'tests', 'changelogs'): + for dirpath, dirnames, filenames in os.walk(dirs): + for file in sorted(filenames): + if file.endswith('.pyc'): + continue + filename = os.path.join(dirpath, file) + if os.path.islink(filename): + continue + if os.path.isfile(filename): + matching_files = [] + for file, filedata in files.items(): + if filename.startswith(file): + matching_files.append((file, filedata)) + if file in unmatched: + unmatched.remove(file) + if not matching_files: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Did not find any entry for %s' % filename)) - matching_files.sort(key=lambda kv: kv[0]) - filedata = dict() - for k in LIST_ENTRIES: - filedata[k] = [] - for dummy, data in matching_files: - for k, v in data.items(): - if k in LIST_ENTRIES: - v = filedata[k] + v - filedata[k] = v - validate(filename, filedata) + matching_files.sort(key=lambda kv: kv[0]) + filedata = dict() + for k in LIST_ENTRIES: + filedata[k] = [] + for dummy, data in matching_files: + for k, v in data.items(): + if k in LIST_ENTRIES: + v = filedata[k] + v + filedata[k] = v + validate(filename, filedata) + + for file in unmatched: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Entry %s was not used' % file)) if __name__ == '__main__': From 024e7419da8ec50a94799d5fa22004c2298b7935 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 26 Jul 2021 16:54:00 +0200 Subject: [PATCH 0468/3093] BOTMETA: enforce entries for new plugins/modules, add documentation for creating new plugins/modules (#3088) * More BOTMETA improvements. * Improve BOTMETA test, start reporting missing entries for new plugins/modules. * Add instructions for creating new plugins and modules. 
--- .github/BOTMETA.yml | 24 ++++++++++++++---------- CONTRIBUTING.md | 31 +++++++++++++++++++++++++++++++ tests/sanity/extra/botmeta.py | 24 ++++++++++-------------- 3 files changed, 55 insertions(+), 24 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index b91d01d44e..fb08599a13 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -123,10 +123,12 @@ files: maintainers: Ajpantuso $filters/jc.py: maintainers: kellyjonbrazil + $filters/json_query.py: {} $filters/list.py: maintainers: vbotka $filters/path_join_shim.py: maintainers: felixfontein + $filters/random_mac.py: {} $filters/time.py: maintainers: resmo $filters/version_sort.py: @@ -204,7 +206,7 @@ files: $lookups/random_string.py: maintainers: Akasurde $lookups/redis.py: - maintainers: jpmens + maintainers: $team_ansible_core jpmens $lookups/shelvefile.py: {} $lookups/tss.py: maintainers: amigus @@ -591,8 +593,6 @@ files: maintainers: gthiemonge $modules/net_tools/haproxy.py: maintainers: ravibhure Normo - $modules/net_tools/: - maintainers: nerzhul $modules/net_tools/infinity/infinity.py: maintainers: MeganLiu $modules/net_tools/ip_netns.py: @@ -616,8 +616,10 @@ files: ignore: andyhky $modules/net_tools/netcup_dns.py: maintainers: nbuchwitz + $modules/net_tools/nsupdate.py: + maintainers: nerzhul $modules/net_tools/omapi_host.py: - maintainers: amasolov + maintainers: amasolov nerzhul $modules/net_tools/pritunl/: maintainers: Lowess $modules/net_tools/nios/: @@ -692,7 +694,7 @@ files: labels: rocketchat ignore: ramondelafuente $modules/notification/say.py: - maintainers: ansible mpdehaan + maintainers: $team_ansible_core mpdehaan $modules/notification/sendgrid.py: maintainers: makaimc $modules/notification/slack.py: @@ -717,7 +719,7 @@ files: $modules/packaging/language/easy_install.py: maintainers: mattupstate $modules/packaging/language/gem.py: - maintainers: ansible johanwiren + maintainers: $team_ansible_core johanwiren labels: gem 
$modules/packaging/language/maven_artifact.py: maintainers: tumbl3w33d turb @@ -978,7 +980,7 @@ files: $modules/system/dpkg_divert.py: maintainers: quidame $modules/system/facter.py: - maintainers: ansible gamethis + maintainers: $team_ansible_core gamethis labels: facter $modules/system/filesystem.py: maintainers: pilou- abulimov quidame @@ -1023,7 +1025,7 @@ files: $modules/system/nosh.py: maintainers: tacatac $modules/system/ohai.py: - maintainers: ansible mpdehaan + maintainers: $team_ansible_core mpdehaan labels: ohai $modules/system/open_iscsi.py: maintainers: srvg @@ -1104,7 +1106,7 @@ files: $modules/web_infrastructure/gunicorn.py: maintainers: agmezr $modules/web_infrastructure/htpasswd.py: - maintainers: ansible + maintainers: $team_ansible_core labels: htpasswd $modules/web_infrastructure/jboss.py: maintainers: $team_jboss jhoekx @@ -1124,7 +1126,9 @@ files: labels: jira $modules/web_infrastructure/nginx_status_info.py: maintainers: resmo - $modules/web_infrastructure/: + $modules/web_infrastructure/rundeck_acl_policy.py: + maintainers: nerzhul + $modules/web_infrastructure/rundeck_project.py: maintainers: nerzhul $modules/web_infrastructure/sophos_utm/: maintainers: $team_e_spirit diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4dfde91fca..ba30ed1e02 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -34,3 +34,34 @@ You can also read [our Quick-start development guide](https://github.com/ansible If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how do it quickly. If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it. + +## Creating new modules or plugins + +Creating new modules and plugins requires a bit more work than other Pull Requests. + +1. Please make sure that your new module or plugin is of interest to a larger audience. 
Very specialized modules or plugins that + can only be used by very few people should better be added to more specialized collections. + +2. When creating a new module or plugin, please make sure that you follow various guidelines: + + - Follow [development conventions](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html); + - Follow [documentation standards](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html) and + the [Ansible style guide](https://docs.ansible.com/ansible/devel/dev_guide/style_guide/index.html#style-guide); + - Make sure your modules and plugins are [GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0-standalone.html) licensed + (new module_utils can also be [BSD-2-clause](https://opensource.org/licenses/BSD-2-Clause) licensed); + - Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests + which run in CI. + +3. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and create a symbolic link + from `plugins/modules/` respectively `plugins/action/` to the actual module/plugin code. (Other plugin types should not use + subdirectories.) + + - Action plugins need to be accompanied by a module, even if the module file only contains documentation + (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/` + than the action plugin has in `plugins/action/`. + +4. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the + same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People + listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests. 
+ + When you add a new plugin/module, we expect that you perform maintainer duty for at least some time after contributing it. diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py index 43bd087aa5..b5c49b5a4b 100755 --- a/tests/sanity/extra/botmeta.py +++ b/tests/sanity/extra/botmeta.py @@ -17,7 +17,7 @@ from voluptuous import Required, Schema, Invalid from voluptuous.humanize import humanize_error -REPORT_MISSING_MAINTAINERS = False +REPORT_NO_MAINTAINERS = False FILENAME = '.github/BOTMETA.yml' @@ -73,20 +73,16 @@ def validate(filename, filedata): return # Compile lis tof all active and inactive maintainers all_maintainers = filedata['maintainers'] + filedata['ignore'] - if not all_maintainers: - if REPORT_MISSING_MAINTAINERS: - print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename)) - return - if filename.startswith('plugins/filter/'): - return - maintainers = read_authors(filename) - for maintainer in maintainers: - maintainer = extract_author_name(maintainer) - if maintainer is not None and maintainer not in all_maintainers: - msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( - maintainer, filename, ', '.join(all_maintainers)) - if REPORT_MISSING_MAINTAINERS: + if not filename.startswith('plugins/filter/'): + maintainers = read_authors(filename) + for maintainer in maintainers: + maintainer = extract_author_name(maintainer) + if maintainer is not None and maintainer not in all_maintainers: + msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( + maintainer, filename, ', '.join(all_maintainers)) print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) + if not all_maintainers and REPORT_NO_MAINTAINERS: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename)) def main(): From 7da2c16b4a48d420e8522bc0a5d292a0a9a9ca65 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky 
<103110+russoz@users.noreply.github.com> Date: Wed, 28 Jul 2021 04:24:29 +1200 Subject: [PATCH 0469/3093] added supports_check_mode=True to info/facts modules (#3084) * added supports_check_mode=True to info/facts modules * added changelog fragment * rolled back vertica_info * rolled back utm_proxy_*_info * updated changelog fragment with latest adjustments * Update changelogs/fragments/3084-info-checkmode.yaml Co-authored-by: Felix Fontein * added check mode to xenserver_facts + oneview_*_info * added check mode to utm_proxy_*_info * updated changelog Co-authored-by: Felix Fontein --- changelogs/fragments/3084-info-checkmode.yaml | 24 +++++++++++++++++++ plugins/module_utils/oneview.py | 4 ++-- .../cloud/alicloud/ali_instance_info.py | 5 +++- .../cloud/memset/memset_memstore_info.py | 2 +- .../cloud/memset/memset_server_info.py | 2 +- plugins/modules/cloud/misc/xenserver_facts.py | 4 +++- plugins/modules/cloud/rackspace/rax_facts.py | 1 + .../cloud/smartos/smartos_image_info.py | 2 +- plugins/modules/net_tools/snmp_facts.py | 2 +- .../oneview/oneview_datacenter_info.py | 5 +++- .../oneview/oneview_enclosure_info.py | 5 +++- .../oneview/oneview_ethernet_network_info.py | 5 +++- .../oneview/oneview_fc_network_info.py | 5 +++- .../oneview/oneview_fcoe_network_info.py | 5 +++- ...oneview_logical_interconnect_group_info.py | 5 +++- .../oneview/oneview_network_set_info.py | 5 +++- .../oneview/oneview_san_manager_info.py | 5 +++- .../redfish/idrac_redfish_info.py | 2 +- .../remote_management/redfish/redfish_info.py | 2 +- plugins/modules/system/xfconf_info.py | 1 + .../sophos_utm/utm_aaa_group_info.py | 3 ++- .../sophos_utm/utm_ca_host_key_cert_info.py | 3 ++- .../utm_network_interface_address_info.py | 3 ++- .../sophos_utm/utm_proxy_frontend_info.py | 5 ++-- .../sophos_utm/utm_proxy_location_info.py | 5 ++-- 25 files changed, 85 insertions(+), 25 deletions(-) create mode 100644 changelogs/fragments/3084-info-checkmode.yaml diff --git 
a/changelogs/fragments/3084-info-checkmode.yaml b/changelogs/fragments/3084-info-checkmode.yaml new file mode 100644 index 0000000000..4e9fa85075 --- /dev/null +++ b/changelogs/fragments/3084-info-checkmode.yaml @@ -0,0 +1,24 @@ +bugfixes: + - ali_instance_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - memset_memstore_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - memset_server_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - xenserver_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - rax_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - smartos_image_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - snmp_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_datacenter_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_enclosure_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_ethernet_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_fc_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_fcoe_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_logical_interconnect_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). 
+ - oneview_network_set_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - oneview_san_manager_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - idrac_redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - xfconf_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - utm_aaa_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - utm_ca_host_key_cert_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - utm_network_interface_address_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - utm_proxy_frontend_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). + - utm_proxy_location_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084). diff --git a/plugins/module_utils/oneview.py b/plugins/module_utils/oneview.py index 3ebb057ca7..66e1d6d4c7 100644 --- a/plugins/module_utils/oneview.py +++ b/plugins/module_utils/oneview.py @@ -201,7 +201,7 @@ class OneViewModuleBase(object): resource_client = None - def __init__(self, additional_arg_spec=None, validate_etag_support=False): + def __init__(self, additional_arg_spec=None, validate_etag_support=False, supports_check_mode=False): """ OneViewModuleBase constructor. 
@@ -210,7 +210,7 @@ class OneViewModuleBase(object): """ argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support) - self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode) self._check_hpe_oneview_sdk() self._create_oneview_client() diff --git a/plugins/modules/cloud/alicloud/ali_instance_info.py b/plugins/modules/cloud/alicloud/ali_instance_info.py index 23665bbcad..06df6cb4f1 100644 --- a/plugins/modules/cloud/alicloud/ali_instance_info.py +++ b/plugins/modules/cloud/alicloud/ali_instance_info.py @@ -386,7 +386,10 @@ def main(): filters=dict(type='dict') ) ) - module = AnsibleModule(argument_spec=argument_spec) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) if HAS_FOOTMARK is False: module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR) diff --git a/plugins/modules/cloud/memset/memset_memstore_info.py b/plugins/modules/cloud/memset/memset_memstore_info.py index df5ede1a62..e880b46009 100644 --- a/plugins/modules/cloud/memset/memset_memstore_info.py +++ b/plugins/modules/cloud/memset/memset_memstore_info.py @@ -149,7 +149,7 @@ def main(): api_key=dict(required=True, type='str', no_log=True), name=dict(required=True, type='str') ), - supports_check_mode=False + supports_check_mode=True, ) # populate the dict with the user-provided vars. diff --git a/plugins/modules/cloud/memset/memset_server_info.py b/plugins/modules/cloud/memset/memset_server_info.py index 50fe39fd99..853e2c884d 100644 --- a/plugins/modules/cloud/memset/memset_server_info.py +++ b/plugins/modules/cloud/memset/memset_server_info.py @@ -274,7 +274,7 @@ def main(): api_key=dict(required=True, type='str', no_log=True), name=dict(required=True, type='str') ), - supports_check_mode=False + supports_check_mode=True, ) # populate the dict with the user-provided vars. 
diff --git a/plugins/modules/cloud/misc/xenserver_facts.py b/plugins/modules/cloud/misc/xenserver_facts.py index 25923cb288..bc01c56ecb 100644 --- a/plugins/modules/cloud/misc/xenserver_facts.py +++ b/plugins/modules/cloud/misc/xenserver_facts.py @@ -160,7 +160,9 @@ def get_srs(session): def main(): - module = AnsibleModule({}) + module = AnsibleModule( + supports_check_mode=True, + ) if not HAVE_XENAPI: module.fail_json(changed=False, msg="python xen api required for this module") diff --git a/plugins/modules/cloud/rackspace/rax_facts.py b/plugins/modules/cloud/rackspace/rax_facts.py index 386ca7cfa9..f9fd89556f 100644 --- a/plugins/modules/cloud/rackspace/rax_facts.py +++ b/plugins/modules/cloud/rackspace/rax_facts.py @@ -124,6 +124,7 @@ def main(): required_together=rax_required_together(), mutually_exclusive=[['address', 'id', 'name']], required_one_of=[['address', 'id', 'name']], + supports_check_mode=True, ) if not HAS_PYRAX: diff --git a/plugins/modules/cloud/smartos/smartos_image_info.py b/plugins/modules/cloud/smartos/smartos_image_info.py index f1c75bc26c..369559f52a 100644 --- a/plugins/modules/cloud/smartos/smartos_image_info.py +++ b/plugins/modules/cloud/smartos/smartos_image_info.py @@ -97,7 +97,7 @@ def main(): argument_spec=dict( filters=dict(default=None), ), - supports_check_mode=False, + supports_check_mode=True, ) image_facts = ImageFacts(module) diff --git a/plugins/modules/net_tools/snmp_facts.py b/plugins/modules/net_tools/snmp_facts.py index 221eda30f9..e9d0ebc94c 100644 --- a/plugins/modules/net_tools/snmp_facts.py +++ b/plugins/modules/net_tools/snmp_facts.py @@ -288,7 +288,7 @@ def main(): ['username', 'level', 'integrity', 'authkey'], ['privacy', 'privkey'], ), - supports_check_mode=False, + supports_check_mode=True, ) m_args = module.params diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py index 13ab883330..04d4fc0c7e 100644 --- 
a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py +++ b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py @@ -116,7 +116,10 @@ class DatacenterInfoModule(OneViewModuleBase): ) def __init__(self): - super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + super(DatacenterInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) def execute_module(self): diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py index 1889dc1a4f..a9bbb8e799 100644 --- a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py +++ b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py @@ -163,7 +163,10 @@ class EnclosureInfoModule(OneViewModuleBase): ) def __init__(self): - super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + super(EnclosureInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) def execute_module(self): diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py index 4021b768f9..63a9e1efae 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py @@ -114,7 +114,10 @@ class EthernetNetworkInfoModule(OneViewModuleBase): ) def __init__(self): - super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + super(EthernetNetworkInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) self.resource_client = self.oneview_client.ethernet_networks diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py 
b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py index 21d9673b51..86430402fe 100644 --- a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py @@ -83,7 +83,10 @@ class FcNetworkInfoModule(OneViewModuleBase): params=dict(required=False, type='dict') ) - super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec) + super(FcNetworkInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) def execute_module(self): diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py index e207670a9a..b0ede13820 100644 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py @@ -82,7 +82,10 @@ class FcoeNetworkInfoModule(OneViewModuleBase): params=dict(type='dict'), ) - super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec) + super(FcoeNetworkInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) def execute_module(self): diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py index 1f7f3c9613..e8670a33a8 100644 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py @@ -96,7 +96,10 @@ class LogicalInterconnectGroupInfoModule(OneViewModuleBase): params=dict(type='dict'), ) - super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec) + super(LogicalInterconnectGroupInfoModule, self).__init__( + additional_arg_spec=argument_spec, + supports_check_mode=True, + ) def 
execute_module(self): if self.module.params.get('name'): diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/plugins/modules/remote_management/oneview/oneview_network_set_info.py index bc76cb36b1..5cb7463b4c 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set_info.py +++ b/plugins/modules/remote_management/oneview/oneview_network_set_info.py @@ -135,7 +135,10 @@ class NetworkSetInfoModule(OneViewModuleBase): ) def __init__(self): - super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + super(NetworkSetInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) def execute_module(self): diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py index 5dbc28afc2..c80ef474cc 100644 --- a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py +++ b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py @@ -90,7 +90,10 @@ class SanManagerInfoModule(OneViewModuleBase): ) def __init__(self): - super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec) + super(SanManagerInfoModule, self).__init__( + additional_arg_spec=self.argument_spec, + supports_check_mode=True, + ) self.resource_client = self.oneview_client.san_managers def execute_module(self): diff --git a/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/plugins/modules/remote_management/redfish/idrac_redfish_info.py index cb1aa8f34f..fb137acca3 100644 --- a/plugins/modules/remote_management/redfish/idrac_redfish_info.py +++ b/plugins/modules/remote_management/redfish/idrac_redfish_info.py @@ -191,7 +191,7 @@ def main(): mutually_exclusive=[ ('username', 'auth_token'), ], - supports_check_mode=False + supports_check_mode=True, ) category = module.params['category'] diff --git 
a/plugins/modules/remote_management/redfish/redfish_info.py b/plugins/modules/remote_management/redfish/redfish_info.py index 41d5bfb04a..49bd7c6ee3 100644 --- a/plugins/modules/remote_management/redfish/redfish_info.py +++ b/plugins/modules/remote_management/redfish/redfish_info.py @@ -318,7 +318,7 @@ def main(): mutually_exclusive=[ ('username', 'auth_token'), ], - supports_check_mode=False + supports_check_mode=True, ) # admin credentials used for authentication diff --git a/plugins/modules/system/xfconf_info.py b/plugins/modules/system/xfconf_info.py index 9cef821071..766267dd3d 100644 --- a/plugins/modules/system/xfconf_info.py +++ b/plugins/modules/system/xfconf_info.py @@ -132,6 +132,7 @@ class XFConfInfo(CmdModuleHelper): required_by=dict( property=['channel'] ), + supports_check_mode=True, ) command = 'xfconf-query' diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py index 88356a2e54..d5660ab73c 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py @@ -110,7 +110,8 @@ def main(): module = UTMModule( argument_spec=dict( name=dict(type='str', required=True) - ) + ), + supports_check_mode=True, ) try: UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py index 02542532f7..9aa16d4aca 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py @@ -88,7 +88,8 @@ def main(): module = UTMModule( argument_spec=dict( name=dict(type='str', required=True) - ) + ), + supports_check_mode=True, ) try: # This is needed because the bool value only accepts int values in the backend diff 
--git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py index 3f623d5a86..700799ab59 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py @@ -84,7 +84,8 @@ def main(): module = UTMModule( argument_spec=dict( name=dict(type='str', required=True) - ) + ), + supports_check_mode=True, ) try: UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py index 263b976045..62a832d7c6 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py @@ -128,8 +128,9 @@ def main(): key_to_check_for_changes = [] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True) - ) + name=dict(type='str', required=True), + ), + supports_check_mode=True, ) try: UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py index afc0f5efcd..99174a89b1 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py @@ -109,8 +109,9 @@ def main(): key_to_check_for_changes = [] module = UTMModule( argument_spec=dict( - name=dict(type='str', required=True) - ) + name=dict(type='str', required=True), + ), + supports_check_mode=True, ) try: UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute() From 87baa5860ad98811b03466ca78238afe85d9264b Mon Sep 17 00:00:00 2001 
From: Felix Fontein Date: Tue, 27 Jul 2021 20:26:26 +0200 Subject: [PATCH 0470/3093] Actually enable BOTMETA sanity test to force new plugins/modules to have BOTMETA entry. (#3096) --- tests/sanity/extra/botmeta.py | 40 +++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/tests/sanity/extra/botmeta.py b/tests/sanity/extra/botmeta.py index b5c49b5a4b..f84c7535f2 100755 --- a/tests/sanity/extra/botmeta.py +++ b/tests/sanity/extra/botmeta.py @@ -17,7 +17,40 @@ from voluptuous import Required, Schema, Invalid from voluptuous.humanize import humanize_error -REPORT_NO_MAINTAINERS = False +IGNORE_NO_MAINTAINERS = [ + 'plugins/cache/memcached.py', + 'plugins/cache/redis.py', + 'plugins/callback/cgroup_memory_recap.py', + 'plugins/callback/context_demo.py', + 'plugins/callback/counter_enabled.py', + 'plugins/callback/hipchat.py', + 'plugins/callback/jabber.py', + 'plugins/callback/log_plays.py', + 'plugins/callback/logdna.py', + 'plugins/callback/logentries.py', + 'plugins/callback/null.py', + 'plugins/callback/selective.py', + 'plugins/callback/slack.py', + 'plugins/callback/splunk.py', + 'plugins/callback/yaml.py', + 'plugins/inventory/nmap.py', + 'plugins/inventory/virtualbox.py', + 'plugins/connection/chroot.py', + 'plugins/connection/iocage.py', + 'plugins/connection/lxc.py', + 'plugins/lookup/cartesian.py', + 'plugins/lookup/chef_databag.py', + 'plugins/lookup/consul_kv.py', + 'plugins/lookup/credstash.py', + 'plugins/lookup/cyberarkpassword.py', + 'plugins/lookup/flattened.py', + 'plugins/lookup/keyring.py', + 'plugins/lookup/lastpass.py', + 'plugins/lookup/passwordstore.py', + 'plugins/lookup/shelvefile.py', + 'plugins/filter/json_query.py', + 'plugins/filter/random_mac.py', +] FILENAME = '.github/BOTMETA.yml' @@ -81,8 +114,11 @@ def validate(filename, filedata): msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % ( maintainer, filename, ', '.join(all_maintainers)) 
print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg)) - if not all_maintainers and REPORT_NO_MAINTAINERS: + should_have_no_maintainer = filename in IGNORE_NO_MAINTAINERS + if not all_maintainers and not should_have_no_maintainer: print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename)) + if all_maintainers and should_have_no_maintainer: + print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Please remove %s from the ignore list of %s' % (filename, sys.argv[0]))) def main(): From 5be4adc434643d024186f964d437208e29677546 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 28 Jul 2021 17:49:37 +1200 Subject: [PATCH 0471/3093] ejabberd_user - refactoring and simplification (#3093) * ejabberd_user - refactoring and simplification * added changelog fragment * Update changelogs/fragments/3093-ejabberd_user-refactor.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../3093-ejabberd_user-refactor.yaml | 2 + .../web_infrastructure/ejabberd_user.py | 59 +++++-------------- 2 files changed, 16 insertions(+), 45 deletions(-) create mode 100644 changelogs/fragments/3093-ejabberd_user-refactor.yaml diff --git a/changelogs/fragments/3093-ejabberd_user-refactor.yaml b/changelogs/fragments/3093-ejabberd_user-refactor.yaml new file mode 100644 index 0000000000..875ef6da71 --- /dev/null +++ b/changelogs/fragments/3093-ejabberd_user-refactor.yaml @@ -0,0 +1,2 @@ +bugfixes: + - ejabberd_user - replaced in-code check with ``required_if``, using ``get_bin_path()`` for the command, passing args to ``run_command()`` as list instead of string (https://github.com/ansible-collections/community.general/pull/3093). 
diff --git a/plugins/modules/web_infrastructure/ejabberd_user.py b/plugins/modules/web_infrastructure/ejabberd_user.py index be63c92080..e6cdd72b5e 100644 --- a/plugins/modules/web_infrastructure/ejabberd_user.py +++ b/plugins/modules/web_infrastructure/ejabberd_user.py @@ -72,11 +72,6 @@ import syslog from ansible.module_utils.basic import AnsibleModule -class EjabberdUserException(Exception): - """ Base exception for EjabberdUser class object """ - pass - - class EjabberdUser(object): """ This object represents a user resource for an ejabberd server. The object manages user creation and deletion using ejabberdctl. The following @@ -99,12 +94,7 @@ class EjabberdUser(object): changed. It will return True if the user does not match the supplied credentials and False if it does not """ - try: - options = [self.user, self.host, self.pwd] - (rc, out, err) = self.run_command('check_password', options) - except EjabberdUserException: - (rc, out, err) = (1, None, "required attribute(s) missing") - return rc + return self.run_command('check_password', [self.user, self.host, self.pwd]) @property def exists(self): @@ -112,12 +102,7 @@ class EjabberdUser(object): host specified. 
If the user exists True is returned, otherwise False is returned """ - try: - options = [self.user, self.host] - (rc, out, err) = self.run_command('check_account', options) - except EjabberdUserException: - (rc, out, err) = (1, None, "required attribute(s) missing") - return not bool(int(rc)) + return self.run_command('check_account', [self.user, self.host]) def log(self, entry): """ This method will log information to the local syslog facility """ @@ -129,44 +114,25 @@ class EjabberdUser(object): """ This method will run the any command specified and return the returns using the Ansible common module """ - if not all(options): - raise EjabberdUserException - - cmd = 'ejabberdctl %s ' % cmd - cmd += " ".join(options) - self.log('command: %s' % cmd) - return self.module.run_command(cmd.split()) + cmd = [self.module.get_bin_path('ejabberdctl'), cmd] + options + self.log('command: %s' % " ".join(cmd)) + return self.module.run_command(cmd) def update(self): """ The update method will update the credentials for the user provided """ - try: - options = [self.user, self.host, self.pwd] - (rc, out, err) = self.run_command('change_password', options) - except EjabberdUserException: - (rc, out, err) = (1, None, "required attribute(s) missing") - return (rc, out, err) + return self.run_command('change_password', [self.user, self.host, self.pwd]) def create(self): """ The create method will create a new user on the host with the password provided """ - try: - options = [self.user, self.host, self.pwd] - (rc, out, err) = self.run_command('register', options) - except EjabberdUserException: - (rc, out, err) = (1, None, "required attribute(s) missing") - return (rc, out, err) + return self.run_command('register', [self.user, self.host, self.pwd]) def delete(self): """ The delete method will delete the user from the host """ - try: - options = [self.user, self.host] - (rc, out, err) = self.run_command('unregister', options) - except EjabberdUserException: - (rc, out, err) = (1, 
None, "required attribute(s) missing") - return (rc, out, err) + return self.run_command('unregister', [self.user, self.host]) def main(): @@ -174,11 +140,14 @@ def main(): argument_spec=dict( host=dict(required=True, type='str'), username=dict(required=True, type='str'), - password=dict(default=None, type='str', no_log=True), + password=dict(type='str', no_log=True), state=dict(default='present', choices=['present', 'absent']), - logging=dict(default=False, type='bool') + logging=dict(default=False, type='bool') # deprecate in favour of c.g.syslogger? ), - supports_check_mode=True + required_if=[ + ('state', 'present', ['password']), + ], + supports_check_mode=True, ) obj = EjabberdUser(module) From 0b70b3baff58f26e6a3dac12ed125d3fe4e9195a Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 28 Jul 2021 18:22:18 +1200 Subject: [PATCH 0472/3093] django_manage - using list instead of string in run_command() (#3098) * django_manage - using list instead of string in run_command() * added changelog fragment --- .../3098-django_manage-cmd-list.yaml | 2 + .../web_infrastructure/django_manage.py | 40 +++++++++---------- 2 files changed, 21 insertions(+), 21 deletions(-) create mode 100644 changelogs/fragments/3098-django_manage-cmd-list.yaml diff --git a/changelogs/fragments/3098-django_manage-cmd-list.yaml b/changelogs/fragments/3098-django_manage-cmd-list.yaml new file mode 100644 index 0000000000..8522059ff6 --- /dev/null +++ b/changelogs/fragments/3098-django_manage-cmd-list.yaml @@ -0,0 +1,2 @@ +bugfixes: + - django_manage - refactor to call ``run_command()`` passing command as a list instead of string (https://github.com/ansible-collections/community.general/pull/3098). 
diff --git a/plugins/modules/web_infrastructure/django_manage.py b/plugins/modules/web_infrastructure/django_manage.py index ba38abd90e..98ffdc446b 100644 --- a/plugins/modules/web_infrastructure/django_manage.py +++ b/plugins/modules/web_infrastructure/django_manage.py @@ -256,20 +256,20 @@ def main(): argument_spec=dict( command=dict(required=True, type='str'), project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']), - settings=dict(default=None, required=False, type='path'), - pythonpath=dict(default=None, required=False, type='path', aliases=['python_path']), - virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']), + settings=dict(type='path'), + pythonpath=dict(type='path', aliases=['python_path']), + virtualenv=dict(type='path', aliases=['virtual_env']), - apps=dict(default=None, required=False), - cache_table=dict(default=None, required=False, type='str'), - clear=dict(default=False, required=False, type='bool'), - database=dict(default=None, required=False, type='str'), - failfast=dict(default=False, required=False, type='bool', aliases=['fail_fast']), - fixtures=dict(default=None, required=False, type='str'), - testrunner=dict(default=None, required=False, type='str', aliases=['test_runner']), - skip=dict(default=None, required=False, type='bool'), - merge=dict(default=None, required=False, type='bool'), - link=dict(default=None, required=False, type='bool'), + apps=dict(), + cache_table=dict(type='str'), + clear=dict(default=False, type='bool'), + database=dict(type='str'), + failfast=dict(default=False, type='bool', aliases=['fail_fast']), + fixtures=dict(type='str'), + testrunner=dict(type='str', aliases=['test_runner']), + skip=dict(type='bool'), + merge=dict(type='bool'), + link=dict(type='bool'), ), ) @@ -279,8 +279,6 @@ def main(): for param in specific_params: value = module.params[param] - if param in specific_boolean_params: - value = module.boolean(value) if value and param not in 
command_allowed_param_map[command]: module.fail_json(msg='%s param is incompatible with command=%s' % (param, command)) @@ -290,23 +288,23 @@ def main(): _ensure_virtualenv(module) - cmd = "./manage.py %s" % (command, ) + cmd = ["./manage.py", command] if command in noinput_commands: - cmd = '%s --noinput' % cmd + cmd.append("--noinput") for param in general_params: if module.params[param]: - cmd = '%s --%s=%s' % (cmd, param, module.params[param]) + cmd.append('--%s=%s' % (param, module.params[param])) for param in specific_boolean_params: - if module.boolean(module.params[param]): - cmd = '%s --%s' % (cmd, param) + if module.params[param]: + cmd.append('--%s' % param) # these params always get tacked on the end of the command for param in end_of_command_params: if module.params[param]: - cmd = '%s %s' % (cmd, module.params[param]) + cmd.append(module.params[param]) rc, out, err = module.run_command(cmd, cwd=project_path) if rc != 0: From 549dfaae6415999755f2ca722b39e8c314c67aa1 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Wed, 28 Jul 2021 18:43:09 +1200 Subject: [PATCH 0473/3093] gunicorn - minor refactoring (#3092) * minor refactoring in gunicorn module * added changelog fragment * reworked the gunicorn bin path part of the code, per PR * Update changelogs/fragments/3092-gunicorn-refactor.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/3092-gunicorn-refactor.yaml | 2 ++ .../modules/web_infrastructure/gunicorn.py | 19 +++++++------------ 2 files changed, 9 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/3092-gunicorn-refactor.yaml diff --git a/changelogs/fragments/3092-gunicorn-refactor.yaml b/changelogs/fragments/3092-gunicorn-refactor.yaml new file mode 100644 index 0000000000..114e865add --- /dev/null +++ b/changelogs/fragments/3092-gunicorn-refactor.yaml @@ -0,0 +1,2 @@ +minor_changes: + - gunicorn - search for ``gunicorn`` binary in more paths 
(https://github.com/ansible-collections/community.general/pull/3092). diff --git a/plugins/modules/web_infrastructure/gunicorn.py b/plugins/modules/web_infrastructure/gunicorn.py index 5703055623..4c9e5da45b 100644 --- a/plugins/modules/web_infrastructure/gunicorn.py +++ b/plugins/modules/web_infrastructure/gunicorn.py @@ -101,14 +101,12 @@ gunicorn: import os import time -# import ansible utils from ansible.module_utils.basic import AnsibleModule def search_existing_config(config, option): ''' search in config file for specified option ''' if config and os.path.isfile(config): - data_config = None with open(config, 'r') as f: for line in f: if option in line: @@ -135,15 +133,12 @@ def main(): module = AnsibleModule( argument_spec=dict( app=dict(required=True, type='str', aliases=['name']), - venv=dict(required=False, type='path', default=None, aliases=['virtualenv']), - config=dict(required=False, default=None, type='path', aliases=['conf']), - chdir=dict(required=False, type='path', default=None), - pid=dict(required=False, type='path', default=None), - user=dict(required=False, type='str'), - worker=dict(required=False, - type='str', - choices=['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp'] - ), + venv=dict(type='path', aliases=['virtualenv']), + config=dict(type='path', aliases=['conf']), + chdir=dict(type='path'), + pid=dict(type='path'), + user=dict(type='str'), + worker=dict(type='str', choices=['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp']), ) ) @@ -165,7 +160,7 @@ def main(): if venv: gunicorn_command = "/".join((venv, 'bin', 'gunicorn')) else: - gunicorn_command = 'gunicorn' + gunicorn_command = module.get_bin_path('gunicorn') # to daemonize the process options = ["-D"] From cde95641635feeedeafce484bfef62cd668ad5bd Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Thu, 29 Jul 2021 17:49:52 +1200 Subject: [PATCH 0474/3093] deploy_helper - changed in-code condition to 
required_if (#3104) * changed in-code condition to required_if * added changelog fragment --- .../3104-deploy_helper-required_if.yaml | 2 ++ .../web_infrastructure/deploy_helper.py | 23 +++++++++---------- 2 files changed, 13 insertions(+), 12 deletions(-) create mode 100644 changelogs/fragments/3104-deploy_helper-required_if.yaml diff --git a/changelogs/fragments/3104-deploy_helper-required_if.yaml b/changelogs/fragments/3104-deploy_helper-required_if.yaml new file mode 100644 index 0000000000..ee48461003 --- /dev/null +++ b/changelogs/fragments/3104-deploy_helper-required_if.yaml @@ -0,0 +1,2 @@ +bugfixes: + - deploy_helper - improved parameter checking by using standard Ansible construct (https://github.com/ansible-collections/community.general/pull/3104). diff --git a/plugins/modules/web_infrastructure/deploy_helper.py b/plugins/modules/web_infrastructure/deploy_helper.py index f879594bc3..f73c9c1f18 100644 --- a/plugins/modules/web_infrastructure/deploy_helper.py +++ b/plugins/modules/web_infrastructure/deploy_helper.py @@ -359,8 +359,6 @@ class DeployHelper(object): self.module.fail_json(msg="%s exists but is not a symbolic link" % path) def create_link(self, source, link_name): - changed = False - if os.path.islink(link_name): norm_link = os.path.normpath(os.path.realpath(link_name)) norm_source = os.path.normpath(os.path.realpath(source)) @@ -458,15 +456,18 @@ def main(): module = AnsibleModule( argument_spec=dict( path=dict(aliases=['dest'], required=True, type='path'), - release=dict(required=False, type='str', default=None), - releases_path=dict(required=False, type='str', default='releases'), - shared_path=dict(required=False, type='path', default='shared'), - current_path=dict(required=False, type='path', default='current'), - keep_releases=dict(required=False, type='int', default=5), - clean=dict(required=False, type='bool', default=True), - unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'), - state=dict(required=False, 
choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present') + release=dict(type='str'), + releases_path=dict(type='str', default='releases'), + shared_path=dict(type='path', default='shared'), + current_path=dict(type='path', default='current'), + keep_releases=dict(type='int', default=5), + clean=dict(type='bool', default=True), + unfinished_filename=dict(type='str', default='DEPLOY_UNFINISHED'), + state=dict(choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present') ), + required_if=[ + ('state', 'finalize', ['release']), + ], add_file_common_args=True, supports_check_mode=True ) @@ -493,8 +494,6 @@ def main(): result['ansible_facts'] = {'deploy_helper': facts} elif deploy_helper.state == 'finalize': - if not deploy_helper.release: - module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)") if deploy_helper.keep_releases <= 0: module.fail_json(msg="'keep_releases' should be at least 1") From 2935b011edae0184e219dc157259bf986e9fb251 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Fri, 30 Jul 2021 08:30:20 +1200 Subject: [PATCH 0475/3093] ansible_galaxy_install - new module (#2933) * initial commit * multiple changes: - added a proper process_command_output() - adjusted the output_params fields (and removed other *_params fields) * added RETURN documentation, plus few adjustments * fixed sanity tests * updated BOTMETA.yml * further adjustments * integration tests - first commit * removed unused files from integration test * added role installation tests * removed extraneous cmd line option * added requirement-file installation tests * adjusted documentation and output variable names * fixed integration test * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update 
plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Ajpantuso * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Felix Fontein * Update tests/integration/targets/ansible_galaxy_install/aliases Co-authored-by: Felix Fontein * Per comments in the PR: - fixed missing paths case - fixed install parsing (regexp) for ansible-galaxy collection install in v2.10 * changed the collection installed in test to something unlikely to come embedded in Ansible itself * fixed logic for Ansible 2.9 * kill trailing whitespace * changed default language from C.UTF-8 to en_US.UTF-8 * updated c.g version * skipping test in python 2.6, as ansible-galaxy no longer supports it in devel * Multiple changes: - improved docs on ansible 2.9 and python 2.6 - removed method __changed__() - unnecessary since tracking changes in the ansible29_change var - renamed methods __run29__() and __run210plus__() to __setup29__() and __setup210plus__(), respectively - ansible 2.9 warning for requirements_file only when type is "both" * sanity fix * further adjustments * removed extraneous doc * changed method to determine remote ansible version * do not allow type=both in Ansible 2.9 * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Felix Fontein * Update plugins/modules/packaging/language/ansible_galaxy_install.py Co-authored-by: Felix Fontein * changed method names per PR Co-authored-by: Ajpantuso Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/modules/ansible_galaxy_install.py | 1 + 
.../language/ansible_galaxy_install.py | 318 ++++++++++++++++++ .../targets/ansible_galaxy_install/aliases | 3 + .../ansible_galaxy_install/files/test.yml | 11 + .../ansible_galaxy_install/tasks/main.yml | 95 ++++++ 6 files changed, 430 insertions(+) create mode 120000 plugins/modules/ansible_galaxy_install.py create mode 100644 plugins/modules/packaging/language/ansible_galaxy_install.py create mode 100644 tests/integration/targets/ansible_galaxy_install/aliases create mode 100644 tests/integration/targets/ansible_galaxy_install/files/test.yml create mode 100644 tests/integration/targets/ansible_galaxy_install/tasks/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index fb08599a13..859d88bb84 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -707,6 +707,8 @@ files: maintainers: makaimc $modules/notification/typetalk.py: maintainers: tksmd + $modules/packaging/language/ansible_galaxy_install.py: + maintainers: russoz $modules/packaging/language/bower.py: maintainers: mwarkentin $modules/packaging/language/bundler.py: diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py new file mode 120000 index 0000000000..369d39dbe1 --- /dev/null +++ b/plugins/modules/ansible_galaxy_install.py @@ -0,0 +1 @@ +packaging/language/ansible_galaxy_install.py \ No newline at end of file diff --git a/plugins/modules/packaging/language/ansible_galaxy_install.py b/plugins/modules/packaging/language/ansible_galaxy_install.py new file mode 100644 index 0000000000..9e9b5cc4f6 --- /dev/null +++ b/plugins/modules/packaging/language/ansible_galaxy_install.py @@ -0,0 +1,318 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2021, Alexei Znamensky +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = """ +module: ansible_galaxy_install +author: + - "Alexei Znamensky 
(@russoz)" +short_description: Install Ansible roles or collections using ansible-galaxy +version_added: 3.5.0 +description: + - This module allows the installation of Ansible collections or roles using C(ansible-galaxy). +notes: + - > + B(Ansible 2.9/2.10): The C(ansible-galaxy) command changed significantly between Ansible 2.9 and + ansible-base 2.10 (later ansible-core 2.11). See comments in the parameters. +requirements: + - Ansible 2.9, ansible-base 2.10, or ansible-core 2.11 or newer +options: + type: + description: + - The type of installation performed by C(ansible-galaxy). + - If I(type) is C(both), then I(requirements_file) must be passed and it may contain both roles and collections. + - "Note however that the opposite is not true: if using a I(requirements_file), then I(type) can be any of the three choices." + - "B(Ansible 2.9): The option C(both) will have the same effect as C(role)." + type: str + choices: [collection, role, both] + required: true + name: + description: + - Name of the collection or role being installed. + - Versions can be specified with C(ansible-galaxy) usual formats. For example, C(community.docker:1.6.1) or C(ansistrano.deploy,3.8.0). + - I(name) and I(requirements_file) are mutually exclusive. + type: str + requirements_file: + description: + - Path to a file containing a list of requirements to be installed. + - It works for I(type) equals to C(collection) and C(role). + - I(name) and I(requirements_file) are mutually exclusive. + - "B(Ansible 2.9): It can only be used to install either I(type=role) or I(type=collection), but not both at the same run." + type: path + dest: + description: + - The path to the directory containing your collections or roles, according to the value of I(type). + - > + Please notice that C(ansible-galaxy) will not install collections with I(type=both), when I(requirements_file) + contains both roles and collections and I(dest) is specified. 
+ type: path + force: + description: + - Force overwriting an existing role or collection. + - Using I(force=true) is mandatory when downgrading. + - "B(Ansible 2.9 and 2.10): Must be C(true) to upgrade roles and collections." + type: bool + default: false + ack_ansible29: + description: + - Acknowledge using Ansible 2.9 with its limitations, and prevents the module from generating warnings about them. + - This option is completely ignored if using a version Ansible greater than C(2.9.x). + type: bool + default: false +""" + +EXAMPLES = """ +- name: Install collection community.network + community.general.ansible_galaxy_install: + type: collection + name: community.network + +- name: Install role at specific path + community.general.ansible_galaxy_install: + type: role + name: ansistrano.deploy + dest: /ansible/roles + +- name: Install collections and roles together + community.general.ansible_galaxy_install: + type: both + requirements_file: requirements.yml + +- name: Force-install collection community.network at specific version + community.general.ansible_galaxy_install: + type: collection + name: community.network:3.0.2 + force: true + +""" + +RETURN = """ + type: + description: The value of the I(type) parameter. + type: str + returned: always + name: + description: The value of the I(name) parameter. + type: str + returned: always + dest: + description: The value of the I(dest) parameter. + type: str + returned: always + requirements_file: + description: The value of the I(requirements_file) parameter. + type: str + returned: always + force: + description: The value of the I(force) parameter. + type: bool + returned: always + installed_roles: + description: + - If I(requirements_file) is specified instead, returns dictionary with all the roles installed per path. + - If I(name) is specified, returns that role name and the version installed per path. + - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand." 
+ type: dict + returned: always when installing roles + contains: + "": + description: Roles and versions for that path. + type: dict + sample: + /home/user42/.ansible/roles: + ansistrano.deploy: 3.9.0 + baztian.xfce: v0.0.3 + /custom/ansible/roles: + ansistrano.deploy: 3.8.0 + installed_collections: + description: + - If I(requirements_file) is specified instead, returns dictionary with all the collections installed per path. + - If I(name) is specified, returns that collection name and the version installed per path. + - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand." + type: dict + returned: always when installing collections + contains: + "": + description: Collections and versions for that path + type: dict + sample: + /home/az/.ansible/collections/ansible_collections: + community.docker: 1.6.0 + community.general: 3.0.2 + /custom/ansible/ansible_collections: + community.general: 3.1.0 + new_collections: + description: New collections installed by this module. + returned: success + type: dict + sample: + community.general: 3.1.0 + community.docker: 1.6.1 + new_roles: + description: New roles installed by this module. + returned: success + type: dict + sample: + ansistrano.deploy: 3.8.0 + baztian.xfce: v0.0.3 +""" + +import re + +from ansible_collections.community.general.plugins.module_utils.module_helper import CmdModuleHelper, ArgFormat + + +class AnsibleGalaxyInstall(CmdModuleHelper): + _RE_GALAXY_VERSION = re.compile(r'^ansible-galaxy(?: \[core)? 
(?P\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?') + _RE_LIST_PATH = re.compile(r'^# (?P.*)$') + _RE_LIST_COLL = re.compile(r'^(?P\w+\.\w+)\s+(?P[\d\.]+)\s*$') + _RE_LIST_ROLE = re.compile(r'^- (?P\w+\.\w+),\s+(?P[\d\.]+)\s*$') + _RE_INSTALL_OUTPUT = None # Set after determining ansible version, see __init_module__() + ansible_version = None + is_ansible29 = None + + output_params = ('type', 'name', 'dest', 'requirements_file', 'force') + module = dict( + argument_spec=dict( + type=dict(type='str', choices=('collection', 'role', 'both'), required=True), + name=dict(type='str'), + requirements_file=dict(type='path'), + dest=dict(type='path'), + force=dict(type='bool', default=False), + ack_ansible29=dict(type='bool', default=False), + ), + mutually_exclusive=[('name', 'requirements_file')], + required_one_of=[('name', 'requirements_file')], + required_if=[('type', 'both', ['requirements_file'])], + supports_check_mode=False, + ) + + command = 'ansible-galaxy' + command_args_formats = dict( + type=dict(fmt=lambda v: [] if v == 'both' else [v]), + galaxy_cmd=dict(), + requirements_file=dict(fmt=('-r', '{0}'),), + dest=dict(fmt=('-p', '{0}'),), + force=dict(fmt="--force", style=ArgFormat.BOOLEAN), + ) + force_lang = "en_US.UTF-8" + check_rc = True + + def _get_ansible_galaxy_version(self): + ansible_galaxy = self.module.get_bin_path("ansible-galaxy", required=True) + dummy, out, dummy = self.module.run_command([ansible_galaxy, "--version"], check_rc=True) + line = out.splitlines()[0] + match = self._RE_GALAXY_VERSION.match(line) + if not match: + raise RuntimeError("Unable to determine ansible-galaxy version from: {0}".format(line)) + version = match.group("version") + version = tuple(int(x) for x in version.split('.')[:3]) + return version + + def __init_module__(self): + self.ansible_version = self._get_ansible_galaxy_version() + self.is_ansible29 = self.ansible_version < (2, 10) + if self.is_ansible29: + self._RE_INSTALL_OUTPUT = re.compile(r"^(?:.*Installing 
'(?P\w+\.\w+):(?P[\d\.]+)'.*" + r'|- (?P\w+\.\w+) \((?P[\d\.]+)\)' + r' was installed successfully)$') + else: + # Collection install output changed: + # ansible-base 2.10: "coll.name (x.y.z)" + # ansible-core 2.11+: "coll.name:x.y.z" + self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P\w+\.\w+)(?: \(|:)(?P[\d\.]+)\)?' + r'|- (?P\w+\.\w+) \((?P[\d\.]+)\))' + r' was installed successfully$') + + @staticmethod + def _process_output_list(*args): + if "None of the provided paths were usable" in args[1]: + return [] + return args[1].splitlines() + + def _list_element(self, _type, path_re, elem_re): + params = ({'type': _type}, {'galaxy_cmd': 'list'}, 'dest') + elems = self.run_command(params=params, + publish_rc=False, publish_out=False, publish_err=False, + process_output=self._process_output_list, + check_rc=False) + elems_dict = {} + current_path = None + for line in elems: + if line.startswith("#"): + match = path_re.match(line) + if not match: + continue + if self.vars.dest is not None and match.group('path') != self.vars.dest: + current_path = None + continue + current_path = match.group('path') if match else None + elems_dict[current_path] = {} + + elif current_path is not None: + match = elem_re.match(line) + if not match or (self.vars.name is not None and match.group('elem') != self.vars.name): + continue + elems_dict[current_path][match.group('elem')] = match.group('version') + return elems_dict + + def _list_collections(self): + return self._list_element('collection', self._RE_LIST_PATH, self._RE_LIST_COLL) + + def _list_roles(self): + return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE) + + def _setup29(self): + self.vars.set("new_collections", {}) + self.vars.set("new_roles", {}) + self.vars.set("ansible29_change", False, change=True, output=False) + if not self.vars.ack_ansible29: + self.module.warn("Ansible 2.9 or older: unable to retrieve lists of roles and collections already installed") + if self.vars.requirements_file is not None and 
self.vars.type == 'both': + self.module.warn("Ansible 2.9 or older: will install only roles from requirement files") + + def _setup210plus(self): + self.vars.set("new_collections", {}, change=True) + self.vars.set("new_roles", {}, change=True) + if self.vars.type != "collection": + self.vars.installed_roles = self._list_roles() + if self.vars.type != "roles": + self.vars.installed_collections = self._list_collections() + + def __run__(self): + if self.is_ansible29: + if self.vars.type == 'both': + raise ValueError("Type 'both' not supported in Ansible 2.9") + self._setup29() + else: + self._setup210plus() + params = ('type', {'galaxy_cmd': 'install'}, 'force', 'dest', 'requirements_file', 'name') + self.run_command(params=params) + + def process_command_output(self, rc, out, err): + for line in out.splitlines(): + match = self._RE_INSTALL_OUTPUT.match(line) + if not match: + continue + if match.group("collection"): + self.vars.new_collections[match.group("collection")] = match.group("cversion") + if self.is_ansible29: + self.vars.ansible29_change = True + elif match.group("role"): + self.vars.new_roles[match.group("role")] = match.group("rversion") + if self.is_ansible29: + self.vars.ansible29_change = True + + +def main(): + galaxy = AnsibleGalaxyInstall() + galaxy.run() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/ansible_galaxy_install/aliases b/tests/integration/targets/ansible_galaxy_install/aliases new file mode 100644 index 0000000000..ca7873ddab --- /dev/null +++ b/tests/integration/targets/ansible_galaxy_install/aliases @@ -0,0 +1,3 @@ +destructive +shippable/posix/group3 +skip/python2.6 diff --git a/tests/integration/targets/ansible_galaxy_install/files/test.yml b/tests/integration/targets/ansible_galaxy_install/files/test.yml new file mode 100644 index 0000000000..9d2848e087 --- /dev/null +++ b/tests/integration/targets/ansible_galaxy_install/files/test.yml @@ -0,0 +1,11 @@ +--- +roles: + # Install a role from Ansible 
Galaxy. + - name: geerlingguy.java + version: 1.9.6 + +collections: + # Install a collection from Ansible Galaxy. + - name: geerlingguy.php_roles + version: 0.9.3 + source: https://galaxy.ansible.com diff --git a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml new file mode 100644 index 0000000000..232c96aff5 --- /dev/null +++ b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml @@ -0,0 +1,95 @@ +--- +################################################### +- name: Install collection netbox.netbox + community.general.ansible_galaxy_install: + type: collection + name: netbox.netbox + register: install_c0 + +- name: Assert collection was installed + assert: + that: + - install_c0 is changed + - '"netbox.netbox" in install_c0.new_collections' + +- name: Install collection netbox.netbox (again) + community.general.ansible_galaxy_install: + type: collection + name: netbox.netbox + register: install_c1 + +- name: Assert collection was not installed + assert: + that: + - install_c1 is not changed + +################################################### +- name: Install role ansistrano.deploy + community.general.ansible_galaxy_install: + type: role + name: ansistrano.deploy + register: install_r0 + +- name: Assert collection was installed + assert: + that: + - install_r0 is changed + - '"ansistrano.deploy" in install_r0.new_roles' + +- name: Install role ansistrano.deploy (again) + community.general.ansible_galaxy_install: + type: role + name: ansistrano.deploy + register: install_r1 + +- name: Assert role was not installed + assert: + that: + - install_r1 is not changed + +################################################### +- name: + set_fact: + reqs_file: '{{ output_dir }}/reqs.yaml' + +- name: Copy requirements file + copy: + src: 'files/test.yml' + dest: '{{ reqs_file }}' + +- name: Install from requirements file + community.general.ansible_galaxy_install: + type: both + 
requirements_file: "{{ reqs_file }}" + register: install_rq0 + ignore_errors: true + +- name: Assert requirements file was installed (Ansible >2.9) + assert: + that: + - install_rq0 is changed + - '"geerlingguy.java" in install_rq0.new_roles' + - '"geerlingguy.php_roles" in install_rq0.new_collections' + when: + - (ansible_version.major != 2 or ansible_version.minor != 9) + +- name: Assert requirements file was installed (Ansible 2.9) + assert: + that: + - install_rq0 is failed + - install_rq0 is not changed + when: + - ansible_version.major == 2 + - ansible_version.minor == 9 + +- name: Install from requirements file (again) + community.general.ansible_galaxy_install: + type: both + requirements_file: "{{ reqs_file }}" + register: install_rq1 + ignore_errors: true + +- name: Assert requirements file was not installed + assert: + that: + - install_rq1 is not changed From d974ca32ae1b2cd17066cd7e8dbb60f7c923ed67 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 31 Jul 2021 04:00:14 +1200 Subject: [PATCH 0476/3093] removed extraneous dependency in integration test (#3114) --- tests/integration/targets/apache2_module/meta/main.yml | 2 -- tests/integration/targets/archive/meta/main.yml | 1 - tests/integration/targets/deploy_helper/meta/main.yml | 2 -- tests/integration/targets/flatpak/meta/main.yml | 1 - tests/integration/targets/flatpak_remote/meta/main.yml | 1 - tests/integration/targets/gem/meta/main.yml | 1 - tests/integration/targets/hg/meta/main.yml | 1 - tests/integration/targets/iso_create/meta/main.yml | 1 - tests/integration/targets/iso_extract/meta/main.yml | 1 - tests/integration/targets/launchd/meta/main.yml | 4 ---- tests/integration/targets/locale_gen/meta/main.yml | 2 -- tests/integration/targets/zypper/meta/main.yml | 2 -- tests/integration/targets/zypper_repository/meta/main.yml | 2 -- 13 files changed, 21 deletions(-) delete mode 100644 tests/integration/targets/apache2_module/meta/main.yml delete 
mode 100644 tests/integration/targets/deploy_helper/meta/main.yml delete mode 100644 tests/integration/targets/launchd/meta/main.yml delete mode 100644 tests/integration/targets/locale_gen/meta/main.yml delete mode 100644 tests/integration/targets/zypper/meta/main.yml delete mode 100644 tests/integration/targets/zypper_repository/meta/main.yml diff --git a/tests/integration/targets/apache2_module/meta/main.yml b/tests/integration/targets/apache2_module/meta/main.yml deleted file mode 100644 index 07faa21776..0000000000 --- a/tests/integration/targets/apache2_module/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_tests diff --git a/tests/integration/targets/archive/meta/main.yml b/tests/integration/targets/archive/meta/main.yml index ca521ab1ef..5438ced5c3 100644 --- a/tests/integration/targets/archive/meta/main.yml +++ b/tests/integration/targets/archive/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - setup_pkg_mgr - - prepare_tests diff --git a/tests/integration/targets/deploy_helper/meta/main.yml b/tests/integration/targets/deploy_helper/meta/main.yml deleted file mode 100644 index 07faa21776..0000000000 --- a/tests/integration/targets/deploy_helper/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_tests diff --git a/tests/integration/targets/flatpak/meta/main.yml b/tests/integration/targets/flatpak/meta/main.yml index 314f77eba9..258cd4345c 100644 --- a/tests/integration/targets/flatpak/meta/main.yml +++ b/tests/integration/targets/flatpak/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - - prepare_tests - setup_flatpak_remote diff --git a/tests/integration/targets/flatpak_remote/meta/main.yml b/tests/integration/targets/flatpak_remote/meta/main.yml index 314f77eba9..258cd4345c 100644 --- a/tests/integration/targets/flatpak_remote/meta/main.yml +++ b/tests/integration/targets/flatpak_remote/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - - prepare_tests - setup_flatpak_remote diff --git 
a/tests/integration/targets/gem/meta/main.yml b/tests/integration/targets/gem/meta/main.yml index ca521ab1ef..5438ced5c3 100644 --- a/tests/integration/targets/gem/meta/main.yml +++ b/tests/integration/targets/gem/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - setup_pkg_mgr - - prepare_tests diff --git a/tests/integration/targets/hg/meta/main.yml b/tests/integration/targets/hg/meta/main.yml index ca521ab1ef..5438ced5c3 100644 --- a/tests/integration/targets/hg/meta/main.yml +++ b/tests/integration/targets/hg/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - setup_pkg_mgr - - prepare_tests diff --git a/tests/integration/targets/iso_create/meta/main.yml b/tests/integration/targets/iso_create/meta/main.yml index ca521ab1ef..5438ced5c3 100644 --- a/tests/integration/targets/iso_create/meta/main.yml +++ b/tests/integration/targets/iso_create/meta/main.yml @@ -1,3 +1,2 @@ dependencies: - setup_pkg_mgr - - prepare_tests diff --git a/tests/integration/targets/iso_extract/meta/main.yml b/tests/integration/targets/iso_extract/meta/main.yml index bdc4dfe016..0e51c36ebd 100644 --- a/tests/integration/targets/iso_extract/meta/main.yml +++ b/tests/integration/targets/iso_extract/meta/main.yml @@ -1,4 +1,3 @@ dependencies: - setup_pkg_mgr - - prepare_tests - setup_epel diff --git a/tests/integration/targets/launchd/meta/main.yml b/tests/integration/targets/launchd/meta/main.yml deleted file mode 100644 index 039249398e..0000000000 --- a/tests/integration/targets/launchd/meta/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -dependencies: - - prepare_tests diff --git a/tests/integration/targets/locale_gen/meta/main.yml b/tests/integration/targets/locale_gen/meta/main.yml deleted file mode 100644 index 07faa21776..0000000000 --- a/tests/integration/targets/locale_gen/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_tests diff --git a/tests/integration/targets/zypper/meta/main.yml b/tests/integration/targets/zypper/meta/main.yml deleted file mode 100644 index 
07faa21776..0000000000 --- a/tests/integration/targets/zypper/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_tests diff --git a/tests/integration/targets/zypper_repository/meta/main.yml b/tests/integration/targets/zypper_repository/meta/main.yml deleted file mode 100644 index 07faa21776..0000000000 --- a/tests/integration/targets/zypper_repository/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - prepare_tests From d9533c44aa4895ba9a3303926153e375d571b80b Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 31 Jul 2021 04:07:38 +1200 Subject: [PATCH 0477/3093] apache2_module - multiple improvements (#3106) * multiple improvements * added changelog fragment * comment and name in int test files * added notes to the documentation * removed the extraneous changelog frag * Update plugins/modules/web_infrastructure/apache2_module.py * adjusted doc text for sanity check * Update plugins/modules/web_infrastructure/apache2_module.py Co-authored-by: Felix Fontein * removed extraneous dependency in integration test Co-authored-by: Felix Fontein --- .../fragments/3106-apache2_module-review.yaml | 2 + .../web_infrastructure/apache2_module.py | 35 ++++----- .../apache2_module/tasks/actualtest.yml | 74 ++++++++----------- .../targets/apache2_module/tasks/main.yml | 24 +++++- 4 files changed, 71 insertions(+), 64 deletions(-) create mode 100644 changelogs/fragments/3106-apache2_module-review.yaml diff --git a/changelogs/fragments/3106-apache2_module-review.yaml b/changelogs/fragments/3106-apache2_module-review.yaml new file mode 100644 index 0000000000..d7840b2511 --- /dev/null +++ b/changelogs/fragments/3106-apache2_module-review.yaml @@ -0,0 +1,2 @@ +minor_changes: + - apache2_module - minor refactoring improving code quality, readability and speed (https://github.com/ansible-collections/community.general/pull/3106). 
diff --git a/plugins/modules/web_infrastructure/apache2_module.py b/plugins/modules/web_infrastructure/apache2_module.py index 4cc0ef8b37..d85ed0158f 100644 --- a/plugins/modules/web_infrastructure/apache2_module.py +++ b/plugins/modules/web_infrastructure/apache2_module.py @@ -49,6 +49,9 @@ options: type: bool default: False requirements: ["a2enmod","a2dismod"] +notes: + - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions. + Whether it works on others depend on whether the C(a2enmod) and C(a2dismod) tools are available or not. ''' EXAMPLES = ''' @@ -109,13 +112,14 @@ import re # import module snippets from ansible.module_utils.basic import AnsibleModule +_re_threaded = re.compile(r'threaded: *yes') + def _run_threaded(module): control_binary = _get_ctl_binary(module) + result, stdout, stderr = module.run_command([control_binary, "-V"]) - result, stdout, stderr = module.run_command("%s -V" % control_binary) - - return bool(re.search(r'threaded:[ ]*yes', stdout)) + return bool(_re_threaded.search(stdout)) def _get_ctl_binary(module): @@ -124,15 +128,12 @@ def _get_ctl_binary(module): if ctl_binary is not None: return ctl_binary - module.fail_json( - msg="Neither of apache2ctl nor apachctl found." - " At least one apache control binary is necessary." - ) + module.fail_json(msg="Neither of apache2ctl nor apachctl found. 
At least one apache control binary is necessary.") def _module_is_enabled(module): control_binary = _get_ctl_binary(module) - result, stdout, stderr = module.run_command("%s -M" % control_binary) + result, stdout, stderr = module.run_command([control_binary, "-M"]) if result != 0: error_msg = "Error executing %s: %s" % (control_binary, stderr) @@ -168,7 +169,7 @@ def create_apache_identifier(name): # re expressions to extract subparts of names re_workarounds = [ - ('php', r'^(php\d)\.'), + ('php', re.compile(r'^(php\d)\.')), ] for a2enmod_spelling, module_name in text_workarounds: @@ -178,7 +179,7 @@ def create_apache_identifier(name): for search, reexpr in re_workarounds: if search in name: try: - rematch = re.search(reexpr, name) + rematch = reexpr.search(name) return rematch.group(1) + '_module' except AttributeError: pass @@ -201,15 +202,15 @@ def _set_state(module, state): result=success_msg, warnings=module.warnings) - a2mod_binary = module.get_bin_path(a2mod_binary) + a2mod_binary = [module.get_bin_path(a2mod_binary)] if a2mod_binary is None: module.fail_json(msg="%s not found. 
Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary)) if not want_enabled and force: # force exists only for a2dismod on debian - a2mod_binary += ' -f' + a2mod_binary.append('-f') - result, stdout, stderr = module.run_command("%s %s" % (a2mod_binary, name)) + result, stdout, stderr = module.run_command(a2mod_binary + [name]) if _module_is_enabled(module) == want_enabled: module.exit_json(changed=True, @@ -241,10 +242,10 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - identifier=dict(required=False, type='str'), - force=dict(required=False, type='bool', default=False), + identifier=dict(type='str'), + force=dict(type='bool', default=False), state=dict(default='present', choices=['absent', 'present']), - ignore_configcheck=dict(required=False, type='bool', default=False), + ignore_configcheck=dict(type='bool', default=False), ), supports_check_mode=True, ) @@ -253,7 +254,7 @@ def main(): name = module.params['name'] if name == 'cgi' and _run_threaded(module): - module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module %s possible." % name) + module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module cgi possible.") if not module.params['identifier']: module.params['identifier'] = create_apache_identifier(module.params['name']) diff --git a/tests/integration/targets/apache2_module/tasks/actualtest.yml b/tests/integration/targets/apache2_module/tasks/actualtest.yml index 24ba4f27cd..886e746f07 100644 --- a/tests/integration/targets/apache2_module/tasks/actualtest.yml +++ b/tests/integration/targets/apache2_module/tasks/actualtest.yml @@ -13,40 +13,25 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-- name: install apache via apt - apt: - name: "{{item}}" - state: present - when: "ansible_os_family == 'Debian'" - with_items: - - apache2 - - libapache2-mod-evasive - -- name: install apache via zypper - community.general.zypper: - name: apache2 - state: present - when: "ansible_os_family == 'Suse'" - - name: disable userdir module - apache2_module: + community.general.apache2_module: name: userdir state: absent register: userdir_first_disable - name: disable userdir module, second run - apache2_module: + community.general.apache2_module: name: userdir state: absent register: disable -- name: ensure apache2_module is idempotent +- name: ensure community.general.apache2_module is idempotent assert: that: - disable is not changed - name: enable userdir module - apache2_module: + community.general.apache2_module: name: userdir state: present register: enable @@ -57,18 +42,18 @@ - enable is changed - name: enable userdir module, second run - apache2_module: + community.general.apache2_module: name: userdir state: present register: enabletwo -- name: ensure apache2_module is idempotent +- name: ensure community.general.apache2_module is idempotent assert: that: - 'not enabletwo.changed' - name: disable userdir module, final run - apache2_module: + community.general.apache2_module: name: userdir state: absent register: disablefinal @@ -79,13 +64,13 @@ - 'disablefinal.changed' - name: set userdir to original state - apache2_module: + community.general.apache2_module: name: userdir state: present when: userdir_first_disable is changed - name: ensure autoindex enabled - apache2_module: + community.general.apache2_module: name: autoindex state: present @@ -93,55 +78,56 @@ when: "ansible_os_family == 'Debian'" block: - name: force disable of autoindex # bug #2499 - apache2_module: + community.general.apache2_module: name: autoindex state: absent force: True - name: reenable autoindex - apache2_module: + community.general.apache2_module: name: autoindex state: present - - 
name: enable evasive module, test https://github.com/ansible/ansible/issues/22635 - apache2_module: - name: evasive - state: present - + # mod_evasive is enabled by default upon the installation, so disable first and enable second, to preserve the config - name: disable evasive module - apache2_module: + community.general.apache2_module: name: evasive state: absent + - name: enable evasive module, test https://github.com/ansible/ansible/issues/22635 + community.general.apache2_module: + name: evasive + state: present + - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669 - apache2_module: + community.general.apache2_module: name: dump_io state: present ignore_errors: True register: enable_dumpio_wrong - name: disable dump_io - apache2_module: + community.general.apache2_module: name: dump_io identifier: dumpio_module state: absent - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669 - apache2_module: + community.general.apache2_module: name: dump_io identifier: dumpio_module state: present register: enable_dumpio_correct_1 - name: ensure idempotency with identifier - apache2_module: + community.general.apache2_module: name: dump_io identifier: dumpio_module state: present register: enable_dumpio_correct_2 - name: disable dump_io - apache2_module: + community.general.apache2_module: name: dump_io identifier: dumpio_module state: absent @@ -153,7 +139,7 @@ - enable_dumpio_correct_2 is not changed - name: disable mpm modules - apache2_module: + community.general.apache2_module: name: "{{ item }}" state: absent ignore_configcheck: True @@ -163,7 +149,7 @@ - mpm_prefork - name: enabled mpm_event - apache2_module: + community.general.apache2_module: name: mpm_event state: present ignore_configcheck: True @@ -175,7 +161,7 @@ - 'enabledmpmevent.changed' - name: switch between mpm_event and mpm_worker - apache2_module: + community.general.apache2_module: name: "{{ item.name }}" state: 
"{{ item.state }}" ignore_configcheck: True @@ -186,7 +172,7 @@ state: present - name: ensure mpm_worker is already enabled - apache2_module: + community.general.apache2_module: name: mpm_worker state: present register: enabledmpmworker @@ -197,7 +183,7 @@ - 'not enabledmpmworker.changed' - name: try to disable all mpm modules with configcheck - apache2_module: + community.general.apache2_module: name: "{{item}}" state: absent with_items: @@ -214,7 +200,7 @@ with_items: "{{ remove_with_configcheck.results }}" - name: try to disable all mpm modules without configcheck - apache2_module: + community.general.apache2_module: name: "{{item}}" state: absent ignore_configcheck: True @@ -224,7 +210,7 @@ - mpm_prefork - name: enabled mpm_event to restore previous state - apache2_module: + community.general.apache2_module: name: mpm_event state: present ignore_configcheck: True diff --git a/tests/integration/targets/apache2_module/tasks/main.yml b/tests/integration/targets/apache2_module/tasks/main.yml index 2ec308857a..d840ff60e8 100644 --- a/tests/integration/targets/apache2_module/tasks/main.yml +++ b/tests/integration/targets/apache2_module/tasks/main.yml @@ -5,8 +5,22 @@ #################################################################### +- name: install apache via apt + apt: + name: "{{item}}" + state: present + when: "ansible_os_family == 'Debian'" + with_items: + - apache2 + - libapache2-mod-evasive -- name: +- name: install apache via zypper + community.general.zypper: + name: apache2 + state: present + when: "ansible_os_family == 'Suse'" + +- name: test apache2_module block: - name: get list of enabled modules shell: apache2ctl -M | sort @@ -17,8 +31,12 @@ - name: get list of enabled modules shell: apache2ctl -M | sort register: modules_after - - debug: var=modules_before - - debug: var=modules_after + - name: modules_before + debug: + var: modules_before + - name: modules_after + debug: + var: modules_after - name: ensure that all test modules are disabled again 
assert: that: modules_before.stdout == modules_after.stdout From 43fe26d83cd405786ed6d000ecf278f3bb6a76c4 Mon Sep 17 00:00:00 2001 From: Gaetan2907 <48204380+Gaetan2907@users.noreply.github.com> Date: Fri, 30 Jul 2021 19:03:57 +0100 Subject: [PATCH 0478/3093] Keycloak: add client_rolemapping management (#2941) * Add Keycloak kc_client_rolemapping module * Fix documentation * Add unit tests for keycloak_client_rolemapping Keycloak module * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Fix documentation * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Minor fix * Add check mode * Refactoring: rename function from get_client_roles to get_client_roles_by_id * BOTMETA.yml: keycloak_client_rolemapping - add myself as maintainer * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_client_rolemapping.py Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + .../identity/keycloak/keycloak.py | 123 +++- .../keycloak/keycloak_client_rolemapping.py | 347 +++++++++++ .../modules/keycloak_client_rolemapping.py | 1 + .../test_keycloak_client_rolemapping.py | 572 ++++++++++++++++++ 5 files changed, 1043 insertions(+), 2 deletions(-) create mode 100644 plugins/modules/identity/keycloak/keycloak_client_rolemapping.py create mode 120000 plugins/modules/keycloak_client_rolemapping.py create mode 100644 
tests/unit/plugins/modules/identity/keycloak/test_keycloak_client_rolemapping.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 859d88bb84..4912a03ba4 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -502,6 +502,8 @@ files: maintainers: elfelip Gaetan2907 $modules/identity/keycloak/keycloak_clientscope.py: maintainers: Gaetan2907 + $modules/identity/keycloak/keycloak_client_rolemapping.py: + maintainers: Gaetan2907 $modules/identity/keycloak/keycloak_group.py: maintainers: adamgoossens $modules/identity/keycloak/keycloak_realm.py: diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 75ef2bba02..c782e3690c 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -62,6 +62,10 @@ URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}" URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models" URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}" +URL_CLIENT_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}" +URL_CLIENT_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available" +URL_CLIENT_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite" + URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows" URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}" URL_AUTHENTICATION_FLOW_COPY = "{url}/admin/realms/{realm}/authentication/flows/{copyfrom}/copy" @@ -376,8 +380,8 @@ class KeycloakAPI(object): def create_client(self, clientrep, realm="master"): """ Create a client in keycloak - :param clientrep: Client representation of client to be created. 
Must at least contain field clientId - :param realm: realm for client to be created + :param clientrep: Client representation of client to be created. Must at least contain field clientId. + :param realm: realm for client to be created. :return: HTTPResponse object on success """ client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm) @@ -405,6 +409,121 @@ class KeycloakAPI(object): self.module.fail_json(msg='Could not delete client %s in realm %s: %s' % (id, realm, str(e))) + def get_client_roles_by_id(self, cid, realm="master"): + """ Fetch the roles of the a client on the Keycloak server. + + :param cid: ID of the client from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. + :return: The rollemappings of specified group and client of the realm (default "master"). + """ + client_roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) + try: + return json.loads(to_native(open_url(client_roles_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.module.fail_json(msg="Could not fetch rolemappings for client %s in realm %s: %s" + % (cid, realm, str(e))) + + def get_client_role_by_name(self, gid, cid, name, realm="master"): + """ Get the role ID of a client. + + :param gid: ID of the group from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param name: Name of the role. + :param realm: Realm from which to obtain the rolemappings. + :return: The ID of the role, None if not found. + """ + rolemappings = self.get_client_roles_by_id(cid, realm=realm) + for role in rolemappings: + if name == role['name']: + return role['id'] + return None + + def get_client_rolemapping_by_id(self, gid, cid, rid, realm='master'): + """ Obtain client representation by id + + :param gid: ID of the group from which to obtain the rolemappings. 
+ :param cid: ID of the client from which to obtain the rolemappings. + :param rid: ID of the role. + :param realm: client from this realm + :return: dict of rolemapping representation or None if none matching exist + """ + rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + try: + rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + for role in rolemappings: + if rid == role['id']: + return role + except Exception as e: + self.module.fail_json(msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) + return None + + def get_client_available_rolemappings(self, gid, cid, realm="master"): + """ Fetch the available role of a client in a specified goup on the Keycloak server. + + :param gid: ID of the group from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. + :return: The rollemappings of specified group and client of the realm (default "master"). + """ + available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid) + try: + return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) + + def get_client_composite_rolemappings(self, gid, cid, realm="master"): + """ Fetch the composite role of a client in a specified group on the Keycloak server. + + :param gid: ID of the group from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param realm: Realm from which to obtain the rolemappings. 
+ :return: The rollemappings of specified group and client of the realm (default "master"). + """ + available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid) + try: + return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except Exception as e: + self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) + + def add_group_rolemapping(self, gid, cid, role_rep, realm="master"): + """ Fetch the composite role of a client in a specified goup on the Keycloak server. + + :param gid: ID of the group from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param role_rep: Representation of the role to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. + """ + available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + try: + open_url(available_rolemappings_url, method="POST", headers=self.restheaders, data=json.dumps(role_rep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) + + def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"): + """ Delete the rolemapping of a client in a specified group on the Keycloak server. + + :param gid: ID of the group from which to obtain the rolemappings. + :param cid: ID of the client from which to obtain the rolemappings. + :param role_rep: Representation of the role to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. 
+ """ + available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) + try: + open_url(available_rolemappings_url, method="DELETE", headers=self.restheaders, validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) + def get_client_templates(self, realm='master'): """ Obtains client template representations for client templates in a realm diff --git a/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py b/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py new file mode 100644 index 0000000000..e3d43d7919 --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_client_rolemapping.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_client_rolemapping + +short_description: Allows administration of Keycloak client_rolemapping with the Keycloak API +version_added: 3.5.0 + +description: + - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). + + - Attributes are multi-valued in the Keycloak API. 
All attributes are lists of individual values and will + be returned that way by this module. You may pass single values for attributes when calling the module, + and this will be translated into a list suitable for the API. + + - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup + to the API to translate the name into the role ID. + + +options: + state: + description: + - State of the client_rolemapping. + - On C(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you provide. + - On C(absent), the client_rolemapping will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + type: str + description: + - They Keycloak realm under which this role_representation resides. + default: 'master' + + group_name: + type: str + description: + - Name of the group to be mapped. + - This parameter is required (can be replaced by gid for less API call). + + gid: + type: str + description: + - Id of the group to be mapped. + - This parameter is not required for updating or deleting the rolemapping but + providing it will reduce the number of API calls required. + + client_id: + type: str + description: + - Name of the client to be mapped (different than I(cid)). + - This parameter is required (can be replaced by cid for less API call). + + cid: + type: str + description: + - Id of the client to be mapped. + - This parameter is not required for updating or deleting the rolemapping but + providing it will reduce the number of API calls required. + + roles: + description: + - Roles to be mapped to the group. + type: list + elements: dict + suboptions: + name: + type: str + description: + - Name of the role_representation. + - This parameter is required only when creating or updating the role_representation. + id: + type: str + description: + - The unique identifier for this role_representation. 
+ - This parameter is not required for updating or deleting a role_representation but + providing it will reduce the number of API calls required. + +extends_documentation_fragment: +- community.general.keycloak + + +author: + - Gaëtan Daubresse (@Gaetan2907) +''' + +EXAMPLES = ''' +- name: Map a client role to a group, authentication with credentials + community.general.keycloak_client_rolemappings: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: present + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Map a client role to a group, authentication with token + community.general.keycloak_client_rolemappings: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + token: TOKEN + state: present + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +- name: Unmap client role from a group + community.general.keycloak_client_rolemappings: + realm: MyCustomRealm + auth_client_id: admin-cli + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: USERNAME + auth_password: PASSWORD + state: absent + client_id: client1 + group_name: group1 + roles: + - name: role_name1 + id: role_id1 + - name: role_name2 + id: role_id2 + delegate_to: localhost + +''' + +RETURN = ''' +msg: + description: Message as to what action was taken + returned: always + type: str + sample: "Role role1 assigned to group group1." + +proposed: + description: role_representation representation of proposed changes to client_rolemapping. 
+ returned: always + type: dict + sample: { + clientId: "test" + } +existing: + description: + - role_representation representation of existing role_representation. + - The sample is truncated. + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +end_state: + description: + - role_representation representation of role_representation after module execution. + - The sample is truncated. + returned: always + type: dict + sample: { + "adminUrl": "http://www.example.com/admin_url", + "attributes": { + "request.object.signature.alg": "RS256", + } + } +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError, is_struct_included +from ansible.module_utils.basic import AnsibleModule + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + roles_spec = dict( + name=dict(type='str'), + id=dict(type='str'), + ) + + meta_args = dict( + state=dict(default='present', choices=['present', 'absent']), + realm=dict(default='master'), + gid=dict(type='str'), + group_name=dict(type='str'), + cid=dict(type='str'), + client_id=dict(type='str'), + roles=dict(type='list', elements='dict', options=roles_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header = get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + 
state = module.params.get('state') + cid = module.params.get('cid') + client_id = module.params.get('client_id') + gid = module.params.get('gid') + group_name = module.params.get('group_name') + roles = module.params.get('roles') + + # Check the parameters + if cid is None and client_id is None: + module.fail_json(msg='Either the `client_id` or `cid` has to be specified.') + if gid is None and group_name is None: + module.fail_json(msg='Either the `group_name` or `gid` has to be specified.') + + # Get the potential missing parameters + if gid is None: + group_rep = kc.get_group_by_name(group_name, realm=realm) + if group_rep is not None: + gid = group_rep['id'] + else: + module.fail_json(msg='Could not fetch group %s:' % group_name) + if cid is None: + cid = kc.get_client_id(client_id, realm=realm) + if cid is None: + module.fail_json(msg='Could not fetch client %s:' % client_id) + if roles is None: + module.exit_json(msg="Nothing to do (no roles specified).") + else: + for role_index, role in enumerate(roles, start=0): + if role['name'] is None and role['id'] is None: + module.fail_json(msg='Either the `name` or `id` has to be specified on each role.') + # Fetch missing role_id + if role['id'] is None: + role_id = kc.get_client_role_by_name(gid, cid, role['name'], realm=realm) + if role_id is not None: + role['id'] = role_id + else: + module.fail_json(msg='Could not fetch role %s:' % (role['name'])) + # Fetch missing role_name + else: + role['name'] = kc.get_client_rolemapping_by_id(gid, cid, role['id'], realm=realm)['name'] + if role['name'] is None: + module.fail_json(msg='Could not fetch role %s' % (role['id'])) + + # Get effective client-level role mappings + available_roles_before = kc.get_client_available_rolemappings(gid, cid, realm=realm) + assigned_roles_before = kc.get_client_composite_rolemappings(gid, cid, realm=realm) + + result['existing'] = assigned_roles_before + result['proposed'] = roles + + update_roles = [] + for role_index, role in 
enumerate(roles, start=0): + # Fetch roles to assign if state present + if state == 'present': + for available_role in available_roles_before: + if role['name'] == available_role['name']: + update_roles.append({ + 'id': role['id'], + 'name': role['name'], + }) + # Fetch roles to remove if state absent + else: + for assigned_role in assigned_roles_before: + if role['name'] == assigned_role['name']: + update_roles.append({ + 'id': role['id'], + 'name': role['name'], + }) + + if len(update_roles): + if state == 'present': + # Assign roles + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=update_roles) + if module.check_mode: + module.exit_json(**result) + kc.add_group_rolemapping(gid, cid, update_roles, realm=realm) + result['msg'] = 'Roles %s assigned to group %s.' % (update_roles, group_name) + assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + else: + # Remove mapping of role + result['changed'] = True + if module._diff: + result['diff'] = dict(before=assigned_roles_before, after=update_roles) + if module.check_mode: + module.exit_json(**result) + kc.delete_group_rolemapping(gid, cid, update_roles, realm=realm) + result['msg'] = 'Roles %s removed from group %s.' % (update_roles, group_name) + assigned_roles_after = kc.get_client_composite_rolemappings(gid, cid, realm=realm) + result['end_state'] = assigned_roles_after + module.exit_json(**result) + # Do nothing + else: + result['changed'] = False + result['msg'] = 'Nothing to do, roles %s are correctly mapped with group %s.' 
% (roles, group_name) + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_client_rolemapping.py b/plugins/modules/keycloak_client_rolemapping.py new file mode 120000 index 0000000000..02243ca68d --- /dev/null +++ b/plugins/modules/keycloak_client_rolemapping.py @@ -0,0 +1 @@ +identity/keycloak/keycloak_client_rolemapping.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client_rolemapping.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client_rolemapping.py new file mode 100644 index 0000000000..8e753bc6d0 --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_client_rolemapping.py @@ -0,0 +1,572 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_client_rolemapping + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_group_by_name=None, get_client_id=None, get_client_role_by_name=None, + get_client_rolemapping_by_id=None, get_client_available_rolemappings=None, + get_client_composite_rolemappings=None, add_group_rolemapping=None, + delete_group_rolemapping=None): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` 
methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + obj = keycloak_client_rolemapping.KeycloakAPI + with patch.object(obj, 'get_group_by_name', + side_effect=get_group_by_name) as mock_get_group_by_name: + with patch.object(obj, 'get_client_id', + side_effect=get_client_id) as mock_get_client_id: + with patch.object(obj, 'get_client_role_by_name', + side_effect=get_client_role_by_name) as mock_get_client_role_by_name: + with patch.object(obj, 'get_client_rolemapping_by_id', + side_effect=get_client_rolemapping_by_id) as mock_get_client_rolemapping_by_id: + with patch.object(obj, 'get_client_available_rolemappings', + side_effect=get_client_available_rolemappings) as mock_get_client_available_rolemappings: + with patch.object(obj, 'get_client_composite_rolemappings', + side_effect=get_client_composite_rolemappings) as mock_get_client_composite_rolemappings: + with patch.object(obj, 'add_group_rolemapping', + side_effect=add_group_rolemapping) as mock_add_group_rolemapping: + with patch.object(obj, 'delete_group_rolemapping', + side_effect=delete_group_rolemapping) as mock_delete_group_rolemapping: + yield mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, \ + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, \ + mock_delete_group_rolemapping + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + 
return get_response( + object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakRealm(ModuleTestCase): + def setUp(self): + super(TestKeycloakRealm, self).setUp() + self.module = keycloak_client_rolemapping + + def test_map_clientrole_to_group_with_name(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'realm': 'realm-name', + 'state': 'present', + 'client_id': 'test_client', + 'group_name': 'test_group', + 'roles': [ + { + 'name': 'test_role1', + }, + { + 'name': 'test_role1', + }, + ], + } + return_value_get_group_by_name = [{ + "access": { + "manage": "true", + "manageMembership": "true", + "view": "true" + }, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]" + }] + return_value_get_client_id 
= "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_available_rolemappings = [[ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ]] + return_value_get_client_composite_rolemappings = [ + None, + [ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ] + ] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_by_name=return_value_get_client_role_by_name, + get_client_available_rolemappings=return_value_get_client_available_rolemappings, + get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_group_by_name.call_count, 1) + self.assertEqual(mock_get_client_id.call_count, 1) + self.assertEqual(mock_add_group_rolemapping.call_count, 1) + 
self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0) + self.assertEqual(mock_get_client_available_rolemappings.call_count, 1) + self.assertEqual(mock_get_client_composite_rolemappings.call_count, 2) + self.assertEqual(mock_delete_group_rolemapping.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_map_clientrole_to_group_with_name_idempotency(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'realm': 'realm-name', + 'state': 'present', + 'client_id': 'test_client', + 'group_name': 'test_group', + 'roles': [ + { + 'name': 'test_role1', + }, + { + 'name': 'test_role1', + }, + ], + } + return_value_get_group_by_name = [{ + "access": { + "manage": "true", + "manageMembership": "true", + "view": "true" + }, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]" + }] + return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_available_rolemappings = [[]] + return_value_get_client_composite_rolemappings = [[ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ]] + + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with 
patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_by_name=return_value_get_client_role_by_name, + get_client_available_rolemappings=return_value_get_client_available_rolemappings, + get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_group_by_name.call_count, 1) + self.assertEqual(mock_get_client_id.call_count, 1) + self.assertEqual(mock_add_group_rolemapping.call_count, 0) + self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0) + self.assertEqual(mock_get_client_available_rolemappings.call_count, 1) + self.assertEqual(mock_get_client_composite_rolemappings.call_count, 1) + self.assertEqual(mock_delete_group_rolemapping.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_map_clientrole_to_group_with_id(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'realm': 'realm-name', + 'state': 'present', + 'cid': 'c0f8490c-b224-4737-a567-20223e4c1727', + 'gid': '92f2400e-0ecb-4185-8950-12dcef616c2b', + 'roles': [ + { + 'name': 'test_role1', + }, + { + 'name': 'test_role1', + }, + ], + } + return_value_get_group_by_name = [{ + "access": { + "manage": "true", + "manageMembership": "true", + "view": "true" + }, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": 
"test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]" + }] + return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_available_rolemappings = [[ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ]] + return_value_get_client_composite_rolemappings = [ + None, + [ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ] + ] + + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_by_name=return_value_get_client_role_by_name, + get_client_available_rolemappings=return_value_get_client_available_rolemappings, + get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_group_by_name.call_count, 0) + 
self.assertEqual(mock_get_client_id.call_count, 0) + self.assertEqual(mock_add_group_rolemapping.call_count, 1) + self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0) + self.assertEqual(mock_get_client_available_rolemappings.call_count, 1) + self.assertEqual(mock_get_client_composite_rolemappings.call_count, 2) + self.assertEqual(mock_delete_group_rolemapping.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_remove_clientrole_from_group(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'realm': 'realm-name', + 'state': 'absent', + 'client_id': 'test_client', + 'group_name': 'test_group', + 'roles': [ + { + 'name': 'test_role1', + }, + { + 'name': 'test_role1', + }, + ], + } + return_value_get_group_by_name = [{ + "access": { + "manage": "true", + "manageMembership": "true", + "view": "true" + }, + "attributes": "{}", + "clientRoles": "{}", + "id": "92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]" + }] + return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_available_rolemappings = [[]] + return_value_get_client_composite_rolemappings = [ + [ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ], + [] + ] + + changed = True + + set_module_args(module_args) + + # Run 
the module + + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_by_name=return_value_get_client_role_by_name, + get_client_available_rolemappings=return_value_get_client_available_rolemappings, + get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_group_by_name.call_count, 1) + self.assertEqual(mock_get_client_id.call_count, 1) + self.assertEqual(mock_add_group_rolemapping.call_count, 0) + self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0) + self.assertEqual(mock_get_client_available_rolemappings.call_count, 1) + self.assertEqual(mock_get_client_composite_rolemappings.call_count, 2) + self.assertEqual(mock_delete_group_rolemapping.call_count, 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_remove_clientrole_from_group_idempotency(self): + """Add a new realm""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'realm': 'realm-name', + 'state': 'absent', + 'client_id': 'test_client', + 'group_name': 'test_group', + 'roles': [ + { + 'name': 'test_role1', + }, + { + 'name': 'test_role1', + }, + ], + } + return_value_get_group_by_name = [{ + "access": { + "manage": "true", + "manageMembership": "true", + "view": "true" + }, + "attributes": "{}", + "clientRoles": "{}", + "id": 
"92f2400e-0ecb-4185-8950-12dcef616c2b", + "name": "test_group", + "path": "/test_group", + "realmRoles": "[]", + "subGroups": "[]" + }] + return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727" + return_value_get_client_role_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe" + return_value_get_client_available_rolemappings = [ + [ + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d", + "name": "test_role2" + }, + { + "clientRole": "true", + "composite": "false", + "containerId": "c0f8490c-b224-4737-a567-20223e4c1727", + "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e", + "name": "test_role1" + } + ] + ] + return_value_get_client_composite_rolemappings = [[]] + + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id, + get_client_role_by_name=return_value_get_client_role_by_name, + get_client_available_rolemappings=return_value_get_client_available_rolemappings, + get_client_composite_rolemappings=return_value_get_client_composite_rolemappings) \ + as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_by_name, mock_add_group_rolemapping, + mock_get_client_rolemapping_by_id, mock_get_client_available_rolemappings, mock_get_client_composite_rolemappings, + mock_delete_group_rolemapping): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(mock_get_group_by_name.call_count, 1) + self.assertEqual(mock_get_client_id.call_count, 1) + self.assertEqual(mock_add_group_rolemapping.call_count, 0) + self.assertEqual(mock_get_client_rolemapping_by_id.call_count, 0) + self.assertEqual(mock_get_client_available_rolemappings.call_count, 1) + self.assertEqual(mock_get_client_composite_rolemappings.call_count, 1) + 
self.assertEqual(mock_delete_group_rolemapping.call_count, 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 9ccce821136789d4786038a57c565d97a7e21e22 Mon Sep 17 00:00:00 2001 From: Yvan Watchman Date: Sat, 31 Jul 2021 07:43:45 +0200 Subject: [PATCH 0479/3093] Feature: implement hpilo_info system power info (#3079) * report power state of host * Modify sample information * add changelog fragment * apply feedback from github community * apply feedback Co-authored-by: Yvan E. Watchman --- .../fragments/3079-report-power-state-hpilo.yaml | 3 +++ .../modules/remote_management/hpilo/hpilo_info.py | 13 +++++++++++++ 2 files changed, 16 insertions(+) create mode 100644 changelogs/fragments/3079-report-power-state-hpilo.yaml diff --git a/changelogs/fragments/3079-report-power-state-hpilo.yaml b/changelogs/fragments/3079-report-power-state-hpilo.yaml new file mode 100644 index 0000000000..e057e3395f --- /dev/null +++ b/changelogs/fragments/3079-report-power-state-hpilo.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - hpilo_info - added ``host_power_status`` return value to report power state of machine with ``OFF``, ``ON`` or ``UNKNOWN`` (https://github.com/ansible-collections/community.general/pull/3079). diff --git a/plugins/modules/remote_management/hpilo/hpilo_info.py b/plugins/modules/remote_management/hpilo/hpilo_info.py index f373b58639..2b6c30abd6 100644 --- a/plugins/modules/remote_management/hpilo/hpilo_info.py +++ b/plugins/modules/remote_management/hpilo/hpilo_info.py @@ -113,6 +113,15 @@ hw_uuid: returned: always type: str sample: 123456ABC78901D2 + +host_power_status: + description: + - Power status of host. + - Will be one of C(ON), C(OFF) and C(UNKNOWN). 
+ returned: always + type: str + sample: ON + version_added: 3.5.0 ''' import re @@ -177,6 +186,7 @@ def main(): # TODO: Count number of CPUs, DIMMs and total memory try: data = ilo.get_host_data() + power_state = ilo.get_host_power_status() except hpilo.IloCommunicationError as e: module.fail_json(msg=to_native(e)) @@ -243,6 +253,9 @@ def main(): # reformat into a text friendly format info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total']) + # Report host state + info['host_power_status'] = power_state or 'UNKNOWN' + module.exit_json(**info) From 5f8d6a73d3123e37eacb77712f5b083a670230a9 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 31 Jul 2021 19:09:38 +1200 Subject: [PATCH 0480/3093] fixed RETURN doc (#3120) --- plugins/modules/source_control/github/github_issue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/source_control/github/github_issue.py b/plugins/modules/source_control/github/github_issue.py index 66d26c8301..88fe8f7b51 100644 --- a/plugins/modules/source_control/github/github_issue.py +++ b/plugins/modules/source_control/github/github_issue.py @@ -41,7 +41,7 @@ author: ''' RETURN = ''' -get_status: +issue_status: description: State of the GitHub issue type: str returned: success From 789f06dffec644dd0605138936fe16ab44e8ac05 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sat, 31 Jul 2021 19:10:54 +1200 Subject: [PATCH 0481/3093] removed extraneous dependency in integration test (#3119) --- tests/integration/targets/prepare_tests/tasks/main.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 tests/integration/targets/prepare_tests/tasks/main.yml diff --git a/tests/integration/targets/prepare_tests/tasks/main.yml b/tests/integration/targets/prepare_tests/tasks/main.yml deleted file mode 100644 index e69de29bb2..0000000000 From 73c27d6a0e739bf94c10f347aa195048cba185eb Mon Sep 17 
00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 1 Aug 2021 22:35:08 +1200 Subject: [PATCH 0482/3093] utf8 marker batch1 (#3127) * added utf-8 markers to all .py files in plugins/{action,cache,callback} * added utf-8 markers to all .py files in plugins/connection * added utf-8 markers to all .py files in plugins/doc_fragments --- plugins/action/system/iptables_state.py | 1 + plugins/action/system/shutdown.py | 1 + plugins/cache/memcached.py | 1 + plugins/cache/pickle.py | 1 + plugins/cache/redis.py | 1 + plugins/cache/yaml.py | 1 + plugins/callback/context_demo.py | 1 + plugins/callback/counter_enabled.py | 1 + plugins/callback/dense.py | 1 + plugins/callback/hipchat.py | 1 + plugins/callback/jabber.py | 1 + plugins/callback/log_plays.py | 1 + plugins/callback/loganalytics.py | 1 + plugins/callback/logdna.py | 1 + plugins/callback/logentries.py | 1 + plugins/callback/logstash.py | 1 + plugins/callback/null.py | 1 + plugins/callback/say.py | 1 + plugins/callback/selective.py | 1 + plugins/callback/slack.py | 1 + plugins/callback/syslog_json.py | 1 + plugins/callback/unixy.py | 1 + plugins/callback/yaml.py | 1 + plugins/connection/chroot.py | 1 + plugins/connection/funcd.py | 1 + plugins/connection/iocage.py | 1 + plugins/connection/jail.py | 1 + plugins/connection/lxc.py | 1 + plugins/connection/lxd.py | 1 + plugins/connection/qubes.py | 1 + plugins/connection/saltstack.py | 1 + plugins/connection/zone.py | 1 + plugins/doc_fragments/hpe3par.py | 1 + plugins/doc_fragments/hwc.py | 1 + plugins/doc_fragments/oracle.py | 1 + plugins/doc_fragments/oracle_creatable_resource.py | 1 + plugins/doc_fragments/oracle_display_name_option.py | 1 + plugins/doc_fragments/oracle_name_option.py | 1 + plugins/doc_fragments/oracle_tags.py | 1 + plugins/doc_fragments/oracle_wait_options.py | 1 + plugins/doc_fragments/vexata.py | 1 + 41 files changed, 41 insertions(+) diff --git a/plugins/action/system/iptables_state.py 
b/plugins/action/system/iptables_state.py index 6884e77713..93e4bc2ed4 100644 --- a/plugins/action/system/iptables_state.py +++ b/plugins/action/system/iptables_state.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2020, quidame # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/action/system/shutdown.py b/plugins/action/system/shutdown.py index 953b73778b..4995ef8d8b 100644 --- a/plugins/action/system/shutdown.py +++ b/plugins/action/system/shutdown.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2020, Amin Vakil # Copyright: (c) 2016-2018, Matt Davis # Copyright: (c) 2018, Sam Doran diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index 5c9e54aaa0..fb2a778fc3 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2014, Brian Coca, Josh Drake, et al # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py index 38a93e2e28..b790e73a4c 100644 --- a/plugins/cache/pickle.py +++ b/plugins/cache/pickle.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017, Brian Coca # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 20616096ae..6b5f2c4ad0 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2014, Brian Coca, Josh Drake, et al # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py index b47d74038c..b676dd0dbb 100644 --- a/plugins/cache/yaml.py +++ b/plugins/cache/yaml.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017, Brian Coca # (c) 2017 Ansible Project # GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py index 2441f4063f..39c912acae 100644 --- a/plugins/callback/context_demo.py +++ b/plugins/callback/context_demo.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (C) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py index 2b8c270024..352c773b9b 100644 --- a/plugins/callback/counter_enabled.py +++ b/plugins/callback/counter_enabled.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2018, Ivan Aragones Muniesa # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ''' diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py index abbf05ef48..38d3e1bee7 100644 --- a/plugins/callback/dense.py +++ b/plugins/callback/dense.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2016, Dag Wieers # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py index e097ac8eb6..771c425df8 100644 --- a/plugins/callback/hipchat.py +++ b/plugins/callback/hipchat.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (C) 2014, Matt Martz # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index 83476a85c5..c57e08804a 100644 --- a/plugins/callback/jabber.py +++ b/plugins/callback/jabber.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (C) 2016 maxn nikolaev.makc@gmail.com # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index 
df3482f483..24acf3fc95 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (C) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index ef1ea02f87..ccc7649218 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index 165005d0bd..ddb4c477da 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2018, Samir Musali # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index d78bff331c..344bd219cd 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Logentries.com, Jimmy Tang # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index ef862fdb42..95da7fa95a 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (C) 2020, Yevhen Khmelenko # (C) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/null.py b/plugins/callback/null.py index cda8603167..9eb5198d0c 100644 --- a/plugins/callback/null.py +++ b/plugins/callback/null.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General 
Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/say.py b/plugins/callback/say.py index e3efd3e63b..309777e241 100644 --- a/plugins/callback/say.py +++ b/plugins/callback/say.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index 8d882d89bd..b1e09c8236 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) Fastly, inc 2016 # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index 74d338dbcc..c791bf6a36 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (C) 2014-2015, Matt Martz # (C) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py index a9547526ee..73543614a8 100644 --- a/plugins/callback/syslog_json.py +++ b/plugins/callback/syslog_json.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index aaca1bd8cc..dec2ab0c8c 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2017, Allyson Bowles <@akatch> # Copyright: (c) 2012-2014, Michael DeHaan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py index da931d6b73..d4036c808e 100644 --- 
a/plugins/callback/yaml.py +++ b/plugins/callback/yaml.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index c4c427aa0a..3e15947031 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # # (c) 2013, Maykel Moya diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index afea840ee8..caf9d06c60 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Copyright (c) 2013, Michael Scherer diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index e97867e58f..94761d5c17 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on jail.py # (c) 2013, Michael Scherer # (c) 2015, Toshio Kuratomi diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index cee08ed8fd..c3de25c753 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on local.py by Michael DeHaan # and chroot.py by Maykel Moya # Copyright (c) 2013, Michael Scherer diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index b18919efd3..d5c7a7ebbe 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Joerg Thalheim # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index d523234449..31ff13c776 100644 --- a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -1,3 +1,4 @@ +# 
coding: utf-8 -*- # (c) 2016 Matt Clay # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index ca221a7fac..fd72f38e2f 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on the buildah connection plugin # Copyright (c) 2017 Ansible Project # 2018 Kushal Das diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index f8e3680aea..3d56083bb6 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Based on func.py diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index b12cffe28d..a859b5e32f 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # and chroot.py (c) 2013, Maykel Moya # and jail.py (c) 2013, Michael Scherer diff --git a/plugins/doc_fragments/hpe3par.py b/plugins/doc_fragments/hpe3par.py index fa51ccdb91..e16ead4207 100644 --- a/plugins/doc_fragments/hpe3par.py +++ b/plugins/doc_fragments/hpe3par.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2018, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py index 80cd0465d7..c6c5dd23bd 100644 --- a/plugins/doc_fragments/hwc.py +++ b/plugins/doc_fragments/hwc.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2018, Huawei Inc. 
# GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py index 5ad04a2220..94ed18107d 100644 --- a/plugins/doc_fragments/oracle.py +++ b/plugins/doc_fragments/oracle.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py index 468eaabe3f..f76e7146b3 100644 --- a/plugins/doc_fragments/oracle_creatable_resource.py +++ b/plugins/doc_fragments/oracle_creatable_resource.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_display_name_option.py b/plugins/doc_fragments/oracle_display_name_option.py index 01f92f183b..b9ce0d92fe 100644 --- a/plugins/doc_fragments/oracle_display_name_option.py +++ b/plugins/doc_fragments/oracle_display_name_option.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_name_option.py b/plugins/doc_fragments/oracle_name_option.py index 9a7b0226f7..dd9b98816e 100644 --- a/plugins/doc_fragments/oracle_name_option.py +++ b/plugins/doc_fragments/oracle_name_option.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_tags.py b/plugins/doc_fragments/oracle_tags.py index 1d9cae0e8f..e92598c549 100644 --- a/plugins/doc_fragments/oracle_tags.py +++ b/plugins/doc_fragments/oracle_tags.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_wait_options.py b/plugins/doc_fragments/oracle_wait_options.py index 248319c2e8..d94f079a86 100644 --- a/plugins/doc_fragments/oracle_wait_options.py +++ b/plugins/doc_fragments/oracle_wait_options.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py index 9f756cc877..920457fa04 100644 --- a/plugins/doc_fragments/vexata.py +++ b/plugins/doc_fragments/vexata.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # # Copyright: (c) 2019, Sandeep Kasargod # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) From 047b7ada3ca0e2a0ab5b6e83de22174839c1741e Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 1 Aug 2021 22:36:53 +1200 Subject: [PATCH 0483/3093] uf8 marker batch2 (#3128) * added utf-8 markers to all .py files in plugins/filter * added utf-8 markers to all .py files in plugins/inventory * added utf-8 markers to all .py files in plugins/lookup --- plugins/filter/dict_kv.py | 1 + plugins/filter/jc.py | 1 + plugins/filter/json_query.py | 1 + plugins/filter/random_mac.py | 1 + plugins/filter/version_sort.py | 1 + plugins/inventory/linode.py | 1 + plugins/inventory/nmap.py | 1 + plugins/inventory/online.py | 1 + plugins/inventory/scaleway.py | 1 + 
plugins/inventory/stackpath_compute.py | 1 + plugins/inventory/virtualbox.py | 1 + plugins/lookup/cartesian.py | 1 + plugins/lookup/chef_databag.py | 1 + plugins/lookup/consul_kv.py | 1 + plugins/lookup/credstash.py | 1 + plugins/lookup/cyberarkpassword.py | 1 + plugins/lookup/dependent.py | 1 + plugins/lookup/dig.py | 1 + plugins/lookup/dnstxt.py | 1 + plugins/lookup/etcd.py | 1 + plugins/lookup/filetree.py | 1 + plugins/lookup/flattened.py | 1 + plugins/lookup/hiera.py | 1 + plugins/lookup/keyring.py | 1 + plugins/lookup/lastpass.py | 1 + plugins/lookup/lmdb_kv.py | 1 + plugins/lookup/manifold.py | 1 + plugins/lookup/nios.py | 1 + plugins/lookup/passwordstore.py | 1 + plugins/lookup/redis.py | 1 + plugins/lookup/shelvefile.py | 1 + 31 files changed, 31 insertions(+) diff --git a/plugins/filter/dict_kv.py b/plugins/filter/dict_kv.py index b2124ed767..fc1978b977 100644 --- a/plugins/filter/dict_kv.py +++ b/plugins/filter/dict_kv.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (C) 2020 Stanislav German-Evtushenko (@giner) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py index e854128f67..42dcf98234 100644 --- a/plugins/filter/jc.py +++ b/plugins/filter/jc.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Filipe Niero Felisbino # # This file is part of Ansible diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py index 673cafa587..9b9ecb93f2 100644 --- a/plugins/filter/json_query.py +++ b/plugins/filter/json_query.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Filipe Niero Felisbino # # This file is part of Ansible diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py index aa9f59be08..dc04e99a96 100644 --- a/plugins/filter/random_mac.py +++ b/plugins/filter/random_mac.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2020 Ansible Project # # This file is part of Ansible diff --git a/plugins/filter/version_sort.py 
b/plugins/filter/version_sort.py index 598b8f2088..d228ea62d0 100644 --- a/plugins/filter/version_sort.py +++ b/plugins/filter/version_sort.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (C) 2021 Eric Lavarde # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 049d67c973..566073a4a8 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index 05a83367af..ade3adc3d4 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index 2d305bb8d6..a74c6026ea 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 2e863a2531..b327824f33 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright: (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index 8e6b5bf953..e8477b95f3 100644 --- a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2020 Shay Rybak # Copyright (c) 2020 Ansible 
Project # GNU General Public License v3.0+ diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 827618131a..672312cd8e 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py index 45eb16d8b0..841f4f8c4d 100644 --- a/plugins/lookup/cartesian.py +++ b/plugins/lookup/cartesian.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2013, Bradley Young # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index 0a1c6de3ed..d594c7681e 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2016, Josh Bradley # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index 8b9e4e9102..58f450eb65 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Steve Gargan # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index 04935ee635..1a87deed41 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Ensighten # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index ec6e6fcb56..112e7c1cd8 100644 --- 
a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017, Edward Nunez # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index c9ce58567d..3f73f88bfa 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015-2021, Felix Fontein # (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index b6c71954f0..6520b0d3ec 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py index d52301e7fb..84bff41795 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2012, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index a3a7c42a3d..ca13442e43 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2013, Jan-Piet Mens # (m) 2016, Mihai Moldovanu # (m) 2017, Juan Manuel Parrilla diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index 06b89bf396..e663fc9515 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2016 Dag Wieers # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/flattened.py 
b/plugins/lookup/flattened.py index 515817ed09..d1ddd14f56 100644 --- a/plugins/lookup/flattened.py +++ b/plugins/lookup/flattened.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2013, Serge van Ginderachter # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index a4358f7b1e..658f377d59 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017, Juan Manuel Parrilla # (c) 2012-17 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py index d5b7d1a154..a98ae7aee9 100644 --- a/plugins/lookup/keyring.py +++ b/plugins/lookup/keyring.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2016, Samuel Boucher # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index 5e9f9907bd..3ae51b4c64 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2016, Andrew Zenk # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index a417874898..61dc410cc4 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017-2018, Jan-Piet Mens # (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py index 8b270ba0a2..076a475091 100644 --- a/plugins/lookup/manifold.py +++ b/plugins/lookup/manifold.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2018, Arigato Machine Inc. 
# (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/nios.py b/plugins/lookup/nios.py index 819d8077e6..008e8feffe 100644 --- a/plugins/lookup/nios.py +++ b/plugins/lookup/nios.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # # Copyright 2018 Red Hat | Ansible # diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 9c545a1cb0..3e936d8b18 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2017, Patrick Deelman # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index a1d5a381b2..fdf3a6e17b 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2012, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 0067472513..175ed49891 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -1,3 +1,4 @@ +# coding: utf-8 -*- # (c) 2015, Alejandro Guirao # (c) 2012-17 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) From afe2842b1b4bf9661cd2f3f5c660c0833612bcc6 Mon Sep 17 00:00:00 2001 From: quidame Date: Mon, 2 Aug 2021 08:24:31 +0200 Subject: [PATCH 0484/3093] filesize: overwrite default `unsafe_writes` documentation (#3126) * overwrite default `unsafe_writes` documentation * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- plugins/modules/files/filesize.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/modules/files/filesize.py b/plugins/modules/files/filesize.py index f073ff4119..81701438ca 100644 
--- a/plugins/modules/files/filesize.py +++ b/plugins/modules/files/filesize.py @@ -87,6 +87,10 @@ options: - I(force=true) and I(sparse=true) are mutually exclusive. type: bool default: false + unsafe_writes: + description: + - This option is silently ignored. This module always modifies file + size in-place. notes: - This module supports C(check_mode) and C(diff). From 857d2eee50685de55ebd64f290c62cd83487d595 Mon Sep 17 00:00:00 2001 From: David Hummel <6109326+hummeltech@users.noreply.github.com> Date: Tue, 3 Aug 2021 23:16:11 -0700 Subject: [PATCH 0485/3093] nmcli: Add support for additional Wi-Fi network options (#3081) * nmcli: Add support for additional Wi-Fi network options * Added `changelog fragment` * Update changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml Co-authored-by: Ajpantuso Co-authored-by: Ajpantuso --- .../3081-add-wifi-option-to-nmcli-module.yml | 3 ++ plugins/modules/net_tools/nmcli.py | 40 ++++++++++++++- .../plugins/modules/net_tools/test_nmcli.py | 51 +++++++++++++++++++ 3 files changed, 93 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml diff --git a/changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml b/changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml new file mode 100644 index 0000000000..4425d955fc --- /dev/null +++ b/changelogs/fragments/3081-add-wifi-option-to-nmcli-module.yml @@ -0,0 +1,3 @@ +minor_changes: + - nmcli - add ``wifi`` option to support managing Wi-Fi settings such as ``hidden`` or ``mode`` + (https://github.com/ansible-collections/community.general/pull/3081). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 1750f9f99f..90fd5bbd0c 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -342,6 +342,14 @@ options: - Name of the Wireless router or the access point. 
type: str version_added: 3.0.0 + wifi: + description: + - 'The configuration of the Wifi connection. The valid attributes are listed on: + U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' + - 'For instance to create a hidden AP mode Wifi connection: + C({hidden: true, mode: ap}).' + type: dict + version_added: 3.5.0 ''' EXAMPLES = r''' @@ -658,6 +666,18 @@ EXAMPLES = r''' autoconnect: true state: present +- name: Create a hidden AP mode wifi connection + community.general.nmcli: + type: wifi + conn_name: ChocoMaster + ifname: wlo1 + ssid: ChocoMaster + wifi: + hidden: true + mode: ap + autoconnect: true + state: present + ''' RETURN = r"""# @@ -750,6 +770,7 @@ class Nmcli(object): self.dhcp_client_id = module.params['dhcp_client_id'] self.zone = module.params['zone'] self.ssid = module.params['ssid'] + self.wifi = module.params['wifi'] self.wifi_sec = module.params['wifi_sec'] if self.method4: @@ -878,8 +899,17 @@ class Nmcli(object): }) elif self.type == 'wifi': options.update({ + '802-11-wireless.ssid': self.ssid, 'connection.slave-type': 'bond' if self.master else None, }) + if self.wifi: + for name, value in self.wifi.items(): + # Disregard 'ssid' via 'wifi.ssid' + if name == 'ssid': + continue + options.update({ + '802-11-wireless.%s' % name: value + }) # Convert settings values based on the situation. 
for setting, value in options.items(): setting_type = self.settings_type(setting) @@ -978,7 +1008,8 @@ class Nmcli(object): 'ipv4.ignore-auto-routes', 'ipv4.may-fail', 'ipv6.ignore-auto-dns', - 'ipv6.ignore-auto-routes'): + 'ipv6.ignore-auto-routes', + '802-11-wireless.hidden'): return bool elif setting in ('ipv4.dns', 'ipv4.dns-search', @@ -1030,6 +1061,12 @@ class Nmcli(object): if self.type == "wifi": cmd.append('ssid') cmd.append(self.ssid) + if self.wifi: + for name, value in self.wifi.items(): + # Disallow setting 'ssid' via 'wifi.ssid' + if name == 'ssid': + continue + cmd += ['802-11-wireless.%s' % name, value] if self.wifi_sec: for name, value in self.wifi_sec.items(): cmd += ['wifi-sec.%s' % name, value] @@ -1255,6 +1292,7 @@ def main(): ip_tunnel_local=dict(type='str'), ip_tunnel_remote=dict(type='str'), ssid=dict(type='str'), + wifi=dict(type='dict'), wifi_sec=dict(type='dict', no_log=True), ), mutually_exclusive=[['never_default4', 'gw4']], diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 63ec60537c..6df320a0c7 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -469,6 +469,22 @@ ipv6.ignore-auto-dns: no ipv6.ignore-auto-routes: no """ +TESTCASE_WIRELESS = [ + { + 'type': 'wifi', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'wireless_non_existant', + 'ip4': '10.10.10.10/24', + 'ssid': 'Brittany', + 'wifi': { + 'hidden': True, + 'mode': 'ap', + }, + 'state': 'present', + '_ansible_check_mode': False, + } +] + def mocker_set(mocker, connection_exists=False, @@ -1530,3 +1546,38 @@ def test_ethernet_connection_static_unchanged(mocked_ethernet_connection_static_ results = json.loads(out) assert not results.get('failed') assert not results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIRELESS, indirect=['patch_ansible_module']) +def 
test_create_wireless(mocked_generic_connection_create, capfd): + """ + Test : Create wireless connection + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[0] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'add' + assert add_args[0][3] == 'type' + assert add_args[0][4] == 'wifi' + assert add_args[0][5] == 'con-name' + assert add_args[0][6] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in ['connection.interface-name', 'wireless_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + '802-11-wireless.ssid', 'Brittany', + '802-11-wireless.mode', 'ap', + '802-11-wireless.hidden', 'yes']: + assert param in add_args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] From f2df1a7581e7bd8a386ac2a96b04c9925493ca24 Mon Sep 17 00:00:00 2001 From: Reto Kupferschmid Date: Wed, 4 Aug 2021 08:36:45 +0200 Subject: [PATCH 0486/3093] dnsimple update for python-dnsimple >=2.0.0 (#2946) * update dnsimple module * dnsimple: fixes for python-dnsimple >= 2.0.0 * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Abhijeet Kasurde * rewrite module to support dnsimple-python v1 and v2 * add changelog fragment * fix sanity checks * python 2 fixes * fix dnsimple requirement * add sandbox module parameter * Update changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * return only the first traceback * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * use separate classes for python-dnsimple 1 and 2 * add basic tests * fix checks * skip tests for 
unsupported python versions * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * fix conditions Co-authored-by: Abhijeet Kasurde Co-authored-by: Felix Fontein --- .../2946-python-dnsimple-v2-rewrite.yml | 2 + plugins/modules/net_tools/dnsimple.py | 316 ++++++++++++++---- .../modules/net_tools/test_dnsimple.py | 62 ++++ tests/unit/requirements.txt | 6 +- 4 files changed, 316 insertions(+), 70 deletions(-) create mode 100644 changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml create mode 100644 tests/unit/plugins/modules/net_tools/test_dnsimple.py diff --git a/changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml b/changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml new file mode 100644 index 0000000000..32a6341086 --- /dev/null +++ b/changelogs/fragments/2946-python-dnsimple-v2-rewrite.yml @@ -0,0 +1,2 @@ +minor_changes: + - dnsimple - module rewrite to include support for python-dnsimple>=2.0.0; also add ``sandbox`` parameter (https://github.com/ansible-collections/community.general/pull/2946). diff --git a/plugins/modules/net_tools/dnsimple.py b/plugins/modules/net_tools/dnsimple.py index c4314b6539..a575d944cb 100644 --- a/plugins/modules/net_tools/dnsimple.py +++ b/plugins/modules/net_tools/dnsimple.py @@ -14,13 +14,12 @@ module: dnsimple short_description: Interface with dnsimple.com (a DNS hosting service) description: - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)." -notes: - - DNSimple API v1 is deprecated. Please install dnsimple-python>=1.0.0 which uses v2 API. options: account_email: description: - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for. - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)." 
+ - "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0" type: str account_api_token: description: @@ -72,6 +71,14 @@ options: - Only use with C(state) is set to C(present) on a record. type: 'bool' default: no + sandbox: + description: + - Use the DNSimple sandbox environment. + - Requires a dedicated account in the dnsimple sandbox environment. + - Check U(https://developer.dnsimple.com/sandbox/) for more information. + type: 'bool' + default: no + version_added: 3.5.0 requirements: - "dnsimple >= 1.0.0" author: "Alex Coomans (@drcapulet)" @@ -144,38 +151,227 @@ EXAMPLES = ''' RETURN = r"""# """ -import os import traceback from distutils.version import LooseVersion +import re -DNSIMPLE_IMP_ERR = None + +class DNSimpleV1(): + """class which uses dnsimple-python < 2""" + + def __init__(self, account_email, account_api_token, sandbox, module): + """init""" + self.module = module + self.account_email = account_email + self.account_api_token = account_api_token + self.sandbox = sandbox + self.dnsimple_client() + + def dnsimple_client(self): + """creates a dnsimple client object""" + if self.account_email and self.account_api_token: + self.client = DNSimple(sandbox=self.sandbox, email=self.account_email, api_token=self.account_api_token) + else: + self.client = DNSimple(sandbox=self.sandbox) + + def get_all_domains(self): + """returns a list of all domains""" + domain_list = self.client.domains() + return [d['domain'] for d in domain_list] + + def get_domain(self, domain): + """returns a single domain by name or id""" + try: + dr = self.client.domain(domain)['domain'] + except DNSimpleException as e: + exception_string = str(e.args[0]['message']) + if re.match(r"^Domain .+ not found$", exception_string): + dr = None + else: + raise + return dr + + def create_domain(self, domain): + """create a single domain""" + return self.client.add_domain(domain)['domain'] + + def delete_domain(self, domain): + """delete a single domain""" + 
self.client.delete(domain) + + def get_records(self, domain, dnsimple_filter=None): + """return dns ressource records which match a specified filter""" + return [r['record'] for r in self.client.records(str(domain), params=dnsimple_filter)] + + def delete_record(self, domain, rid): + """delete a single dns ressource record""" + self.client.delete_record(str(domain), rid) + + def update_record(self, domain, rid, ttl=None, priority=None): + """update a single dns ressource record""" + data = {} + if ttl: + data['ttl'] = ttl + if priority: + data['priority'] = priority + return self.client.update_record(str(domain), str(rid), data)['record'] + + def create_record(self, domain, name, record_type, content, ttl=None, priority=None): + """create a single dns ressource record""" + data = { + 'name': name, + 'type': record_type, + 'content': content, + } + if ttl: + data['ttl'] = ttl + if priority: + data['priority'] = priority + return self.client.add_record(str(domain), data)['record'] + + +class DNSimpleV2(): + """class which uses dnsimple-python >= 2""" + + def __init__(self, account_email, account_api_token, sandbox, module): + """init""" + self.module = module + self.account_email = account_email + self.account_api_token = account_api_token + self.sandbox = sandbox + self.pagination_per_page = 30 + self.dnsimple_client() + self.dnsimple_account() + + def dnsimple_client(self): + """creates a dnsimple client object""" + if self.account_email and self.account_api_token: + client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token) + else: + msg = "Option account_email or account_api_token not provided. " \ + "Dnsimple authentiction with a .dnsimple config file is not " \ + "supported with dnsimple-python>=2.0.0" + raise DNSimpleException(msg) + client.identity.whoami() + self.client = client + + def dnsimple_account(self): + """select a dnsimple account. 
If a user token is used for authentication, + this user must only have access to a single account""" + account = self.client.identity.whoami().data.account + # user supplied a user token instead of account api token + if not account: + accounts = Accounts(self.client).list_accounts().data + if len(accounts) != 1: + msg = "The provided dnsimple token is a user token with multiple accounts." \ + "Use an account token or a user token with access to a single account." \ + "See https://support.dnsimple.com/articles/api-access-token/" + raise DNSimpleException(msg) + account = accounts[0] + self.account = account + + def get_all_domains(self): + """returns a list of all domains""" + domain_list = self._get_paginated_result(self.client.domains.list_domains, account_id=self.account.id) + return [d.__dict__ for d in domain_list] + + def get_domain(self, domain): + """returns a single domain by name or id""" + try: + dr = self.client.domains.get_domain(self.account.id, domain).data.__dict__ + except DNSimpleException as e: + exception_string = str(e.message) + if re.match(r"^Domain .+ not found$", exception_string): + dr = None + else: + raise + return dr + + def create_domain(self, domain): + """create a single domain""" + return self.client.domains.create_domain(self.account.id, domain).data.__dict__ + + def delete_domain(self, domain): + """delete a single domain""" + self.client.domains.delete_domain(self.account.id, domain) + + def get_records(self, zone, dnsimple_filter=None): + """return dns ressource records which match a specified filter""" + records_list = self._get_paginated_result(self.client.zones.list_records, + account_id=self.account.id, + zone=zone, filter=dnsimple_filter) + return [d.__dict__ for d in records_list] + + def delete_record(self, domain, rid): + """delete a single dns ressource record""" + self.client.zones.delete_record(self.account.id, domain, rid) + + def update_record(self, domain, rid, ttl=None, priority=None): + """update a single dns 
ressource record""" + zr = ZoneRecordUpdateInput(ttl=ttl, priority=priority) + result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__ + return result + + def create_record(self, domain, name, record_type, content, ttl=None, priority=None): + """create a single dns ressource record""" + zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority) + return self.client.zones.create_record(self.account.id, str(domain), zr).data.__dict__ + + def _get_paginated_result(self, operation, **options): + """return all results of a paginated api response""" + records_pagination = operation(per_page=self.pagination_per_page, **options).pagination + result_list = [] + for page in range(1, records_pagination.total_pages + 1): + page_data = operation(per_page=self.pagination_per_page, page=page, **options).data + result_list.extend(page_data) + return result_list + + +DNSIMPLE_IMP_ERR = [] +HAS_DNSIMPLE = False try: - from dnsimple import DNSimple - from dnsimple.dnsimple import __version__ as dnsimple_version - from dnsimple.dnsimple import DNSimpleException + # try to import dnsimple >= 2.0.0 + from dnsimple import Client, DNSimpleException + from dnsimple.service import Accounts + from dnsimple.version import version as dnsimple_version + from dnsimple.struct.zone_record import ZoneRecordUpdateInput, ZoneRecordInput HAS_DNSIMPLE = True except ImportError: - DNSIMPLE_IMP_ERR = traceback.format_exc() - HAS_DNSIMPLE = False + DNSIMPLE_IMP_ERR.append(traceback.format_exc()) -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +if not HAS_DNSIMPLE: + # try to import dnsimple < 2.0.0 + try: + from dnsimple.dnsimple import __version__ as dnsimple_version + from dnsimple import DNSimple + from dnsimple.dnsimple import DNSimpleException + HAS_DNSIMPLE = True + except ImportError: + DNSIMPLE_IMP_ERR.append(traceback.format_exc()) + +from ansible.module_utils.basic import AnsibleModule, 
missing_required_lib, env_fallback def main(): module = AnsibleModule( argument_spec=dict( - account_email=dict(type='str'), - account_api_token=dict(type='str', no_log=True), + account_email=dict(type='str', fallback=(env_fallback, ['DNSIMPLE_EMAIL'])), + account_api_token=dict(type='str', + no_log=True, + fallback=(env_fallback, ['DNSIMPLE_API_TOKEN'])), domain=dict(type='str'), record=dict(type='str'), record_ids=dict(type='list', elements='str'), - type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', + type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', + 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', + 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA']), ttl=dict(type='int', default=3600), value=dict(type='str'), priority=dict(type='int'), state=dict(type='str', choices=['present', 'absent'], default='present'), solo=dict(type='bool', default=False), + sandbox=dict(type='bool', default=False), ), required_together=[ ['record', 'value'] @@ -184,11 +380,7 @@ def main(): ) if not HAS_DNSIMPLE: - module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR) - - if LooseVersion(dnsimple_version) < LooseVersion('1.0.0'): - module.fail_json(msg="Current version of dnsimple Python module [%s] uses 'v1' API which is deprecated." - " Please upgrade to version 1.0.0 and above to use dnsimple 'v2' API." 
% dnsimple_version) + module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR[0]) account_email = module.params.get('account_email') account_api_token = module.params.get('account_api_token') @@ -201,29 +393,29 @@ def main(): priority = module.params.get('priority') state = module.params.get('state') is_solo = module.params.get('solo') + sandbox = module.params.get('sandbox') - if account_email and account_api_token: - client = DNSimple(email=account_email, api_token=account_api_token) - elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'): - client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN')) - else: - client = DNSimple() + DNSIMPLE_MAJOR_VERSION = LooseVersion(dnsimple_version).version[0] try: + if DNSIMPLE_MAJOR_VERSION > 1: + ds = DNSimpleV2(account_email, account_api_token, sandbox, module) + else: + ds = DNSimpleV1(account_email, account_api_token, sandbox, module) # Let's figure out what operation we want to do - # No domain, return a list if not domain: - domains = client.domains() - module.exit_json(changed=False, result=[d['domain'] for d in domains]) + all_domains = ds.get_all_domains() + module.exit_json(changed=False, result=all_domains) # Domain & No record - if domain and record is None and not record_ids: - domains = [d['domain'] for d in client.domains()] + if record is None and not record_ids: if domain.isdigit(): - dr = next((d for d in domains if d['id'] == int(domain)), None) + typed_domain = int(domain) else: - dr = next((d for d in domains if d['name'] == domain), None) + typed_domain = str(domain) + dr = ds.get_domain(typed_domain) + # domain does not exist if state == 'present': if dr: module.exit_json(changed=False, result=dr) @@ -231,105 +423,91 @@ def main(): if module.check_mode: module.exit_json(changed=True) else: - module.exit_json(changed=True, result=client.add_domain(domain)['domain']) - + response = ds.create_domain(domain) + 
module.exit_json(changed=True, result=response) # state is absent else: if dr: if not module.check_mode: - client.delete(domain) + ds.delete_domain(domain) module.exit_json(changed=True) else: module.exit_json(changed=False) # need the not none check since record could be an empty string - if domain and record is not None: - records = [r['record'] for r in client.records(str(domain), params={'name': record})] - + if record is not None: if not record_type: module.fail_json(msg="Missing the record type") - if not value: module.fail_json(msg="Missing the record value") - rr = next((r for r in records if r['name'] == record and r['type'] == record_type and r['content'] == value), None) - + records_list = ds.get_records(domain, dnsimple_filter={'name': record}) + rr = next((r for r in records_list if r['name'] == record and r['type'] == record_type and r['content'] == value), None) if state == 'present': changed = False if is_solo: # delete any records that have the same name and record type - same_type = [r['id'] for r in records if r['name'] == record and r['type'] == record_type] + same_type = [r['id'] for r in records_list if r['name'] == record and r['type'] == record_type] if rr: same_type = [rid for rid in same_type if rid != rr['id']] if same_type: if not module.check_mode: for rid in same_type: - client.delete_record(str(domain), rid) + ds.delete_record(domain, rid) changed = True if rr: # check if we need to update if rr['ttl'] != ttl or rr['priority'] != priority: - data = {} - if ttl: - data['ttl'] = ttl - if priority: - data['priority'] = priority if module.check_mode: module.exit_json(changed=True) else: - module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record']) + response = ds.update_record(domain, rr['id'], ttl, priority) + module.exit_json(changed=True, result=response) else: module.exit_json(changed=changed, result=rr) else: # create it - data = { - 'name': record, - 'type': record_type, - 'content': 
value, - } - if ttl: - data['ttl'] = ttl - if priority: - data['priority'] = priority if module.check_mode: module.exit_json(changed=True) else: - module.exit_json(changed=True, result=client.add_record(str(domain), data)['record']) - + response = ds.create_record(domain, record, record_type, value, ttl, priority) + module.exit_json(changed=True, result=response) # state is absent else: if rr: if not module.check_mode: - client.delete_record(str(domain), rr['id']) + ds.delete_record(domain, rr['id']) module.exit_json(changed=True) else: module.exit_json(changed=False) # Make sure these record_ids either all exist or none - if domain and record_ids: - current_records = [str(r['record']['id']) for r in client.records(str(domain))] - wanted_records = [str(r) for r in record_ids] + if record_ids: + current_records = ds.get_records(domain, dnsimple_filter=None) + current_record_ids = [str(d['id']) for d in current_records] + wanted_record_ids = [str(r) for r in record_ids] if state == 'present': - difference = list(set(wanted_records) - set(current_records)) + difference = list(set(wanted_record_ids) - set(current_record_ids)) if difference: module.fail_json(msg="Missing the following records: %s" % difference) else: module.exit_json(changed=False) - # state is absent else: - difference = list(set(wanted_records) & set(current_records)) + difference = list(set(wanted_record_ids) & set(current_record_ids)) if difference: if not module.check_mode: for rid in difference: - client.delete_record(str(domain), rid) + ds.delete_record(domain, rid) module.exit_json(changed=True) else: module.exit_json(changed=False) except DNSimpleException as e: - module.fail_json(msg="Unable to contact DNSimple: %s" % e.message) - + if DNSIMPLE_MAJOR_VERSION > 1: + module.fail_json(msg="DNSimple exception: %s" % e.message) + else: + module.fail_json(msg="DNSimple exception: %s" % str(e.args[0]['message'])) module.fail_json(msg="Unknown what you wanted me to do") diff --git 
a/tests/unit/plugins/modules/net_tools/test_dnsimple.py b/tests/unit/plugins/modules/net_tools/test_dnsimple.py new file mode 100644 index 0000000000..b9dce3c215 --- /dev/null +++ b/tests/unit/plugins/modules/net_tools/test_dnsimple.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible_collections.community.general.plugins.modules.net_tools import dnsimple as dnsimple_module +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args +from ansible_collections.community.general.tests.unit.compat.mock import patch +import pytest +import sys + +dnsimple = pytest.importorskip('dnsimple') +mandatory_py_version = pytest.mark.skipif( + sys.version_info < (3, 6), + reason='The dnsimple dependency requires python3.6 or higher' +) + +from dnsimple import DNSimpleException + + +class TestDNSimple(ModuleTestCase): + """Main class for testing dnsimple module.""" + + def setUp(self): + """Setup.""" + super(TestDNSimple, self).setUp() + self.module = dnsimple_module + + def tearDown(self): + """Teardown.""" + super(TestDNSimple, self).tearDown() + + def test_without_required_parameters(self): + """Failure must occurs when all parameters are missing""" + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + self.module.main() + + @patch('dnsimple.service.Identity.whoami') + def test_account_token(self, mock_whoami): + mock_whoami.return_value.data.account = 42 + ds = self.module.DNSimpleV2('fake', 'fake', True, self.module) + self.assertEquals(ds.account, 42) + + @patch('dnsimple.service.Accounts.list_accounts') + @patch('dnsimple.service.Identity.whoami') + def test_user_token_multiple_accounts(self, mock_whoami, mock_accounts): + mock_accounts.return_value.data = [1, 2, 3] + 
mock_whoami.return_value.data.account = None + with self.assertRaises(DNSimpleException): + self.module.DNSimpleV2('fake', 'fake', True, self.module) + + @patch('dnsimple.service.Accounts.list_accounts') + @patch('dnsimple.service.Identity.whoami') + def test_user_token_single_account(self, mock_whoami, mock_accounts): + mock_accounts.return_value.data = [42] + mock_whoami.return_value.data.account = None + ds = self.module.DNSimpleV2('fake', 'fake', True, self.module) + self.assertEquals(ds.account, 42) diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt index 1d082cffb8..c8294bd71a 100644 --- a/tests/unit/requirements.txt +++ b/tests/unit/requirements.txt @@ -21,4 +21,8 @@ lxml semantic_version # requirement for datadog_downtime module -datadog-api-client >= 1.0.0b3 ; python_version >= '3.6' \ No newline at end of file +datadog-api-client >= 1.0.0b3 ; python_version >= '3.6' + +# requirement for dnsimple module +dnsimple >= 2 ; python_version >= '3.6' +dataclasses ; python_version == '3.6' From 75688cb632197a934a93e4e3fa31d3d9a3755751 Mon Sep 17 00:00:00 2001 From: Scott Seekamp Date: Wed, 4 Aug 2021 11:53:43 -0600 Subject: [PATCH 0487/3093] redfish_command: allow setting the BootSourceOverrideMode property (#3135) * For #3134 Expose BootOverrideMode parameter to redfish_command to allow setting by user during run. * Fix trailing whitespace * Add changelog fragment to contribution. 
* Update changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein * Update changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...-add-redfish_command-bootoverridemode.yaml | 2 ++ plugins/module_utils/redfish_utils.py | 8 +++++++- .../redfish/redfish_command.py | 20 ++++++++++++++++++- 3 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml diff --git a/changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml b/changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml new file mode 100644 index 0000000000..d1f24d4c24 --- /dev/null +++ b/changelogs/fragments/3135-add-redfish_command-bootoverridemode.yaml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_command - add ``boot_override_mode`` argument to BootSourceOverride commands (https://github.com/ansible-collections/community.general/issues/3134). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 8d293f0056..94e2c4b7d8 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1565,6 +1565,7 @@ class RedfishUtils(object): uefi_target = boot_opts.get('uefi_target') boot_next = boot_opts.get('boot_next') override_enabled = boot_opts.get('override_enabled') + boot_override_mode = boot_opts.get('boot_override_mode') if not bootdevice and override_enabled != 'Disabled': return {'ret': False, @@ -1596,6 +1597,10 @@ class RedfishUtils(object): target = boot.get('BootSourceOverrideTarget') cur_uefi_target = boot.get('UefiTargetBootSourceOverride') cur_boot_next = boot.get('BootNext') + cur_override_mode = boot.get('BootSourceOverrideMode') + + if not boot_override_mode: + boot_override_mode = cur_override_mode if override_enabled == 'Disabled': payload = { @@ -1632,12 +1637,13 @@ class RedfishUtils(object): } } else: - if cur_enabled == override_enabled and target == bootdevice: + if cur_enabled == override_enabled and target == bootdevice and cur_override_mode == boot_override_mode: # If properties are already set, no changes needed return {'ret': True, 'changed': False} payload = { 'Boot': { 'BootSourceOverrideEnabled': override_enabled, + 'BootSourceOverrideMode': boot_override_mode, 'BootSourceOverrideTarget': bootdevice } } diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index 01f1fd771d..72392ec9f3 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -86,6 +86,12 @@ options: - Timeout in seconds for URL requests to OOB controller default: 10 type: int + boot_override_mode: + description: + - Boot mode when using an override. 
+ type: str + choices: [ Legacy, UEFI ] + version_added: 3.5.0 uefi_target: required: false description: @@ -287,6 +293,16 @@ EXAMPLES = ''' username: "{{ username }}" password: "{{ password }}" + - name: Set one-time boot to BiosSetup + community.general.redfish_command: + category: Systems + command: SetOneTimeBoot + bootnext: BiosSetup + boot_override_mode: Legacy + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + - name: Disable persistent boot device override community.general.redfish_command: category: Systems @@ -591,6 +607,7 @@ def main(): timeout=dict(type='int', default=10), uefi_target=dict(), boot_next=dict(), + boot_override_mode=dict(choices=['Legacy', 'UEFI']), resource_id=dict(), update_image_uri=dict(), update_protocol=dict(), @@ -662,7 +679,8 @@ def main(): boot_opts = { 'bootdevice': module.params['bootdevice'], 'uefi_target': module.params['uefi_target'], - 'boot_next': module.params['boot_next'] + 'boot_next': module.params['boot_next'], + 'boot_override_mode': module.params['boot_override_mode'], } # VirtualMedia options From 3dba697e3353a9e9dac9bbfd4e0216d7b62b6c5f Mon Sep 17 00:00:00 2001 From: Reto Kupferschmid Date: Thu, 5 Aug 2021 14:25:42 +0200 Subject: [PATCH 0488/3093] nmcli: manage dummy connections (#3132) * manage dummy connections * add issue reference in changelog fragment * Update changelogs/fragments/3132-nmcli-dummy.yaml Co-authored-by: Ajpantuso * resolve test conflicts Co-authored-by: Ajpantuso --- changelogs/fragments/3132-nmcli-dummy.yaml | 2 + plugins/modules/net_tools/nmcli.py | 12 ++- .../plugins/modules/net_tools/test_nmcli.py | 102 ++++++++++++++++++ 3 files changed, 114 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3132-nmcli-dummy.yaml diff --git a/changelogs/fragments/3132-nmcli-dummy.yaml b/changelogs/fragments/3132-nmcli-dummy.yaml new file mode 100644 index 0000000000..970bda34e8 --- /dev/null +++ b/changelogs/fragments/3132-nmcli-dummy.yaml @@ -0,0 +1,2 
@@ +minor_changes: + - nmcli - add ``dummy`` interface support (https://github.com/ansible-collections/community.general/issues/724). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 90fd5bbd0c..bbc1b4770f 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -51,10 +51,11 @@ options: type: description: - This is the type of device or network connection that you wish to create or modify. + - Type C(dummy) is added in community.general 3.5.0. - Type C(generic) is added in Ansible 2.5. - Type C(infiniband) is added in community.general 2.0.0. type: str - choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] + choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] mode: description: - This is the type of device or network connection that you wish to create for a bond or bridge. 
@@ -775,6 +776,8 @@ class Nmcli(object): if self.method4: self.ipv4_method = self.method4 + elif self.type == 'dummy' and not self.ip4: + self.ipv4_method = 'disabled' elif self.ip4: self.ipv4_method = 'manual' else: @@ -782,6 +785,8 @@ class Nmcli(object): if self.method6: self.ipv6_method = self.method6 + elif self.type == 'dummy' and not self.ip6: + self.ipv6_method = 'disabled' elif self.ip6: self.ipv6_method = 'manual' else: @@ -938,6 +943,7 @@ class Nmcli(object): return self.type in ( 'bond', 'bridge', + 'dummy', 'ethernet', 'generic', 'infiniband', @@ -956,6 +962,7 @@ class Nmcli(object): @property def mtu_conn_type(self): return self.type in ( + 'dummy', 'ethernet', 'team-slave', ) @@ -1092,7 +1099,7 @@ class Nmcli(object): @property def create_connection_up(self): - if self.type in ('bond', 'ethernet', 'infiniband', 'wifi'): + if self.type in ('bond', 'dummy', 'ethernet', 'infiniband', 'wifi'): if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None): return True elif self.type == 'team': @@ -1218,6 +1225,7 @@ def main(): 'bond-slave', 'bridge', 'bridge-slave', + 'dummy', 'ethernet', 'generic', 'infiniband', diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 6df320a0c7..b2307f245a 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -74,6 +74,12 @@ TESTCASE_CONNECTION = [ 'state': 'absent', '_ansible_check_mode': True, }, + { + 'type': 'dummy', + 'conn_name': 'non_existent_nw_device', + 'state': 'absent', + '_ansible_check_mode': True, + }, ] TESTCASE_GENERIC = [ @@ -485,6 +491,40 @@ TESTCASE_WIRELESS = [ } ] +TESTCASE_DUMMY_STATIC = [ + { + 'type': 'dummy', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'dummy_non_existant', + 'ip4': '10.10.10.10/24', + 'gw4': '10.10.10.1', + 'dns4': ['1.1.1.1', '8.8.8.8'], + 'ip6': '2001:db8::1/128', + 'state': 'present', + '_ansible_check_mode': 
False, + } +] + +TESTCASE_DUMMY_STATIC_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_device +connection.interface-name: dummy_non_existant +connection.autoconnect: yes +802-3-ethernet.mtu: auto +ipv4.method: manual +ipv4.addresses: 10.10.10.10/24 +ipv4.gateway: 10.10.10.1 +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv4.dns: 1.1.1.1,8.8.8.8 +ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no +ipv6.method: manual +ipv6.addresses: 2001:db8::1/128 +""" + def mocker_set(mocker, connection_exists=False, @@ -641,6 +681,13 @@ def mocked_ethernet_connection_dhcp_to_static(mocker): )) +@pytest.fixture +def mocked_dummy_connection_static_unchanged(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=(0, TESTCASE_DUMMY_STATIC_SHOW_OUTPUT, "")) + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_BOND, indirect=['patch_ansible_module']) def test_bond_connection_create(mocked_generic_connection_create, capfd): """ @@ -1581,3 +1628,58 @@ def test_create_wireless(mocked_generic_connection_create, capfd): results = json.loads(out) assert not results.get('failed') assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) +def test_create_dummy_static(mocked_generic_connection_create, capfd): + """ + Test : Create dummy connection with static IP configuration + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 2 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[0] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'add' + assert add_args[0][3] == 'type' + assert add_args[0][4] == 'dummy' + assert add_args[0][5] == 'con-name' + assert add_args[0][6] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in 
['connection.interface-name', 'dummy_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + 'ipv4.gateway', '10.10.10.1', + 'ipv4.dns', '1.1.1.1,8.8.8.8', + 'ipv6.addresses', '2001:db8::1/128']: + assert param in add_args_text + + up_args, up_kw = arg_list[1] + assert up_args[0][0] == '/usr/bin/nmcli' + assert up_args[0][1] == 'con' + assert up_args[0][2] == 'up' + assert up_args[0][3] == 'non_existent_nw_device' + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) +def test_dummy_connection_static_unchanged(mocked_dummy_connection_static_unchanged, capfd): + """ + Test : Dummy connection with static IP configuration unchanged + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert not results['changed'] From a73720c103da165823c7eeee4812210f7f3bc774 Mon Sep 17 00:00:00 2001 From: Ricky White Date: Thu, 5 Aug 2021 13:28:32 -0400 Subject: [PATCH 0489/3093] Updated the tss lookup plugin to reflect breaking changes introduced in the underpinning SDK (#3139) * Updated the plugin to reflect breaking changes introduced in the underlying SDK v1.0.0 update. 
* Added Changelog fragment * Updates based on feedback/review * Added newline to pass CI * Added whitepace for linter * Update changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml Co-authored-by: Ajpantuso Co-authored-by: Ajpantuso --- ...-update-to-make-compatible-with-sdk-v1.yml | 3 +++ plugins/lookup/tss.py | 20 ++++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml diff --git a/changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml b/changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml new file mode 100644 index 0000000000..f06fa68ce0 --- /dev/null +++ b/changelogs/fragments/3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml @@ -0,0 +1,3 @@ +bugfixes: + - tss lookup plugin - fixed incompatibility with ``python-tss-sdk`` version 1.0.0 + (https://github.com/ansible-collections/community.general/issues/3057, https://github.com/ansible-collections/community.general/pull/3139). 
diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index b7b7cd85e0..d5e6ea6dcd 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -112,16 +112,17 @@ EXAMPLES = r""" - ansible.builtin.debug: msg: the password is {{ secret_password }} """ - +from distutils.version import LooseVersion from ansible.errors import AnsibleError, AnsibleOptionsError sdk_is_missing = False try: + from thycotic import __version__ as sdk_version from thycotic.secrets.server import ( SecretServer, - SecretServerAccessError, SecretServerError, + PasswordGrantAuthorizer, ) except ImportError: sdk_is_missing = True @@ -136,7 +137,20 @@ display = Display() class LookupModule(LookupBase): @staticmethod def Client(server_parameters): - return SecretServer(**server_parameters) + + if LooseVersion(sdk_version) < LooseVersion('1.0.0'): + return SecretServer(**server_parameters) + else: + authorizer = PasswordGrantAuthorizer( + server_parameters["base_url"], + server_parameters["username"], + server_parameters["password"], + server_parameters["token_path_uri"], + ) + + return SecretServer( + server_parameters["base_url"], authorizer, server_parameters["api_path_uri"] + ) def run(self, terms, variables, **kwargs): if sdk_is_missing: From e9494c12f2b21f8f51be92c8e48074f6a697a712 Mon Sep 17 00:00:00 2001 From: rainerleber <39616583+rainerleber@users.noreply.github.com> Date: Thu, 5 Aug 2021 22:42:43 +0200 Subject: [PATCH 0490/3093] Hana query userstore (#3125) * add hdbuserstore ability * add description * fix * add default * add description * add sample * Apply suggestions from code review Co-authored-by: quidame * add fragment, fix required if * remove whitespace * add coding fragment * Apply suggestions from code review Co-authored-by: Felix Fontein * added test for userstore * Update plugins/modules/database/saphana/hana_query.py Co-authored-by: Felix Fontein Co-authored-by: Rainer Leber Co-authored-by: quidame Co-authored-by: Felix Fontein --- 
.../fragments/3125-hana-query-userstore.yaml | 2 + .../modules/database/saphana/hana_query.py | 42 +++++++++++++++---- .../database/saphana/test_hana_query.py | 36 ++++++++++++++++ 3 files changed, 72 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/3125-hana-query-userstore.yaml diff --git a/changelogs/fragments/3125-hana-query-userstore.yaml b/changelogs/fragments/3125-hana-query-userstore.yaml new file mode 100644 index 0000000000..0a626fe7f5 --- /dev/null +++ b/changelogs/fragments/3125-hana-query-userstore.yaml @@ -0,0 +1,2 @@ +minor_changes: + - hana_query - added the abillity to use hdbuserstore (https://github.com/ansible-collections/community.general/pull/3125). diff --git a/plugins/modules/database/saphana/hana_query.py b/plugins/modules/database/saphana/hana_query.py index 9b26134022..ac026d5adc 100644 --- a/plugins/modules/database/saphana/hana_query.py +++ b/plugins/modules/database/saphana/hana_query.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2021, Rainer Leber # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -21,13 +22,21 @@ options: type: str required: true user: - description: A dedicated username. Defaults to C(SYSTEM). + description: A dedicated username. The user could be also in hdbuserstore. Defaults to C(SYSTEM). type: str default: SYSTEM + userstore: + description: If C(true) the user must be in hdbuserstore. + type: bool + default: false + version_added: 3.5.0 password: - description: The password to connect to the database. + description: + - The password to connect to the database. + - "B(Note:) Since the passwords have to be passed as command line arguments, I(userstore=true) should + be used whenever possible, as command line arguments can be seen by other users + on the same machine." type: str - required: true autocommit: description: Autocommit the statement. 
type: bool @@ -89,6 +98,17 @@ EXAMPLES = r''' - /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt - /tmp/HANA.txt host: "localhost" + +- name: Run several queries from user store + community.general.hana_query: + sid: "hdb" + instance: "01" + user: hdbstoreuser + userstore: true + query: + - "select user_name from users;" + - select * from users; + autocommit: False ''' RETURN = r''' @@ -117,16 +137,18 @@ def main(): argument_spec=dict( sid=dict(type='str', required=True), instance=dict(type='str', required=True), - encrypted=dict(type='bool', required=False, default=False), + encrypted=dict(type='bool', default=False), host=dict(type='str', required=False), - user=dict(type='str', required=False, default="SYSTEM"), - password=dict(type='str', required=True, no_log=True), + user=dict(type='str', default="SYSTEM"), + userstore=dict(type='bool', default=False), + password=dict(type='str', no_log=True), database=dict(type='str', required=False), query=dict(type='list', elements='str', required=False), filepath=dict(type='list', elements='path', required=False), - autocommit=dict(type='bool', required=False, default=True), + autocommit=dict(type='bool', default=True), ), required_one_of=[('query', 'filepath')], + required_if=[('userstore', False, ['password'])], supports_check_mode=False, ) rc, out, err, out_raw = [0, [], "", ""] @@ -136,6 +158,7 @@ def main(): sid = (params['sid']).upper() instance = params['instance'] user = params['user'] + userstore = params['userstore'] password = params['password'] autocommit = params['autocommit'] host = params['host'] @@ -161,7 +184,10 @@ def main(): if database is not None: command.extend(['-d', database]) # -x Suppresses additional output, such as the number of selected rows in a result set. 
- command.extend(['-x', '-i', instance, '-u', user, '-p', password]) + if userstore: + command.extend(['-x', '-U', user]) + else: + command.extend(['-x', '-i', instance, '-u', user, '-p', password]) if filepath is not None: command.extend(['-I']) diff --git a/tests/unit/plugins/modules/database/saphana/test_hana_query.py b/tests/unit/plugins/modules/database/saphana/test_hana_query.py index 4d158c028e..b0fd939604 100644 --- a/tests/unit/plugins/modules/database/saphana/test_hana_query.py +++ b/tests/unit/plugins/modules/database/saphana/test_hana_query.py @@ -64,3 +64,39 @@ class Testhana_query(ModuleTestCase): {'username': 'myuser', 'name': 'my user'}, ]]) self.assertEqual(run_command.call_count, 1) + + def test_hana_userstore_query(self): + """Check that result is processed with userstore.""" + set_module_args({ + 'sid': "HDB", + 'instance': "01", + 'encrypted': False, + 'host': "localhost", + 'user': "SYSTEM", + 'userstore': True, + 'database': "HDB", + 'query': "SELECT * FROM users;" + }) + with patch.object(basic.AnsibleModule, 'run_command') as run_command: + run_command.return_value = 0, 'username,name\n testuser,test user \n myuser, my user \n', '' + with self.assertRaises(AnsibleExitJson) as result: + hana_query.main() + self.assertEqual(result.exception.args[0]['query_result'], [[ + {'username': 'testuser', 'name': 'test user'}, + {'username': 'myuser', 'name': 'my user'}, + ]]) + self.assertEqual(run_command.call_count, 1) + + def test_hana_failed_no_passwd(self): + """Check that result is failed with no password.""" + with self.assertRaises(AnsibleFailJson): + set_module_args({ + 'sid': "HDB", + 'instance': "01", + 'encrypted': False, + 'host': "localhost", + 'user': "SYSTEM", + 'database': "HDB", + 'query': "SELECT * FROM users;" + }) + self.module.main() From 16476f5cb932a61c3db890402cc4bc39664af281 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 5 Aug 2021 23:46:39 +0200 Subject: [PATCH 0491/3093] Update AZP config (#3111) * Update AZP 
config. * Skip 8.4 as well for django_manage (next to 8.3 and 8.2). * Temporarily skip 8.4 for yum_versionlock. --- .azure-pipelines/azure-pipelines.yml | 4 ++-- tests/integration/targets/django_manage/aliases | 1 + tests/integration/targets/yum_versionlock/aliases | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 8dc49e5c03..78df8b366f 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -186,8 +186,8 @@ stages: test: macos/11.1 - name: RHEL 7.9 test: rhel/7.9 - - name: RHEL 8.3 - test: rhel/8.3 + - name: RHEL 8.4 + test: rhel/8.4 - name: FreeBSD 12.2 test: freebsd/12.2 - name: FreeBSD 13.0 diff --git a/tests/integration/targets/django_manage/aliases b/tests/integration/targets/django_manage/aliases index e9c002109c..7488aa82d7 100644 --- a/tests/integration/targets/django_manage/aliases +++ b/tests/integration/targets/django_manage/aliases @@ -5,3 +5,4 @@ skip/macos skip/osx skip/rhel8.2 skip/rhel8.3 +skip/rhel8.4 diff --git a/tests/integration/targets/yum_versionlock/aliases b/tests/integration/targets/yum_versionlock/aliases index abe0a21e22..92b8e448f1 100644 --- a/tests/integration/targets/yum_versionlock/aliases +++ b/tests/integration/targets/yum_versionlock/aliases @@ -3,3 +3,4 @@ skip/aix skip/freebsd skip/osx skip/macos +skip/rhel8.4 # TODO make sure that tests work on 8.4 as well! 
From ff586f1105d35c0efe7c9e858ae6c943f9e4ec58 Mon Sep 17 00:00:00 2001 From: Bartosz <8199062+bartoszkosiorek@users.noreply.github.com> Date: Fri, 6 Aug 2021 10:01:05 +0200 Subject: [PATCH 0492/3093] pkgin: display stdout and stderr in case the error occurs (#3148) * pkgin: display stdout and stderr in case the error occurs * Update changelogs/fragments/pkgin-output-after-error.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/pkgin-output-after-error.yml | 2 ++ plugins/modules/packaging/os/pkgin.py | 12 ++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/pkgin-output-after-error.yml diff --git a/changelogs/fragments/pkgin-output-after-error.yml b/changelogs/fragments/pkgin-output-after-error.yml new file mode 100644 index 0000000000..a2dd2d6a1e --- /dev/null +++ b/changelogs/fragments/pkgin-output-after-error.yml @@ -0,0 +1,2 @@ +minor_changes: + - pkgin - in case of ``pkgin`` tool failue, display returned standard output ``stdout`` and standard error ``stderr`` to ease debugging (https://github.com/ansible-collections/community.general/issues/3146). 
diff --git a/plugins/modules/packaging/os/pkgin.py b/plugins/modules/packaging/os/pkgin.py index 2937314fa1..90a711fb6a 100644 --- a/plugins/modules/packaging/os/pkgin.py +++ b/plugins/modules/packaging/os/pkgin.py @@ -251,7 +251,7 @@ def remove_packages(module, packages): format_pkgin_command(module, "remove", package)) if not module.check_mode and query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: - module.fail_json(msg="failed to remove %s: %s" % (package, out)) + module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=out, stderr=err) remove_c += 1 @@ -276,14 +276,14 @@ def install_packages(module, packages): format_pkgin_command(module, "install", package)) if not module.check_mode and not query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]: - module.fail_json(msg="failed to install %s: %s" % (package, out)) + module.fail_json(msg="failed to install %s: %s" % (package, out), stdout=out, stderr=err) install_c += 1 if install_c > 0: module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c)) - module.exit_json(changed=False, msg="package(s) already present") + module.exit_json(changed=False, msg="package(s) already present", stdout=out, stderr=err) def update_package_db(module): @@ -296,7 +296,7 @@ def update_package_db(module): else: return True, "updated repository database" else: - module.fail_json(msg="could not update package db") + module.fail_json(msg="could not update package db", stdout=out, stderr=err) def do_upgrade_packages(module, full=False): @@ -312,7 +312,7 @@ def do_upgrade_packages(module, full=False): if re.search('^nothing to do.\n$', out): module.exit_json(changed=False, msg="nothing left to upgrade") else: - module.fail_json(msg="could not %s packages" % cmd) + module.fail_json(msg="could not %s packages" % cmd, stdout=out, stderr=err) def upgrade_packages(module): @@ -332,7 +332,7 @@ def clean_cache(module): # so assume it did. 
module.exit_json(changed=True, msg="cleaned caches") else: - module.fail_json(msg="could not clean package cache") + module.fail_json(msg="could not clean package cache", stdout=out, stderr=err) def main(): From da11a98cb734e99cc57f4ae6ec09d9199875c39b Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 8 Aug 2021 01:02:21 +1200 Subject: [PATCH 0493/3093] fixed the utf-8 marker (#3162) --- plugins/action/system/iptables_state.py | 2 +- plugins/action/system/shutdown.py | 2 +- plugins/cache/memcached.py | 2 +- plugins/cache/pickle.py | 2 +- plugins/cache/redis.py | 2 +- plugins/cache/yaml.py | 2 +- plugins/callback/context_demo.py | 2 +- plugins/callback/counter_enabled.py | 2 +- plugins/callback/dense.py | 2 +- plugins/callback/hipchat.py | 2 +- plugins/callback/jabber.py | 2 +- plugins/callback/log_plays.py | 2 +- plugins/callback/loganalytics.py | 2 +- plugins/callback/logdna.py | 2 +- plugins/callback/logentries.py | 2 +- plugins/callback/logstash.py | 2 +- plugins/callback/null.py | 2 +- plugins/callback/say.py | 2 +- plugins/callback/selective.py | 2 +- plugins/callback/slack.py | 2 +- plugins/callback/syslog_json.py | 2 +- plugins/callback/unixy.py | 2 +- plugins/callback/yaml.py | 2 +- plugins/connection/chroot.py | 2 +- plugins/connection/funcd.py | 2 +- plugins/connection/iocage.py | 2 +- plugins/connection/jail.py | 2 +- plugins/connection/lxc.py | 2 +- plugins/connection/lxd.py | 2 +- plugins/connection/qubes.py | 2 +- plugins/connection/saltstack.py | 2 +- plugins/connection/zone.py | 2 +- plugins/doc_fragments/hpe3par.py | 2 +- plugins/doc_fragments/hwc.py | 2 +- plugins/doc_fragments/oracle.py | 2 +- plugins/doc_fragments/oracle_creatable_resource.py | 2 +- plugins/doc_fragments/oracle_display_name_option.py | 2 +- plugins/doc_fragments/oracle_name_option.py | 2 +- plugins/doc_fragments/oracle_tags.py | 2 +- plugins/doc_fragments/oracle_wait_options.py | 2 +- plugins/doc_fragments/vexata.py | 2 +- 
plugins/filter/dict_kv.py | 2 +- plugins/filter/jc.py | 2 +- plugins/filter/json_query.py | 2 +- plugins/filter/random_mac.py | 2 +- plugins/filter/version_sort.py | 2 +- plugins/inventory/linode.py | 2 +- plugins/inventory/nmap.py | 2 +- plugins/inventory/online.py | 2 +- plugins/inventory/scaleway.py | 2 +- plugins/inventory/stackpath_compute.py | 2 +- plugins/inventory/virtualbox.py | 2 +- plugins/lookup/cartesian.py | 2 +- plugins/lookup/chef_databag.py | 2 +- plugins/lookup/consul_kv.py | 2 +- plugins/lookup/credstash.py | 2 +- plugins/lookup/cyberarkpassword.py | 2 +- plugins/lookup/dependent.py | 2 +- plugins/lookup/dig.py | 2 +- plugins/lookup/dnstxt.py | 2 +- plugins/lookup/etcd.py | 2 +- plugins/lookup/filetree.py | 2 +- plugins/lookup/flattened.py | 2 +- plugins/lookup/hiera.py | 2 +- plugins/lookup/keyring.py | 2 +- plugins/lookup/lastpass.py | 2 +- plugins/lookup/lmdb_kv.py | 2 +- plugins/lookup/manifold.py | 2 +- plugins/lookup/nios.py | 2 +- plugins/lookup/passwordstore.py | 2 +- plugins/lookup/redis.py | 2 +- plugins/lookup/shelvefile.py | 2 +- plugins/modules/clustering/nomad/nomad_job.py | 2 +- plugins/modules/clustering/nomad/nomad_job_info.py | 2 +- plugins/modules/web_infrastructure/apache2_module.py | 2 +- 75 files changed, 75 insertions(+), 75 deletions(-) diff --git a/plugins/action/system/iptables_state.py b/plugins/action/system/iptables_state.py index 93e4bc2ed4..b8ae1a5dea 100644 --- a/plugins/action/system/iptables_state.py +++ b/plugins/action/system/iptables_state.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2020, quidame # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/action/system/shutdown.py b/plugins/action/system/shutdown.py index 4995ef8d8b..19813b0847 100644 --- a/plugins/action/system/shutdown.py +++ b/plugins/action/system/shutdown.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2020, Amin 
Vakil # Copyright: (c) 2016-2018, Matt Davis # Copyright: (c) 2018, Sam Doran diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index fb2a778fc3..ee36628f40 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2014, Brian Coca, Josh Drake, et al # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py index b790e73a4c..1e549d4d66 100644 --- a/plugins/cache/pickle.py +++ b/plugins/cache/pickle.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017, Brian Coca # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 6b5f2c4ad0..3c73d8b5be 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2014, Brian Coca, Josh Drake, et al # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py index b676dd0dbb..e5062b16d1 100644 --- a/plugins/cache/yaml.py +++ b/plugins/cache/yaml.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017, Brian Coca # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py index 39c912acae..c85cc60cda 100644 --- a/plugins/callback/context_demo.py +++ b/plugins/callback/context_demo.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (C) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git 
a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py index 352c773b9b..3b6e5e7ad4 100644 --- a/plugins/callback/counter_enabled.py +++ b/plugins/callback/counter_enabled.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2018, Ivan Aragones Muniesa # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ''' diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py index 38d3e1bee7..af8464631c 100644 --- a/plugins/callback/dense.py +++ b/plugins/callback/dense.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2016, Dag Wieers # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py index 771c425df8..c64b892d9b 100644 --- a/plugins/callback/hipchat.py +++ b/plugins/callback/hipchat.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (C) 2014, Matt Martz # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index c57e08804a..b535fa9540 100644 --- a/plugins/callback/jabber.py +++ b/plugins/callback/jabber.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (C) 2016 maxn nikolaev.makc@gmail.com # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index 24acf3fc95..2539bd9ade 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (C) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/loganalytics.py 
b/plugins/callback/loganalytics.py index ccc7649218..04fc646dc4 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index ddb4c477da..0c459bfac2 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2018, Samir Musali # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index 344bd219cd..ad71a6d448 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Logentries.com, Jimmy Tang # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index 95da7fa95a..4c4fad8450 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (C) 2020, Yevhen Khmelenko # (C) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/null.py b/plugins/callback/null.py index 9eb5198d0c..13ea65b438 100644 --- a/plugins/callback/null.py +++ b/plugins/callback/null.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/say.py b/plugins/callback/say.py index 309777e241..8e8bd507a2 100644 --- a/plugins/callback/say.py +++ 
b/plugins/callback/say.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2012, Michael DeHaan, # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index b1e09c8236..403eb84b33 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) Fastly, inc 2016 # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index c791bf6a36..5cb402b109 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (C) 2014-2015, Matt Martz # (C) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py index 73543614a8..f4865f2a26 100644 --- a/plugins/callback/syslog_json.py +++ b/plugins/callback/syslog_json.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index dec2ab0c8c..fd00fae71b 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Allyson Bowles <@akatch> # Copyright: (c) 2012-2014, Michael DeHaan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py index d4036c808e..24140116ed 100644 --- a/plugins/callback/yaml.py +++ b/plugins/callback/yaml.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- 
coding: utf-8 -*- # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index 3e15947031..295bd4046b 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # # (c) 2013, Maykel Moya diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index caf9d06c60..94d1a3bd9c 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Copyright (c) 2013, Michael Scherer diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index 94761d5c17..2fd74313bc 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on jail.py # (c) 2013, Michael Scherer # (c) 2015, Toshio Kuratomi diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index c3de25c753..02f5aeeddd 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on local.py by Michael DeHaan # and chroot.py by Maykel Moya # Copyright (c) 2013, Michael Scherer diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index d5c7a7ebbe..2aaf1619dc 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Joerg Thalheim # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index 31ff13c776..63eaf6ca51 100644 --- a/plugins/connection/lxd.py +++ 
b/plugins/connection/lxd.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2016 Matt Clay # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index fd72f38e2f..1de9e10011 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on the buildah connection plugin # Copyright (c) 2017 Ansible Project # 2018 Kushal Das diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index 3d56083bb6..cbd85eaf3e 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Based on func.py diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index a859b5e32f..8fbcd8a038 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # and chroot.py (c) 2013, Maykel Moya # and jail.py (c) 2013, Michael Scherer diff --git a/plugins/doc_fragments/hpe3par.py b/plugins/doc_fragments/hpe3par.py index e16ead4207..ad445205d8 100644 --- a/plugins/doc_fragments/hpe3par.py +++ b/plugins/doc_fragments/hpe3par.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py index c6c5dd23bd..ecba2adde8 100644 --- a/plugins/doc_fragments/hwc.py +++ b/plugins/doc_fragments/hwc.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Huawei Inc. 
# GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py index 94ed18107d..94999c04ec 100644 --- a/plugins/doc_fragments/oracle.py +++ b/plugins/doc_fragments/oracle.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py index f76e7146b3..211ca6f9c1 100644 --- a/plugins/doc_fragments/oracle_creatable_resource.py +++ b/plugins/doc_fragments/oracle_creatable_resource.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_display_name_option.py b/plugins/doc_fragments/oracle_display_name_option.py index b9ce0d92fe..ff70d45dd9 100644 --- a/plugins/doc_fragments/oracle_display_name_option.py +++ b/plugins/doc_fragments/oracle_display_name_option.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_name_option.py b/plugins/doc_fragments/oracle_name_option.py index dd9b98816e..8c4f9c1e39 100644 --- a/plugins/doc_fragments/oracle_name_option.py +++ b/plugins/doc_fragments/oracle_name_option.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_tags.py b/plugins/doc_fragments/oracle_tags.py index e92598c549..f95b22c8ed 100644 --- a/plugins/doc_fragments/oracle_tags.py +++ b/plugins/doc_fragments/oracle_tags.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/oracle_wait_options.py b/plugins/doc_fragments/oracle_wait_options.py index d94f079a86..0312755ffa 100644 --- a/plugins/doc_fragments/oracle_wait_options.py +++ b/plugins/doc_fragments/oracle_wait_options.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py index 920457fa04..d541d5ad85 100644 --- a/plugins/doc_fragments/vexata.py +++ b/plugins/doc_fragments/vexata.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # # Copyright: (c) 2019, Sandeep Kasargod # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/filter/dict_kv.py b/plugins/filter/dict_kv.py index fc1978b977..7ce6c3e44a 100644 --- a/plugins/filter/dict_kv.py +++ b/plugins/filter/dict_kv.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (C) 2020 Stanislav German-Evtushenko (@giner) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py index 42dcf98234..f8fc4ac5bd 100644 --- a/plugins/filter/jc.py +++ b/plugins/filter/jc.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Filipe Niero Felisbino # # This file is part of 
Ansible diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py index 9b9ecb93f2..9c835e8c71 100644 --- a/plugins/filter/json_query.py +++ b/plugins/filter/json_query.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Filipe Niero Felisbino # # This file is part of Ansible diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py index dc04e99a96..7d25555aa9 100644 --- a/plugins/filter/random_mac.py +++ b/plugins/filter/random_mac.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2020 Ansible Project # # This file is part of Ansible diff --git a/plugins/filter/version_sort.py b/plugins/filter/version_sort.py index d228ea62d0..c59e87c9c6 100644 --- a/plugins/filter/version_sort.py +++ b/plugins/filter/version_sort.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (C) 2021 Eric Lavarde # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 566073a4a8..c2dcac5392 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index ade3adc3d4..e411006ff0 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index a74c6026ea..085c258d45 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project # GNU 
General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index b327824f33..86140124c5 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py index e8477b95f3..d777875578 100644 --- a/plugins/inventory/stackpath_compute.py +++ b/plugins/inventory/stackpath_compute.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2020 Shay Rybak # Copyright (c) 2020 Ansible Project # GNU General Public License v3.0+ diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index 672312cd8e..89a77c88bb 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py index 841f4f8c4d..98043eba34 100644 --- a/plugins/lookup/cartesian.py +++ b/plugins/lookup/cartesian.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2013, Bradley Young # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index d594c7681e..f5ccc766c2 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2016, Josh Bradley # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index 58f450eb65..8b779e6aca 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Steve Gargan # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index 1a87deed41..9be3527b19 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Ensighten # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index 112e7c1cd8..80323c10fd 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017, Edward Nunez # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index 3f73f88bfa..1fb75ece66 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015-2021, Felix Fontein # (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index 6520b0d3ec..f5156b4d1e 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/dnstxt.py 
b/plugins/lookup/dnstxt.py index 84bff41795..868d3dd3a3 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2012, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index ca13442e43..0c81d0215b 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2013, Jan-Piet Mens # (m) 2016, Mihai Moldovanu # (m) 2017, Juan Manuel Parrilla diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index e663fc9515..1c83486b05 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2016 Dag Wieers # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/flattened.py b/plugins/lookup/flattened.py index d1ddd14f56..c2e4494fd4 100644 --- a/plugins/lookup/flattened.py +++ b/plugins/lookup/flattened.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2013, Serge van Ginderachter # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index 658f377d59..5b440469eb 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017, Juan Manuel Parrilla # (c) 2012-17 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py index a98ae7aee9..73f9c5f4a9 100644 --- a/plugins/lookup/keyring.py +++ b/plugins/lookup/keyring.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: 
utf-8 -*- # (c) 2016, Samuel Boucher # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index 3ae51b4c64..920d33176f 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2016, Andrew Zenk # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index 61dc410cc4..9dd46e338a 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017-2018, Jan-Piet Mens # (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py index 076a475091..01bb13cf0b 100644 --- a/plugins/lookup/manifold.py +++ b/plugins/lookup/manifold.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2018, Arigato Machine Inc. 
# (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/nios.py b/plugins/lookup/nios.py index 008e8feffe..089805c97a 100644 --- a/plugins/lookup/nios.py +++ b/plugins/lookup/nios.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # # Copyright 2018 Red Hat | Ansible # diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 3e936d8b18..7c00f432b1 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2017, Patrick Deelman # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index fdf3a6e17b..8de7e04cce 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2012, Jan-Piet Mens # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 175ed49891..56cfdf1143 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -1,4 +1,4 @@ -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2015, Alejandro Guirao # (c) 2012-17 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/clustering/nomad/nomad_job.py b/plugins/modules/clustering/nomad/nomad_job.py index a5e1cd3755..341592be50 100644 --- a/plugins/modules/clustering/nomad/nomad_job.py +++ b/plugins/modules/clustering/nomad/nomad_job.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2020, FERREIRA Christophe # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff 
--git a/plugins/modules/clustering/nomad/nomad_job_info.py b/plugins/modules/clustering/nomad/nomad_job_info.py index d913ebeb61..d49111bb4f 100644 --- a/plugins/modules/clustering/nomad/nomad_job_info.py +++ b/plugins/modules/clustering/nomad/nomad_job_info.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2020, FERREIRA Christophe # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/apache2_module.py b/plugins/modules/web_infrastructure/apache2_module.py index d85ed0158f..c75dc1c30c 100644 --- a/plugins/modules/web_infrastructure/apache2_module.py +++ b/plugins/modules/web_infrastructure/apache2_module.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# coding: utf-8 -*- +# -*- coding: utf-8 -*- # (c) 2013-2014, Christian Berendt # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) From 6bfa6e40f4c421e3781ccaf80743424c5b60ec0a Mon Sep 17 00:00:00 2001 From: David Hummel <6109326+hummeltech@users.noreply.github.com> Date: Sat, 7 Aug 2021 06:20:44 -0700 Subject: [PATCH 0494/3093] nmcli: Fix change detection for Wi-Fi security options (#3136) * Fixed `wifi_sec` option changes are not detected Also updated `docs` URL and formatting to match that of the `wifi` option * Removed extraneous `appends` to `cmd` in `connection_update` These really should have only been added to `connection_options` whose return values get `extended` onto `cmd` --- ...i-sec-change-detection-to-nmcli-module.yml | 3 ++ plugins/modules/net_tools/nmcli.py | 32 +++++------- .../plugins/modules/net_tools/test_nmcli.py | 51 +++++++++++++++++++ 3 files changed, 66 insertions(+), 20 deletions(-) create mode 100644 changelogs/fragments/3136-add-wifi-sec-change-detection-to-nmcli-module.yml diff --git a/changelogs/fragments/3136-add-wifi-sec-change-detection-to-nmcli-module.yml 
b/changelogs/fragments/3136-add-wifi-sec-change-detection-to-nmcli-module.yml new file mode 100644 index 0000000000..6cc5e7630d --- /dev/null +++ b/changelogs/fragments/3136-add-wifi-sec-change-detection-to-nmcli-module.yml @@ -0,0 +1,3 @@ +minor_changes: + - nmcli - add ``wifi-sec`` option change detection to support managing secure Wi-Fi connections + (https://github.com/ansible-collections/community.general/pull/3136). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index bbc1b4770f..92d1e65ef7 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -332,10 +332,10 @@ options: version_added: 2.0.0 wifi_sec: description: - - 'The security configuration of the Wifi connection. The valid attributes are listed on:' - - 'U(https://developer.gnome.org/NetworkManager/stable/settings-802-11-wireless-security.html)' - - 'For instance to use common WPA-PSK auth with a password:' - - '- C({key-mgmt: wpa-psk, psk: my_password})' + - 'The security configuration of the WiFi connection. The valid attributes are listed on: + U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' + - 'For instance to use common WPA-PSK auth with a password: + C({key-mgmt: wpa-psk, psk: my_password}).' type: dict version_added: 3.0.0 ssid: @@ -345,9 +345,9 @@ options: version_added: 3.0.0 wifi: description: - - 'The configuration of the Wifi connection. The valid attributes are listed on: + - 'The configuration of the WiFi connection. The valid attributes are listed on: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' - - 'For instance to create a hidden AP mode Wifi connection: + - 'For instance to create a hidden AP mode WiFi connection: C({hidden: true, mode: ap}).' 
type: dict version_added: 3.5.0 @@ -915,6 +915,11 @@ class Nmcli(object): options.update({ '802-11-wireless.%s' % name: value }) + if self.wifi_sec: + for name, value in self.wifi_sec.items(): + options.update({ + '802-11-wireless-security.%s' % name: value + }) # Convert settings values based on the situation. for setting, value in options.items(): setting_type = self.settings_type(setting) @@ -1065,19 +1070,6 @@ class Nmcli(object): else: ifname = self.ifname - if self.type == "wifi": - cmd.append('ssid') - cmd.append(self.ssid) - if self.wifi: - for name, value in self.wifi.items(): - # Disallow setting 'ssid' via 'wifi.ssid' - if name == 'ssid': - continue - cmd += ['802-11-wireless.%s' % name, value] - if self.wifi_sec: - for name, value in self.wifi_sec.items(): - cmd += ['wifi-sec.%s' % name, value] - options = { 'connection.interface-name': ifname, } @@ -1116,7 +1108,7 @@ class Nmcli(object): return self.connection_update('modify') def show_connection(self): - cmd = [self.nmcli_bin, 'con', 'show', self.conn_name] + cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] (rc, out, err) = self.execute_command(cmd) diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index b2307f245a..c1b3e93ed4 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -491,6 +491,22 @@ TESTCASE_WIRELESS = [ } ] +TESTCASE_SECURE_WIRELESS = [ + { + 'type': 'wifi', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'wireless_non_existant', + 'ip4': '10.10.10.10/24', + 'ssid': 'Brittany', + 'wifi_sec': { + 'key-mgmt': 'wpa-psk', + 'psk': 'VERY_SECURE_PASSWORD', + }, + 'state': 'present', + '_ansible_check_mode': False, + } +] + TESTCASE_DUMMY_STATIC = [ { 'type': 'dummy', @@ -1630,6 +1646,41 @@ def test_create_wireless(mocked_generic_connection_create, capfd): assert results['changed'] +@pytest.mark.parametrize('patch_ansible_module', 
TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +def test_create_secure_wireless(mocked_generic_connection_create, capfd): + """ + Test : Create secure wireless connection + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[0] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'add' + assert add_args[0][3] == 'type' + assert add_args[0][4] == 'wifi' + assert add_args[0][5] == 'con-name' + assert add_args[0][6] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in ['connection.interface-name', 'wireless_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + '802-11-wireless.ssid', 'Brittany', + '802-11-wireless-security.key-mgmt', 'wpa-psk', + '802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD']: + assert param in add_args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) def test_create_dummy_static(mocked_generic_connection_create, capfd): """ From 771e9de010b2c4cb256f5fe2a9375d63e6eac511 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 8 Aug 2021 20:40:22 +1200 Subject: [PATCH 0495/3093] mass-added the utf-8 marker (#3163) * added the utf-8 marker * fixed the utf-8 marker where it was missde before --- plugins/lookup/nios_next_ip.py | 1 + plugins/lookup/nios_next_network.py | 1 + plugins/module_utils/_mount.py | 1 + plugins/module_utils/_netapp.py | 1 + plugins/module_utils/alicloud_ecs.py | 1 + plugins/module_utils/cloud.py | 1 + plugins/module_utils/database.py | 1 + plugins/module_utils/heroku.py | 1 + plugins/module_utils/hwc_utils.py | 1 + 
plugins/module_utils/ibm_sa_utils.py | 1 + plugins/module_utils/identity/keycloak/keycloak.py | 1 + plugins/module_utils/known_hosts.py | 1 + plugins/module_utils/linode.py | 1 + plugins/module_utils/manageiq.py | 1 + plugins/module_utils/memset.py | 1 + plugins/module_utils/net_tools/nios/api.py | 1 + plugins/module_utils/oneandone.py | 1 + plugins/module_utils/oneview.py | 1 + plugins/module_utils/online.py | 1 + plugins/module_utils/opennebula.py | 1 + plugins/module_utils/oracle/oci_utils.py | 1 + plugins/module_utils/rax.py | 1 + plugins/module_utils/redfish_utils.py | 1 + plugins/module_utils/redhat.py | 1 + plugins/module_utils/remote_management/lxca/common.py | 1 + plugins/module_utils/scaleway.py | 1 + plugins/module_utils/storage/emc/emc_vnx.py | 1 + plugins/module_utils/storage/hpe3par/hpe3par.py | 1 + plugins/module_utils/univention_umc.py | 2 +- plugins/module_utils/utm_utils.py | 1 + plugins/modules/cloud/centurylink/clc_aa_policy.py | 1 + plugins/modules/cloud/centurylink/clc_alert_policy.py | 1 + plugins/modules/cloud/centurylink/clc_blueprint_package.py | 1 + plugins/modules/cloud/centurylink/clc_firewall_policy.py | 1 + plugins/modules/cloud/centurylink/clc_group.py | 1 + plugins/modules/cloud/centurylink/clc_loadbalancer.py | 1 + plugins/modules/cloud/centurylink/clc_modify_server.py | 1 + plugins/modules/cloud/centurylink/clc_publicip.py | 1 + plugins/modules/cloud/centurylink/clc_server.py | 1 + plugins/modules/cloud/centurylink/clc_server_snapshot.py | 1 + plugins/modules/cloud/heroku/heroku_collaborator.py | 1 + plugins/modules/cloud/linode/linode.py | 1 + plugins/modules/cloud/misc/proxmox.py | 1 + plugins/modules/cloud/misc/proxmox_template.py | 1 + plugins/modules/cloud/misc/xenserver_facts.py | 1 + plugins/modules/cloud/oneandone/oneandone_firewall_policy.py | 1 + plugins/modules/cloud/oneandone/oneandone_load_balancer.py | 1 + plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py | 1 + 
plugins/modules/cloud/oneandone/oneandone_private_network.py | 1 + plugins/modules/cloud/oneandone/oneandone_public_ip.py | 1 + plugins/modules/cloud/oneandone/oneandone_server.py | 1 + plugins/modules/cloud/opennebula/one_host.py | 1 + plugins/modules/cloud/opennebula/one_template.py | 1 + plugins/modules/cloud/oracle/oci_vcn.py | 1 + plugins/modules/cloud/ovh/ovh_ip_failover.py | 1 + plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py | 1 + plugins/modules/cloud/packet/packet_device.py | 1 + plugins/modules/cloud/packet/packet_sshkey.py | 1 + plugins/modules/cloud/profitbricks/profitbricks.py | 1 + plugins/modules/cloud/profitbricks/profitbricks_datacenter.py | 1 + plugins/modules/cloud/profitbricks/profitbricks_nic.py | 1 + plugins/modules/cloud/profitbricks/profitbricks_volume.py | 1 + .../cloud/profitbricks/profitbricks_volume_attachments.py | 1 + plugins/modules/cloud/pubnub/pubnub_blocks.py | 1 + plugins/modules/cloud/rackspace/rax.py | 1 + plugins/modules/cloud/rackspace/rax_cbs.py | 1 + plugins/modules/cloud/rackspace/rax_cbs_attachments.py | 1 + plugins/modules/cloud/rackspace/rax_cdb.py | 1 + plugins/modules/cloud/rackspace/rax_cdb_database.py | 1 + plugins/modules/cloud/rackspace/rax_cdb_user.py | 1 + plugins/modules/cloud/rackspace/rax_clb.py | 1 + plugins/modules/cloud/rackspace/rax_clb_nodes.py | 1 + plugins/modules/cloud/rackspace/rax_clb_ssl.py | 1 + plugins/modules/cloud/rackspace/rax_dns.py | 1 + plugins/modules/cloud/rackspace/rax_dns_record.py | 1 + plugins/modules/cloud/rackspace/rax_facts.py | 1 + plugins/modules/cloud/rackspace/rax_files.py | 1 + plugins/modules/cloud/rackspace/rax_files_objects.py | 1 + plugins/modules/cloud/rackspace/rax_identity.py | 1 + plugins/modules/cloud/rackspace/rax_keypair.py | 1 + plugins/modules/cloud/rackspace/rax_meta.py | 1 + plugins/modules/cloud/rackspace/rax_mon_alarm.py | 1 + plugins/modules/cloud/rackspace/rax_mon_check.py | 1 + plugins/modules/cloud/rackspace/rax_mon_entity.py | 1 + 
plugins/modules/cloud/rackspace/rax_mon_notification.py | 1 + plugins/modules/cloud/rackspace/rax_mon_notification_plan.py | 1 + plugins/modules/cloud/rackspace/rax_network.py | 1 + plugins/modules/cloud/rackspace/rax_queue.py | 1 + plugins/modules/cloud/rackspace/rax_scaling_group.py | 1 + plugins/modules/cloud/rackspace/rax_scaling_policy.py | 1 + plugins/modules/cloud/scaleway/scaleway_compute.py | 1 + plugins/modules/cloud/scaleway/scaleway_database_backup.py | 1 + plugins/modules/cloud/scaleway/scaleway_ip.py | 1 + plugins/modules/cloud/scaleway/scaleway_lb.py | 1 + plugins/modules/cloud/scaleway/scaleway_security_group.py | 1 + plugins/modules/cloud/scaleway/scaleway_security_group_rule.py | 1 + plugins/modules/cloud/scaleway/scaleway_sshkey.py | 1 + plugins/modules/cloud/scaleway/scaleway_user_data.py | 1 + plugins/modules/cloud/scaleway/scaleway_volume.py | 1 + plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py | 1 + plugins/modules/cloud/univention/udm_dns_record.py | 2 +- plugins/modules/cloud/univention/udm_dns_zone.py | 2 +- plugins/modules/cloud/univention/udm_group.py | 2 +- plugins/modules/cloud/univention/udm_share.py | 2 +- plugins/modules/cloud/univention/udm_user.py | 2 +- plugins/modules/clustering/consul/consul.py | 1 + plugins/modules/clustering/consul/consul_acl.py | 1 + plugins/modules/clustering/consul/consul_kv.py | 1 + plugins/modules/clustering/etcd3.py | 1 + plugins/modules/clustering/znode.py | 1 + plugins/modules/database/aerospike/aerospike_migrations.py | 1 + plugins/modules/database/influxdb/influxdb_database.py | 1 + plugins/modules/database/influxdb/influxdb_retention_policy.py | 1 + plugins/modules/database/influxdb/influxdb_user.py | 1 + plugins/modules/files/sapcar_extract.py | 1 + plugins/modules/files/xattr.py | 1 + plugins/modules/monitoring/sensu/sensu_client.py | 1 + plugins/modules/monitoring/sensu/sensu_handler.py | 1 + plugins/modules/net_tools/dnsimple.py | 1 + plugins/modules/net_tools/ip_netns.py | 1 + 
plugins/modules/net_tools/ipinfoio_facts.py | 2 +- plugins/modules/net_tools/ipwcli_dns.py | 1 + plugins/modules/net_tools/lldp.py | 1 + plugins/modules/net_tools/nios/nios_a_record.py | 1 + plugins/modules/net_tools/nios/nios_aaaa_record.py | 1 + plugins/modules/net_tools/nios/nios_cname_record.py | 1 + plugins/modules/net_tools/nios/nios_dns_view.py | 1 + plugins/modules/net_tools/nios/nios_fixed_address.py | 1 + plugins/modules/net_tools/nios/nios_host_record.py | 1 + plugins/modules/net_tools/nios/nios_member.py | 1 + plugins/modules/net_tools/nios/nios_mx_record.py | 1 + plugins/modules/net_tools/nios/nios_naptr_record.py | 1 + plugins/modules/net_tools/nios/nios_network.py | 1 + plugins/modules/net_tools/nios/nios_network_view.py | 1 + plugins/modules/net_tools/nios/nios_ptr_record.py | 1 + plugins/modules/net_tools/nios/nios_srv_record.py | 1 + plugins/modules/net_tools/nios/nios_txt_record.py | 1 + plugins/modules/net_tools/nios/nios_zone.py | 1 + plugins/modules/net_tools/nsupdate.py | 1 + plugins/modules/notification/syslogger.py | 1 + plugins/modules/packaging/language/pip_package_info.py | 1 + plugins/modules/packaging/os/redhat_subscription.py | 1 + plugins/modules/packaging/os/rhn_channel.py | 1 + plugins/modules/packaging/os/rhsm_release.py | 1 + plugins/modules/packaging/os/rhsm_repository.py | 1 + plugins/modules/packaging/os/rpm_ostree_pkg.py | 1 + plugins/modules/packaging/os/swupd.py | 1 + plugins/modules/packaging/os/zypper_repository.py | 2 +- plugins/modules/remote_management/lxca/lxca_cmms.py | 1 + plugins/modules/remote_management/lxca/lxca_nodes.py | 1 + plugins/modules/remote_management/manageiq/manageiq_group.py | 1 + plugins/modules/remote_management/manageiq/manageiq_tenant.py | 1 + plugins/modules/remote_management/manageiq/manageiq_user.py | 1 + .../remote_management/oneview/oneview_datacenter_info.py | 1 + .../remote_management/oneview/oneview_enclosure_info.py | 1 + .../remote_management/oneview/oneview_ethernet_network.py | 1 + 
.../remote_management/oneview/oneview_ethernet_network_info.py | 1 + .../modules/remote_management/oneview/oneview_fc_network.py | 1 + .../remote_management/oneview/oneview_fc_network_info.py | 1 + .../modules/remote_management/oneview/oneview_fcoe_network.py | 1 + .../remote_management/oneview/oneview_fcoe_network_info.py | 1 + .../oneview/oneview_logical_interconnect_group.py | 1 + .../oneview/oneview_logical_interconnect_group_info.py | 1 + .../modules/remote_management/oneview/oneview_network_set.py | 1 + .../remote_management/oneview/oneview_network_set_info.py | 1 + .../modules/remote_management/oneview/oneview_san_manager.py | 1 + .../remote_management/oneview/oneview_san_manager_info.py | 1 + plugins/modules/source_control/github/github_issue.py | 1 + plugins/modules/source_control/github/github_key.py | 1 + plugins/modules/source_control/github/github_webhook.py | 1 + plugins/modules/source_control/github/github_webhook_info.py | 1 + plugins/modules/storage/emc/emc_vnx_sg_member.py | 1 + plugins/modules/storage/hpe3par/ss_3par_cpg.py | 1 + plugins/modules/system/kernel_blacklist.py | 2 +- plugins/modules/system/lbu.py | 1 + plugins/modules/system/pids.py | 1 + plugins/modules/system/python_requirements_info.py | 1 + plugins/modules/system/selogin.py | 1 + plugins/modules/system/syspatch.py | 1 + plugins/modules/system/sysupgrade.py | 1 + plugins/modules/system/vdo.py | 1 + plugins/modules/web_infrastructure/jenkins_build.py | 1 + plugins/modules/web_infrastructure/jenkins_job.py | 1 + plugins/modules/web_infrastructure/jenkins_job_info.py | 1 + plugins/modules/web_infrastructure/jenkins_plugin.py | 2 +- plugins/modules/web_infrastructure/jenkins_script.py | 3 +-- plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py | 1 + .../web_infrastructure/sophos_utm/utm_aaa_group_info.py | 1 + .../web_infrastructure/sophos_utm/utm_ca_host_key_cert.py | 1 + .../web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py | 1 + 
plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py | 1 + .../sophos_utm/utm_network_interface_address.py | 1 + .../sophos_utm/utm_network_interface_address_info.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_auth_profile.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_exception.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_frontend.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_frontend_info.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_location.py | 1 + .../web_infrastructure/sophos_utm/utm_proxy_location_info.py | 1 + 199 files changed, 199 insertions(+), 12 deletions(-) diff --git a/plugins/lookup/nios_next_ip.py b/plugins/lookup/nios_next_ip.py index 58e95c7d13..8fdbbc6f99 100644 --- a/plugins/lookup/nios_next_ip.py +++ b/plugins/lookup/nios_next_ip.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright 2018 Red Hat | Ansible # diff --git a/plugins/lookup/nios_next_network.py b/plugins/lookup/nios_next_network.py index c18c6ae993..a1c913320a 100644 --- a/plugins/lookup/nios_next_network.py +++ b/plugins/lookup/nios_next_network.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright 2018 Red Hat | Ansible # diff --git a/plugins/module_utils/_mount.py b/plugins/module_utils/_mount.py index 58be0e8b68..391d468178 100644 --- a/plugins/module_utils/_mount.py +++ b/plugins/module_utils/_mount.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is based on # Lib/posixpath.py of cpython diff --git a/plugins/module_utils/_netapp.py b/plugins/module_utils/_netapp.py index 81a50a336d..126cc1bf16 100644 --- a/plugins/module_utils/_netapp.py +++ b/plugins/module_utils/_netapp.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/alicloud_ecs.py b/plugins/module_utils/alicloud_ecs.py index 3c87c1ad76..d4d3bf76c9 100644 --- a/plugins/module_utils/alicloud_ecs.py +++ b/plugins/module_utils/alicloud_ecs.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/cloud.py b/plugins/module_utils/cloud.py index d90d1f5234..7619023a3c 100644 --- a/plugins/module_utils/cloud.py +++ b/plugins/module_utils/cloud.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # (c) 2016 Allen Sanabria, # diff --git a/plugins/module_utils/database.py b/plugins/module_utils/database.py index 67850308e0..825d3a2be9 100644 --- a/plugins/module_utils/database.py +++ b/plugins/module_utils/database.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/heroku.py b/plugins/module_utils/heroku.py index b6e89614f1..70b144c077 100644 --- a/plugins/module_utils/heroku.py +++ b/plugins/module_utils/heroku.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) diff --git a/plugins/module_utils/hwc_utils.py b/plugins/module_utils/hwc_utils.py index c11cb7d4d2..489e90dd3c 100644 --- a/plugins/module_utils/hwc_utils.py +++ b/plugins/module_utils/hwc_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c), Google Inc, 2017 # Simplified BSD License (see licenses/simplified_bsd.txt or # https://opensource.org/licenses/BSD-2-Clause) diff --git a/plugins/module_utils/ibm_sa_utils.py b/plugins/module_utils/ibm_sa_utils.py index fdaa38a9fc..4f70f844cd 100644 --- a/plugins/module_utils/ibm_sa_utils.py +++ b/plugins/module_utils/ibm_sa_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu # diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index c782e3690c..d53a29ba10 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c) 2017, Eike Frost # # This code is part of Ansible, but is an independent component. diff --git a/plugins/module_utils/known_hosts.py b/plugins/module_utils/known_hosts.py index efd311eb51..ea6c95b6e2 100644 --- a/plugins/module_utils/known_hosts.py +++ b/plugins/module_utils/known_hosts.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/linode.py b/plugins/module_utils/linode.py index 53d546dbe8..9d7c37e68d 100644 --- a/plugins/module_utils/linode.py +++ b/plugins/module_utils/linode.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/manageiq.py b/plugins/module_utils/manageiq.py index 7038fac88a..98e5590cc6 100644 --- a/plugins/module_utils/manageiq.py +++ b/plugins/module_utils/manageiq.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2017, Daniel Korn # diff --git a/plugins/module_utils/memset.py b/plugins/module_utils/memset.py index 357fded58e..7813290a72 100644 --- a/plugins/module_utils/memset.py +++ b/plugins/module_utils/memset.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/net_tools/nios/api.py b/plugins/module_utils/net_tools/nios/api.py index cbb8b63f3b..babda7659a 100644 --- a/plugins/module_utils/net_tools/nios/api.py +++ b/plugins/module_utils/net_tools/nios/api.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/oneandone.py b/plugins/module_utils/oneandone.py index 466d2665fa..5f65b670f3 100644 --- a/plugins/module_utils/oneandone.py +++ b/plugins/module_utils/oneandone.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/oneview.py b/plugins/module_utils/oneview.py index 66e1d6d4c7..6d786b0b80 100644 --- a/plugins/module_utils/oneview.py +++ b/plugins/module_utils/oneview.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/online.py b/plugins/module_utils/online.py index 464e454288..c0294abb79 100644 --- a/plugins/module_utils/online.py +++ b/plugins/module_utils/online.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from __future__ import (absolute_import, division, print_function) diff --git a/plugins/module_utils/opennebula.py b/plugins/module_utils/opennebula.py index a0a8d1305b..c896a9c6fa 100644 --- a/plugins/module_utils/opennebula.py +++ b/plugins/module_utils/opennebula.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright 2018 www.privaz.io Valletech AB # diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py index 0b82dadf0e..88e577af5c 100644 --- a/plugins/module_utils/oracle/oci_utils.py +++ b/plugins/module_utils/oracle/oci_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c) 2017, 2018, 2019 
Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/rax.py b/plugins/module_utils/rax.py index e8c455e0e9..84effee97c 100644 --- a/plugins/module_utils/rax.py +++ b/plugins/module_utils/rax.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 94e2c4b7d8..c861820edf 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c) 2017-2018 Dell EMC Inc. # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/module_utils/redhat.py b/plugins/module_utils/redhat.py index 0fb6e9b1f3..85f4a6aab2 100644 --- a/plugins/module_utils/redhat.py +++ b/plugins/module_utils/redhat.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/remote_management/lxca/common.py b/plugins/module_utils/remote_management/lxca/common.py index 297397e30d..07092b9642 100644 --- a/plugins/module_utils/remote_management/lxca/common.py +++ b/plugins/module_utils/remote_management/lxca/common.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index 3c73e92bb8..d714fd69e8 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function) __metaclass__ = type diff --git a/plugins/module_utils/storage/emc/emc_vnx.py b/plugins/module_utils/storage/emc/emc_vnx.py index afb1b6979c..5922512676 100644 --- a/plugins/module_utils/storage/emc/emc_vnx.py +++ b/plugins/module_utils/storage/emc/emc_vnx.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/module_utils/storage/hpe3par/hpe3par.py b/plugins/module_utils/storage/hpe3par/hpe3par.py index 47868a4b8c..b7734444dd 100644 --- a/plugins/module_utils/storage/hpe3par/hpe3par.py +++ b/plugins/module_utils/storage/hpe3par/hpe3par.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Hewlett Packard Enterprise Development LP # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) diff --git a/plugins/module_utils/univention_umc.py b/plugins/module_utils/univention_umc.py index c1d8b77749..a44a0052a9 100644 --- a/plugins/module_utils/univention_umc.py +++ b/plugins/module_utils/univention_umc.py @@ -1,4 +1,4 @@ -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
diff --git a/plugins/module_utils/utm_utils.py b/plugins/module_utils/utm_utils.py index fd196dcbca..7e6ff3093e 100644 --- a/plugins/module_utils/utm_utils.py +++ b/plugins/module_utils/utm_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible diff --git a/plugins/modules/cloud/centurylink/clc_aa_policy.py b/plugins/modules/cloud/centurylink/clc_aa_policy.py index 88c27e20f5..1d52cca7c5 100644 --- a/plugins/modules/cloud/centurylink/clc_aa_policy.py +++ b/plugins/modules/cloud/centurylink/clc_aa_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/centurylink/clc_alert_policy.py b/plugins/modules/cloud/centurylink/clc_alert_policy.py index 374f1cada1..de9d146dc4 100644 --- a/plugins/modules/cloud/centurylink/clc_alert_policy.py +++ b/plugins/modules/cloud/centurylink/clc_alert_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink diff --git a/plugins/modules/cloud/centurylink/clc_blueprint_package.py b/plugins/modules/cloud/centurylink/clc_blueprint_package.py index 4071b67c7c..bd0e868fa3 100644 --- a/plugins/modules/cloud/centurylink/clc_blueprint_package.py +++ b/plugins/modules/cloud/centurylink/clc_blueprint_package.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/centurylink/clc_firewall_policy.py b/plugins/modules/cloud/centurylink/clc_firewall_policy.py index ad26dc92f7..a8f8a4e5f0 100644 --- 
a/plugins/modules/cloud/centurylink/clc_firewall_policy.py +++ b/plugins/modules/cloud/centurylink/clc_firewall_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/centurylink/clc_group.py b/plugins/modules/cloud/centurylink/clc_group.py index a80cc400e9..e1c05c6c0c 100644 --- a/plugins/modules/cloud/centurylink/clc_group.py +++ b/plugins/modules/cloud/centurylink/clc_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink diff --git a/plugins/modules/cloud/centurylink/clc_loadbalancer.py b/plugins/modules/cloud/centurylink/clc_loadbalancer.py index 400a8b9c3f..950e087976 100644 --- a/plugins/modules/cloud/centurylink/clc_loadbalancer.py +++ b/plugins/modules/cloud/centurylink/clc_loadbalancer.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # diff --git a/plugins/modules/cloud/centurylink/clc_modify_server.py b/plugins/modules/cloud/centurylink/clc_modify_server.py index c0730a9c2b..90a368867e 100644 --- a/plugins/modules/cloud/centurylink/clc_modify_server.py +++ b/plugins/modules/cloud/centurylink/clc_modify_server.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/centurylink/clc_publicip.py b/plugins/modules/cloud/centurylink/clc_publicip.py index 8b5ac4cb4e..1cdb4aa8db 100644 --- a/plugins/modules/cloud/centurylink/clc_publicip.py +++ b/plugins/modules/cloud/centurylink/clc_publicip.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git 
a/plugins/modules/cloud/centurylink/clc_server.py b/plugins/modules/cloud/centurylink/clc_server.py index 4e02421892..95481f1a52 100644 --- a/plugins/modules/cloud/centurylink/clc_server.py +++ b/plugins/modules/cloud/centurylink/clc_server.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/centurylink/clc_server_snapshot.py b/plugins/modules/cloud/centurylink/clc_server_snapshot.py index 1c706b07a4..1f92def088 100644 --- a/plugins/modules/cloud/centurylink/clc_server_snapshot.py +++ b/plugins/modules/cloud/centurylink/clc_server_snapshot.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2015 CenturyLink # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/heroku/heroku_collaborator.py b/plugins/modules/cloud/heroku/heroku_collaborator.py index a326894dce..bbc34fdb30 100644 --- a/plugins/modules/cloud/heroku/heroku_collaborator.py +++ b/plugins/modules/cloud/heroku/heroku_collaborator.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/linode/linode.py b/plugins/modules/cloud/linode/linode.py index c9ee0e61ed..c627fb705a 100644 --- a/plugins/modules/cloud/linode/linode.py +++ b/plugins/modules/cloud/linode/linode.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py index 21817f10dc..c777564186 100644 --- a/plugins/modules/cloud/misc/proxmox.py +++ b/plugins/modules/cloud/misc/proxmox.py @@ -1,4 +1,5 
@@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/misc/proxmox_template.py b/plugins/modules/cloud/misc/proxmox_template.py index d7fb9341e6..bee2583908 100644 --- a/plugins/modules/cloud/misc/proxmox_template.py +++ b/plugins/modules/cloud/misc/proxmox_template.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: Ansible Project # diff --git a/plugins/modules/cloud/misc/xenserver_facts.py b/plugins/modules/cloud/misc/xenserver_facts.py index bc01c56ecb..f65e3c9a86 100644 --- a/plugins/modules/cloud/misc/xenserver_facts.py +++ b/plugins/modules/cloud/misc/xenserver_facts.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py b/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py index 32e42ea865..d46ce38897 100644 --- a/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py +++ b/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py b/plugins/modules/cloud/oneandone/oneandone_load_balancer.py index 71f1d96b9c..5f541a878c 100644 --- a/plugins/modules/cloud/oneandone/oneandone_load_balancer.py +++ b/plugins/modules/cloud/oneandone/oneandone_load_balancer.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py 
b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py index 67f2ce9cc0..28dd0d41c5 100644 --- a/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py +++ b/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/oneandone/oneandone_private_network.py b/plugins/modules/cloud/oneandone/oneandone_private_network.py index edbdc9f8ce..6a16cf683e 100644 --- a/plugins/modules/cloud/oneandone/oneandone_private_network.py +++ b/plugins/modules/cloud/oneandone/oneandone_private_network.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/oneandone/oneandone_public_ip.py b/plugins/modules/cloud/oneandone/oneandone_public_ip.py index edefbc938f..96b1c9f3a5 100644 --- a/plugins/modules/cloud/oneandone/oneandone_public_ip.py +++ b/plugins/modules/cloud/oneandone/oneandone_public_ip.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/oneandone/oneandone_server.py b/plugins/modules/cloud/oneandone/oneandone_server.py index 9eaf943be7..aa651bd75f 100644 --- a/plugins/modules/cloud/oneandone/oneandone_server.py +++ b/plugins/modules/cloud/oneandone/oneandone_server.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify diff --git a/plugins/modules/cloud/opennebula/one_host.py b/plugins/modules/cloud/opennebula/one_host.py index 714d2d86a9..f205a40a2c 100644 --- a/plugins/modules/cloud/opennebula/one_host.py +++ b/plugins/modules/cloud/opennebula/one_host.py @@ -1,4 +1,5 @@ 
#!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright 2018 www.privaz.io Valletech AB # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/opennebula/one_template.py b/plugins/modules/cloud/opennebula/one_template.py index b4c8a2fa83..3b0b601193 100644 --- a/plugins/modules/cloud/opennebula/one_template.py +++ b/plugins/modules/cloud/opennebula/one_template.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) 2021, Georg Gadinger # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/oracle/oci_vcn.py b/plugins/modules/cloud/oracle/oci_vcn.py index e2906357ae..a82914bdea 100644 --- a/plugins/modules/cloud/oracle/oci_vcn.py +++ b/plugins/modules/cloud/oracle/oci_vcn.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2017, 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/ovh/ovh_ip_failover.py b/plugins/modules/cloud/ovh/ovh_ip_failover.py index 545c40fff7..26179eb8f7 100644 --- a/plugins/modules/cloud/ovh/ovh_ip_failover.py +++ b/plugins/modules/cloud/ovh/ovh_ip_failover.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py b/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py index 965a499c6e..28d6f3a129 100644 --- a/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py +++ b/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git 
a/plugins/modules/cloud/packet/packet_device.py b/plugins/modules/cloud/packet/packet_device.py index f939572656..5cc8d13e9a 100644 --- a/plugins/modules/cloud/packet/packet_device.py +++ b/plugins/modules/cloud/packet/packet_device.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2016, Tomas Karasek # (c) 2016, Matt Baldwin # (c) 2016, Thibaud Morel l'Horset diff --git a/plugins/modules/cloud/packet/packet_sshkey.py b/plugins/modules/cloud/packet/packet_sshkey.py index 97589cddb9..57e988630e 100644 --- a/plugins/modules/cloud/packet/packet_sshkey.py +++ b/plugins/modules/cloud/packet/packet_sshkey.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright 2016 Tomas Karasek # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/profitbricks/profitbricks.py b/plugins/modules/cloud/profitbricks/profitbricks.py index 4c24d6408f..3a75778a08 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks.py +++ b/plugins/modules/cloud/profitbricks/profitbricks.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py b/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py index e3ba1d4950..7897ffdeb9 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py +++ b/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/profitbricks/profitbricks_nic.py b/plugins/modules/cloud/profitbricks/profitbricks_nic.py index 49941241c6..5d98e05e4b 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_nic.py +++ 
b/plugins/modules/cloud/profitbricks/profitbricks_nic.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume.py b/plugins/modules/cloud/profitbricks/profitbricks_volume.py index 5fff01d3d7..be1c18b55a 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_volume.py +++ b/plugins/modules/cloud/profitbricks/profitbricks_volume.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py b/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py index 72f03e674a..1fb3f3c0e2 100644 --- a/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py +++ b/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/pubnub/pubnub_blocks.py b/plugins/modules/cloud/pubnub/pubnub_blocks.py index c8de702597..d3b76337a3 100644 --- a/plugins/modules/cloud/pubnub/pubnub_blocks.py +++ b/plugins/modules/cloud/pubnub/pubnub_blocks.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # PubNub Real-time Cloud-Hosted Push API and Push Notification Client # Frameworks diff --git a/plugins/modules/cloud/rackspace/rax.py b/plugins/modules/cloud/rackspace/rax.py index cbaa0a57d2..8c452d9d72 100644 --- a/plugins/modules/cloud/rackspace/rax.py +++ b/plugins/modules/cloud/rackspace/rax.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_cbs.py b/plugins/modules/cloud/rackspace/rax_cbs.py index a681feff84..b543f5979a 100644 --- a/plugins/modules/cloud/rackspace/rax_cbs.py +++ b/plugins/modules/cloud/rackspace/rax_cbs.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_cbs_attachments.py b/plugins/modules/cloud/rackspace/rax_cbs_attachments.py index 71d01620d4..fd21081475 100644 --- a/plugins/modules/cloud/rackspace/rax_cbs_attachments.py +++ b/plugins/modules/cloud/rackspace/rax_cbs_attachments.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_cdb.py b/plugins/modules/cloud/rackspace/rax_cdb.py index 5b9996cd21..04bbe71cda 100644 --- a/plugins/modules/cloud/rackspace/rax_cdb.py +++ b/plugins/modules/cloud/rackspace/rax_cdb.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_cdb_database.py b/plugins/modules/cloud/rackspace/rax_cdb_database.py index 6d3435e806..86cd1aac40 100644 --- a/plugins/modules/cloud/rackspace/rax_cdb_database.py +++ b/plugins/modules/cloud/rackspace/rax_cdb_database.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_cdb_user.py b/plugins/modules/cloud/rackspace/rax_cdb_user.py index 01c10950c4..674f17c070 100644 --- a/plugins/modules/cloud/rackspace/rax_cdb_user.py +++ 
b/plugins/modules/cloud/rackspace/rax_cdb_user.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_clb.py b/plugins/modules/cloud/rackspace/rax_clb.py index 5ff1e3140f..9160133e21 100644 --- a/plugins/modules/cloud/rackspace/rax_clb.py +++ b/plugins/modules/cloud/rackspace/rax_clb.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_clb_nodes.py b/plugins/modules/cloud/rackspace/rax_clb_nodes.py index c066ab66db..4adcc66fb7 100644 --- a/plugins/modules/cloud/rackspace/rax_clb_nodes.py +++ b/plugins/modules/cloud/rackspace/rax_clb_nodes.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_clb_ssl.py b/plugins/modules/cloud/rackspace/rax_clb_ssl.py index 114128e8b1..adf375124d 100644 --- a/plugins/modules/cloud/rackspace/rax_clb_ssl.py +++ b/plugins/modules/cloud/rackspace/rax_clb_ssl.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_dns.py b/plugins/modules/cloud/rackspace/rax_dns.py index e9b7e2be95..915e13a9a6 100644 --- a/plugins/modules/cloud/rackspace/rax_dns.py +++ b/plugins/modules/cloud/rackspace/rax_dns.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_dns_record.py 
b/plugins/modules/cloud/rackspace/rax_dns_record.py index 0b60120a75..1a6986dea7 100644 --- a/plugins/modules/cloud/rackspace/rax_dns_record.py +++ b/plugins/modules/cloud/rackspace/rax_dns_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_facts.py b/plugins/modules/cloud/rackspace/rax_facts.py index f9fd89556f..0288a5e35b 100644 --- a/plugins/modules/cloud/rackspace/rax_facts.py +++ b/plugins/modules/cloud/rackspace/rax_facts.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_files.py b/plugins/modules/cloud/rackspace/rax_files.py index 7080cc2f84..1e1f82c85d 100644 --- a/plugins/modules/cloud/rackspace/rax_files.py +++ b/plugins/modules/cloud/rackspace/rax_files.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2013, Paul Durivage # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_files_objects.py b/plugins/modules/cloud/rackspace/rax_files_objects.py index dc44555417..3269fe0512 100644 --- a/plugins/modules/cloud/rackspace/rax_files_objects.py +++ b/plugins/modules/cloud/rackspace/rax_files_objects.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2013, Paul Durivage # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_identity.py b/plugins/modules/cloud/rackspace/rax_identity.py index 330c510d09..2021052faa 100644 --- a/plugins/modules/cloud/rackspace/rax_identity.py +++ b/plugins/modules/cloud/rackspace/rax_identity.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # 
Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_keypair.py b/plugins/modules/cloud/rackspace/rax_keypair.py index 0314883f60..90b0183e50 100644 --- a/plugins/modules/cloud/rackspace/rax_keypair.py +++ b/plugins/modules/cloud/rackspace/rax_keypair.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_meta.py b/plugins/modules/cloud/rackspace/rax_meta.py index b7d172d93f..3504181f19 100644 --- a/plugins/modules/cloud/rackspace/rax_meta.py +++ b/plugins/modules/cloud/rackspace/rax_meta.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_mon_alarm.py b/plugins/modules/cloud/rackspace/rax_mon_alarm.py index 8de26609db..7e99db3fa8 100644 --- a/plugins/modules/cloud/rackspace/rax_mon_alarm.py +++ b/plugins/modules/cloud/rackspace/rax_mon_alarm.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_mon_check.py b/plugins/modules/cloud/rackspace/rax_mon_check.py index e04dfc7444..17a3932f6e 100644 --- a/plugins/modules/cloud/rackspace/rax_mon_check.py +++ b/plugins/modules/cloud/rackspace/rax_mon_check.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_mon_entity.py b/plugins/modules/cloud/rackspace/rax_mon_entity.py index 69f49cd07b..2f8cdeefd8 
100644 --- a/plugins/modules/cloud/rackspace/rax_mon_entity.py +++ b/plugins/modules/cloud/rackspace/rax_mon_entity.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification.py b/plugins/modules/cloud/rackspace/rax_mon_notification.py index 416d03bae8..fb645c3036 100644 --- a/plugins/modules/cloud/rackspace/rax_mon_notification.py +++ b/plugins/modules/cloud/rackspace/rax_mon_notification.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py index d5294cd509..25e506829f 100644 --- a/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py +++ b/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_network.py b/plugins/modules/cloud/rackspace/rax_network.py index 27a793b5a1..146c08c8e1 100644 --- a/plugins/modules/cloud/rackspace/rax_network.py +++ b/plugins/modules/cloud/rackspace/rax_network.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_queue.py b/plugins/modules/cloud/rackspace/rax_queue.py index dca006da77..46c942c70d 100644 --- a/plugins/modules/cloud/rackspace/rax_queue.py +++ b/plugins/modules/cloud/rackspace/rax_queue.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # 
Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_scaling_group.py b/plugins/modules/cloud/rackspace/rax_scaling_group.py index 2f8fa0a2cc..4080e4c6a4 100644 --- a/plugins/modules/cloud/rackspace/rax_scaling_group.py +++ b/plugins/modules/cloud/rackspace/rax_scaling_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/rackspace/rax_scaling_policy.py b/plugins/modules/cloud/rackspace/rax_scaling_policy.py index 384825f0ee..be46bd62a6 100644 --- a/plugins/modules/cloud/rackspace/rax_scaling_policy.py +++ b/plugins/modules/cloud/rackspace/rax_scaling_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/cloud/scaleway/scaleway_compute.py b/plugins/modules/cloud/scaleway/scaleway_compute.py index 421157a425..c5d5af9177 100644 --- a/plugins/modules/cloud/scaleway/scaleway_compute.py +++ b/plugins/modules/cloud/scaleway/scaleway_compute.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway Compute management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_database_backup.py b/plugins/modules/cloud/scaleway/scaleway_database_backup.py index 578032458d..35f35f820a 100644 --- a/plugins/modules/cloud/scaleway/scaleway_database_backup.py +++ b/plugins/modules/cloud/scaleway/scaleway_database_backup.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway database backups management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_ip.py b/plugins/modules/cloud/scaleway/scaleway_ip.py index 26da122e31..135da120cf 100644 --- 
a/plugins/modules/cloud/scaleway/scaleway_ip.py +++ b/plugins/modules/cloud/scaleway/scaleway_ip.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway IP management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_lb.py b/plugins/modules/cloud/scaleway/scaleway_lb.py index f19c0a3c43..9761500ab9 100644 --- a/plugins/modules/cloud/scaleway/scaleway_lb.py +++ b/plugins/modules/cloud/scaleway/scaleway_lb.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway Load-balancer management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group.py b/plugins/modules/cloud/scaleway/scaleway_security_group.py index 9303e06e00..f9faee6104 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group.py +++ b/plugins/modules/cloud/scaleway/scaleway_security_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway Security Group management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py index 118883328a..9f95921202 100644 --- a/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py +++ b/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway Security Group Rule management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_sshkey.py b/plugins/modules/cloud/scaleway/scaleway_sshkey.py index 08555b2316..bc15cefb20 100644 --- a/plugins/modules/cloud/scaleway/scaleway_sshkey.py +++ b/plugins/modules/cloud/scaleway/scaleway_sshkey.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway SSH keys management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_user_data.py b/plugins/modules/cloud/scaleway/scaleway_user_data.py index 4a38e76d72..d51d3e174d 100644 --- a/plugins/modules/cloud/scaleway/scaleway_user_data.py +++ 
b/plugins/modules/cloud/scaleway/scaleway_user_data.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway user data management module # diff --git a/plugins/modules/cloud/scaleway/scaleway_volume.py b/plugins/modules/cloud/scaleway/scaleway_volume.py index e879d3c95c..a49e23c17d 100644 --- a/plugins/modules/cloud/scaleway/scaleway_volume.py +++ b/plugins/modules/cloud/scaleway/scaleway_volume.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Scaleway volumes management module # diff --git a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py index 5ed8028e37..da8f010229 100644 --- a/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py +++ b/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) diff --git a/plugins/modules/cloud/univention/udm_dns_record.py b/plugins/modules/cloud/univention/udm_dns_record.py index 90654bee3c..0c56970dd3 100644 --- a/plugins/modules/cloud/univention/udm_dns_record.py +++ b/plugins/modules/cloud/univention/udm_dns_record.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi diff --git a/plugins/modules/cloud/univention/udm_dns_zone.py b/plugins/modules/cloud/univention/udm_dns_zone.py index 3e0cae523d..f1cea87e4f 100644 --- a/plugins/modules/cloud/univention/udm_dns_zone.py +++ b/plugins/modules/cloud/univention/udm_dns_zone.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi diff --git a/plugins/modules/cloud/univention/udm_group.py b/plugins/modules/cloud/univention/udm_group.py 
index d2cf2aea80..d20187c628 100644 --- a/plugins/modules/cloud/univention/udm_group.py +++ b/plugins/modules/cloud/univention/udm_group.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi diff --git a/plugins/modules/cloud/univention/udm_share.py b/plugins/modules/cloud/univention/udm_share.py index 3e8fb20792..fb86d83666 100644 --- a/plugins/modules/cloud/univention/udm_share.py +++ b/plugins/modules/cloud/univention/udm_share.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi diff --git a/plugins/modules/cloud/univention/udm_user.py b/plugins/modules/cloud/univention/udm_user.py index efbd95f426..b0d6138fda 100644 --- a/plugins/modules/cloud/univention/udm_user.py +++ b/plugins/modules/cloud/univention/udm_user.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Adfinis SyGroup AG # Tobias Rueetschi diff --git a/plugins/modules/clustering/consul/consul.py b/plugins/modules/clustering/consul/consul.py index cd695c4754..f85e1cc729 100644 --- a/plugins/modules/clustering/consul/consul.py +++ b/plugins/modules/clustering/consul/consul.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2015, Steve Gargan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/clustering/consul/consul_acl.py b/plugins/modules/clustering/consul/consul_acl.py index 5a37ca0eb9..1e01e58af5 100644 --- a/plugins/modules/clustering/consul/consul_acl.py +++ b/plugins/modules/clustering/consul/consul_acl.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2015, Steve Gargan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/clustering/consul/consul_kv.py 
b/plugins/modules/clustering/consul/consul_kv.py index d392228146..f7b33b856e 100644 --- a/plugins/modules/clustering/consul/consul_kv.py +++ b/plugins/modules/clustering/consul/consul_kv.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2015, Steve Gargan # (c) 2018 Genome Research Ltd. diff --git a/plugins/modules/clustering/etcd3.py b/plugins/modules/clustering/etcd3.py index 28c5915693..6a09513364 100644 --- a/plugins/modules/clustering/etcd3.py +++ b/plugins/modules/clustering/etcd3.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2018, Jean-Philippe Evrard # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/clustering/znode.py b/plugins/modules/clustering/znode.py index 8456a187ee..d55a502b15 100644 --- a/plugins/modules/clustering/znode.py +++ b/plugins/modules/clustering/znode.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright 2015 WP Engine, Inc. All rights reserved. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/database/aerospike/aerospike_migrations.py b/plugins/modules/database/aerospike/aerospike_migrations.py index 33f27cd381..27b979ad1f 100644 --- a/plugins/modules/database/aerospike/aerospike_migrations.py +++ b/plugins/modules/database/aerospike/aerospike_migrations.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- """short_description: Check or wait for migrations between nodes""" # Copyright: (c) 2018, Albert Autin diff --git a/plugins/modules/database/influxdb/influxdb_database.py b/plugins/modules/database/influxdb/influxdb_database.py index 7b798c3679..6601b30124 100644 --- a/plugins/modules/database/influxdb/influxdb_database.py +++ b/plugins/modules/database/influxdb/influxdb_database.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Kamil Szczygiel # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/database/influxdb/influxdb_retention_policy.py b/plugins/modules/database/influxdb/influxdb_retention_policy.py index a145f9e32b..6cb45229cd 100644 --- a/plugins/modules/database/influxdb/influxdb_retention_policy.py +++ b/plugins/modules/database/influxdb/influxdb_retention_policy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Kamil Szczygiel # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/database/influxdb/influxdb_user.py b/plugins/modules/database/influxdb/influxdb_user.py index 8746445335..76524d8613 100644 --- a/plugins/modules/database/influxdb/influxdb_user.py +++ b/plugins/modules/database/influxdb/influxdb_user.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Vitaliy Zhhuta # insipred by Kamil Szczygiel influxdb_database module diff --git 
a/plugins/modules/files/sapcar_extract.py b/plugins/modules/files/sapcar_extract.py index b6a76a1629..8463703c1e 100644 --- a/plugins/modules/files/sapcar_extract.py +++ b/plugins/modules/files/sapcar_extract.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2021, Rainer Leber # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/files/xattr.py b/plugins/modules/files/xattr.py index 8578ed4c4e..f862dd720b 100644 --- a/plugins/modules/files/xattr.py +++ b/plugins/modules/files/xattr.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/monitoring/sensu/sensu_client.py b/plugins/modules/monitoring/sensu/sensu_client.py index ee67a6e75b..886c398e09 100644 --- a/plugins/modules/monitoring/sensu/sensu_client.py +++ b/plugins/modules/monitoring/sensu/sensu_client.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2017, Red Hat Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/monitoring/sensu/sensu_handler.py b/plugins/modules/monitoring/sensu/sensu_handler.py index 0a56831ae0..6511479899 100644 --- a/plugins/modules/monitoring/sensu/sensu_handler.py +++ b/plugins/modules/monitoring/sensu/sensu_handler.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2017, Red Hat Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/dnsimple.py b/plugins/modules/net_tools/dnsimple.py index a575d944cb..188f9fd64a 100644 --- a/plugins/modules/net_tools/dnsimple.py +++ b/plugins/modules/net_tools/dnsimple.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: Ansible Project # diff --git a/plugins/modules/net_tools/ip_netns.py b/plugins/modules/net_tools/ip_netns.py index 9854709e82..700f0a17bd 100644 --- a/plugins/modules/net_tools/ip_netns.py +++ b/plugins/modules/net_tools/ip_netns.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2017, Arie Bregman # # This file is a module for Ansible that interacts with Network Manager diff --git a/plugins/modules/net_tools/ipinfoio_facts.py b/plugins/modules/net_tools/ipinfoio_facts.py index f4186cdc65..ee1d49f3ac 100644 --- a/plugins/modules/net_tools/ipinfoio_facts.py +++ b/plugins/modules/net_tools/ipinfoio_facts.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# -*- coding: UTF-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2016, Aleksei Kostiuk # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/ipwcli_dns.py b/plugins/modules/net_tools/ipwcli_dns.py index 284f3ad810..8a6122edff 100644 --- a/plugins/modules/net_tools/ipwcli_dns.py +++ b/plugins/modules/net_tools/ipwcli_dns.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2020, Christian Wollinger # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/lldp.py b/plugins/modules/net_tools/lldp.py index ae86db4088..1b8fa9eb06 100644 --- a/plugins/modules/net_tools/lldp.py +++ b/plugins/modules/net_tools/lldp.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_a_record.py b/plugins/modules/net_tools/nios/nios_a_record.py index b4adfe0103..cc2e70b920 100644 --- a/plugins/modules/net_tools/nios/nios_a_record.py +++ b/plugins/modules/net_tools/nios/nios_a_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_aaaa_record.py b/plugins/modules/net_tools/nios/nios_aaaa_record.py index 9b22f86948..b6e5ff5fd6 100644 --- a/plugins/modules/net_tools/nios/nios_aaaa_record.py +++ b/plugins/modules/net_tools/nios/nios_aaaa_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_cname_record.py b/plugins/modules/net_tools/nios/nios_cname_record.py index 099cb02572..c752713663 100644 --- a/plugins/modules/net_tools/nios/nios_cname_record.py +++ b/plugins/modules/net_tools/nios/nios_cname_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_dns_view.py b/plugins/modules/net_tools/nios/nios_dns_view.py index 46c56fc7bb..a3bd9db938 100644 --- a/plugins/modules/net_tools/nios/nios_dns_view.py +++ b/plugins/modules/net_tools/nios/nios_dns_view.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_fixed_address.py b/plugins/modules/net_tools/nios/nios_fixed_address.py index bc2969bbe5..26e3ed7d68 100644 --- a/plugins/modules/net_tools/nios/nios_fixed_address.py +++ b/plugins/modules/net_tools/nios/nios_fixed_address.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_host_record.py b/plugins/modules/net_tools/nios/nios_host_record.py index 6fed663657..825ff31765 100644 --- a/plugins/modules/net_tools/nios/nios_host_record.py +++ b/plugins/modules/net_tools/nios/nios_host_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_member.py b/plugins/modules/net_tools/nios/nios_member.py index 186933864a..ff9bd5dfa5 100644 --- a/plugins/modules/net_tools/nios/nios_member.py +++ b/plugins/modules/net_tools/nios/nios_member.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_mx_record.py b/plugins/modules/net_tools/nios/nios_mx_record.py index 6e54ff2bda..a34a1fdc78 100644 --- a/plugins/modules/net_tools/nios/nios_mx_record.py +++ b/plugins/modules/net_tools/nios/nios_mx_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_naptr_record.py b/plugins/modules/net_tools/nios/nios_naptr_record.py index f943d3d6d9..e2e5e164d7 100644 --- a/plugins/modules/net_tools/nios/nios_naptr_record.py +++ b/plugins/modules/net_tools/nios/nios_naptr_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_network.py b/plugins/modules/net_tools/nios/nios_network.py index 6a7decb894..458e45dd8d 100644 --- a/plugins/modules/net_tools/nios/nios_network.py +++ b/plugins/modules/net_tools/nios/nios_network.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_network_view.py b/plugins/modules/net_tools/nios/nios_network_view.py index a27f8519a0..f4a18bcd26 100644 --- a/plugins/modules/net_tools/nios/nios_network_view.py +++ b/plugins/modules/net_tools/nios/nios_network_view.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_ptr_record.py b/plugins/modules/net_tools/nios/nios_ptr_record.py index 22550f129a..a0c3e63270 100644 --- a/plugins/modules/net_tools/nios/nios_ptr_record.py +++ b/plugins/modules/net_tools/nios/nios_ptr_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_srv_record.py b/plugins/modules/net_tools/nios/nios_srv_record.py index 574a5fcf8b..9c0247d49d 100644 --- a/plugins/modules/net_tools/nios/nios_srv_record.py +++ b/plugins/modules/net_tools/nios/nios_srv_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_txt_record.py b/plugins/modules/net_tools/nios/nios_txt_record.py index b3267af41f..6cb1d64d35 100644 --- a/plugins/modules/net_tools/nios/nios_txt_record.py +++ b/plugins/modules/net_tools/nios/nios_txt_record.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nios/nios_zone.py b/plugins/modules/net_tools/nios/nios_zone.py index f97098351b..463c68c8ac 100644 --- a/plugins/modules/net_tools/nios/nios_zone.py +++ b/plugins/modules/net_tools/nios/nios_zone.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Red Hat, Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/net_tools/nsupdate.py b/plugins/modules/net_tools/nsupdate.py index 520d12e803..fc0d5e1c46 100644 --- a/plugins/modules/net_tools/nsupdate.py +++ b/plugins/modules/net_tools/nsupdate.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2016, Marcin Skarbek # (c) 2016, Andreas Olsson diff --git a/plugins/modules/notification/syslogger.py b/plugins/modules/notification/syslogger.py index 226126f5a9..7627f35985 100644 --- a/plugins/modules/notification/syslogger.py +++ b/plugins/modules/notification/syslogger.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Tim Rightnour # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/packaging/language/pip_package_info.py b/plugins/modules/packaging/language/pip_package_info.py index cdcc9f51cc..25825cefb1 100644 --- a/plugins/modules/packaging/language/pip_package_info.py +++ b/plugins/modules/packaging/language/pip_package_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/packaging/os/redhat_subscription.py b/plugins/modules/packaging/os/redhat_subscription.py index f3e5400900..7bb540b3f1 100644 --- a/plugins/modules/packaging/os/redhat_subscription.py +++ b/plugins/modules/packaging/os/redhat_subscription.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # James Laska (jlaska@redhat.com) # diff --git a/plugins/modules/packaging/os/rhn_channel.py b/plugins/modules/packaging/os/rhn_channel.py index f1954037fa..e3a1ae3098 100644 --- a/plugins/modules/packaging/os/rhn_channel.py +++ b/plugins/modules/packaging/os/rhn_channel.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) Vincent Van de 
Kussen # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/packaging/os/rhsm_release.py b/plugins/modules/packaging/os/rhsm_release.py index a4d8f71197..4b76cee274 100644 --- a/plugins/modules/packaging/os/rhsm_release.py +++ b/plugins/modules/packaging/os/rhsm_release.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2018, Sean Myers # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/packaging/os/rhsm_repository.py b/plugins/modules/packaging/os/rhsm_repository.py index 7317be6633..b103ea621a 100644 --- a/plugins/modules/packaging/os/rhsm_repository.py +++ b/plugins/modules/packaging/os/rhsm_repository.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/packaging/os/rpm_ostree_pkg.py b/plugins/modules/packaging/os/rpm_ostree_pkg.py index 7c430732e7..38e2486ddc 100644 --- a/plugins/modules/packaging/os/rpm_ostree_pkg.py +++ b/plugins/modules/packaging/os/rpm_ostree_pkg.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Dusty Mabe # Copyright: (c) 2018, Ansible Project # Copyright: (c) 2021, Abhijeet Kasurde diff --git a/plugins/modules/packaging/os/swupd.py b/plugins/modules/packaging/os/swupd.py index 4dac01be64..6ededcad02 100644 --- a/plugins/modules/packaging/os/swupd.py +++ b/plugins/modules/packaging/os/swupd.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2017, Alberto Murillo # diff --git a/plugins/modules/packaging/os/zypper_repository.py b/plugins/modules/packaging/os/zypper_repository.py index 608675528d..38aeab618e 100644 --- a/plugins/modules/packaging/os/zypper_repository.py +++ b/plugins/modules/packaging/os/zypper_repository.py @@ -1,5 +1,5 @@ 
#!/usr/bin/python -# encoding: utf-8 +# -*- coding: utf-8 -*- # (c) 2013, Matthias Vogelgesang # (c) 2014, Justin Lecher diff --git a/plugins/modules/remote_management/lxca/lxca_cmms.py b/plugins/modules/remote_management/lxca/lxca_cmms.py index 776ee49fd4..b3bb6c2a8c 100644 --- a/plugins/modules/remote_management/lxca/lxca_cmms.py +++ b/plugins/modules/remote_management/lxca/lxca_cmms.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or # https://www.gnu.org/licenses/gpl-3.0.txt) # diff --git a/plugins/modules/remote_management/lxca/lxca_nodes.py b/plugins/modules/remote_management/lxca/lxca_nodes.py index f788229d3d..62b8e334d8 100644 --- a/plugins/modules/remote_management/lxca/lxca_nodes.py +++ b/plugins/modules/remote_management/lxca/lxca_nodes.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or # https://www.gnu.org/licenses/gpl-3.0.txt) # diff --git a/plugins/modules/remote_management/manageiq/manageiq_group.py b/plugins/modules/remote_management/manageiq/manageiq_group.py index 2050eb63c8..2452e101d1 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_group.py +++ b/plugins/modules/remote_management/manageiq/manageiq_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn ) # diff --git a/plugins/modules/remote_management/manageiq/manageiq_tenant.py b/plugins/modules/remote_management/manageiq/manageiq_tenant.py index 3ec174cfa0..58c2e1ed71 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_tenant.py +++ b/plugins/modules/remote_management/manageiq/manageiq_tenant.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn ) # diff --git a/plugins/modules/remote_management/manageiq/manageiq_user.py b/plugins/modules/remote_management/manageiq/manageiq_user.py index 
8905dde2e6..f3dc8103f7 100644 --- a/plugins/modules/remote_management/manageiq/manageiq_user.py +++ b/plugins/modules/remote_management/manageiq/manageiq_user.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2017, Daniel Korn # diff --git a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py index 04d4fc0c7e..3e5b96376e 100644 --- a/plugins/modules/remote_management/oneview/oneview_datacenter_info.py +++ b/plugins/modules/remote_management/oneview/oneview_datacenter_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py index a9bbb8e799..249fea4874 100644 --- a/plugins/modules/remote_management/oneview/oneview_enclosure_info.py +++ b/plugins/modules/remote_management/oneview/oneview_enclosure_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_ethernet_network.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network.py index c09f09c8f6..99b5d0fed9 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network.py +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git 
a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py index 63a9e1efae..1f25364d3a 100644 --- a/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network.py b/plugins/modules/remote_management/oneview/oneview_fc_network.py index 009a54a89b..59984ee8b6 100644 --- a/plugins/modules/remote_management/oneview/oneview_fc_network.py +++ b/plugins/modules/remote_management/oneview/oneview_fc_network.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py index 86430402fe..4707f39f2d 100644 --- a/plugins/modules/remote_management/oneview/oneview_fc_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fc_network_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network.py index 30e05677f8..ef24f8fc8e 100644 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network.py +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network.py @@ 
-1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py index b0ede13820..6cb3501ddf 100644 --- a/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py +++ b/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py index 78735dc5e7..e833f9e092 100644 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py index e8670a33a8..7a0f0dc987 100644 --- a/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py +++ b/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_network_set.py b/plugins/modules/remote_management/oneview/oneview_network_set.py index 14efdabe70..3a2632b765 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set.py +++ b/plugins/modules/remote_management/oneview/oneview_network_set.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/plugins/modules/remote_management/oneview/oneview_network_set_info.py index 5cb7463b4c..595d003c56 100644 --- a/plugins/modules/remote_management/oneview/oneview_network_set_info.py +++ b/plugins/modules/remote_management/oneview/oneview_network_set_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager.py b/plugins/modules/remote_management/oneview/oneview_san_manager.py index 858072826b..20870a31d5 100644 --- a/plugins/modules/remote_management/oneview/oneview_san_manager.py +++ b/plugins/modules/remote_management/oneview/oneview_san_manager.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py index c80ef474cc..46ed001827 100644 --- a/plugins/modules/remote_management/oneview/oneview_san_manager_info.py +++ 
b/plugins/modules/remote_management/oneview/oneview_san_manager_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/source_control/github/github_issue.py b/plugins/modules/source_control/github/github_issue.py index 88fe8f7b51..4add29f341 100644 --- a/plugins/modules/source_control/github/github_issue.py +++ b/plugins/modules/source_control/github/github_issue.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017-18, Abhijeet Kasurde # diff --git a/plugins/modules/source_control/github/github_key.py b/plugins/modules/source_control/github/github_key.py index 616636edea..2afbe29aa1 100644 --- a/plugins/modules/source_control/github/github_key.py +++ b/plugins/modules/source_control/github/github_key.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/source_control/github/github_webhook.py b/plugins/modules/source_control/github/github_webhook.py index b1f0cb7a2b..8703863fa9 100644 --- a/plugins/modules/source_control/github/github_webhook.py +++ b/plugins/modules/source_control/github/github_webhook.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/source_control/github/github_webhook_info.py b/plugins/modules/source_control/github/github_webhook_info.py index 3936cbe37b..98a7516e75 100644 --- a/plugins/modules/source_control/github/github_webhook_info.py +++ b/plugins/modules/source_control/github/github_webhook_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) 2018, 
Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/storage/emc/emc_vnx_sg_member.py b/plugins/modules/storage/emc/emc_vnx_sg_member.py index 2698f5327a..20977687fc 100644 --- a/plugins/modules/storage/emc/emc_vnx_sg_member.py +++ b/plugins/modules/storage/emc/emc_vnx_sg_member.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright (c) 2018, Luca 'remix_tj' Lorenzetto # diff --git a/plugins/modules/storage/hpe3par/ss_3par_cpg.py b/plugins/modules/storage/hpe3par/ss_3par_cpg.py index 04604c0966..be4a6a02a2 100644 --- a/plugins/modules/storage/hpe3par/ss_3par_cpg.py +++ b/plugins/modules/storage/hpe3par/ss_3par_cpg.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/kernel_blacklist.py b/plugins/modules/system/kernel_blacklist.py index ff6f9c227e..d8cb4a9e9d 100644 --- a/plugins/modules/system/kernel_blacklist.py +++ b/plugins/modules/system/kernel_blacklist.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# encoding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright: (c) 2013, Matthias Vogelgesang # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/lbu.py b/plugins/modules/system/lbu.py index 6f850791b1..fcc3a0d940 100644 --- a/plugins/modules/system/lbu.py +++ b/plugins/modules/system/lbu.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2019, Kaarle Ritvanen # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/pids.py b/plugins/modules/system/pids.py index e7312465f1..5c7b82a794 100644 --- a/plugins/modules/system/pids.py +++ b/plugins/modules/system/pids.py @@ -1,4 +1,5 @@ #!/usr/bin/python 
+# -*- coding: utf-8 -*- # Copyright: (c) 2019, Saranya Sridharan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) diff --git a/plugins/modules/system/python_requirements_info.py b/plugins/modules/system/python_requirements_info.py index 08a9ddd64e..081826f4e6 100644 --- a/plugins/modules/system/python_requirements_info.py +++ b/plugins/modules/system/python_requirements_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/selogin.py b/plugins/modules/system/selogin.py index 53b077f954..46daf1a76a 100644 --- a/plugins/modules/system/selogin.py +++ b/plugins/modules/system/selogin.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # (c) 2017, Petr Lautrbach # Based on seport.py module (c) 2014, Dan Keder diff --git a/plugins/modules/system/syspatch.py b/plugins/modules/system/syspatch.py index 6fcfaea0f5..42cb17b8a3 100644 --- a/plugins/modules/system/syspatch.py +++ b/plugins/modules/system/syspatch.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2019-2020, Andrew Klaus # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/sysupgrade.py b/plugins/modules/system/sysupgrade.py index a1956129df..333d7765d2 100644 --- a/plugins/modules/system/sysupgrade.py +++ b/plugins/modules/system/sysupgrade.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2020, Andrew Klaus # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/system/vdo.py b/plugins/modules/system/vdo.py index a27745510a..0b4fca306d 100644 --- a/plugins/modules/system/vdo.py +++ b/plugins/modules/system/vdo.py @@ 
-1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Red Hat, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/jenkins_build.py b/plugins/modules/web_infrastructure/jenkins_build.py index 43dc667ace..0141185342 100644 --- a/plugins/modules/web_infrastructure/jenkins_build.py +++ b/plugins/modules/web_infrastructure/jenkins_build.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/jenkins_job.py b/plugins/modules/web_infrastructure/jenkins_job.py index 9993a996e0..88a8766133 100644 --- a/plugins/modules/web_infrastructure/jenkins_job.py +++ b/plugins/modules/web_infrastructure/jenkins_job.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/jenkins_job_info.py b/plugins/modules/web_infrastructure/jenkins_job_info.py index fc079857a6..503fbbf159 100644 --- a/plugins/modules/web_infrastructure/jenkins_job_info.py +++ b/plugins/modules/web_infrastructure/jenkins_job_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # Copyright: (c) Ansible Project # diff --git a/plugins/modules/web_infrastructure/jenkins_plugin.py b/plugins/modules/web_infrastructure/jenkins_plugin.py index a280b50aa6..6adb348156 100644 --- a/plugins/modules/web_infrastructure/jenkins_plugin.py +++ b/plugins/modules/web_infrastructure/jenkins_plugin.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# encoding: utf-8 +# -*- coding: utf-8 -*- # (c) 2016, Jiri Tyr # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git 
a/plugins/modules/web_infrastructure/jenkins_script.py b/plugins/modules/web_infrastructure/jenkins_script.py index 6d3b3d2253..3ad51a9703 100644 --- a/plugins/modules/web_infrastructure/jenkins_script.py +++ b/plugins/modules/web_infrastructure/jenkins_script.py @@ -1,6 +1,5 @@ #!/usr/bin/python - -# encoding: utf-8 +# -*- coding: utf-8 -*- # (c) 2016, James Hogarth # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py index 70a0a78fd8..e2fa6f5384 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py index d5660ab73c..ca291ba88b 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py index 81dffe223b..f05a1e6809 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Stephan Schwarz # GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py index 9aa16d4aca..82eb42f620 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Stephan Schwarz # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py b/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py index 76d463ccba..4554384d2d 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py index a8b3cc1f2b..a5c2d1fd36 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Juergen Wiebe # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py index 700799ab59..fb449939fa 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py +++ 
b/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Juergen Wiebe # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py index 0dd460509a..e519d3cf33 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Stephan Schwarz # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py index 6d606abf89..780bd68c92 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Sebastian Schenzel # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py index a738bfab6b..9d2bc7c6db 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py 
b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py index 62a832d7c6..b68bde633a 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py index 99d56030be..4c0abb0608 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py index 99174a89b1..eda9f6ee14 100644 --- a/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py +++ b/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) From 87ba15fa4589fab466231f68d5e0122ca8d312d3 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 8 Aug 2021 10:49:08 +0200 Subject: [PATCH 0496/3093] Inform contributors on changelog fragments in CONTRIBUTING.md (#3167) * Inform contributors on changelog fragments. * Mention docs-only PRs as well. 
--- CONTRIBUTING.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ba30ed1e02..a40dbd59eb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -26,6 +26,7 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which * All commits of a pull request branch will be squashed into one commit at last. That does not mean you must have only one commit on your pull request, though! * Please try not to force-push if it is not needed, so reviewers and other users looking at your pull request later can see the pull request commit history. * Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the respository checkout. +* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) ) You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst). From 85bcef3f5ac418d3244f144f3172e174ab3ba609 Mon Sep 17 00:00:00 2001 From: Sebastian Date: Sun, 8 Aug 2021 10:50:09 +0200 Subject: [PATCH 0497/3093] contributing: make expected behavior clearer (#3168) * contributing: make expected behavior clearer reformulate the preference of not having squashed commits clearer, shorter and more precise. 
https://github.com/ansible-collections/community.general/pull/3164#discussion_r684644504 * Update CONTRIBUTING.md Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- CONTRIBUTING.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a40dbd59eb..9df277591c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,8 +23,7 @@ Note that reviewing does not only mean code review, but also offering comments o Also, consider taking up a valuable, reviewed, but abandoned pull request which you could politely ask the original authors to complete yourself. * Try committing your changes with an informative but short commit message. -* All commits of a pull request branch will be squashed into one commit at last. That does not mean you must have only one commit on your pull request, though! -* Please try not to force-push if it is not needed, so reviewers and other users looking at your pull request later can see the pull request commit history. +* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge. * Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the respository checkout. * Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. 
Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) ) From 2831bc45f5f579db64e6252f9c8c88ac48b1cb91 Mon Sep 17 00:00:00 2001 From: quidame Date: Sun, 8 Aug 2021 18:34:34 +0200 Subject: [PATCH 0498/3093] ini_file: fix empty-value vs. no-value inconsistency (#3074) * fix empty-value vs. no-value inconsistency * rename changelog fragment * tests: omit value where there should be no value * add integration tests --- ...ni_file-3031-empty-value-inconsistency.yml | 4 ++++ plugins/modules/files/ini_file.py | 8 +++---- .../targets/ini_file/tasks/main.yml | 23 ++++++++++++++----- 3 files changed, 25 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/3074-ini_file-3031-empty-value-inconsistency.yml diff --git a/changelogs/fragments/3074-ini_file-3031-empty-value-inconsistency.yml b/changelogs/fragments/3074-ini_file-3031-empty-value-inconsistency.yml new file mode 100644 index 0000000000..7bfe958a12 --- /dev/null +++ b/changelogs/fragments/3074-ini_file-3031-empty-value-inconsistency.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - ini_file - fix inconsistency between empty value and no value + (https://github.com/ansible-collections/community.general/issues/3031). 
diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py index 7d6a988e85..a9c2e290b0 100644 --- a/plugins/modules/files/ini_file.py +++ b/plugins/modules/files/ini_file.py @@ -205,11 +205,11 @@ def do_ini(module, filename, section=None, option=None, value=None, for i in range(index, 0, -1): # search backwards for previous non-blank or non-comment line if not non_blank_non_comment_pattern.match(ini_lines[i - 1]): - if option and value: + if option and value is not None: ini_lines.insert(i, assignment_format % (option, value)) msg = 'option added' changed = True - elif option and not value and allow_no_value: + elif option and value is None and allow_no_value: ini_lines.insert(i, '%s\n' % option) msg = 'option added' changed = True @@ -225,7 +225,7 @@ def do_ini(module, filename, section=None, option=None, value=None, if state == 'present': # change the existing option line if match_opt(option, line): - if not value and allow_no_value: + if value is None and allow_no_value: newline = u'%s\n' % option else: newline = assignment_format % (option, value) @@ -324,7 +324,7 @@ def main(): create = module.params['create'] if state == 'present' and not allow_no_value and value is None: - module.fail_json("Parameter 'value' must not be empty if state=present and allow_no_value=False") + module.fail_json("Parameter 'value' must be defined if state=present and allow_no_value=False") (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value) diff --git a/tests/integration/targets/ini_file/tasks/main.yml b/tests/integration/targets/ini_file/tasks/main.yml index 210dafe2ca..96c6771b9e 100644 --- a/tests/integration/targets/ini_file/tasks/main.yml +++ b/tests/integration/targets/ini_file/tasks/main.yml @@ -215,10 +215,10 @@ path: "{{ output_file }}" section: mysqld option: "{{ item.o }}" - value: "{{ item.v }}" + value: "{{ item.v | d(omit) }}" allow_no_value: yes with_items: - 
- { o: "skip-name-resolve", v: null } + - { o: "skip-name-resolve" } - { o: "max_connections", v: "500" } - name: read content from output file @@ -459,12 +459,23 @@ option: like value: tea state: absent - - name: Test with empty string + + # See https://github.com/ansible-collections/community.general/issues/3031 + - name: Tests with empty strings ini_file: path: "{{ output_file }}" - section: extensions - option: evolve + section: "{{ item.section | d('extensions') }}" + option: "{{ item.option }}" value: "" + allow_no_value: "{{ item.no_value | d(omit) }}" + loop: + - option: evolve + - option: regress + - section: foobar + option: foo + no_value: true + - option: improve + no_value: true - name: read content from output file slurp: @@ -473,7 +484,7 @@ - name: set expected content and get current ini file content set_fact: - expected15: "\n[extensions]\nevolve = \n" + expected15: "\n[extensions]\nevolve = \nregress = \nimprove = \n[foobar]\nfoo = \n" content15: "{{ output_content.content | b64decode }}" - debug: var=content15 - name: Verify content of ini file is as expected From 7f96b7df60bad4bd85c787401d521717c842da23 Mon Sep 17 00:00:00 2001 From: David Hummel <6109326+hummeltech@users.noreply.github.com> Date: Sun, 8 Aug 2021 09:35:52 -0700 Subject: [PATCH 0499/3093] nmcli: writing secrets to command line is a security hole (#3160) * nmcli: use `stdin` for setting private `wifi_sec` options I.E.: * `802-11-wireless-security.leap-password` * `802-11-wireless-security.psk` * `802-11-wireless-security.wep-key0` * `802-11-wireless-security.wep-key1` * `802-11-wireless-security.wep-key2` * `802-11-wireless-security.wep-key3` * Changelog fragement formatting. 
* Update changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml Co-authored-by: Felix Fontein * Make `wifi_sec_secret_options()` into a constant * Minor cleanup `'set ' + key + ' ' + value` => `'set %s %s' % (key, value)` * Change `casing` * Change `WIFI_SEC_SECRET_OPTIONS` from `list` to `tuple` * Update `edit_connection()` to not reset `edit_commands` It will just re`set` them if `edit_connection()` is called more than once. * Do not call `edit_connection()` if `connection_update(*)` fails * Fixed `pep8` issue `E713` in tests `test for membership should be 'not in'` * Simplify `create_connection()`/`modify_connection()` logic * `WIFI_SEC_SECRET_OPTIONS`=>`SECRET_OPTIONS`, options are prefixed * Moved `if key in self.SECRET_OPTIONS` into `if value is not None` check We don't need to do anything is the value is None Co-authored-by: Felix Fontein --- ...wifi-secrets-via-stdin-to-nmcli-module.yml | 4 + plugins/modules/net_tools/nmcli.py | 26 +++- .../plugins/modules/net_tools/test_nmcli.py | 139 +++++++++++++++++- 3 files changed, 166 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml diff --git a/changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml b/changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml new file mode 100644 index 0000000000..47e1837a0b --- /dev/null +++ b/changelogs/fragments/3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml @@ -0,0 +1,4 @@ +security_fixes: + - nmcli - do not pass WiFi secrets on the ``nmcli`` command line. Use ``nmcli con edit`` + instead and pass secrets as ``stdin`` + (https://github.com/ansible-collections/community.general/issues/3145). 
diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 92d1e65ef7..06b868dace 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -709,6 +709,15 @@ class Nmcli(object): platform = 'Generic' distribution = None + SECRET_OPTIONS = ( + '802-11-wireless-security.leap-password', + '802-11-wireless-security.psk', + '802-11-wireless-security.wep-key0', + '802-11-wireless-security.wep-key1', + '802-11-wireless-security.wep-key2', + '802-11-wireless-security.wep-key3' + ) + def __init__(self, module): self.module = module self.state = module.params['state'] @@ -792,6 +801,8 @@ class Nmcli(object): else: self.ipv6_method = None + self.edit_commands = [] + def execute_command(self, cmd, use_unsafe_shell=False, data=None): if isinstance(cmd, list): cmd = [to_text(item) for item in cmd] @@ -1079,12 +1090,17 @@ class Nmcli(object): # Constructing the command. for key, value in options.items(): if value is not None: + if key in self.SECRET_OPTIONS: + self.edit_commands += ['set %s %s' % (key, value)] + continue cmd.extend([key, value]) return self.execute_command(cmd) def create_connection(self): status = self.connection_update('create') + if status[0] == 0 and self.edit_commands: + status = self.edit_connection() if self.create_connection_up: status = self.up_connection() return status @@ -1105,7 +1121,15 @@ class Nmcli(object): return self.execute_command(cmd) def modify_connection(self): - return self.connection_update('modify') + status = self.connection_update('modify') + if status[0] == 0 and self.edit_commands: + status = self.edit_connection() + return status + + def edit_connection(self): + data = "\n".join(self.edit_commands + ['save', 'quit']) + cmd = [self.nmcli_bin, 'con', 'edit', self.conn_name] + return self.execute_command(cmd, data=data) def show_connection(self): cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] diff --git 
a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index c1b3e93ed4..9f131c3873 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -697,6 +697,23 @@ def mocked_ethernet_connection_dhcp_to_static(mocker): )) +@pytest.fixture +def mocked_secure_wireless_create_failure(mocker): + mocker_set(mocker, + execute_return=(1, "", "")) + + +@pytest.fixture +def mocked_secure_wireless_modify_failure(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, "", ""), + (1, "", ""), + )) + + @pytest.fixture def mocked_dummy_connection_static_unchanged(mocker): mocker_set(mocker, @@ -1652,6 +1669,52 @@ def test_create_secure_wireless(mocked_generic_connection_create, capfd): Test : Create secure wireless connection """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 2 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[0] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'add' + assert add_args[0][3] == 'type' + assert add_args[0][4] == 'wifi' + assert add_args[0][5] == 'con-name' + assert add_args[0][6] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in ['connection.interface-name', 'wireless_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + '802-11-wireless.ssid', 'Brittany', + '802-11-wireless-security.key-mgmt', 'wpa-psk']: + assert param in add_args_text + + edit_args, edit_kw = arg_list[1] + assert edit_args[0][0] == '/usr/bin/nmcli' + assert edit_args[0][1] == 'con' + assert edit_args[0][2] == 'edit' + assert edit_args[0][3] == 'non_existent_nw_device' + + edit_kw_data = edit_kw['data'].split() + for param in ['802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD', + 'save', + 'quit']: + assert param in 
edit_kw_data + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +def test_create_secure_wireless_failure(mocked_secure_wireless_create_failure, capfd): + """ + Test : Create secure wireless connection w/failure + """ + with pytest.raises(SystemExit): nmcli.main() @@ -1671,16 +1734,88 @@ def test_create_secure_wireless(mocked_generic_connection_create, capfd): for param in ['connection.interface-name', 'wireless_non_existant', 'ipv4.addresses', '10.10.10.10/24', '802-11-wireless.ssid', 'Brittany', - '802-11-wireless-security.key-mgmt', 'wpa-psk', - '802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD']: + '802-11-wireless-security.key-mgmt', 'wpa-psk']: assert param in add_args_text + out, err = capfd.readouterr() + results = json.loads(out) + assert results.get('failed') + assert 'changed' not in results + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +def test_modify_secure_wireless(mocked_generic_connection_modify, capfd): + """ + Test : Modify secure wireless connection + """ + + with pytest.raises(SystemExit): + nmcli.main() + assert nmcli.Nmcli.execute_command.call_count == 2 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[0] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'modify' + assert add_args[0][3] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in ['connection.interface-name', 'wireless_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + '802-11-wireless.ssid', 'Brittany', + '802-11-wireless-security.key-mgmt', 'wpa-psk']: + assert param in add_args_text + + edit_args, edit_kw = arg_list[1] + assert edit_args[0][0] == '/usr/bin/nmcli' + assert edit_args[0][1] 
== 'con' + assert edit_args[0][2] == 'edit' + assert edit_args[0][3] == 'non_existent_nw_device' + + edit_kw_data = edit_kw['data'].split() + for param in ['802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD', + 'save', + 'quit']: + assert param in edit_kw_data + out, err = capfd.readouterr() results = json.loads(out) assert not results.get('failed') assert results['changed'] +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) +def test_modify_secure_wireless_failure(mocked_secure_wireless_modify_failure, capfd): + """ + Test : Modify secure wireless connection w/failure + """ + + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 2 + arg_list = nmcli.Nmcli.execute_command.call_args_list + add_args, add_kw = arg_list[1] + + assert add_args[0][0] == '/usr/bin/nmcli' + assert add_args[0][1] == 'con' + assert add_args[0][2] == 'modify' + assert add_args[0][3] == 'non_existent_nw_device' + + add_args_text = list(map(to_text, add_args[0])) + for param in ['connection.interface-name', 'wireless_non_existant', + 'ipv4.addresses', '10.10.10.10/24', + '802-11-wireless.ssid', 'Brittany', + '802-11-wireless-security.key-mgmt', 'wpa-psk']: + assert param in add_args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert results.get('failed') + assert 'changed' not in results + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module']) def test_create_dummy_static(mocked_generic_connection_create, capfd): """ From 429359e977c40b24c421311388761fe958c60610 Mon Sep 17 00:00:00 2001 From: Roy Lenferink Date: Mon, 9 Aug 2021 16:32:57 +0200 Subject: [PATCH 0500/3093] Update the .gitignore with the latest version (#3177) This because it contains new changes, e.g. ignore development environments for Python projects. 
--- .gitignore | 81 ++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 70 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index c6fc14ad0b..c6c78b42e7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ -# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv -# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Created by https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# Edit at https://www.toptal.com/developers/gitignore?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv ### dotenv ### .env @@ -88,7 +88,7 @@ flycheck_*.el .nfs* ### PyCharm+all ### -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 # User-specific stuff @@ -98,6 +98,9 @@ flycheck_*.el .idea/**/dictionaries .idea/**/shelf +# AWS User-specific +.idea/**/aws.xml + # Generated files .idea/**/contentModel.xml @@ -118,6 +121,9 @@ flycheck_*.el # When using Gradle or Maven with auto-import, you should exclude module files, # since they will be recreated, and may cause churn. Uncomment if using # auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml # .idea/modules.xml # .idea/*.iml # .idea/modules @@ -198,7 +204,6 @@ parts/ sdist/ var/ wheels/ -pip-wheel-metadata/ share/python-wheels/ *.egg-info/ .installed.cfg @@ -225,13 +230,25 @@ htmlcov/ nosetests.xml coverage.xml *.cover +*.py,cover .hypothesis/ .pytest_cache/ +cover/ # Translations *.mo *.pot +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + # Scrapy stuff: .scrapy @@ -239,9 +256,19 @@ coverage.xml docs/_build/ # PyBuilder +.pybuilder/ target/ +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + # pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: .python-version # pipenv @@ -251,12 +278,24 @@ target/ # install all needed dependencies. #Pipfile.lock -# celery beat schedule file +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff celerybeat-schedule +celerybeat.pid # SageMath parsed files *.sage.py +# Environments +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + # Spyder project settings .spyderproject .spyproject @@ -264,10 +303,6 @@ celerybeat-schedule # Rope project settings .ropeproject -# Mr Developer -.mr.developer.cfg -.project - # mkdocs documentation /site @@ -279,9 +314,16 @@ dmypy.json # Pyre type checker .pyre/ +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + ### Vim ### # Swap [._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files [._]*.sw[a-p] [._]s[a-rt-v][a-z] [._]ss[a-gi-z] @@ -299,11 +341,13 @@ tags [._]*.un~ ### WebStorm ### -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 # User-specific stuff +# AWS User-specific + # Generated files # Sensitive or high-churn files @@ -314,6 +358,9 @@ tags # When using Gradle or Maven with auto-import, you should exclude module files, # since they will be recreated, and may cause churn. Uncomment if using # auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml # .idea/modules.xml # .idea/*.iml # .idea/modules @@ -349,15 +396,27 @@ tags # *.ipr # Sonarlint plugin +# https://plugins.jetbrains.com/plugin/7973-sonarlint .idea/**/sonarlint/ # SonarQube Plugin +# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin .idea/**/sonarIssues.xml # Markdown Navigator plugin +# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced .idea/**/markdown-navigator.xml +.idea/**/markdown-navigator-enh.xml .idea/**/markdown-navigator/ +# Cache file creation bug +# See https://youtrack.jetbrains.com/issue/JBR-2257 +.idea/$CACHE_FILE$ + +# CodeStream plugin +# https://plugins.jetbrains.com/plugin/12206-codestream +.idea/codestream.xml + ### Windows ### # Windows thumbnail cache files Thumbs.db @@ -384,4 +443,4 @@ $RECYCLE.BIN/ # Windows shortcuts *.lnk -# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv +# End of https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv From 56b5be0630e226f20cf461b2ab9c722de5e34483 Mon Sep 17 00:00:00 2001 From: zorun Date: Mon, 9 Aug 2021 22:44:36 +0200 Subject: [PATCH 0501/3093] openbsd_pkg: Fix regexp matching crash (#3161) When a package name contains special characters (e.g. "g++"), they are interpreted as part of the regexp. This can lead to a crash with an error in the python re module, for instance with "g++": sre_constants.error: multiple repeat Fix this by escaping the package name. 
Co-authored-by: Baptiste Jonglez --- .../fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml | 2 ++ plugins/modules/packaging/os/openbsd_pkg.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml diff --git a/changelogs/fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml b/changelogs/fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml new file mode 100644 index 0000000000..bb29542c04 --- /dev/null +++ b/changelogs/fragments/3161-openbsd-pkg-fix-regexp-matching-crash.yml @@ -0,0 +1,2 @@ +bugfixes: + - openbsd_pkg - fix regexp matching crash. This bug could trigger on package names with special characters, for example ``g++`` (https://github.com/ansible-collections/community.general/pull/3161). diff --git a/plugins/modules/packaging/os/openbsd_pkg.py b/plugins/modules/packaging/os/openbsd_pkg.py index 61e2a5e52b..05c374cb4e 100644 --- a/plugins/modules/packaging/os/openbsd_pkg.py +++ b/plugins/modules/packaging/os/openbsd_pkg.py @@ -241,7 +241,7 @@ def package_present(names, pkg_spec, module): # "file:/local/package/directory/ is empty" message on stderr # while still installing the package, so we need to look for # for a message like "packagename-1.0: ok" just in case. - match = re.search(r"\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout']) + match = re.search(r"\W%s-[^:]+: ok\W" % re.escape(pkg_spec[name]['stem']), pkg_spec[name]['stdout']) if match: # It turns out we were able to install the package. 
@@ -295,7 +295,7 @@ def package_latest(names, pkg_spec, module): pkg_spec[name]['changed'] = False for installed_name in pkg_spec[name]['installed_names']: module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name) - match = re.search(r"\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout']) + match = re.search(r"\W%s->.+: ok\W" % re.escape(installed_name), pkg_spec[name]['stdout']) if match: module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name) From 1705335ba78ea301b7d3905c9a03d821503f4256 Mon Sep 17 00:00:00 2001 From: rainerleber <39616583+rainerleber@users.noreply.github.com> Date: Mon, 9 Aug 2021 22:52:44 +0200 Subject: [PATCH 0502/3093] SAP task list execution (#3169) * add sap task list execute * Apply suggestions from code review Co-authored-by: Felix Fontein * remove json out * Apply suggestions from code review Co-authored-by: Felix Fontein * change logic Co-authored-by: Rainer Leber Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/modules/sap_task_list_execute.py | 1 + .../modules/system/sap_task_list_execute.py | 341 ++++++++++++++++++ .../system/test_sap_task_list_execute.py | 89 +++++ 4 files changed, 433 insertions(+) create mode 120000 plugins/modules/sap_task_list_execute.py create mode 100644 plugins/modules/system/sap_task_list_execute.py create mode 100644 tests/unit/plugins/modules/system/test_sap_task_list_execute.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 4912a03ba4..1e982296d6 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1058,6 +1058,8 @@ files: ignore: ryansb $modules/system/runit.py: maintainers: jsumners + $modules/system/sap_task_list_execute: + maintainers: rainerleber $modules/system/sefcontext.py: maintainers: dagwieers $modules/system/selinux_permissive.py: diff --git a/plugins/modules/sap_task_list_execute.py b/plugins/modules/sap_task_list_execute.py new file mode 120000 index 0000000000..c27ac0a6ca --- 
/dev/null +++ b/plugins/modules/sap_task_list_execute.py @@ -0,0 +1 @@ +system/sap_task_list_execute.py \ No newline at end of file diff --git a/plugins/modules/system/sap_task_list_execute.py b/plugins/modules/system/sap_task_list_execute.py new file mode 100644 index 0000000000..87d6a1060d --- /dev/null +++ b/plugins/modules/system/sap_task_list_execute.py @@ -0,0 +1,341 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Rainer Leber +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: sap_task_list_execute +short_description: Perform SAP Task list execution +version_added: "3.5.0" +description: + - The C(sap_task_list_execute) module depends on C(pyrfc) Python library (version 2.4.0 and upwards). + Depending on distribution you are using, you may need to install additional packages to + have these available. + - Tasks in the task list which requires manual activities will be confirmed automatically. + - This module will use the RFC package C(STC_TM_API). + +requirements: + - pyrfc >= 2.4.0 + - xmltodict + +options: + conn_username: + description: The required username for the SAP system. + required: true + type: str + conn_password: + description: The required password for the SAP system. + required: true + type: str + host: + description: The required host for the SAP system. Can be either an FQDN or IP Address. + required: true + type: str + sysnr: + description: + - The system number of the SAP system. + - You must quote the value to ensure retaining the leading zeros. + default: '00' + type: str + client: + description: + - The client number to connect to. + - You must quote the value to ensure retaining the leading zeros. + default: '000' + type: str + task_to_execute: + description: The task list which will be executed. 
+ required: true + type: str + task_parameters: + description: + - The tasks and the parameters for execution. + - If the task list do not need any parameters. This could be empty. + - If only specific tasks from the task list should be executed. + The tasks even when no parameter is needed must be provided. + Alongside with the module parameter I(task_skip=true). + type: list + elements: dict + suboptions: + TASKNAME: + description: The name of the task in the task list. + type: str + required: true + FIELDNAME: + description: The name of the field of the task. + type: str + VALUE: + description: The value which have to be set. + type: raw + task_settings: + description: + - Setting for the execution of the task list. This can be the following as in TCODE SE80 described. + Check Mode C(CHECKRUN), Background Processing Active C(BATCH) (this is the default value), + Asynchronous Execution C(ASYNC), Trace Mode C(TRACE), Server Name C(BATCH_TARGET). + default: ['BATCH'] + type: list + elements: str + task_skip: + description: + - If this parameter is C(true) not defined tasks in I(task_parameters) are skipped. + - This could be the case when only certain tasks should run from the task list. + default: false + type: bool + +notes: + - Does not support C(check_mode). 
+author: + - Rainer Leber (@rainerleber) +''' + +EXAMPLES = r''' +# Pass in a message +- name: Test task execution + community.general.sap_task_list_execute: + conn_username: DDIC + conn_password: Passwd1234 + host: 10.1.8.10 + sysnr: '01' + client: '000' + task_to_execute: SAP_BASIS_SSL_CHECK + task_settings: batch + +- name: Pass in input parameters + community.general.sap_task_list_execute: + conn_username: DDIC + conn_password: Passwd1234 + host: 10.1.8.10 + sysnr: '00' + client: '000' + task_to_execute: SAP_BASIS_SSL_CHECK + task_parameters : + - { 'TASKNAME': 'CL_STCT_CHECK_SEC_CRYPTO', 'FIELDNAME': 'P_OPT2', 'VALUE': 'X' } + - TASKNAME: CL_STCT_CHECK_SEC_CRYPTO + FIELDNAME: P_OPT3 + VALUE: X + task_settings: batch + +# Exported environement variables. +- name: Hint if module will fail with error message like ImportError libsapnwrfc.so... + community.general.sap_task_list_execute: + conn_username: DDIC + conn_password: Passwd1234 + host: 10.1.8.10 + sysnr: '00' + client: '000' + task_to_execute: SAP_BASIS_SSL_CHECK + task_settings: batch + environment: + SAPNWRFC_HOME: /usr/local/sap/nwrfcsdk + LD_LIBRARY_PATH: /usr/local/sap/nwrfcsdk/lib +''' + +RETURN = r''' +msg: + description: A small execution description. + type: str + returned: always + sample: 'Successful' +out: + description: A complete description of the executed tasks. If this is available. 
+ type: list + elements: dict + returned: on success + sample: [...,{ + "LOG": { + "STCTM_S_LOG": [ + { + "ACTIVITY": "U_CONFIG", + "ACTIVITY_DESCR": "Configuration changed", + "DETAILS": null, + "EXEC_ID": "20210728184903.815739", + "FIELD": null, + "ID": "STC_TASK", + "LOG_MSG_NO": "000000", + "LOG_NO": null, + "MESSAGE": "For radiobutton group ICM too many options are set; choose only one option", + "MESSAGE_V1": "ICM", + "MESSAGE_V2": null, + "MESSAGE_V3": null, + "MESSAGE_V4": null, + "NUMBER": "048", + "PARAMETER": null, + "PERIOD": "M", + "PERIOD_DESCR": "Maintenance", + "ROW": "0", + "SRC_LINE": "170", + "SRC_OBJECT": "CL_STCTM_REPORT_UI IF_STCTM_UI_TASK~SET_PARAMETERS", + "SYSTEM": null, + "TIMESTMP": "20210728184903", + "TSTPNM": "DDIC", + "TYPE": "E" + },... + ]}}] +''' + +from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.json_utils import json +import traceback +try: + from pyrfc import Connection +except ImportError: + HAS_PYRFC_LIBRARY = False + PYRFC_LIBRARY_IMPORT_ERROR = traceback.format_exc() +else: + HAS_PYRFC_LIBRARY = True +try: + import xmltodict +except ImportError: + HAS_XMLTODICT_LIBRARY = False + XMLTODICT_LIBRARY_IMPORT_ERROR = traceback.format_exc() +else: + HAS_XMLTODICT_LIBRARY = True + + +def call_rfc_method(connection, method_name, kwargs): + # PyRFC call function + return connection.call(method_name, **kwargs) + + +def process_exec_settings(task_settings): + # processes task settings to objects + exec_settings = {} + for settings in task_settings: + temp_dict = {settings.upper(): 'X'} + for key, value in temp_dict.items(): + exec_settings[key] = value + return exec_settings + + +def xml_to_dict(xml_raw): + try: + xml_parsed = xmltodict.parse(xml_raw, dict_constructor=dict) + xml_dict = xml_parsed['asx:abap']['asx:values']['SESSION']['TASKLIST'] + except KeyError: + xml_dict = "No logs available." 
+ return xml_dict + + +def run_module(): + + params_spec = dict( + TASKNAME=dict(type='str', required=True), + FIELDNAME=dict(type='str'), + VALUE=dict(type='raw'), + ) + + # define available arguments/parameters a user can pass to the module + module = AnsibleModule( + argument_spec=dict( + # values for connection + conn_username=dict(type='str', required=True), + conn_password=dict(type='str', required=True, no_log=True), + host=dict(type='str', required=True), + sysnr=dict(type='str', default="00"), + client=dict(type='str', default="000"), + # values for execution tasks + task_to_execute=dict(type='str', required=True), + task_parameters=dict(type='list', elements='dict', options=params_spec), + task_settings=dict(type='list', elements='str', default=['BATCH']), + task_skip=dict(type='bool', default=False), + ), + supports_check_mode=False, + ) + result = dict(changed=False, msg='', out={}) + + params = module.params + + username = params['conn_username'].upper() + password = params['conn_password'] + host = params['host'] + sysnr = params['sysnr'] + client = params['client'] + + task_parameters = params['task_parameters'] + task_to_execute = params['task_to_execute'] + task_settings = params['task_settings'] + task_skip = params['task_skip'] + + if not HAS_PYRFC_LIBRARY: + module.fail_json( + msg=missing_required_lib('pyrfc'), + exception=PYRFC_LIBRARY_IMPORT_ERROR) + + if not HAS_XMLTODICT_LIBRARY: + module.fail_json( + msg=missing_required_lib('xmltodict'), + exception=XMLTODICT_LIBRARY_IMPORT_ERROR) + + # basic RFC connection with pyrfc + try: + conn = Connection(user=username, passwd=password, ashost=host, sysnr=sysnr, client=client) + except Exception as err: + result['error'] = str(err) + result['msg'] = 'Something went wrong connecting to the SAP system.' 
+ module.fail_json(**result) + + try: + raw_params = call_rfc_method(conn, 'STC_TM_SCENARIO_GET_PARAMETERS', + {'I_SCENARIO_ID': task_to_execute}) + except Exception as err: + result['error'] = str(err) + result['msg'] = 'The task list does not exsist.' + module.fail_json(**result) + exec_settings = process_exec_settings(task_settings) + # initialize session task + session_init = call_rfc_method(conn, 'STC_TM_SESSION_BEGIN', + {'I_SCENARIO_ID': task_to_execute, + 'I_INIT_ONLY': 'X'}) + # Confirm Tasks which requires manual activities from Task List Run + for task in raw_params['ET_PARAMETER']: + call_rfc_method(conn, 'STC_TM_TASK_CONFIRM', + {'I_SESSION_ID': session_init['E_SESSION_ID'], + 'I_TASKNAME': task['TASKNAME']}) + if task_skip: + for task in raw_params['ET_PARAMETER']: + call_rfc_method(conn, 'STC_TM_TASK_SKIP', + {'I_SESSION_ID': session_init['E_SESSION_ID'], + 'I_TASKNAME': task['TASKNAME'], 'I_SKIP_DEP_TASKS': 'X'}) + # unskip defined tasks and set parameters + if task_parameters is not None: + for task in task_parameters: + call_rfc_method(conn, 'STC_TM_TASK_UNSKIP', + {'I_SESSION_ID': session_init['E_SESSION_ID'], + 'I_TASKNAME': task['TASKNAME'], 'I_UNSKIP_DEP_TASKS': 'X'}) + + call_rfc_method(conn, 'STC_TM_SESSION_SET_PARAMETERS', + {'I_SESSION_ID': session_init['E_SESSION_ID'], + 'IT_PARAMETER': task_parameters}) + # start the task + try: + session_start = call_rfc_method(conn, 'STC_TM_SESSION_RESUME', + {'I_SESSION_ID': session_init['E_SESSION_ID'], + 'IS_EXEC_SETTINGS': exec_settings}) + except Exception as err: + result['error'] = str(err) + result['msg'] = 'Something went wrong. See error.' 
+ module.fail_json(**result) + # get task logs because the execution may successfully but the tasks shows errors or warnings + # returned value is ABAPXML https://help.sap.com/doc/abapdocu_755_index_htm/7.55/en-US/abenabap_xslt_asxml_general.htm + session_log = call_rfc_method(conn, 'STC_TM_SESSION_GET_LOG', + {'I_SESSION_ID': session_init['E_SESSION_ID']}) + + task_list = xml_to_dict(session_log['E_LOG']) + + result['changed'] = True + result['msg'] = session_start['E_STATUS_DESCR'] + result['out'] = task_list + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/tests/unit/plugins/modules/system/test_sap_task_list_execute.py b/tests/unit/plugins/modules/system/test_sap_task_list_execute.py new file mode 100644 index 0000000000..9d2299cacb --- /dev/null +++ b/tests/unit/plugins/modules/system/test_sap_task_list_execute.py @@ -0,0 +1,89 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +sys.modules['pyrfc'] = MagicMock() +sys.modules['pyrfc.Connection'] = MagicMock() +sys.modules['xmltodict'] = MagicMock() +sys.modules['xmltodict.parse'] = MagicMock() + +from ansible_collections.community.general.plugins.modules.system import sap_task_list_execute + + +class TestSAPRfcModule(ModuleTestCase): + + def setUp(self): + super(TestSAPRfcModule, self).setUp() + self.module = sap_task_list_execute + + def tearDown(self): + super(TestSAPRfcModule, self).tearDown() + + def define_rfc_connect(self, mocker): + return mocker.patch(self.module.call_rfc_method) + + def test_without_required_parameters(self): + """Failure must 
occurs when all parameters are missing""" + with self.assertRaises(AnsibleFailJson): + set_module_args({}) + self.module.main() + + def test_error_no_task_list(self): + """tests fail to exec task list""" + + set_module_args({ + "conn_username": "DDIC", + "conn_password": "Test1234", + "host": "10.1.8.9", + "task_to_execute": "SAP_BASIS_SSL_CHECK" + }) + + with patch.object(self.module, 'Connection') as conn: + conn.return_value = '' + with self.assertRaises(AnsibleFailJson) as result: + self.module.main() + self.assertEqual(result.exception.args[0]['msg'], 'The task list does not exsist.') + + def test_success(self): + """test execute task list success""" + + set_module_args({ + "conn_username": "DDIC", + "conn_password": "Test1234", + "host": "10.1.8.9", + "task_to_execute": "SAP_BASIS_SSL_CHECK" + }) + with patch.object(self.module, 'xml_to_dict') as XML: + XML.return_value = {'item': [{'TASK': {'CHECK_STATUS_DESCR': 'Check successfully', + 'STATUS_DESCR': 'Executed successfully', 'TASKNAME': 'CL_STCT_CHECK_SEC_CRYPTO', + 'LNR': '1', 'DESCRIPTION': 'Check SAP Cryptographic Library', 'DOCU_EXIST': 'X', + 'LOG_EXIST': 'X', 'ACTION_SKIP': None, 'ACTION_UNSKIP': None, 'ACTION_CONFIRM': None, + 'ACTION_MAINTAIN': None}}]} + + with self.assertRaises(AnsibleExitJson) as result: + sap_task_list_execute.main() + self.assertEqual(result.exception.args[0]['out'], {'item': [{'TASK': {'CHECK_STATUS_DESCR': 'Check successfully', + 'STATUS_DESCR': 'Executed successfully', 'TASKNAME': 'CL_STCT_CHECK_SEC_CRYPTO', + 'LNR': '1', 'DESCRIPTION': 'Check SAP Cryptographic Library', 'DOCU_EXIST': 'X', + 'LOG_EXIST': 'X', 'ACTION_SKIP': None, 'ACTION_UNSKIP': None, + 'ACTION_CONFIRM': None, 'ACTION_MAINTAIN': None}}]}) + + def test_success_no_log(self): + """test execute task list success without logs""" + + set_module_args({ + "conn_username": "DDIC", + "conn_password": "Test1234", + "host": "10.1.8.9", + "task_to_execute": "SAP_BASIS_SSL_CHECK" + }) + with patch.object(self.module, 
'xml_to_dict') as XML: + XML.return_value = "No logs available." + with self.assertRaises(AnsibleExitJson) as result: + sap_task_list_execute.main() + self.assertEqual(result.exception.args[0]['out'], 'No logs available.') From b5d6457611b88f6b9e6efdc562d39ed427758324 Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Tue, 10 Aug 2021 07:49:18 +0200 Subject: [PATCH 0503/3093] Support older version of psutil (RHEL7 and RHEL6) (#2808) * Support older version of psutil (RHEL7 and RHEL6) The psutil python module is a true mess, they changed the API twice. The function arguments, as well as the objects that are returned. The documentation does not make it clear which version supports what so the safest implementation is this waterfall approach. A better approach would be to inspect the returned information, rather than trust a version, but that would not be any more efficient. In the end it is better to have something that at least works out-of-the-box on all platforms than something that requires custom updates to system packages before it works as expected. Especially for something as basic as `pids`. * A little bit more concise * Apply suggestions from code review * Add changelog fragment. Co-authored-by: Felix Fontein --- .../fragments/2808-pids-older-psutil.yml | 2 ++ plugins/modules/system/pids.py | 19 ++++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/2808-pids-older-psutil.yml diff --git a/changelogs/fragments/2808-pids-older-psutil.yml b/changelogs/fragments/2808-pids-older-psutil.yml new file mode 100644 index 0000000000..34015e3f2c --- /dev/null +++ b/changelogs/fragments/2808-pids-older-psutil.yml @@ -0,0 +1,2 @@ +bugfixes: +- "pids - avoid crashes for older ``psutil`` versions, like on RHEL6 and RHEL7 (https://github.com/ansible-collections/community.general/pull/2808)." 
diff --git a/plugins/modules/system/pids.py b/plugins/modules/system/pids.py index 5c7b82a794..622bec2500 100644 --- a/plugins/modules/system/pids.py +++ b/plugins/modules/system/pids.py @@ -79,11 +79,20 @@ def compare_lower(a, b): def get_pid(name): pids = [] - for proc in psutil.process_iter(attrs=['name', 'cmdline']): - if compare_lower(proc.info['name'], name) or \ - proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name): - pids.append(proc.pid) - + try: + for proc in psutil.process_iter(attrs=['name', 'cmdline']): + if compare_lower(proc.info['name'], name) or \ + proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name): + pids.append(proc.pid) + except TypeError: # EL6, EL7: process_iter() takes no arguments (1 given) + for proc in psutil.process_iter(): + try: # EL7 + proc_name, proc_cmdline = proc.name(), proc.cmdline() + except TypeError: # EL6: 'str' object is not callable + proc_name, proc_cmdline = proc.name, proc.cmdline + if compare_lower(proc_name, name) or \ + proc_cmdline and compare_lower(proc_cmdline[0], name): + pids.append(proc.pid) return pids From 6033ce695bc891828887019439ecf11668f58086 Mon Sep 17 00:00:00 2001 From: Sebastian Date: Thu, 12 Aug 2021 08:17:03 +0200 Subject: [PATCH 0504/3093] zypper: support transactional-updates (#3164) * zypper: support transactional-updates - Check if transactional updates are in use by checking for the existence of /var/lib/misc/transactional-update.state - Prefix zypper-commands with /sbin/transactional-update --continue --drop-if-no-change --quiet run if this is the case fixes ansible-collections/community.general#3159 * re-add get_bin_path for executables * fix typo --- .../3164-zypper-support-transactional-updates.yaml | 2 ++ plugins/modules/packaging/os/zypper.py | 8 ++++++++ 2 files changed, 10 insertions(+) create mode 100644 changelogs/fragments/3164-zypper-support-transactional-updates.yaml diff --git a/changelogs/fragments/3164-zypper-support-transactional-updates.yaml 
b/changelogs/fragments/3164-zypper-support-transactional-updates.yaml new file mode 100644 index 0000000000..d12ff9a6bf --- /dev/null +++ b/changelogs/fragments/3164-zypper-support-transactional-updates.yaml @@ -0,0 +1,2 @@ +minor_changes: + - zypper - prefix zypper commands with ``/sbin/transactional-update --continue --drop-if-no-change --quiet run`` if transactional updates are detected (https://github.com/ansible-collections/community.general/issues/3159). diff --git a/plugins/modules/packaging/os/zypper.py b/plugins/modules/packaging/os/zypper.py index 367bd8d9a0..2295b5a566 100644 --- a/plugins/modules/packaging/os/zypper.py +++ b/plugins/modules/packaging/os/zypper.py @@ -29,6 +29,7 @@ author: short_description: Manage packages on SUSE and openSUSE description: - Manage packages on SUSE and openSUSE using the zypper and rpm tools. + - Also supports transactional updates, by running zypper inside C(/sbin/transactional-update --continue --drop-if-no-change --quiet run). options: name: description: @@ -213,6 +214,7 @@ EXAMPLES = ''' ZYPP_LOCK_TIMEOUT: 20 ''' +import os.path import xml import re from xml.dom.minidom import parseString as parseXML @@ -337,6 +339,8 @@ def get_cmd(m, subcommand): is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade'] is_refresh = subcommand == 'refresh' cmd = [m.get_bin_path('zypper', required=True), '--quiet', '--non-interactive', '--xmlout'] + if transactional_updates(): + cmd = [m.get_bin_path('transactional-update', required=True), '--continue', '--drop-if-no-change', '--quiet', 'run'] + cmd if m.params['extra_args_precommand']: args_list = m.params['extra_args_precommand'].split() cmd.extend(args_list) @@ -491,6 +495,10 @@ def repo_refresh(m): return retvals + +def transactional_updates(): + return os.path.exists('/var/lib/misc/transactional-update.state') + # =========================================== # Main control flow From 1e466df863ffebd19ddab66b99ed19eec21e6c0e Mon Sep 17 00:00:00 2001 From: Ajpantuso 
Date: Thu, 12 Aug 2021 02:18:38 -0400 Subject: [PATCH 0505/3093] archive - idempotency enhancement for 4.0.0 (#3075) * Initial Commit * Comparing with tar file checksums rather than tar header checksums * Added changelog fragment * Revert "Comparing with tar file checksums rather than tar header checksums" This reverts commit bed4b171077058f1ed29785c6def52de2b1f441c. * Restricting idempotency tests by format * Applying review suggestions --- .../3075-archive-idempotency-enhancements.yml | 4 ++ plugins/modules/files/archive.py | 61 ++++++++++++++++--- .../targets/archive/tests/idempotency.yml | 21 +++---- 3 files changed, 65 insertions(+), 21 deletions(-) create mode 100644 changelogs/fragments/3075-archive-idempotency-enhancements.yml diff --git a/changelogs/fragments/3075-archive-idempotency-enhancements.yml b/changelogs/fragments/3075-archive-idempotency-enhancements.yml new file mode 100644 index 0000000000..3d0bf65fb7 --- /dev/null +++ b/changelogs/fragments/3075-archive-idempotency-enhancements.yml @@ -0,0 +1,4 @@ +--- +breaking_changes: + - archive - adding idempotency checks for changes to file names and content within the ``destination`` file + (https://github.com/ansible-collections/community.general/pull/3075). 
diff --git a/plugins/modules/files/archive.py b/plugins/modules/files/archive.py index 30c4de5aa8..91dc6e5112 100644 --- a/plugins/modules/files/archive.py +++ b/plugins/modules/files/archive.py @@ -182,6 +182,7 @@ import zipfile from fnmatch import fnmatch from sys import version_info from traceback import format_exc +from zlib import crc32 from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_bytes, to_native @@ -234,10 +235,6 @@ def expand_paths(paths): return expanded_path, is_globby -def legacy_filter(path, exclusion_patterns): - return matches_exclusion_patterns(path, exclusion_patterns) - - def matches_exclusion_patterns(path, exclusion_patterns): return any(fnmatch(path, p) for p in exclusion_patterns) @@ -313,6 +310,7 @@ class Archive(object): if self.remove: self._check_removal_safety() + self.original_checksums = self.destination_checksums() self.original_size = self.destination_size() def add(self, path, archive_name): @@ -377,8 +375,16 @@ class Archive(object): msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors)) ) - def compare_with_original(self): - self.changed |= self.original_size != self.destination_size() + def is_different_from_original(self): + if self.original_checksums is None: + return self.original_size != self.destination_size() + else: + return self.original_checksums != self.destination_checksums() + + def destination_checksums(self): + if self.destination_exists() and self.destination_readable(): + return self._get_checksums(self.destination) + return None def destination_exists(self): return self.destination and os.path.exists(self.destination) @@ -494,6 +500,10 @@ class Archive(object): def _add(self, path, archive_name): pass + @abc.abstractmethod + def _get_checksums(self, path): + pass + class ZipArchive(Archive): def __init__(self, module): @@ -513,9 +523,18 @@ class ZipArchive(Archive): self.file = 
zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, True) def _add(self, path, archive_name): - if not legacy_filter(path, self.exclusion_patterns): + if not matches_exclusion_patterns(path, self.exclusion_patterns): self.file.write(path, archive_name) + def _get_checksums(self, path): + try: + archive = zipfile.ZipFile(_to_native_ascii(path), 'r') + checksums = set((info.filename, info.CRC) for info in archive.infolist()) + archive.close() + except zipfile.BadZipfile: + checksums = set() + return checksums + class TarArchive(Archive): def __init__(self, module): @@ -554,13 +573,35 @@ class TarArchive(Archive): return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo def py26_filter(path): - return legacy_filter(path, self.exclusion_patterns) + return matches_exclusion_patterns(path, self.exclusion_patterns) if PY27: self.file.add(path, archive_name, recursive=False, filter=py27_filter) else: self.file.add(path, archive_name, recursive=False, exclude=py26_filter) + def _get_checksums(self, path): + try: + if self.format == 'xz': + with lzma.open(_to_native_ascii(path), 'r') as f: + archive = tarfile.open(fileobj=f) + checksums = set((info.name, info.chksum) for info in archive.getmembers()) + archive.close() + else: + archive = tarfile.open(_to_native_ascii(path), 'r|' + self.format) + checksums = set((info.name, info.chksum) for info in archive.getmembers()) + archive.close() + except (lzma.LZMAError, tarfile.ReadError, tarfile.CompressionError): + try: + # The python implementations of gzip, bz2, and lzma do not support restoring compressed files + # to their original names so only file checksum is returned + f = self._open_compressed_file(_to_native_ascii(path), 'r') + checksums = set([(b'', crc32(f.read()))]) + f.close() + except Exception: + checksums = set() + return checksums + def get_archive(module): if module.params['format'] == 'zip': @@ -603,7 +644,7 @@ def main(): else: 
archive.add_targets() archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED - archive.compare_with_original() + archive.changed |= archive.is_different_from_original() if archive.remove: archive.remove_targets() else: @@ -613,7 +654,7 @@ def main(): else: path = archive.paths[0] archive.add_single_target(path) - archive.compare_with_original() + archive.changed |= archive.is_different_from_original() if archive.remove: archive.remove_single_target(path) diff --git a/tests/integration/targets/archive/tests/idempotency.yml b/tests/integration/targets/archive/tests/idempotency.yml index f53f768164..9262601572 100644 --- a/tests/integration/targets/archive/tests/idempotency.yml +++ b/tests/integration/targets/archive/tests/idempotency.yml @@ -19,12 +19,12 @@ format: "{{ format }}" register: file_content_idempotency_after -# After idempotency fix result will be reliably changed for all formats - name: Assert task status is changed - file content idempotency ({{ format }}) assert: that: - - file_content_idempotency_after is not changed - when: "format in ('tar', 'zip')" + - file_content_idempotency_after is changed + # Only ``zip`` archives are guaranteed to compare file content checksums rather than header checksums + when: "format == 'zip'" - name: Remove archive - file content idempotency ({{ format }}) file: @@ -54,12 +54,10 @@ format: "{{ format }}" register: file_name_idempotency_after -# After idempotency fix result will be reliably changed for all formats - name: Check task status - file name idempotency ({{ format }}) assert: that: - - file_name_idempotency_after is not changed - when: "format in ('tar', 'zip')" + - file_name_idempotency_after is changed - name: Remove archive - file name idempotency ({{ format }}) file: @@ -89,12 +87,12 @@ format: "{{ format }}" register: single_file_content_idempotency_after -# After idempotency fix result will be reliably changed for all formats - name: Assert task status is changed 
- single file content idempotency ({{ format }}) assert: that: - - single_file_content_idempotency_after is not changed - when: "format in ('tar', 'zip')" + - single_file_content_idempotency_after is changed + # ``tar`` archives are not guaranteed to identify changes to file content if the file meta properties are unchanged. + when: "format != 'tar'" - name: Remove archive - single file content idempotency ({{ format }}) file: @@ -125,11 +123,12 @@ register: single_file_name_idempotency_after -# After idempotency fix result will be reliably changed for all formats +# The gz, bz2, and xz formats do not store the original file name +# so it is not possible to identify a change in this scenario. - name: Check task status - single file name idempotency ({{ format }}) assert: that: - - single_file_name_idempotency_after is not changed + - single_file_name_idempotency_after is changed when: "format in ('tar', 'zip')" - name: Remove archive - single file name idempotency ({{ format }}) From 5855ef558a5357383e4be93606c1bd101bb48f85 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 10 Aug 2021 07:53:16 +0200 Subject: [PATCH 0506/3093] Next planned release is 3.6.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 0f19d8d443..724e76110d 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.5.0 +version: 3.6.0 readme: README.md authors: - Ansible (https://github.com/ansible) From 1fec1d0c81449fb4fc3dea9de362a27de7c1fbda Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 12 Aug 2021 12:07:50 +0200 Subject: [PATCH 0507/3093] Fix new devel sanity errors. 
(#3194) --- changelogs/fragments/3194-sanity.yml | 14 ++++++++++++++ plugins/cache/memcached.py | 8 ++++---- plugins/callback/logdna.py | 2 +- plugins/connection/saltstack.py | 2 +- plugins/inventory/online.py | 2 +- plugins/module_utils/_netapp.py | 4 ++-- plugins/module_utils/online.py | 2 +- plugins/module_utils/scaleway.py | 2 +- plugins/modules/cloud/opennebula/one_template.py | 4 ++-- plugins/modules/cloud/packet/packet_device.py | 5 ++--- plugins/modules/cloud/packet/packet_sshkey.py | 2 +- .../modules/packaging/language/maven_artifact.py | 2 +- plugins/modules/system/launchd.py | 6 +++--- plugins/modules/system/ufw.py | 4 ++-- tests/unit/mock/loader.py | 4 ++-- 15 files changed, 38 insertions(+), 25 deletions(-) create mode 100644 changelogs/fragments/3194-sanity.yml diff --git a/changelogs/fragments/3194-sanity.yml b/changelogs/fragments/3194-sanity.yml new file mode 100644 index 0000000000..095894a685 --- /dev/null +++ b/changelogs/fragments/3194-sanity.yml @@ -0,0 +1,14 @@ +bugfixes: +- "memcached cache plugin - change function argument names to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "logdns callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3199)." +- "online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "netapp module utils - remove always-true conditional to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." 
+- "one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index ee36628f40..b7d14aa86d 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -154,12 +154,12 @@ class CacheModuleKeys(MutableSet): def __len__(self): return len(self._keyset) - def add(self, key): - self._keyset[key] = time.time() + def add(self, value): + self._keyset[value] = time.time() self._cache.set(self.PREFIX, self._keyset) - def discard(self, key): - del self._keyset[key] + def discard(self, value): + del self._keyset[value] self._cache.set(self.PREFIX, self._keyset) def remove_by_timerange(self, s_min, s_max): diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index 0c459bfac2..138b612de8 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -78,7 +78,7 @@ def get_mac(): # Getting hostname of system: def get_hostname(): - return str(socket.gethostname()).split('.local')[0] + return str(socket.gethostname()).split('.local', 1)[0] # Getting IP of system: diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index cbd85eaf3e..95870ad2d0 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -51,7 +51,7 @@ class 
Connection(ConnectionBase): self._connected = True return self - def exec_command(self, cmd, sudoable=False, in_data=None): + def exec_command(self, cmd, in_data=None, sudoable=False): """ run a command on the remote minion """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index 085c258d45..c678d3e0e5 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -235,7 +235,7 @@ class InventoryModule(BaseInventoryPlugin): self.headers = { 'Authorization': "Bearer %s" % token, - 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ')[0]), + 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]), 'Content-type': 'application/json' } diff --git a/plugins/module_utils/_netapp.py b/plugins/module_utils/_netapp.py index 126cc1bf16..8eda53b344 100644 --- a/plugins/module_utils/_netapp.py +++ b/plugins/module_utils/_netapp.py @@ -384,8 +384,8 @@ class NetAppESeriesModule(object): path = path[1:] request_url = self.url + self.DEFAULT_REST_API_PATH + path - if self.log_requests or True: - self.module.log(pformat(dict(url=request_url, data=data, method=method))) + # if self.log_requests: + self.module.log(pformat(dict(url=request_url, data=data, method=method))) return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None, timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds) diff --git a/plugins/module_utils/online.py b/plugins/module_utils/online.py index c0294abb79..b5acbcc017 100644 --- a/plugins/module_utils/online.py +++ b/plugins/module_utils/online.py @@ -101,7 +101,7 @@ class Online(object): @staticmethod def get_user_agent_string(module): - return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0]) + return "ansible %s Python %s" % 
(module.ansible_version, sys.version.split(' ', 1)[0]) def get(self, path, data=None, headers=None): return self.send('GET', path, data, headers) diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index d714fd69e8..bcada5fcb9 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -142,7 +142,7 @@ class Scaleway(object): @staticmethod def get_user_agent_string(module): - return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0]) + return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0]) def get(self, path, data=None, headers=None, params=None): return self.send(method='GET', path=path, data=data, headers=headers, params=params) diff --git a/plugins/modules/cloud/opennebula/one_template.py b/plugins/modules/cloud/opennebula/one_template.py index 3b0b601193..b1d2c69ccf 100644 --- a/plugins/modules/cloud/opennebula/one_template.py +++ b/plugins/modules/cloud/opennebula/one_template.py @@ -213,8 +213,8 @@ class TemplateModule(OpenNebulaModule): def get_template_by_id(self, template_id): return self.get_template(lambda template: (template.ID == template_id)) - def get_template_by_name(self, template_name): - return self.get_template(lambda template: (template.NAME == template_name)) + def get_template_by_name(self, name): + return self.get_template(lambda template: (template.NAME == name)) def get_template_instance(self, requested_id, requested_name): if requested_id: diff --git a/plugins/modules/cloud/packet/packet_device.py b/plugins/modules/cloud/packet/packet_device.py index 5cc8d13e9a..5912a6f46a 100644 --- a/plugins/modules/cloud/packet/packet_device.py +++ b/plugins/modules/cloud/packet/packet_device.py @@ -509,11 +509,10 @@ def wait_for_devices_active(module, packet_conn, watched_devices): def wait_for_public_IPv(module, packet_conn, created_devices): def has_public_ip(addr_list, ip_v): - return any([a['public'] and a['address_family'] == ip_v and 
- a['address'] for a in addr_list]) + return any(a['public'] and a['address_family'] == ip_v and a['address'] for a in addr_list) def all_have_public_ip(ds, ip_v): - return all([has_public_ip(d.ip_addresses, ip_v) for d in ds]) + return all(has_public_ip(d.ip_addresses, ip_v) for d in ds) address_family = module.params.get('wait_for_public_IPv') diff --git a/plugins/modules/cloud/packet/packet_sshkey.py b/plugins/modules/cloud/packet/packet_sshkey.py index 57e988630e..4800718fd0 100644 --- a/plugins/modules/cloud/packet/packet_sshkey.py +++ b/plugins/modules/cloud/packet/packet_sshkey.py @@ -168,7 +168,7 @@ def get_sshkey_selector(module): return k.key == select_dict['key'] else: # if key string not specified, all the fields must match - return all([select_dict[f] == getattr(k, f) for f in select_dict]) + return all(select_dict[f] == getattr(k, f) for f in select_dict) return selector diff --git a/plugins/modules/packaging/language/maven_artifact.py b/plugins/modules/packaging/language/maven_artifact.py index 9e2f94190f..c184830580 100644 --- a/plugins/modules/packaging/language/maven_artifact.py +++ b/plugins/modules/packaging/language/maven_artifact.py @@ -565,7 +565,7 @@ class MavenDownloader: return "Cannot find %s checksum from %s" % (checksum_alg, remote_url) try: # Check if remote checksum only contains md5/sha1 or md5/sha1 + filename - _remote_checksum = remote_checksum.split(None)[0] + _remote_checksum = remote_checksum.split(None, 1)[0] remote_checksum = _remote_checksum # remote_checksum is empty so we continue and keep original checksum string # This should not happen since we check for remote_checksum before diff --git a/plugins/modules/system/launchd.py b/plugins/modules/system/launchd.py index e8d82ff318..8c09a44f6e 100644 --- a/plugins/modules/system/launchd.py +++ b/plugins/modules/system/launchd.py @@ -141,14 +141,14 @@ class Plist: self.__changed = False self.__service = service - state, pid, dummy, dummy = LaunchCtlList(module, service).run() + 
state, pid, dummy, dummy = LaunchCtlList(module, self.__service).run() # Check if readPlist is available or not self.old_plistlib = hasattr(plistlib, 'readPlist') - self.__file = self.__find_service_plist(service) + self.__file = self.__find_service_plist(self.__service) if self.__file is None: - msg = 'Unable to infer the path of %s service plist file' % service + msg = 'Unable to infer the path of %s service plist file' % self.__service if pid is None and state == ServiceState.UNLOADED: msg += ' and it was not found among active services' module.fail_json(msg=msg) diff --git a/plugins/modules/system/ufw.py b/plugins/modules/system/ufw.py index c6df6fe63a..465df6adb5 100644 --- a/plugins/modules/system/ufw.py +++ b/plugins/modules/system/ufw.py @@ -526,8 +526,8 @@ def main(): lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()] lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher] last_number = max([no for (no, ipv6) in lines]) if lines else 0 - has_ipv4 = any([not ipv6 for (no, ipv6) in lines]) - has_ipv6 = any([ipv6 for (no, ipv6) in lines]) + has_ipv4 = any(not ipv6 for (no, ipv6) in lines) + has_ipv6 = any(ipv6 for (no, ipv6) in lines) if relative_to_cmd == 'first-ipv4': relative_to = 1 elif relative_to_cmd == 'last-ipv4': diff --git a/tests/unit/mock/loader.py b/tests/unit/mock/loader.py index 756d532e68..5389bdcb2f 100644 --- a/tests/unit/mock/loader.py +++ b/tests/unit/mock/loader.py @@ -32,8 +32,8 @@ class DictDataLoader(DataLoader): # TODO: the real _get_file_contents returns a bytestring, so we actually convert the # unicode/text it's created with to utf-8 - def _get_file_contents(self, path): - path = to_text(path) + def _get_file_contents(self, file_name): + path = to_text(file_name) if path in self._file_mapping: return (to_bytes(self._file_mapping[path]), False) else: From e123623f5c2566ef61c397e4c739664b17a59428 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 12 Aug 2021 
13:11:02 +0200 Subject: [PATCH 0508/3093] Fix PR #. --- changelogs/fragments/3194-sanity.yml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/changelogs/fragments/3194-sanity.yml b/changelogs/fragments/3194-sanity.yml index 095894a685..b6961556ce 100644 --- a/changelogs/fragments/3194-sanity.yml +++ b/changelogs/fragments/3194-sanity.yml @@ -1,14 +1,14 @@ bugfixes: -- "memcached cache plugin - change function argument names to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "logdns callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3199)." -- "online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "netapp module utils - remove always-true conditional to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." 
-- "launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." -- "ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3199)." +- "memcached cache plugin - change function argument names to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "logdns callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3194)." +- "online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "netapp module utils - remove always-true conditional to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." +- "ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194)." 
From f7dba23e50590e99ecd97d5235151e7fc3e8490b Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 12 Aug 2021 22:14:34 +0200 Subject: [PATCH 0509/3093] Remove deprecated netapp leftovers. (#3197) --- changelogs/fragments/netapp-removal.yml | 2 + plugins/doc_fragments/_netapp.py | 138 ----- plugins/module_utils/_netapp.py | 748 ------------------------ 3 files changed, 2 insertions(+), 886 deletions(-) create mode 100644 changelogs/fragments/netapp-removal.yml delete mode 100644 plugins/doc_fragments/_netapp.py delete mode 100644 plugins/module_utils/_netapp.py diff --git a/changelogs/fragments/netapp-removal.yml b/changelogs/fragments/netapp-removal.yml new file mode 100644 index 0000000000..e515e377cd --- /dev/null +++ b/changelogs/fragments/netapp-removal.yml @@ -0,0 +1,2 @@ +removed_features: +- "Removed deprecated netapp module utils and doc fragments (https://github.com/ansible-collections/community.general/pull/3197)." diff --git a/plugins/doc_fragments/_netapp.py b/plugins/doc_fragments/_netapp.py deleted file mode 100644 index c3d0d3ba06..0000000000 --- a/plugins/doc_fragments/_netapp.py +++ /dev/null @@ -1,138 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright: (c) 2018, Sumit Kumar , chris Archibald -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - DOCUMENTATION = r''' -options: - - See respective platform section for more details -requirements: - - See respective platform section for more details -notes: - - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire -''' - - # Documentation fragment for ONTAP (na_cdot) - ONTAP = r''' -options: - hostname: - required: true - description: - - The hostname or IP address of the ONTAP instance. 
- username: - required: true - description: - - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required. - For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/). - aliases: ['user'] - password: - required: true - description: - - Password for the specified user. - aliases: ['pass'] -requirements: - - A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3 - - Ansible 2.2 - - netapp-lib (2015.9.25). Install using 'pip install netapp-lib' - -notes: - - The modules prefixed with na\\_cdot are built to support the ONTAP storage platform. - -''' - - # Documentation fragment for SolidFire - SOLIDFIRE = r''' -options: - hostname: - required: true - description: - - The hostname or IP address of the SolidFire cluster. - username: - required: true - description: - - Please ensure that the user has the adequate permissions. For more information, please read the official documentation - U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US). - aliases: ['user'] - password: - required: true - description: - - Password for the specified user. - aliases: ['pass'] - -requirements: - - The modules were developed with SolidFire 10.1 - - solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python' - -notes: - - The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform. - -''' - - # Documentation fragment for ONTAP (na_ontap) - NA_ONTAP = r''' -options: - hostname: - description: - - The hostname or IP address of the ONTAP instance. - type: str - required: true - username: - description: - - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required. 
- For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/). - type: str - required: true - aliases: [ user ] - password: - description: - - Password for the specified user. - type: str - required: true - aliases: [ pass ] - https: - description: - - Enable and disable https - type: bool - default: no - validate_certs: - description: - - If set to C(no), the SSL certificates will not be validated. - - This should only set to C(False) used on personally controlled sites using self-signed certificates. - type: bool - default: yes - http_port: - description: - - Override the default port (80 or 443) with this port - type: int - ontapi: - description: - - The ontap api version to use - type: int - use_rest: - description: - - REST API if supported by the target system for all the resources and attributes the module requires. Otherwise will revert to ZAPI. - - Always -- will always use the REST API - - Never -- will always use the ZAPI - - Auto -- will try to use the REST Api - default: Auto - choices: ['Never', 'Always', 'Auto'] - type: str - - -requirements: - - A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward - - Ansible 2.6 - - Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib' - - Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib' - - To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;' - -notes: - - The modules prefixed with na\\_ontap are built to support the ONTAP storage platform. - -''' diff --git a/plugins/module_utils/_netapp.py b/plugins/module_utils/_netapp.py deleted file mode 100644 index 8eda53b344..0000000000 --- a/plugins/module_utils/_netapp.py +++ /dev/null @@ -1,748 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. 
-# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. -# -# Copyright (c) 2017, Sumit Kumar -# Copyright (c) 2017, Michael Price -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import os -import random -import mimetypes - -from pprint import pformat -from ansible.module_utils import six -from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError -from ansible.module_utils.urls import open_url -from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.common.text.converters import to_native - -try: - from ansible.module_utils.ansible_release import __version__ as ansible_version -except ImportError: - ansible_version = 'unknown' - -try: - from netapp_lib.api.zapi import zapi - HAS_NETAPP_LIB = True -except ImportError: - HAS_NETAPP_LIB = False - -try: - import requests - HAS_REQUESTS = True -except ImportError: - HAS_REQUESTS = False - -import ssl -try: - from urlparse import urlparse, urlunparse -except ImportError: - from urllib.parse import urlparse, urlunparse - - -HAS_SF_SDK = False -SF_BYTE_MAP = dict( - # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000. 
- bytes=1, - b=1, - kb=1000, - mb=1000 ** 2, - gb=1000 ** 3, - tb=1000 ** 4, - pb=1000 ** 5, - eb=1000 ** 6, - zb=1000 ** 7, - yb=1000 ** 8 -) - -POW2_BYTE_MAP = dict( - # Here, 1 kb = 1024 - bytes=1, - b=1, - kb=1024, - mb=1024 ** 2, - gb=1024 ** 3, - tb=1024 ** 4, - pb=1024 ** 5, - eb=1024 ** 6, - zb=1024 ** 7, - yb=1024 ** 8 -) - -try: - from solidfire.factory import ElementFactory - from solidfire.custom.models import TimeIntervalFrequency - from solidfire.models import Schedule, ScheduleInfo - - HAS_SF_SDK = True -except Exception: - HAS_SF_SDK = False - - -def has_netapp_lib(): - return HAS_NETAPP_LIB - - -def has_sf_sdk(): - return HAS_SF_SDK - - -def na_ontap_host_argument_spec(): - - return dict( - hostname=dict(required=True, type='str'), - username=dict(required=True, type='str', aliases=['user']), - password=dict(required=True, type='str', aliases=['pass'], no_log=True), - https=dict(required=False, type='bool', default=False), - validate_certs=dict(required=False, type='bool', default=True), - http_port=dict(required=False, type='int'), - ontapi=dict(required=False, type='int'), - use_rest=dict(required=False, type='str', default='Auto', choices=['Never', 'Always', 'Auto']) - ) - - -def ontap_sf_host_argument_spec(): - - return dict( - hostname=dict(required=True, type='str'), - username=dict(required=True, type='str', aliases=['user']), - password=dict(required=True, type='str', aliases=['pass'], no_log=True) - ) - - -def aws_cvs_host_argument_spec(): - - return dict( - api_url=dict(required=True, type='str'), - validate_certs=dict(required=False, type='bool', default=True), - api_key=dict(required=True, type='str', no_log=True), - secret_key=dict(required=True, type='str', no_log=True) - ) - - -def create_sf_connection(module, port=None): - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params['password'] - - if HAS_SF_SDK and hostname and username and password: - try: - return_val = 
ElementFactory.create(hostname, username, password, port=port) - return return_val - except Exception: - raise Exception("Unable to create SF connection") - else: - module.fail_json(msg="the python SolidFire SDK module is required") - - -def setup_na_ontap_zapi(module, vserver=None): - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params['password'] - https = module.params['https'] - validate_certs = module.params['validate_certs'] - port = module.params['http_port'] - version = module.params['ontapi'] - - if HAS_NETAPP_LIB: - # set up zapi - server = zapi.NaServer(hostname) - server.set_username(username) - server.set_password(password) - if vserver: - server.set_vserver(vserver) - if version: - minor = version - else: - minor = 110 - server.set_api_version(major=1, minor=minor) - # default is HTTP - if https: - if port is None: - port = 443 - transport_type = 'HTTPS' - # HACK to bypass certificate verification - if validate_certs is False: - if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None): - ssl._create_default_https_context = ssl._create_unverified_context - else: - if port is None: - port = 80 - transport_type = 'HTTP' - server.set_transport_type(transport_type) - server.set_port(port) - server.set_server_type('FILER') - return server - else: - module.fail_json(msg="the python NetApp-Lib module is required") - - -def setup_ontap_zapi(module, vserver=None): - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params['password'] - - if HAS_NETAPP_LIB: - # set up zapi - server = zapi.NaServer(hostname) - server.set_username(username) - server.set_password(password) - if vserver: - server.set_vserver(vserver) - # Todo : Replace hard-coded values with configurable parameters. 
- server.set_api_version(major=1, minor=110) - server.set_port(80) - server.set_server_type('FILER') - server.set_transport_type('HTTP') - return server - else: - module.fail_json(msg="the python NetApp-Lib module is required") - - -def eseries_host_argument_spec(): - """Retrieve a base argument specification common to all NetApp E-Series modules""" - argument_spec = basic_auth_argument_spec() - argument_spec.update(dict( - api_username=dict(type='str', required=True), - api_password=dict(type='str', required=True, no_log=True), - api_url=dict(type='str', required=True), - ssid=dict(type='str', required=False, default='1'), - validate_certs=dict(type='bool', required=False, default=True) - )) - return argument_spec - - -class NetAppESeriesModule(object): - """Base class for all NetApp E-Series modules. - - Provides a set of common methods for NetApp E-Series modules, including version checking, mode (proxy, embedded) - verification, http requests, secure http redirection for embedded web services, and logging setup. - - Be sure to add the following lines in the module's documentation section: - extends_documentation_fragment: - - netapp.eseries - - :param dict(dict) ansible_options: dictionary of ansible option definitions - :param str web_services_version: minimally required web services rest api version (default value: "02.00.0000.0000") - :param bool supports_check_mode: whether the module will support the check_mode capabilities (default=False) - :param list(list) mutually_exclusive: list containing list(s) of mutually exclusive options (optional) - :param list(list) required_if: list containing list(s) containing the option, the option value, and then - a list of required options. (optional) - :param list(list) required_one_of: list containing list(s) of options for which at least one is required. (optional) - :param list(list) required_together: list containing list(s) of options that are required together. 
(optional) - :param bool log_requests: controls whether to log each request (default: True) - """ - DEFAULT_TIMEOUT = 60 - DEFAULT_SECURE_PORT = "8443" - DEFAULT_REST_API_PATH = "devmgr/v2/" - DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about" - DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json", - "netapp-client-type": "Ansible-%s" % ansible_version} - HTTP_AGENT = "Ansible / %s" % ansible_version - SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3, tb=1024**4, - pb=1024**5, eb=1024**6, zb=1024**7, yb=1024**8) - - def __init__(self, ansible_options, web_services_version=None, supports_check_mode=False, - mutually_exclusive=None, required_if=None, required_one_of=None, required_together=None, - log_requests=True): - argument_spec = eseries_host_argument_spec() - argument_spec.update(ansible_options) - - self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode, - mutually_exclusive=mutually_exclusive, required_if=required_if, - required_one_of=required_one_of, required_together=required_together) - - args = self.module.params - self.web_services_version = web_services_version if web_services_version else "02.00.0000.0000" - self.ssid = args["ssid"] - self.url = args["api_url"] - self.log_requests = log_requests - self.creds = dict(url_username=args["api_username"], - url_password=args["api_password"], - validate_certs=args["validate_certs"]) - - if not self.url.endswith("/"): - self.url += "/" - - self.is_embedded_mode = None - self.is_web_services_valid_cache = None - - def _check_web_services_version(self): - """Verify proxy or embedded web services meets minimum version required for module. - - The minimum required web services version is evaluated against version supplied through the web services rest - api. AnsibleFailJson exception will be raised when the minimum is not met or exceeded. 
- - This helper function will update the supplied api url if secure http is not used for embedded web services - - :raise AnsibleFailJson: raised when the contacted api service does not meet the minimum required version. - """ - if not self.is_web_services_valid_cache: - - url_parts = urlparse(self.url) - if not url_parts.scheme or not url_parts.netloc: - self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url) - - if url_parts.scheme not in ["http", "https"]: - self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url) - - self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc) - about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH - rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, **self.creds) - - if rc != 200: - self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid) - self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0] - about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH - try: - rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds) - except Exception as error: - self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]." - % (self.ssid, to_native(error))) - - major, minor, other, revision = data["version"].split(".") - minimum_major, minimum_minor, other, minimum_revision = self.web_services_version.split(".") - - if not (major > minimum_major or - (major == minimum_major and minor > minimum_minor) or - (major == minimum_major and minor == minimum_minor and revision >= minimum_revision)): - self.module.fail_json(msg="Web services version does not meet minimum version required. Current version: [%s]." - " Version required: [%s]." 
% (data["version"], self.web_services_version)) - - self.module.log("Web services rest api version met the minimum required version.") - self.is_web_services_valid_cache = True - - def is_embedded(self): - """Determine whether web services server is the embedded web services. - - If web services about endpoint fails based on an URLError then the request will be attempted again using - secure http. - - :raise AnsibleFailJson: raised when web services about endpoint failed to be contacted. - :return bool: whether contacted web services is running from storage array (embedded) or from a proxy. - """ - self._check_web_services_version() - - if self.is_embedded_mode is None: - about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH - try: - rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds) - self.is_embedded_mode = not data["runningAsProxy"] - except Exception as error: - self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]." - % (self.ssid, to_native(error))) - - return self.is_embedded_mode - - def request(self, path, data=None, method='GET', headers=None, ignore_errors=False): - """Issue an HTTP request to a url, retrieving an optional JSON response. - - :param str path: web services rest api endpoint path (Example: storage-systems/1/graph). Note that when the - full url path is specified then that will be used without supplying the protocol, hostname, port and rest path. - :param data: data required for the request (data may be json or any python structured data) - :param str method: request method such as GET, POST, DELETE. - :param dict headers: dictionary containing request headers. - :param bool ignore_errors: forces the request to ignore any raised exceptions. 
- """ - self._check_web_services_version() - - if headers is None: - headers = self.DEFAULT_HEADERS - - if not isinstance(data, str) and headers["Content-Type"] == "application/json": - data = json.dumps(data) - - if path.startswith("/"): - path = path[1:] - request_url = self.url + self.DEFAULT_REST_API_PATH + path - - # if self.log_requests: - self.module.log(pformat(dict(url=request_url, data=data, method=method))) - - return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None, - timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds) - - -def create_multipart_formdata(files, fields=None, send_8kb=False): - """Create the data for a multipart/form request. - - :param list(list) files: list of lists each containing (name, filename, path). - :param list(list) fields: list of lists each containing (key, value). - :param bool send_8kb: only sends the first 8kb of the files (default: False). 
- """ - boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)]) - data_parts = list() - data = None - - if six.PY2: # Generate payload for Python 2 - newline = "\r\n" - if fields is not None: - for key, value in fields: - data_parts.extend(["--%s" % boundary, - 'Content-Disposition: form-data; name="%s"' % key, - "", - value]) - - for name, filename, path in files: - with open(path, "rb") as fh: - value = fh.read(8192) if send_8kb else fh.read() - - data_parts.extend(["--%s" % boundary, - 'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename), - "Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream"), - "", - value]) - data_parts.extend(["--%s--" % boundary, ""]) - data = newline.join(data_parts) - - else: - newline = six.b("\r\n") - if fields is not None: - for key, value in fields: - data_parts.extend([six.b("--%s" % boundary), - six.b('Content-Disposition: form-data; name="%s"' % key), - six.b(""), - six.b(value)]) - - for name, filename, path in files: - with open(path, "rb") as fh: - value = fh.read(8192) if send_8kb else fh.read() - - data_parts.extend([six.b("--%s" % boundary), - six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)), - six.b("Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream")), - six.b(""), - value]) - data_parts.extend([six.b("--%s--" % boundary), b""]) - data = newline.join(data_parts) - - headers = { - "Content-Type": "multipart/form-data; boundary=%s" % boundary, - "Content-Length": str(len(data))} - - return headers, data - - -def request(url, data=None, headers=None, method='GET', use_proxy=True, - force=False, last_mod_time=None, timeout=10, validate_certs=True, - url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): - """Issue an HTTP request to a url, retrieving an optional JSON response.""" - - if headers is None: - headers = 
{"Content-Type": "application/json", "Accept": "application/json"} - headers.update({"netapp-client-type": "Ansible-%s" % ansible_version}) - - if not http_agent: - http_agent = "Ansible / %s" % ansible_version - - try: - r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, - force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, - url_username=url_username, url_password=url_password, http_agent=http_agent, - force_basic_auth=force_basic_auth) - except HTTPError as err: - r = err.fp - - try: - raw_data = r.read() - if raw_data: - data = json.loads(raw_data) - else: - raw_data = None - except Exception: - if ignore_errors: - pass - else: - raise Exception(raw_data) - - resp_code = r.getcode() - - if resp_code >= 400 and not ignore_errors: - raise Exception(resp_code, data) - else: - return resp_code, data - - -def ems_log_event(source, server, name="Ansible", id="12345", version=ansible_version, - category="Information", event="setup", autosupport="false"): - ems_log = zapi.NaElement('ems-autosupport-log') - # Host name invoking the API. - ems_log.add_new_child("computer-name", name) - # ID of event. A user defined event-id, range [0..2^32-2]. - ems_log.add_new_child("event-id", id) - # Name of the application invoking the API. - ems_log.add_new_child("event-source", source) - # Version of application invoking the API. - ems_log.add_new_child("app-version", version) - # Application defined category of the event. - ems_log.add_new_child("category", category) - # Description of event to log. An application defined message to log. 
- ems_log.add_new_child("event-description", event) - ems_log.add_new_child("log-level", "6") - ems_log.add_new_child("auto-support", autosupport) - server.invoke_successfully(ems_log, True) - - -def get_cserver_zapi(server): - vserver_info = zapi.NaElement('vserver-get-iter') - query_details = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'}) - query = zapi.NaElement('query') - query.add_child_elem(query_details) - vserver_info.add_child_elem(query) - result = server.invoke_successfully(vserver_info, - enable_tunneling=False) - attribute_list = result.get_child_by_name('attributes-list') - vserver_list = attribute_list.get_child_by_name('vserver-info') - return vserver_list.get_child_content('vserver-name') - - -def get_cserver(connection, is_rest=False): - if not is_rest: - return get_cserver_zapi(connection) - - params = {'fields': 'type'} - api = "private/cli/vserver" - json, error = connection.get(api, params) - if json is None or error is not None: - # exit if there is an error or no data - return None - vservers = json.get('records') - if vservers is not None: - for vserver in vservers: - if vserver['type'] == 'admin': # cluster admin - return vserver['vserver'] - if len(vservers) == 1: # assume vserver admin - return vservers[0]['vserver'] - - return None - - -class OntapRestAPI(object): - def __init__(self, module, timeout=60): - self.module = module - self.username = self.module.params['username'] - self.password = self.module.params['password'] - self.hostname = self.module.params['hostname'] - self.use_rest = self.module.params['use_rest'] - self.verify = self.module.params['validate_certs'] - self.timeout = timeout - self.url = 'https://' + self.hostname + '/api/' - self.errors = list() - self.debug_logs = list() - self.check_required_library() - - def check_required_library(self): - if not HAS_REQUESTS: - self.module.fail_json(msg=missing_required_lib('requests')) - - def send_request(self, method, api, params, 
json=None, return_status_code=False): - ''' send http request and process reponse, including error conditions ''' - url = self.url + api - status_code = None - content = None - json_dict = None - json_error = None - error_details = None - - def get_json(response): - ''' extract json, and error message if present ''' - try: - json = response.json() - except ValueError: - return None, None - error = json.get('error') - return json, error - - try: - response = requests.request(method, url, verify=self.verify, auth=(self.username, self.password), params=params, timeout=self.timeout, json=json) - content = response.content # for debug purposes - status_code = response.status_code - # If the response was successful, no Exception will be raised - response.raise_for_status() - json_dict, json_error = get_json(response) - except requests.exceptions.HTTPError as err: - __, json_error = get_json(response) - if json_error is None: - self.log_error(status_code, 'HTTP error: %s' % err) - error_details = str(err) - # If an error was reported in the json payload, it is handled below - except requests.exceptions.ConnectionError as err: - self.log_error(status_code, 'Connection error: %s' % err) - error_details = str(err) - except Exception as err: - self.log_error(status_code, 'Other error: %s' % err) - error_details = str(err) - if json_error is not None: - self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error)) - error_details = json_error - self.log_debug(status_code, content) - if return_status_code: - return status_code, error_details - return json_dict, error_details - - def get(self, api, params): - method = 'GET' - return self.send_request(method, api, params) - - def post(self, api, data, params=None): - method = 'POST' - return self.send_request(method, api, params, json=data) - - def patch(self, api, data, params=None): - method = 'PATCH' - return self.send_request(method, api, params, json=data) - - def delete(self, api, data, params=None): - 
method = 'DELETE' - return self.send_request(method, api, params, json=data) - - def _is_rest(self, used_unsupported_rest_properties=None): - if self.use_rest == "Always": - if used_unsupported_rest_properties: - error = "REST API currently does not support '%s'" % \ - ', '.join(used_unsupported_rest_properties) - return True, error - else: - return True, None - if self.use_rest == 'Never' or used_unsupported_rest_properties: - # force ZAPI if requested or if some parameter requires it - return False, None - method = 'HEAD' - api = 'cluster/software' - status_code, __ = self.send_request(method, api, params=None, return_status_code=True) - if status_code == 200: - return True, None - return False, None - - def is_rest(self, used_unsupported_rest_properties=None): - ''' only return error if there is a reason to ''' - use_rest, error = self._is_rest(used_unsupported_rest_properties) - if used_unsupported_rest_properties is None: - return use_rest - return use_rest, error - - def log_error(self, status_code, message): - self.errors.append(message) - self.debug_logs.append((status_code, message)) - - def log_debug(self, status_code, content): - self.debug_logs.append((status_code, content)) - - -class AwsCvsRestAPI(object): - def __init__(self, module, timeout=60): - self.module = module - self.api_key = self.module.params['api_key'] - self.secret_key = self.module.params['secret_key'] - self.api_url = self.module.params['api_url'] - self.verify = self.module.params['validate_certs'] - self.timeout = timeout - self.url = 'https://' + self.api_url + '/v1/' - self.check_required_library() - - def check_required_library(self): - if not HAS_REQUESTS: - self.module.fail_json(msg=missing_required_lib('requests')) - - def send_request(self, method, api, params, json=None): - ''' send http request and process reponse, including error conditions ''' - url = self.url + api - status_code = None - content = None - json_dict = None - json_error = None - error_details = None - 
headers = { - 'Content-type': "application/json", - 'api-key': self.api_key, - 'secret-key': self.secret_key, - 'Cache-Control': "no-cache", - } - - def get_json(response): - ''' extract json, and error message if present ''' - try: - json = response.json() - - except ValueError: - return None, None - success_code = [200, 201, 202] - if response.status_code not in success_code: - error = json.get('message') - else: - error = None - return json, error - try: - response = requests.request(method, url, headers=headers, timeout=self.timeout, json=json) - status_code = response.status_code - # If the response was successful, no Exception will be raised - json_dict, json_error = get_json(response) - except requests.exceptions.HTTPError as err: - __, json_error = get_json(response) - if json_error is None: - error_details = str(err) - except requests.exceptions.ConnectionError as err: - error_details = str(err) - except Exception as err: - error_details = str(err) - if json_error is not None: - error_details = json_error - - return json_dict, error_details - - # If an error was reported in the json payload, it is handled below - def get(self, api, params=None): - method = 'GET' - return self.send_request(method, api, params) - - def post(self, api, data, params=None): - method = 'POST' - return self.send_request(method, api, params, json=data) - - def patch(self, api, data, params=None): - method = 'PATCH' - return self.send_request(method, api, params, json=data) - - def put(self, api, data, params=None): - method = 'PUT' - return self.send_request(method, api, params, json=data) - - def delete(self, api, data, params=None): - method = 'DELETE' - return self.send_request(method, api, params, json=data) - - def get_state(self, jobId): - """ Method to get the state of the job """ - method = 'GET' - response, status_code = self.get('Jobs/%s' % jobId) - while str(response['state']) not in 'done': - response, status_code = self.get('Jobs/%s' % jobId) - return 'done' From 
25267b80941f1b41c280ed2d6c1ec8162fb8a62b Mon Sep 17 00:00:00 2001 From: Daniel Ziegenberg Date: Sun, 15 Aug 2021 12:59:50 +0200 Subject: [PATCH 0510/3093] ini_file: add multiple options with same name to ini file (#3033) * ini_file - prepare for fixing #273 - restructure tests - fix error message call: fail_json() takes 1 positional argument but 2 were given * ini_file - multiple values for one option (#273) - add module option 'exclusive' (boolean) for the abbility to add single option=value entries without overwriting existing options with the same name but different values - add abbility to define multiple options with the same name but different values * ini_file - add more tests for ini_file * ini_file - fix sanity tests * apply suggested changes: - rename 03-regressions.yml to 03-encoding.yml - fix typos - fix documentation * apply suggested changes: - test errors also for result is failed * apply suggested changes: - make state=absent also work with module option exclusive - add more tests for state=absent and module option exclusive * fix sanity test: - 02-values.yml:251:9: hyphens: too many spaces after hyphen * apply proposed changes * apply proposed changes from review - adjust version_added to 3.6.0 - small syntax change in changelog fragment --- ...ple_options_with_same_name_to_ini_file.yml | 3 + plugins/modules/files/ini_file.py | 265 +++-- .../targets/ini_file/tasks/main.yml | 552 +-------- .../targets/ini_file/tasks/tests/00-basic.yml | 38 + .../targets/ini_file/tasks/tests/01-value.yml | 589 ++++++++++ .../ini_file/tasks/tests/02-values.yml | 1013 +++++++++++++++++ .../ini_file/tasks/tests/03-encoding.yml | 41 + 7 files changed, 1902 insertions(+), 599 deletions(-) create mode 100644 changelogs/fragments/273-add_multiple_options_with_same_name_to_ini_file.yml create mode 100644 tests/integration/targets/ini_file/tasks/tests/00-basic.yml create mode 100644 tests/integration/targets/ini_file/tasks/tests/01-value.yml create mode 100644 
tests/integration/targets/ini_file/tasks/tests/02-values.yml create mode 100644 tests/integration/targets/ini_file/tasks/tests/03-encoding.yml diff --git a/changelogs/fragments/273-add_multiple_options_with_same_name_to_ini_file.yml b/changelogs/fragments/273-add_multiple_options_with_same_name_to_ini_file.yml new file mode 100644 index 0000000000..f32dc305b5 --- /dev/null +++ b/changelogs/fragments/273-add_multiple_options_with_same_name_to_ini_file.yml @@ -0,0 +1,3 @@ +minor_changes: + - ini_file - add module option ``exclusive`` (boolean) for the ability to add/remove single ``option=value`` entries without overwriting existing options with the same name but different values (https://github.com/ansible-collections/community.general/pull/3033). + - ini_file - add abbility to define multiple options with the same name but different values (https://github.com/ansible-collections/community.general/issues/273, https://github.com/ansible-collections/community.general/issues/1204). diff --git a/plugins/modules/files/ini_file.py b/plugins/modules/files/ini_file.py index a9c2e290b0..f25cc063ff 100644 --- a/plugins/modules/files/ini_file.py +++ b/plugins/modules/files/ini_file.py @@ -47,7 +47,18 @@ options: description: - The string value to be associated with an I(option). - May be omitted when removing an I(option). + - Mutually exclusive with I(values). + - I(value=v) is equivalent to I(values=[v]). type: str + values: + description: + - The string value to be associated with an I(option). + - May be omitted when removing an I(option). + - Mutually exclusive with I(value). + - I(value=v) is equivalent to I(values=[v]). + type: list + elements: str + version_added: 3.6.0 backup: description: - Create a backup file including the timestamp information so you can get @@ -56,10 +67,25 @@ options: default: no state: description: - - If set to C(absent) the option or section will be removed if present instead of created. 
+ - If set to C(absent) and I(exclusive) set to C(yes) all matching I(option) lines are removed. + - If set to C(absent) and I(exclusive) set to C(no) the specified C(option=value) lines are removed, + but the other I(option)s with the same name are not touched. + - If set to C(present) and I(exclusive) set to C(no) the specified C(option=values) lines are added, + but the other I(option)s with the same name are not touched. + - If set to C(present) and I(exclusive) set to C(yes) all given C(option=values) lines will be + added and the other I(option)s with the same name are removed. type: str choices: [ absent, present ] default: present + exclusive: + description: + - If set to C(yes) (default), all matching I(option) lines are removed when I(state=absent), + or replaced when I(state=present). + - If set to C(no), only the specified I(value(s)) are added when I(state=present), + or removed when I(state=absent), and existing ones are not modified. + type: bool + default: yes + version_added: 3.6.0 no_extra_spaces: description: - Do not insert spaces before and after '=' symbol. 
@@ -103,6 +129,27 @@ EXAMPLES = r''' option: temperature value: cold backup: yes + +- name: Add "beverage=lemon juice" is in section "[drinks]" in specified file + community.general.ini_file: + path: /etc/conf + section: drinks + option: beverage + value: lemon juice + mode: '0600' + state: present + exclusive: no + +- name: Ensure multiple values "beverage=coke" and "beverage=pepsi" are in section "[drinks]" in specified file + community.general.ini_file: + path: /etc/conf + section: drinks + option: beverage + values: + - coke + - pepsi + mode: '0600' + state: present ''' import io @@ -117,24 +164,37 @@ from ansible.module_utils.common.text.converters import to_bytes, to_text def match_opt(option, line): option = re.escape(option) - return re.match('[#;]?( |\t)*%s( |\t)*(=|$)' % option, line) + return re.match('[#;]?( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) def match_active_opt(option, line): option = re.escape(option) - return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) + return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) -def do_ini(module, filename, section=None, option=None, value=None, - state='present', backup=False, no_extra_spaces=False, create=True, - allow_no_value=False): +def update_section_line(changed, section_lines, index, changed_lines, newline, msg): + option_changed = section_lines[index] != newline + changed = changed or option_changed + if option_changed: + msg = 'option changed' + section_lines[index] = newline + changed_lines[index] = 1 + return (changed, msg) + + +def do_ini(module, filename, section=None, option=None, values=None, + state='present', exclusive=True, backup=False, no_extra_spaces=False, + create=True, allow_no_value=False): if section is not None: section = to_text(section) if option is not None: option = to_text(option) - if value is not None: - value = to_text(value) + + # deduplicate entries in values + values_unique = [] + [values_unique.append(to_text(value)) for value in values if value 
not in values_unique and value is not None] + values = values_unique diff = dict( before='', @@ -145,7 +205,7 @@ def do_ini(module, filename, section=None, option=None, value=None, if not os.path.exists(filename): if not create: - module.fail_json(rc=257, msg='Destination %s does not exist !' % filename) + module.fail_json(rc=257, msg='Destination %s does not exist!' % filename) destpath = os.path.dirname(filename) if not os.path.exists(destpath) and not module.check_mode: os.makedirs(destpath) @@ -185,74 +245,134 @@ def do_ini(module, filename, section=None, option=None, value=None, section = fake_section_name within_section = not section - section_start = 0 + section_start = section_end = 0 msg = 'OK' if no_extra_spaces: assignment_format = u'%s=%s\n' else: assignment_format = u'%s = %s\n' + option_no_value_present = False + non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$')) + before = after = [] + section_lines = [] + for index, line in enumerate(ini_lines): + # find start and end of section if line.startswith(u'[%s]' % section): within_section = True section_start = index elif line.startswith(u'['): if within_section: - if state == 'present': - # insert missing option line at the end of the section - for i in range(index, 0, -1): - # search backwards for previous non-blank or non-comment line - if not non_blank_non_comment_pattern.match(ini_lines[i - 1]): - if option and value is not None: - ini_lines.insert(i, assignment_format % (option, value)) - msg = 'option added' - changed = True - elif option and value is None and allow_no_value: - ini_lines.insert(i, '%s\n' % option) - msg = 'option added' - changed = True - break - elif state == 'absent' and not option: - # remove the entire section - del ini_lines[section_start:index] - msg = 'section removed' + section_end = index + break + + before = ini_lines[0:section_start] + section_lines = ini_lines[section_start:section_end] + after = ini_lines[section_end:len(ini_lines)] + + # Keep 
track of changed section_lines + changed_lines = [0] * len(section_lines) + + # handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex + # + # 1. edit all lines where we have a option=value pair with a matching value in values[] + # 2. edit all the remaing lines where we have a matching option + # 3. delete remaining lines where we have a matching option + # 4. insert missing option line(s) at the end of the section + + if state == 'present' and option: + for index, line in enumerate(section_lines): + if match_opt(option, line): + match = match_opt(option, line) + if values and match.group(6) in values: + matched_value = match.group(6) + if not matched_value and allow_no_value: + # replace existing option with no value line(s) + newline = u'%s\n' % option + option_no_value_present = True + else: + # replace existing option=value line(s) + newline = assignment_format % (option, matched_value) + (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) + values.remove(matched_value) + elif not values and allow_no_value: + # replace existing option with no value line(s) + newline = u'%s\n' % option + (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) + option_no_value_present = True + break + + if state == 'present' and exclusive and not allow_no_value: + # override option with no value to option with value if not allow_no_value + if len(values) > 0: + for index, line in enumerate(section_lines): + if not changed_lines[index] and match_active_opt(option, section_lines[index]): + newline = assignment_format % (option, values.pop(0)) + (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) + if len(values) == 0: + break + # remove all remaining option occurrences from the rest of the section + for index in range(len(section_lines) - 1, 0, -1): + if not changed_lines[index] and 
match_active_opt(option, section_lines[index]): + del section_lines[index] + del changed_lines[index] + changed = True + msg = 'option changed' + + if state == 'present': + # insert missing option line(s) at the end of the section + for index in range(len(section_lines), 0, -1): + # search backwards for previous non-blank or non-comment line + if not non_blank_non_comment_pattern.match(section_lines[index - 1]): + if option and values: + # insert option line(s) + for element in values[::-1]: + # items are added backwards, so traverse the list backwards to not confuse the user + # otherwise some of their options might appear in reverse order for whatever fancy reason ¯\_(ツ)_/¯ + if element is not None: + # insert option=value line + section_lines.insert(index, assignment_format % (option, element)) + msg = 'option added' + changed = True + elif element is None and allow_no_value: + # insert option with no value line + section_lines.insert(index, u'%s\n' % option) + msg = 'option added' + changed = True + elif option and not values and allow_no_value and not option_no_value_present: + # insert option with no value line(s) + section_lines.insert(index, u'%s\n' % option) + msg = 'option added' changed = True break + + if state == 'absent': + if option: + if exclusive: + # delete all option line(s) with given option and ignore value + new_section_lines = [line for line in section_lines if not (match_active_opt(option, line))] + if section_lines != new_section_lines: + changed = True + msg = 'option changed' + section_lines = new_section_lines + elif not exclusive and len(values) > 0: + # delete specified option=value line(s) + new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(6) in values)] + if section_lines != new_section_lines: + changed = True + msg = 'option changed' + section_lines = new_section_lines else: - if within_section and option: - if state == 'present': - # change the existing option 
line - if match_opt(option, line): - if value is None and allow_no_value: - newline = u'%s\n' % option - else: - newline = assignment_format % (option, value) - option_changed = ini_lines[index] != newline - changed = changed or option_changed - if option_changed: - msg = 'option changed' - ini_lines[index] = newline - if option_changed: - # remove all possible option occurrences from the rest of the section - index = index + 1 - while index < len(ini_lines): - line = ini_lines[index] - if line.startswith(u'['): - break - if match_active_opt(option, line): - del ini_lines[index] - else: - index = index + 1 - break - elif state == 'absent': - # delete the existing line - if match_active_opt(option, line): - del ini_lines[index] - changed = True - msg = 'option changed' - break + # drop the entire section + section_lines = [] + msg = 'section removed' + changed = True + + # reassemble the ini_lines after manipulation + ini_lines = before + section_lines + after # remove the fake section line del ini_lines[0] @@ -261,9 +381,10 @@ def do_ini(module, filename, section=None, option=None, value=None, if not within_section and state == 'present': ini_lines.append(u'[%s]\n' % section) msg = 'section and option added' - if option and value is not None: - ini_lines.append(assignment_format % (option, value)) - elif option and value is None and allow_no_value: + if option and values: + for value in values: + ini_lines.append(assignment_format % (option, value)) + elif option and not values and allow_no_value: ini_lines.append(u'%s\n' % option) else: msg = 'only section added' @@ -303,12 +424,17 @@ def main(): section=dict(type='str', required=True), option=dict(type='str'), value=dict(type='str'), + values=dict(type='list', elements='str'), backup=dict(type='bool', default=False), state=dict(type='str', default='present', choices=['absent', 'present']), + exclusive=dict(type='bool', default=True), no_extra_spaces=dict(type='bool', default=False), 
allow_no_value=dict(type='bool', default=False), create=dict(type='bool', default=True) ), + mutually_exclusive=[ + ['value', 'values'] + ], add_file_common_args=True, supports_check_mode=True, ) @@ -317,16 +443,23 @@ def main(): section = module.params['section'] option = module.params['option'] value = module.params['value'] + values = module.params['values'] state = module.params['state'] + exclusive = module.params['exclusive'] backup = module.params['backup'] no_extra_spaces = module.params['no_extra_spaces'] allow_no_value = module.params['allow_no_value'] create = module.params['create'] - if state == 'present' and not allow_no_value and value is None: - module.fail_json("Parameter 'value' must be defined if state=present and allow_no_value=False") + if state == 'present' and not allow_no_value and value is None and not values: + module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.") - (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value) + if value is not None: + values = [value] + elif values is None: + values = [] + + (changed, backup_file, diff, msg) = do_ini(module, path, section, option, values, state, exclusive, backup, no_extra_spaces, create, allow_no_value) if not module.check_mode and os.path.exists(path): file_args = module.load_file_common_arguments(module.params) diff --git a/tests/integration/targets/ini_file/tasks/main.yml b/tests/integration/targets/ini_file/tasks/main.yml index 96c6771b9e..b3a1c85531 100644 --- a/tests/integration/targets/ini_file/tasks/main.yml +++ b/tests/integration/targets/ini_file/tasks/main.yml @@ -23,545 +23,31 @@ # along with Ansible. If not, see . 
- name: record the output directory - set_fact: output_file={{ remote_tmp_dir }}/foo.ini - -- name: add "fav=lemonade" is in section "[drinks]" in specified file - ini_file: - path: "{{ output_file }}" - section: drinks - option: fav - value: lemonade - register: result1 - -- name: verify ini_file 'changed' is true - assert: - that: - - result1 is changed - - result1.msg == 'section and option added' - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content set_fact: - expected1: | + output_file: "{{ remote_tmp_dir }}/foo.ini" + non_existing_file: "{{ remote_tmp_dir }}/bar.ini" - [drinks] - fav = lemonade - content1: "{{ output_content.content | b64decode }}" - -- name: Verify content of ini file is as expected - assert: - that: - - content1 == expected1 - -- name: add "fav=lemonade" is in section "[drinks]" again - ini_file: - path: "{{ output_file }}" - section: drinks - option: fav - value: lemonade - register: result2 - -- name: Ensure unchanged - assert: - that: - - result2 is not changed - - result2.msg == 'OK' - -- name: Ensure "beverage=coke" is in section "[drinks]" - ini_file: - path: "{{ output_file }}" - section: drinks - option: beverage - value: coke - register: result3 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - expected3: | - - [drinks] - fav = lemonade - beverage = coke - content3: "{{ output_content.content | b64decode }}" - -- name: assert 'changed' is true and content is OK - assert: - that: - - result3 is changed - - result3.msg == 'option added' - - content3 == expected3 - -- name: Remove option "beverage=coke" - ini_file: - path: "{{ output_file }}" - section: drinks - option: beverage - state: absent - register: result4 - -- name: read content from output file - slurp: - src: "{{ output_file }}" 
- register: output_content - -- name: get ini file content - set_fact: - content4: "{{ output_content.content | b64decode }}" - -- name: assert changed and content is as expected - assert: - that: - - result4 is changed - - result4.msg == 'option changed' - - content4 == expected1 - -- name: remove section 'drinks' - ini_file: - path: "{{ output_file }}" - section: drinks - state: absent - register: result5 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: get current ini file content - set_fact: - content5: "{{ output_content.content | b64decode }}" - -- name: assert changed and content is empty - assert: - that: - - result5 is changed - - result5.msg == 'section removed' - - content5 == "\n" - -# allow_no_value - -- name: test allow_no_value - ini_file: - path: "{{ output_file }}" - section: mysqld - option: skip-name - allow_no_value: yes - register: result6 - -- name: assert section and option added - assert: - that: - - result6 is changed - - result6.msg == 'section and option added' - -- name: test allow_no_value idempotency - ini_file: - path: "{{ output_file }}" - section: mysqld - option: skip-name - allow_no_value: yes - register: result6 - -- name: assert 'changed' false - assert: - that: - - result6 is not changed - - result6.msg == 'OK' - -- name: test create empty section - ini_file: - path: "{{ output_file }}" - section: new_empty_section - allow_no_value: yes - register: result6a - -- name: assert section added - assert: - that: - - result6a is changed - - result6a.msg == 'only section added' - -- name: test create empty section idempotency - ini_file: - path: "{{ output_file }}" - section: new_empty_section - allow_no_value: yes - register: result6a - -- name: assert 'changed' false - assert: - that: - - result6a is not changed - - result6a.msg == 'OK' - -- name: test remove empty section - ini_file: - state: absent - path: "{{ output_file }}" - section: new_empty_section - 
allow_no_value: yes - -- name: test allow_no_value with loop - ini_file: - path: "{{ output_file }}" - section: mysqld - option: "{{ item.o }}" - value: "{{ item.v | d(omit) }}" - allow_no_value: yes - with_items: - - { o: "skip-name-resolve" } - - { o: "max_connections", v: "500" } - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - content7: "{{ output_content.content | b64decode }}" - expected7: | - - [mysqld] - skip-name - skip-name-resolve - max_connections = 500 - -- name: Verify content of ini file is as expected - assert: - that: - - content7 == expected7 - -- name: change option with no value to option with value - ini_file: - path: "{{ output_file }}" - section: mysqld - option: skip-name - value: myvalue - register: result8 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - content8: "{{ output_content.content | b64decode }}" - expected8: | - - [mysqld] - skip-name = myvalue - skip-name-resolve - max_connections = 500 - -- name: assert 'changed' and msg 'option changed' and content is as expected - assert: - that: - - result8 is changed - - result8.msg == 'option changed' - - content8 == expected8 - -- name: change option with value to option with no value - ini_file: - path: "{{ output_file }}" - section: mysqld - option: skip-name - allow_no_value: yes - register: result9 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - content9: "{{ output_content.content | b64decode }}" - expected9: | - - [mysqld] - skip-name - skip-name-resolve - max_connections = 500 - -- name: assert 'changed' and msg 'option changed' and content is as expected - assert: - that: - - result9 
is changed - - result9.msg == 'option changed' - - content9 == expected9 - -- name: Remove option with no value - ini_file: - path: "{{ output_file }}" - section: mysqld - option: skip-name-resolve - state: absent - register: result10 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - content10: "{{ output_content.content | b64decode }}" - expected10: | - - [mysqld] - skip-name - max_connections = 500 - -- name: assert 'changed' and msg 'option changed' and content is as expected - assert: - that: - - result10 is changed - - result10.msg == 'option changed' - - content10 == expected10 - -- name: Clean test file - copy: - content: "" - dest: "{{ output_file }}" - force: yes - -- name: Ensure "beverage=coke" is created within no section - ini_file: - section: - path: "{{ output_file }}" - option: beverage - value: coke - register: result11 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - expected11: "beverage = coke\n\n" - content11: "{{ output_content.content | b64decode }}" - -- name: assert 'changed' is true and content is OK (no section) - assert: - that: - - result11 is changed - - result11.msg == 'option added' - - content11 == expected11 - -- name: Ensure "beverage=coke" is modified as "beverage=water" within no section - ini_file: - path: "{{ output_file }}" - option: beverage - value: water - section: - register: result12 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - expected12: "beverage = water\n\n" - - content12: "{{ output_content.content | b64decode }}" - -- name: assert 'changed' is true and content is OK (no section) - assert: - that: - - result12 is 
changed - - result12.msg == 'option changed' - - content12 == expected12 - -- name: remove option 'beverage' within no section - ini_file: - section: - path: "{{ output_file }}" - option: beverage - state: absent - register: result13 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: get current ini file content - set_fact: - content13: "{{ output_content.content | b64decode }}" - -- name: assert changed (no section) - assert: - that: - - result13 is changed - - result13.msg == 'option changed' - - content13 == "\n" - -- name: Check add option without section before existing section +- name: include tasks block: - - name: Add option with section - ini_file: + + - name: include tasks to perform basic tests + include_tasks: tests/00-basic.yml + + - name: reset output file + file: path: "{{ output_file }}" - section: drinks - option: beverage - value: water - - name: Add option without section - ini_file: - path: "{{ output_file }}" - section: - option: like - value: tea - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - expected14: | - like = tea - - [drinks] - beverage = water - content14: "{{ output_content.content | b64decode }}" - -- name: Verify content of ini file is as expected - assert: - that: - - content14 == expected14 - -- name: Check add option with empty string value - block: - - name: Remove drinks - ini_file: - path: "{{ output_file }}" - section: drinks - state: absent - - name: Remove tea - ini_file: - path: "{{ output_file }}" - section: - option: like - value: tea state: absent - # See https://github.com/ansible-collections/community.general/issues/3031 - - name: Tests with empty strings - ini_file: + - name: include tasks to perform tests with parameter "value" + include_tasks: tests/01-value.yml + + - name: reset output file + file: path: "{{ 
output_file }}" - section: "{{ item.section | d('extensions') }}" - option: "{{ item.option }}" - value: "" - allow_no_value: "{{ item.no_value | d(omit) }}" - loop: - - option: evolve - - option: regress - - section: foobar - option: foo - no_value: true - - option: improve - no_value: true + state: absent -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content + - name: include tasks to perform tests with parameter "values" + include_tasks: tests/02-values.yml -- name: set expected content and get current ini file content - set_fact: - expected15: "\n[extensions]\nevolve = \nregress = \nimprove = \n[foobar]\nfoo = \n" - content15: "{{ output_content.content | b64decode }}" -- debug: var=content15 -- name: Verify content of ini file is as expected - assert: - that: - - content15 == expected15 - -- name: Create starting ini file - copy: - # The content below is the following text file with BOM: - # [section1] - # var1=aaa - # var2=bbb - # [section2] - # var3=ccc - content: !!binary | - 77u/W3NlY3Rpb24xXQp2YXIxPWFhYQp2YXIyPWJiYgpbc2VjdGlvbjJdCnZhcjM9Y2NjCg== - dest: "{{ output_file }}" -- name: Test ini breakage - ini_file: - path: "{{ output_file }}" - section: section1 - option: var4 - value: 0 - -- name: read content from output file - slurp: - src: "{{ output_file }}" - register: output_content - -- name: set expected content and get current ini file content - set_fact: - expected16: "[section1]\nvar1=aaa\nvar2=bbb\nvar4 = 0\n[section2]\nvar3=ccc\n" - content16: "{{ output_content.content | b64decode }}" -- debug: - var: content16 -- name: Verify content of ini file is as expected - assert: - that: - - content16 == expected16 - -# Regression test for https://github.com/ansible-collections/community.general/pull/2578#issuecomment-868092282 -- name: Create UTF-8 test file - copy: - content: !!binary | - W2FwcDptYWluXQphdmFpbGFibGVfbGFuZ3VhZ2VzID0gZW4gZnIgZXMgZGUgcHQgamEgbHQgemhf - 
VFcgaWQgZGEgcHRfQlIgcnUgc2wgaXQgbmxfTkwgdWsgdGEgc2kgY3MgbmIgaHUKIyBGdWxsIGxh - bmd1YWdlIG5hbWVzIGluIG5hdGl2ZSBsYW5ndWFnZSAoY29tbWEgc2VwYXJhdGVkKQphdmFpbGFi - bGVfbGFuZ3VhZ2VzX2Z1bGwgPSBFbmdsaXNoLCBGcmFuw6dhaXMsIEVzcGHDsW9sLCBEZXV0c2No - LCBQb3J0dWd1w6pzLCDml6XmnKzoqp4sIExpZXR1dm9zLCDkuK3mlocsIEluZG9uZXNpYSwgRGFu - c2ssIFBvcnR1Z3XDqnMgKEJyYXNpbCksINCg0YPRgdGB0LrQuNC5LCBTbG92ZW7FocSNaW5hLCBJ - dGFsaWFubywgTmVkZXJsYW5kcywg0KPQutGA0LDRl9C90YHRjNC60LAsIOCupOCuruCuv+CutOCv - jSwg4LeD4LeS4LaC4LeE4La9LCDEjGVza3ksIEJva23DpWwsIE1hZ3lhcgo= - dest: '{{ output_file }}' -- name: Add entries - ini_file: - section: "{{ item.section }}" - option: "{{ item.option }}" - value: "{{ item.value }}" - path: '{{ output_file }}' - create: true - loop: - - section: app:main - option: sqlalchemy.url - value: postgresql://app:secret@database/app - - section: handler_filelog - option: args - value: (sys.stderr,) - - section: handler_filelog - option: class - value: StreamHandler - - section: handler_exc_handler - option: args - value: (sys.stderr,) - - section: båz - option: fföø - value: ḃâŗ - - section: båz - option: fföø - value: bar + - name: include tasks to test regressions + include_tasks: tests/03-encoding.yml diff --git a/tests/integration/targets/ini_file/tasks/tests/00-basic.yml b/tests/integration/targets/ini_file/tasks/tests/00-basic.yml new file mode 100644 index 0000000000..8f8d345f7e --- /dev/null +++ b/tests/integration/targets/ini_file/tasks/tests/00-basic.yml @@ -0,0 +1,38 @@ +--- +## basiscs + +- name: test-basic 1 - specify both "value" and "values" and fail + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + value: lemonade + values: + - coke + - sprite + register: result_basic_1 + ignore_errors: true + +- name: test-basic 1 - verify error message + assert: + that: + - result_basic_1 is not changed + - result_basic_1 is failed + - "result_basic_1.msg == 'parameters are mutually exclusive: value|values'" + + +- name: test-basic 2 - set "create=no" on 
non-existing file and fail + ini_file: + path: "{{ non_existing_file }}" + section: food + create: false + value: banana + register: result_basic_2 + ignore_errors: true + +- name: test-basic 2 - verify error message + assert: + that: + - result_basic_2 is not changed + - result_basic_2 is failed + - result_basic_2.msg == "Destination {{ non_existing_file }} does not exist!" diff --git a/tests/integration/targets/ini_file/tasks/tests/01-value.yml b/tests/integration/targets/ini_file/tasks/tests/01-value.yml new file mode 100644 index 0000000000..93499cc63d --- /dev/null +++ b/tests/integration/targets/ini_file/tasks/tests/01-value.yml @@ -0,0 +1,589 @@ +--- + +## testing value + +- name: test-value 1 - set "state=present" and "value=null" and "allow_no_value=false" and fail + ini_file: + path: "{{ output_file }}" + section: cars + option: audi + value: null + allow_no_value: false + register: result_value_1 + ignore_errors: true + +- name: test-value 1 - verify error message + assert: + that: + - result_value_1 is not changed + - result_value_1 is failed + - result_value_1.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False." + + +- name: test-value 2 - set "state=present" and omit "value" and "allow_no_value=false" and fail + ini_file: + path: "{{ output_file }}" + section: cars + option: audi + allow_no_value: false + register: result_value_2 + ignore_errors: true + +- name: test-value 2 - verify error message + assert: + that: + - result_value_2 is not changed + - result_value_2 is failed + - result_value_2.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False." 
+ + +- name: test-value 3 - add "fav=lemonade" in section "[drinks]" in specified file + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + value: lemonade + register: result3 + +- name: test-value 3 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 3 - set expected content and get current ini file content + set_fact: + expected3: | + + [drinks] + fav = lemonade + content3: "{{ output_content.content | b64decode }}" + +- name: test-value 3 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result3 is changed + - result3.msg == 'section and option added' + - content3 == expected3 + + +- name: test-value 4 - add "fav=lemonade" is in section "[drinks]" again + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + value: lemonade + register: result4 + +- name: test-value 4 - Ensure unchanged + assert: + that: + - result4 is not changed + - result4.msg == 'OK' + + +- name: test-value 5 - Ensure "beverage=coke" is in section "[drinks]" + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + value: coke + register: result5 + +- name: test-value 5 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 5 - set expected content and get current ini file content + set_fact: + expected5: | + + [drinks] + fav = lemonade + beverage = coke + content5: "{{ output_content.content | b64decode }}" + +- name: test-value 5 - assert 'changed' is true and content is OK + assert: + that: + - result5 is changed + - result5.msg == 'option added' + - content5 == expected5 + + +- name: test-value 6 - Remove option "beverage=coke" + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + state: absent + register: result6 + +- name: test-value 6 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + 
+- name: test-value 6 - set expected content and get current ini file content + set_fact: + expected6: | + + [drinks] + fav = lemonade + content6: "{{ output_content.content | b64decode }}" + +- name: test-value 6 - assert 'changed' is true and content is as expected + assert: + that: + - result6 is changed + - result6.msg == 'option changed' + - content6 == expected6 + + +- name: test-value 7 - remove section 'drinks' + ini_file: + path: "{{ output_file }}" + section: drinks + state: absent + register: result7 + +- name: test-value 7 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 7 - get current ini file content + set_fact: + content7: "{{ output_content.content | b64decode }}" + +- name: test-value 7 - assert 'changed' is true and content is empty + assert: + that: + - result7 is changed + - result7.msg == 'section removed' + - content7 == "\n" + + +# allow_no_value + +- name: test-value 8 - test allow_no_value + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name + allow_no_value: yes + register: result8 + +- name: test-value 8 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 8 - set expected content and get current ini file content + set_fact: + content8: "{{ output_content.content | b64decode }}" + expected8: | + + [mysqld] + skip-name + +- name: test-value 8 - assert 'changed' is true and section and option added + assert: + that: + - result8 is changed + - result8.msg == 'section and option added' + - content8 == expected8 + + +- name: test-value 9 - test allow_no_value idempotency + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name + allow_no_value: yes + register: result9 + +- name: test-value 9 - assert 'changed' is false + assert: + that: + - result9 is not changed + - result9.msg == 'OK' + + +- name: test-value 10 - test create empty section + ini_file: + path: "{{ 
output_file }}" + section: new_empty_section + allow_no_value: yes + register: result10 + +- name: test-value 10 - assert 'changed' is true and section added + assert: + that: + - result10 is changed + - result10.msg == 'only section added' + + +- name: test-value 11 - test create empty section idempotency + ini_file: + path: "{{ output_file }}" + section: new_empty_section + allow_no_value: yes + register: result11 + +- name: test-value 11 - assert 'changed' is false + assert: + that: + - result11 is not changed + - result11.msg == 'OK' + + +- name: test-value 12 - test remove empty section + ini_file: + state: absent + path: "{{ output_file }}" + section: new_empty_section + allow_no_value: yes + +- name: test-value 12 - test allow_no_value with loop + ini_file: + path: "{{ output_file }}" + section: mysqld + option: "{{ item.o }}" + value: "{{ item.v | d(omit) }}" + allow_no_value: yes + loop: + - { o: "skip-name-resolve" } + - { o: "max_connections", v: "500" } + +- name: test-value 12 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 12 - set expected content and get current ini file content + set_fact: + content12: "{{ output_content.content | b64decode }}" + expected12: | + + [mysqld] + skip-name + skip-name-resolve + max_connections = 500 + +- name: test-value 12 - Verify content of ini file is as expected + assert: + that: + - content12 == expected12 + + +- name: test-value 13 - change option with no value to option with value + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name + value: myvalue + register: result13 + +- name: test-value 13 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 13 - set expected content and get current ini file content + set_fact: + content13: "{{ output_content.content | b64decode }}" + expected13: | + + [mysqld] + skip-name = myvalue + skip-name-resolve + 
max_connections = 500 + +- name: test-value 13 - assert 'changed' and msg 'option changed' and content is as expected + assert: + that: + - result13 is changed + - result13.msg == 'option changed' + - content13 == expected13 + + +- name: test-value 14 - change option with value to option with no value + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name + allow_no_value: yes + register: result14 + +- name: test-value 14 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 14 - set expected content and get current ini file content + set_fact: + content14: "{{ output_content.content | b64decode }}" + expected14: | + + [mysqld] + skip-name + skip-name-resolve + max_connections = 500 + +- name: test-value 14 - assert 'changed' is true and msg 'option changed' and content is as expected + assert: + that: + - result14 is changed + - result14.msg == 'option changed' + - content14 == expected14 + + +- name: test-value 15 - Remove option with no value + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name-resolve + state: absent + register: result15 + +- name: test-value 15 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 15 - set expected content and get current ini file content + set_fact: + content15: "{{ output_content.content | b64decode }}" + expected15: | + + [mysqld] + skip-name + max_connections = 500 + +- name: test-value 15 - assert 'changed' is true and msg 'option changed' and content is as expected + assert: + that: + - result15 is changed + - result15.msg == 'option changed' + - content15 == expected15 + + +- name: test-value 16 - Clean test file + copy: + content: "" + dest: "{{ output_file }}" + force: yes + +- name: test-value 16 - Ensure "beverage=coke" is created within no section + ini_file: + section: + path: "{{ output_file }}" + option: beverage + value: coke + register: 
result16 + +- name: test-value 16 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 16 - set expected content and get current ini file content + set_fact: + expected16: |+ + beverage = coke + + content16: "{{ output_content.content | b64decode }}" + +- name: test-value 16 - assert 'changed' is true and content is OK (no section) + assert: + that: + - result16 is changed + - result16.msg == 'option added' + - content16 == expected16 + + +- name: test-value 17 - Ensure "beverage=coke" is modified as "beverage=water" within no section + ini_file: + path: "{{ output_file }}" + option: beverage + value: water + section: + register: result17 + +- name: test-value 17 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 17 - set expected content and get current ini file content + set_fact: + expected17: |+ + beverage = water + + content17: "{{ output_content.content | b64decode }}" + +- name: test-value 17 - assert 'changed' is true and content is OK (no section) + assert: + that: + - result17 is changed + - result17.msg == 'option changed' + - content17 == expected17 + + +- name: test-value 18 - remove option 'beverage' within no section + ini_file: + section: + path: "{{ output_file }}" + option: beverage + state: absent + register: result18 + +- name: test-value 18 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 18 - get current ini file content + set_fact: + content18: "{{ output_content.content | b64decode }}" + +- name: test-value 18 - assert 'changed' is true and option is removed (no section) + assert: + that: + - result18 is changed + - result18.msg == 'option changed' + - content18 == "\n" + + +- name: test-value 19 - Check add option without section before existing section + block: + - name: test-value 19 - Add option with section + ini_file: + path: "{{ output_file 
}}" + section: drinks + option: beverage + value: water + - name: test-value 19 - Add option without section + ini_file: + path: "{{ output_file }}" + section: + option: like + value: tea + +- name: test-value 19 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 19 - set expected content and get current ini file content + set_fact: + expected19: | + like = tea + + [drinks] + beverage = water + content19: "{{ output_content.content | b64decode }}" + +- name: test-value 19 - Verify content of ini file is as expected + assert: + that: + - content19 == expected19 + + +- name: test-value 20 - Check add option with empty string value + block: + - name: test-value 20 - Remove drinks + ini_file: + path: "{{ output_file }}" + section: drinks + state: absent + - name: test-value 20 - Remove tea + ini_file: + path: "{{ output_file }}" + section: + option: like + value: tea + state: absent + # See https://github.com/ansible-collections/community.general/issues/3031 + - name: test-value 20 - Tests with empty strings + ini_file: + path: "{{ output_file }}" + section: "{{ item.section | d('extensions') }}" + option: "{{ item.option }}" + value: "" + allow_no_value: "{{ item.no_value | d(omit) }}" + loop: + - option: evolve + - option: regress + - section: foobar + option: foo + no_value: true + - option: improve + no_value: true + +- name: test-value 20 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 20 - set expected content and get current ini file content + set_fact: + expected20: |+ + + [extensions] + evolve = + regress = + improve = + [foobar] + foo = + content20: "{{ output_content.content | b64decode }}" + +- name: test-value 20 - Verify content of ini file is as expected + assert: + that: + - content20 == expected20 + + +- name: test-value 21 - Create starting ini file + copy: + # The content below is the following text file with BOM: + # 
[section1] + # var1=aaa + # var2=bbb + # [section2] + # var3=ccc + content: !!binary | + 77u/W3NlY3Rpb24xXQp2YXIxPWFhYQp2YXIyPWJiYgpbc2VjdGlvbjJdCnZhcjM9Y2NjCg== + dest: "{{ output_file }}" + +- name: test-value 21 - Test ini breakage + ini_file: + path: "{{ output_file }}" + section: section1 + option: var4 + value: 0 + register: result21 + +- name: test-value 21 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 21 - set expected content and get current ini file content + set_fact: + expected21: | + [section1] + var1=aaa + var2=bbb + var4 = 0 + [section2] + var3=ccc + content21: "{{ output_content.content | b64decode }}" + +- name: test-value 21 - Verify content of ini file is as expected + assert: + that: + - result21 is changed + - result21.msg == 'option added' + - content21 == expected21 diff --git a/tests/integration/targets/ini_file/tasks/tests/02-values.yml b/tests/integration/targets/ini_file/tasks/tests/02-values.yml new file mode 100644 index 0000000000..c3ef6b61a6 --- /dev/null +++ b/tests/integration/targets/ini_file/tasks/tests/02-values.yml @@ -0,0 +1,1013 @@ +--- + +## testing values + +- name: "test-values 1 - set 'state=present' and 'values=[]' and 'allow_no_value=false' and fail" + ini_file: + path: "{{ output_file }}" + section: cars + option: audi + values: [] + allow_no_value: false + register: result1 + ignore_errors: true + +- name: test-values 1 - verify error message + assert: + that: + - result1 is not changed + - result1 is failed + - result1.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False." 
+ + +- name: "test-values 2 - set 'state=present' and omit 'values' and 'allow_no_value=false' and fail" + ini_file: + path: "{{ output_file }}" + section: cars + option: audi + allow_no_value: false + register: result2 + ignore_errors: true + +- name: test-values 2 - verify error message + assert: + that: + - result2 is not changed + - result2 is failed + - result2.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False." + + +- name: "test-values 3 - ensure 'fav=lemonade' and 'fav=cocktail' is 'present' in section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + - cocktail + state: present + register: result3 + +- name: test-values 3 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 3 - set expected content and get current ini file content + set_fact: + expected3: | + + [drinks] + fav = lemonade + fav = cocktail + content3: "{{ output_content.content | b64decode }}" + +- name: test-values 3 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result3 is changed + - result3.msg == 'section and option added' + - content3 == expected3 + + +- name: "test-values 4 - remove option 'fav=lemonade' from section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: absent + exclusive: false + register: result4 + +- name: test-values 4 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 4 - set expected content and get current ini file content + set_fact: + expected4: | + + [drinks] + fav = cocktail + content4: "{{ output_content.content | b64decode }}" + +- name: test-values 4 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result4 is changed + - result4.msg == 
'option changed' + - content4 == expected4 + + +- name: "test-values 5 - add option 'fav=lemonade' in section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: present + exclusive: false + register: result5 + +- name: test-values 5 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 5 - set expected content and get current ini file content + set_fact: + expected5: | + + [drinks] + fav = cocktail + fav = lemonade + content5: "{{ output_content.content | b64decode }}" + +- name: test-values 5 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result5 is changed + - result5.msg == 'option added' + - content5 == expected5 + + +- name: "test-values 6 - ensure 'fav=lemonade' and 'fav=cocktail' is 'present' in section '[drinks]' and check for idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + - cocktail + state: present + register: result6 + +- name: test-values 6 - Ensure unchanged + assert: + that: + - result6 is not changed + - result6.msg == 'OK' + + +- name: "test-values 7 - ensure 'fav=cocktail' and 'fav=lemonade' (list reverse order) is 'present' in section '[drinks]' and check for idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + - lemonade + state: present + register: result7 + +- name: test-values 7 - Ensure unchanged + assert: + that: + - result7 is not changed + - result7.msg == 'OK' + + +- name: "test-values 8 - add option 'fav=lemonade' in section '[drinks]' again and ensure idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: present + exclusive: false + register: result8 + +- name: test-values 8 - Ensure unchanged + assert: + that: + - result8 is not changed + - result8.msg == 'OK' + 
+ +- name: "test-values 9 - ensure only 'fav=lemonade' is 'present' in section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: present + register: result9 + +- name: test-values 9 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 9 - set expected content and get current ini file content + set_fact: + expected9: | + + [drinks] + fav = lemonade + content9: "{{ output_content.content | b64decode }}" + +- name: test-values 9 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result9 is changed + - result9.msg == 'option changed' + - content9 == expected9 + + +- name: "test-values 10 - remove non-existent 'fav=cocktail' from section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + state: absent + register: result10 + +- name: test-values 10 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 10 - set expected content and get current ini file content + set_fact: + expected10: | + + [drinks] + content10: "{{ output_content.content | b64decode }}" + + +- name: test-values 10 - Ensure unchanged + assert: + that: + - result10 is changed + - result10.msg == 'option changed' + - content10 == expected10 + + +- name: "test-values 11 - Ensure 'fav=lemonade' and 'beverage=coke' is 'present' in section '[drinks]'" + block: + - name: "test-values 11 - resetting ini_fie: Ensure 'fav=lemonade' is 'present' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: present + - name: "test-values 11 - Ensure 'beverage=coke' is 'present' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + values: + - coke + state: present + register: 
result11 + +- name: test-values 11 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 11 - set expected content and get current ini file content + set_fact: + expected11: | + + [drinks] + fav = lemonade + beverage = coke + content11: "{{ output_content.content | b64decode }}" + +- name: test-values 11 - assert 'changed' is true and content is OK + assert: + that: + - result11 is changed + - result11.msg == 'option added' + - content11 == expected11 + + +- name: "test-values 12 - add option 'fav=lemonade' in section '[drinks]' again and ensure idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + state: present + exclusive: false + register: result12 + +- name: test-values 12 - Ensure unchanged + assert: + that: + - result12 is not changed + - result12.msg == 'OK' + + +- name: "test-values 13 - add option 'fav=cocktail' in section '[drinks]' in specified file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + state: present + exclusive: false + register: result13 + +- name: test-values 13 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 13 - set expected content and get current ini file content + set_fact: + expected13: | + + [drinks] + fav = lemonade + beverage = coke + fav = cocktail + content13: "{{ output_content.content | b64decode }}" + +- name: test-values 13 - Verify content of ini file is as expected and ini_file 'changed' is true + assert: + that: + - result13 is changed + - result13.msg == 'option added' + - content13 == expected13 + + +- name: "test-values 14 - Ensure 'refreshment=[water, juice, soft drink]' is 'present' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: refreshment + values: + - water + - juice + - soft drink + state: present + register: result14 + +- name: 
test-values 14 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 14 - set expected content and get current ini file content + set_fact: + expected14: | + + [drinks] + fav = lemonade + beverage = coke + fav = cocktail + refreshment = water + refreshment = juice + refreshment = soft drink + content14: "{{ output_content.content | b64decode }}" + +- name: test-values 14 - assert 'changed' is true and content is OK + assert: + that: + - result14 is changed + - result14.msg == 'option added' + - content14 == expected14 + + +- name: "test-values 15 - ensure 'fav=lemonade' and 'fav=cocktail' is 'present' in section '[drinks]' and check for idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - lemonade + - cocktail + state: present + register: result15 + +- name: test-values 15 - Ensure unchanged + assert: + that: + - result15 is not changed + - result15.msg == 'OK' + + +- name: "test-values 16 - ensure 'fav=cocktail' and 'fav=lemonade' (list reverse order) is 'present' in section '[drinks]' and check for idempotency" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + - lemonade + state: present + register: result16 + +- name: test-values 16 - Ensure unchanged + assert: + that: + - result16 is not changed + - result16.msg == 'OK' + + +- name: "test-values 17 - Ensure option 'refreshment' is 'absent' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: refreshment + state: absent + register: result17 + +- name: test-values 17 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 17 - set expected content and get current ini file content + set_fact: + expected17: | + + [drinks] + fav = lemonade + beverage = coke + fav = cocktail + content17: "{{ output_content.content | b64decode }}" + +- name: test-values 17 - assert 
'changed' is true and content is as expected + assert: + that: + - result17 is changed + - result17.msg == 'option changed' + - content17 == expected17 + + +- name: "test-values 18 - Ensure 'beverage=coke' is 'abesent' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + state: absent + register: result18 + +- name: test-values 18 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 18 - set expected content and get current ini file content + set_fact: + expected18: | + + [drinks] + fav = lemonade + fav = cocktail + content18: "{{ output_content.content | b64decode }}" + +- name: test-values 18 - assert 'changed' is true and content is as expected + assert: + that: + - result18 is changed + - result18.msg == 'option changed' + - content18 == expected18 + + +- name: "test-values 19 - Ensure non-existant 'beverage=coke' is 'abesent' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + values: + - coke + state: absent + register: result19 + +- name: test-values 19 - Ensure unchanged + assert: + that: + - result19 is not changed + - result19.msg == 'OK' + + +- name: test-values 20 - remove section 'drinks' + ini_file: + path: "{{ output_file }}" + section: drinks + state: absent + register: result20 + +- name: test-values 20 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 20 - get current ini file content + set_fact: + content20: "{{ output_content.content | b64decode }}" + +- name: test-values 20 - assert 'changed' is true and content is empty + assert: + that: + - result20 is changed + - result20.msg == 'section removed' + - content20 == "\n" + + +- name: "test-values 21 - Ensure 'refreshment=[water, juice, soft drink, juice]' (duplicates removed) is 'present' in section '[drinks]'" + ini_file: + path: "{{ output_file }}" + section: 
drinks + option: refreshment + values: + - water + - juice + - soft drink + - juice + state: present + register: result21 + +- name: test-values 21 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 21 - set expected content and get current ini file content + set_fact: + expected21: | + + [drinks] + refreshment = water + refreshment = juice + refreshment = soft drink + content21: "{{ output_content.content | b64decode }}" + +- name: test-values 21 - assert 'changed' is true and content is OK + assert: + that: + - result21 is changed + - result21.msg == 'section and option added' + - content21 == expected21 + + +- name: test-values 22 - Create starting ini file + copy: + content: | + + # Some comment to test + [mysqld] + connect_timeout = 300 + max_connections = 1000 + [section1] + var1 = aaa + # comment in section + var2 = foo + # var2 = bar + + [section2] + var3 = ccc + # comment after section + dest: "{{ output_file }}" + +- name: "test-values 22 - Ensure 'skip-name' with 'allow_no_value' is 'present' in section '[mysqld]' test allow_no_value" + ini_file: + path: "{{ output_file }}" + section: mysqld + option: skip-name + allow_no_value: true + state: present + register: result22 + +- name: test-values 22 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 22 - set expected content and get current ini file content + set_fact: + expected22: | + + # Some comment to test + [mysqld] + connect_timeout = 300 + max_connections = 1000 + skip-name + [section1] + var1 = aaa + # comment in section + var2 = foo + # var2 = bar + + [section2] + var3 = ccc + # comment after section + content22: "{{ output_content.content | b64decode }}" + +- name: test-values 22 - assert 'changed' is true and content is OK and option added + assert: + that: + - result22 is changed + - result22.msg == 'option added' + - content22 == expected22 + + +- name: 
"test-values 23 - Ensure 'var2=[foo, foobar]' is 'present' in section '[section1]'" + ini_file: + path: "{{ output_file }}" + section: section1 + option: var2 + values: + - foo + - foobar + state: present + register: result23 + +- name: test-values 23 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 23 - set expected content and get current ini file content + set_fact: + content23: "{{ output_content.content | b64decode }}" + expected23: | + + # Some comment to test + [mysqld] + connect_timeout = 300 + max_connections = 1000 + skip-name + [section1] + var1 = aaa + # comment in section + var2 = foo + var2 = foobar + # var2 = bar + + [section2] + var3 = ccc + # comment after section +- name: test-values 23 - assert 'changed' and msg 'option added' and content is as expected + assert: + that: + - result23 is changed + - result23.msg == 'option added' + - content23 == expected23 + + +- name: "test-values 24 - Ensure 'var2=[foo, foobar, bar]' is 'present' in section '[section1]' replacing commented option 'var2=bar'" + ini_file: + path: "{{ output_file }}" + section: section1 + option: var2 + values: + - foo + - bar + - foobar + state: present + register: result24 + +- name: test-values 24 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 24 - set expected content and get current ini file content + set_fact: + content24: "{{ output_content.content | b64decode }}" + expected24: | + + # Some comment to test + [mysqld] + connect_timeout = 300 + max_connections = 1000 + skip-name + [section1] + var1 = aaa + # comment in section + var2 = foo + var2 = foobar + var2 = bar + + [section2] + var3 = ccc + # comment after section +- name: test-values 24 - assert 'added' and msg 'option changed' and content is as expected + assert: + that: + - result24 is changed + - result24.msg == 'option changed' + - content24 == expected24 + + +- name: 
test-values 25 - Clean test file + copy: + content: "" + dest: "{{ output_file }}" + force: yes + +- name: "test-values 25 - Ensure 'beverage=[coke, pepsi]' is created within no section" + ini_file: + section: + path: "{{ output_file }}" + option: beverage + values: + - coke + - pepsi + state: present + register: result25 + +- name: test-values 25 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 25 - set expected content and get current ini file content + set_fact: + expected25: |+ + beverage = coke + beverage = pepsi + + content25: "{{ output_content.content | b64decode }}" + +- name: test-values 25 - assert 'changed' is true and content is OK (no section) + assert: + that: + - result25 is changed + - result25.msg == 'option added' + - content25 == expected25 + + +- name: "test-values 26 - Ensure 'beverage=coke' and 'beverage=pepsi' are modified within no section" + ini_file: + path: "{{ output_file }}" + option: beverage + values: + - water + - orange juice + section: + state: present + exclusive: true + register: result26 + +- name: test-values 26 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 26 - set expected content and get current ini file content + set_fact: + expected26: |+ + beverage = water + beverage = orange juice + + content26: "{{ output_content.content | b64decode }}" + +- name: test-values 26 - assert 'changed' is true and content is OK (no section) + assert: + that: + - result26 is changed + - result26.msg == 'option changed' + - content26 == expected26 + + +- name: "test-values 27 - ensure option 'beverage' is 'absent' within no section" + ini_file: + section: + path: "{{ output_file }}" + option: beverage + state: absent + register: result27 + +- name: test-values 27 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 27 - get current ini file 
content + set_fact: + content27: "{{ output_content.content | b64decode }}" + +- name: test-values 27 - assert changed (no section) + assert: + that: + - result27 is changed + - result27.msg == 'option changed' + - content27 == "\n" + + +- name: "test-values 28 - Ensure option 'present' without section before existing section" + block: + - name: test-values 28 - ensure option present within section + ini_file: + path: "{{ output_file }}" + section: drinks + option: beverage + values: + - water + - orange juice + state: present + + - name: test-values 28 - ensure option present without section + ini_file: + path: "{{ output_file }}" + section: + option: like + values: + - tea + - coffee + state: present + +- name: test-values 28 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-values 28 - set expected content and get current ini file content + set_fact: + expected28: | + like = tea + like = coffee + + [drinks] + beverage = water + beverage = orange juice + content28: "{{ output_content.content | b64decode }}" + +- name: test-values 28 - Verify content of ini file is as expected + assert: + that: + - content28 == expected28 + + +- name: test-value 29 - Create starting ini file + copy: + content: | + [drinks] + fav = cocktail + beverage = water + fav = lemonade + beverage = orange juice + dest: "{{ output_file }}" + +- name: "test-value 29 - Test 'state=absent' with 'exclusive=true' with multiple options in ini_file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + state: absent + register: result29 + +- name: test-value 29 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 29 - set expected content and get current ini file content + set_fact: + expected29: | + [drinks] + beverage = water + beverage = orange juice + content29: "{{ output_content.content | b64decode }}" + +- name: test-value 29 - 
Verify content of ini file is as expected + assert: + that: + - result29 is changed + - result29.msg == 'option changed' + - content29 == expected29 + + +- name: test-value 30 - Create starting ini file + copy: + content: | + [drinks] + fav = cocktail + beverage = water + fav = lemonade + beverage = orange juice + dest: "{{ output_file }}" + +- name: "test-value 30 - Test 'state=absent' with 'exclusive=false' with multiple options in ini_file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + values: + - cocktail + state: absent + exclusive: false + register: result30 + +- name: test-value 30 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 30 - set expected content and get current ini file content + set_fact: + expected30: | + [drinks] + beverage = water + fav = lemonade + beverage = orange juice + content30: "{{ output_content.content | b64decode }}" + +- name: test-value 30 - Verify content of ini file is as expected + assert: + that: + - result30 is changed + - result30.msg == 'option changed' + - content30 == expected30 + + +- name: test-value 31 - Create starting ini file + copy: + content: | + [drinks] + fav = cocktail + beverage = water + fav = lemonade + beverage = orange juice + dest: "{{ output_file }}" + +- name: "test-value 31 - Test 'state=absent' with 'exclusive=true' and no value given with multiple options in ini_file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + state: absent + register: result31 + +- name: test-value 31 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 31 - set expected content and get current ini file content + set_fact: + expected31: | + [drinks] + beverage = water + beverage = orange juice + content31: "{{ output_content.content | b64decode }}" + +- name: test-value 31 - Verify content of ini file is as expected + assert: + that: + - result31 
is changed + - result31.msg == 'option changed' + - content31 == expected31 + + +- name: test-value 32 - Create starting ini file + copy: + content: | + [drinks] + fav = cocktail + beverage = water + fav = lemonade + beverage = orange juice + dest: "{{ output_file }}" + +- name: "test-value 32 - Test 'state=absent' with 'exclusive=false' and no value given with multiple options in ini_file" + ini_file: + path: "{{ output_file }}" + section: drinks + option: fav + state: absent + exclusive: false + register: result32 + diff: true + +- name: test-value 32 - read content from output file + slurp: + src: "{{ output_file }}" + register: output_content + +- name: test-value 32 - set expected content and get current ini file content + set_fact: + expected32: | + [drinks] + fav = cocktail + beverage = water + fav = lemonade + beverage = orange juice + content32: "{{ output_content.content | b64decode }}" + +- name: test-value 32 - Verify content of ini file is as expected + assert: + that: + - result32 is not changed + - result32.msg == 'OK' + - content32 == expected32 diff --git a/tests/integration/targets/ini_file/tasks/tests/03-encoding.yml b/tests/integration/targets/ini_file/tasks/tests/03-encoding.yml new file mode 100644 index 0000000000..6280ae1ffb --- /dev/null +++ b/tests/integration/targets/ini_file/tasks/tests/03-encoding.yml @@ -0,0 +1,41 @@ +--- + +# Regression test for https://github.com/ansible-collections/community.general/pull/2578#issuecomment-868092282 +- name: Create UTF-8 test file + copy: + content: !!binary | + W2FwcDptYWluXQphdmFpbGFibGVfbGFuZ3VhZ2VzID0gZW4gZnIgZXMgZGUgcHQgamEgbHQgemhf + VFcgaWQgZGEgcHRfQlIgcnUgc2wgaXQgbmxfTkwgdWsgdGEgc2kgY3MgbmIgaHUKIyBGdWxsIGxh + bmd1YWdlIG5hbWVzIGluIG5hdGl2ZSBsYW5ndWFnZSAoY29tbWEgc2VwYXJhdGVkKQphdmFpbGFi + bGVfbGFuZ3VhZ2VzX2Z1bGwgPSBFbmdsaXNoLCBGcmFuw6dhaXMsIEVzcGHDsW9sLCBEZXV0c2No + LCBQb3J0dWd1w6pzLCDml6XmnKzoqp4sIExpZXR1dm9zLCDkuK3mlocsIEluZG9uZXNpYSwgRGFu + 
c2ssIFBvcnR1Z3XDqnMgKEJyYXNpbCksINCg0YPRgdGB0LrQuNC5LCBTbG92ZW7FocSNaW5hLCBJ + dGFsaWFubywgTmVkZXJsYW5kcywg0KPQutGA0LDRl9C90YHRjNC60LAsIOCupOCuruCuv+CutOCv + jSwg4LeD4LeS4LaC4LeE4La9LCDEjGVza3ksIEJva23DpWwsIE1hZ3lhcgo= + dest: '{{ output_file }}' +- name: Add entries + ini_file: + section: "{{ item.section }}" + option: "{{ item.option }}" + value: "{{ item.value }}" + path: '{{ output_file }}' + create: true + loop: + - section: app:main + option: sqlalchemy.url + value: postgresql://app:secret@database/app + - section: handler_filelog + option: args + value: (sys.stderr,) + - section: handler_filelog + option: class + value: StreamHandler + - section: handler_exc_handler + option: args + value: (sys.stderr,) + - section: båz + option: fföø + value: ḃâŗ + - section: båz + option: fföø + value: bar From 432c89148721b4a4598cb20d9569aa9bbdac1f2b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 15 Aug 2021 21:11:16 +1000 Subject: [PATCH 0511/3093] Add ipv4 example to linode inventory docs (#3200) * Add ipv4 example to linode inventory * Update plugins/inventory/linode.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- plugins/inventory/linode.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index c2dcac5392..177bd0a42a 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -78,6 +78,10 @@ groups: webservers: "'web' in (tags|list)" mailservers: "'mail' in (tags|list)" compose: + # By default, Ansible tries to connect to the label of the instance. 
+ # Since that might not be a valid name to connect to, you can + # replace it with the first IPv4 address of the linode as follows: + ansible_ssh_host: ipv4[0] ansible_port: 2222 ''' From 16945d3847f65319278ec460760fa1d0422b8cb5 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 16 Aug 2021 22:23:06 +1200 Subject: [PATCH 0512/3093] vdo - refactor (#3191) * refactor to vdo * adjusted if condition * added changelog fragment * Update plugins/modules/system/vdo.py Co-authored-by: Felix Fontein * adjustements per the PR * more occurrences of bool compared with yes or no * Update changelogs/fragments/3191-vdo-refactor.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/3191-vdo-refactor.yml | 4 + plugins/modules/system/vdo.py | 255 ++++++--------------- 2 files changed, 76 insertions(+), 183 deletions(-) create mode 100644 changelogs/fragments/3191-vdo-refactor.yml diff --git a/changelogs/fragments/3191-vdo-refactor.yml b/changelogs/fragments/3191-vdo-refactor.yml new file mode 100644 index 0000000000..fe3fcfe7b1 --- /dev/null +++ b/changelogs/fragments/3191-vdo-refactor.yml @@ -0,0 +1,4 @@ +minor_changes: + - vdo - minor refactoring of the code (https://github.com/ansible-collections/community.general/pull/3191). +bugfixes: + - vdo - boolean arguments now compared with proper ``true`` and ``false`` values instead of string representations like ``"yes"`` or ``"no"`` (https://github.com/ansible-collections/community.general/pull/3191). diff --git a/plugins/modules/system/vdo.py b/plugins/modules/system/vdo.py index 0b4fca306d..ab5cf4e400 100644 --- a/plugins/modules/system/vdo.py +++ b/plugins/modules/system/vdo.py @@ -315,7 +315,7 @@ except ImportError: # # @return vdolist A list of currently created VDO volumes. 
def inventory_vdos(module, vdocmd): - rc, vdostatusout, err = module.run_command("%s status" % (vdocmd)) + rc, vdostatusout, err = module.run_command([vdocmd, "status"]) # if rc != 0: # module.fail_json(msg="Inventorying VDOs failed: %s" @@ -323,15 +323,13 @@ def inventory_vdos(module, vdocmd): vdolist = [] - if (rc == 2 and - re.findall(r"vdoconf.yml does not exist", err, re.MULTILINE)): + if rc == 2 and re.findall(r"vdoconf\.yml does not exist", err, re.MULTILINE): # If there is no /etc/vdoconf.yml file, assume there are no # VDO volumes. Return an empty list of VDO volumes. return vdolist if rc != 0: - module.fail_json(msg="Inventorying VDOs failed: %s" - % vdostatusout, rc=rc, err=err) + module.fail_json(msg="Inventorying VDOs failed: %s" % vdostatusout, rc=rc, err=err) vdostatusyaml = yaml.load(vdostatusout) if vdostatusyaml is None: @@ -346,7 +344,7 @@ def inventory_vdos(module, vdocmd): def list_running_vdos(module, vdocmd): - rc, vdolistout, err = module.run_command("%s list" % (vdocmd)) + rc, vdolistout, err = module.run_command([vdocmd, "list"]) runningvdolist = filter(None, vdolistout.split('\n')) return runningvdolist @@ -360,36 +358,30 @@ def list_running_vdos(module, vdocmd): # # @return vdocmdoptions A string to be used in a 'vdo ' command. 
def start_vdo(module, vdoname, vdocmd): - rc, out, err = module.run_command("%s start --name=%s" % (vdocmd, vdoname)) + rc, out, err = module.run_command([vdocmd, "start", "--name=%s" % vdoname]) if rc == 0: module.log("started VDO volume %s" % vdoname) - return rc def stop_vdo(module, vdoname, vdocmd): - rc, out, err = module.run_command("%s stop --name=%s" % (vdocmd, vdoname)) + rc, out, err = module.run_command([vdocmd, "stop", "--name=%s" % vdoname]) if rc == 0: module.log("stopped VDO volume %s" % vdoname) - return rc def activate_vdo(module, vdoname, vdocmd): - rc, out, err = module.run_command("%s activate --name=%s" - % (vdocmd, vdoname)) + rc, out, err = module.run_command([vdocmd, "activate", "--name=%s" % vdoname]) if rc == 0: module.log("activated VDO volume %s" % vdoname) - return rc def deactivate_vdo(module, vdoname, vdocmd): - rc, out, err = module.run_command("%s deactivate --name=%s" - % (vdocmd, vdoname)) + rc, out, err = module.run_command([vdocmd, "deactivate", "--name=%s" % vdoname]) if rc == 0: module.log("deactivated VDO volume %s" % vdoname) - return rc @@ -397,32 +389,31 @@ def add_vdooptions(params): vdocmdoptions = "" options = [] - if ('logicalsize' in params) and (params['logicalsize'] is not None): + if params.get('logicalsize') is not None: options.append("--vdoLogicalSize=" + params['logicalsize']) - if (('blockmapcachesize' in params) and - (params['blockmapcachesize'] is not None)): + if params.get('blockmapcachesize') is not None: options.append("--blockMapCacheSize=" + params['blockmapcachesize']) - if ('readcache' in params) and (params['readcache'] == 'enabled'): + if params.get('readcache') == 'enabled': options.append("--readCache=enabled") - if ('readcachesize' in params) and (params['readcachesize'] is not None): + if params.get('readcachesize') is not None: options.append("--readCacheSize=" + params['readcachesize']) - if ('slabsize' in params) and (params['slabsize'] is not None): + if params.get('slabsize') is not None: 
options.append("--vdoSlabSize=" + params['slabsize']) - if ('emulate512' in params) and (params['emulate512']): + if params.get('emulate512'): options.append("--emulate512=enabled") - if ('indexmem' in params) and (params['indexmem'] is not None): + if params.get('indexmem') is not None: options.append("--indexMem=" + params['indexmem']) - if ('indexmode' in params) and (params['indexmode'] == 'sparse'): + if params.get('indexmode') == 'sparse': options.append("--sparseIndex=enabled") - if ('force' in params) and (params['force']): + if params.get('force'): options.append("--force") # Entering an invalid thread config results in a cryptic @@ -431,23 +422,21 @@ def add_vdooptions(params): # output a more helpful message, but one would have to log # onto that system to read the error. For now, heed the thread # limit warnings in the DOCUMENTATION section above. - if ('ackthreads' in params) and (params['ackthreads'] is not None): + if params.get('ackthreads') is not None: options.append("--vdoAckThreads=" + params['ackthreads']) - if ('biothreads' in params) and (params['biothreads'] is not None): + if params.get('biothreads') is not None: options.append("--vdoBioThreads=" + params['biothreads']) - if ('cputhreads' in params) and (params['cputhreads'] is not None): + if params.get('cputhreads') is not None: options.append("--vdoCpuThreads=" + params['cputhreads']) - if ('logicalthreads' in params) and (params['logicalthreads'] is not None): + if params.get('logicalthreads') is not None: options.append("--vdoLogicalThreads=" + params['logicalthreads']) - if (('physicalthreads' in params) and - (params['physicalthreads'] is not None)): + if params.get('physicalthreads') is not None: options.append("--vdoPhysicalThreads=" + params['physicalthreads']) - vdocmdoptions = ' '.join(options) return vdocmdoptions @@ -531,31 +520,24 @@ def run_module(): # Since this is a creation of a new VDO volume, it will contain all # all of the parameters given by the playbook; the rest 
will # assume default values. - options = module.params - vdocmdoptions = add_vdooptions(options) - rc, out, err = module.run_command("%s create --name=%s --device=%s %s" - % (vdocmd, desiredvdo, device, - vdocmdoptions)) + vdocmdoptions = add_vdooptions(module.params) + rc, out, err = module.run_command( + [vdocmd, "create", "--name=%s" % desiredvdo, "--device=%s" % device] + vdocmdoptions) if rc == 0: result['changed'] = True else: - module.fail_json(msg="Creating VDO %s failed." - % desiredvdo, rc=rc, err=err) + module.fail_json(msg="Creating VDO %s failed." % desiredvdo, rc=rc, err=err) - if (module.params['compression'] == 'disabled'): - rc, out, err = module.run_command("%s disableCompression --name=%s" - % (vdocmd, desiredvdo)) + if module.params['compression'] == 'disabled': + rc, out, err = module.run_command([vdocmd, "disableCompression", "--name=%s" % desiredvdo]) - if ((module.params['deduplication'] is not None) and - module.params['deduplication'] == 'disabled'): - rc, out, err = module.run_command("%s disableDeduplication " - "--name=%s" - % (vdocmd, desiredvdo)) + if module.params['deduplication'] == 'disabled': + rc, out, err = module.run_command([vdocmd, "disableDeduplication", "--name=%s" % desiredvdo]) - if module.params['activated'] == 'no': + if module.params['activated'] is False: deactivate_vdo(module, desiredvdo, vdocmd) - if module.params['running'] == 'no': + if module.params['running'] is False: stop_vdo(module, desiredvdo, vdocmd) # Print a post-run list of VDO volumes in the result object. @@ -564,8 +546,8 @@ def run_module(): module.exit_json(**result) # Modify the current parameters of a VDO that exists. 
- if (desiredvdo in vdolist) and (state == 'present'): - rc, vdostatusoutput, err = module.run_command("%s status" % (vdocmd)) + if desiredvdo in vdolist and state == 'present': + rc, vdostatusoutput, err = module.run_command([vdocmd, "status"]) vdostatusyaml = yaml.load(vdostatusoutput) # An empty dictionary to contain dictionaries of VDO statistics @@ -630,7 +612,7 @@ def run_module(): diffparams = {} # Check for differences between the playbook parameters and the - # current parameters. This will need a comparison function; + # current parameters. This will need a comparison function; # since AnsibleModule params are all strings, compare them as # strings (but if it's None; skip). for key in currentparams.keys(): @@ -641,10 +623,7 @@ def run_module(): if diffparams: vdocmdoptions = add_vdooptions(diffparams) if vdocmdoptions: - rc, out, err = module.run_command("%s modify --name=%s %s" - % (vdocmd, - desiredvdo, - vdocmdoptions)) + rc, out, err = module.run_command([vdocmd, "modify", "--name=%s" % desiredvdo] + vdocmdoptions) if rc == 0: result['changed'] = True else: @@ -653,107 +632,36 @@ def run_module(): if 'deduplication' in diffparams.keys(): dedupemod = diffparams['deduplication'] - if dedupemod == 'disabled': - rc, out, err = module.run_command("%s " - "disableDeduplication " - "--name=%s" - % (vdocmd, desiredvdo)) + dedupeparam = "disableDeduplication" if dedupemod == 'disabled' else "enableDeduplication" + rc, out, err = module.run_command([vdocmd, dedupeparam, "--name=%s" % desiredvdo]) - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing deduplication on " - "VDO volume %s failed." - % desiredvdo, rc=rc, err=err) - - if dedupemod == 'enabled': - rc, out, err = module.run_command("%s " - "enableDeduplication " - "--name=%s" - % (vdocmd, desiredvdo)) - - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing deduplication on " - "VDO volume %s failed." 
- % desiredvdo, rc=rc, err=err) + if rc == 0: + result['changed'] = True + else: + module.fail_json(msg="Changing deduplication on VDO volume %s failed." % desiredvdo, rc=rc, err=err) if 'compression' in diffparams.keys(): compressmod = diffparams['compression'] - if compressmod == 'disabled': - rc, out, err = module.run_command("%s disableCompression " - "--name=%s" - % (vdocmd, desiredvdo)) - - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing compression on " - "VDO volume %s failed." - % desiredvdo, rc=rc, err=err) - - if compressmod == 'enabled': - rc, out, err = module.run_command("%s enableCompression " - "--name=%s" - % (vdocmd, desiredvdo)) - - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing compression on " - "VDO volume %s failed." - % desiredvdo, rc=rc, err=err) + compressparam = "disableCompression" if compressmod == 'disabled' else "enableCompression" + rc, out, err = module.run_command([vdocmd, compressparam, "--name=%s" % desiredvdo]) + if rc == 0: + result['changed'] = True + else: + module.fail_json(msg="Changing compression on VDO volume %s failed." % desiredvdo, rc=rc, err=err) if 'writepolicy' in diffparams.keys(): writepolmod = diffparams['writepolicy'] - if writepolmod == 'auto': - rc, out, err = module.run_command("%s " - "changeWritePolicy " - "--name=%s " - "--writePolicy=%s" - % (vdocmd, - desiredvdo, - writepolmod)) + rc, out, err = module.run_command([ + vdocmd, + "changeWritePolicy", + "--name=%s" % desiredvdo, + "--writePolicy=%s" % writepolmod, + ]) - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing write policy on " - "VDO volume %s failed." 
- % desiredvdo, rc=rc, err=err) - - if writepolmod == 'sync': - rc, out, err = module.run_command("%s " - "changeWritePolicy " - "--name=%s " - "--writePolicy=%s" - % (vdocmd, - desiredvdo, - writepolmod)) - - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing write policy on " - "VDO volume %s failed." - % desiredvdo, rc=rc, err=err) - - if writepolmod == 'async': - rc, out, err = module.run_command("%s " - "changeWritePolicy " - "--name=%s " - "--writePolicy=%s" - % (vdocmd, - desiredvdo, - writepolmod)) - - if rc == 0: - result['changed'] = True - else: - module.fail_json(msg="Changing write policy on " - "VDO volume %s failed." - % desiredvdo, rc=rc, err=err) + if rc == 0: + result['changed'] = True + else: + module.fail_json(msg="Changing write policy on VDO volume %s failed." % desiredvdo, rc=rc, err=err) # Process the size parameters, to determine of a growPhysical or # growLogical operation needs to occur. @@ -771,19 +679,15 @@ def run_module(): diffsizeparams = {} for key in sizeparams.keys(): - if module.params[key] is not None: - if str(sizeparams[key]) != module.params[key]: - diffsizeparams[key] = module.params[key] + if module.params[key] is not None and str(sizeparams[key]) != module.params[key]: + diffsizeparams[key] = module.params[key] if module.params['growphysical']: physdevice = module.params['device'] - rc, devsectors, err = module.run_command("blockdev --getsz %s" - % (physdevice)) + rc, devsectors, err = module.run_command([module.get_bin_path("blockdev"), "--getsz", physdevice]) devblocks = (int(devsectors) / 8) dmvdoname = ('/dev/mapper/' + desiredvdo) - currentvdostats = (processedvdos[desiredvdo] - ['VDO statistics'] - [dmvdoname]) + currentvdostats = processedvdos[desiredvdo]['VDO statistics'][dmvdoname] currentphysblocks = currentvdostats['physical blocks'] # Set a growPhysical threshold to grow only when there is @@ -795,34 +699,25 @@ def run_module(): if currentphysblocks > growthresh: result['changed'] = 
True - rc, out, err = module.run_command("%s growPhysical --name=%s" - % (vdocmd, desiredvdo)) + rc, out, err = module.run_command([vdocmd, "growPhysical", "--name=%s" % desiredvdo]) if 'logicalsize' in diffsizeparams.keys(): result['changed'] = True - vdocmdoptions = ("--vdoLogicalSize=" + - diffsizeparams['logicalsize']) - rc, out, err = module.run_command("%s growLogical --name=%s %s" - % (vdocmd, - desiredvdo, - vdocmdoptions)) + rc, out, err = module.run_command([vdocmd, "growLogical", "--name=%s" % desiredvdo, "--vdoLogicalSize=%s" % diffsizeparams['logicalsize']]) vdoactivatestatus = processedvdos[desiredvdo]['Activate'] - if ((module.params['activated'] == 'no') and - (vdoactivatestatus == 'enabled')): + if module.params['activated'] is False and vdoactivatestatus == 'enabled': deactivate_vdo(module, desiredvdo, vdocmd) if not result['changed']: result['changed'] = True - if ((module.params['activated'] == 'yes') and - (vdoactivatestatus == 'disabled')): + if module.params['activated'] and vdoactivatestatus == 'disabled': activate_vdo(module, desiredvdo, vdocmd) if not result['changed']: result['changed'] = True - if ((module.params['running'] == 'no') and - (desiredvdo in runningvdolist)): + if module.params['running'] is False and desiredvdo in runningvdolist: stop_vdo(module, desiredvdo, vdocmd) if not result['changed']: result['changed'] = True @@ -834,10 +729,7 @@ def run_module(): # the activate_vdo() operation succeeded, as 'vdoactivatestatus' # will have the activated status prior to the activate_vdo() # call. 
- if (((vdoactivatestatus == 'enabled') or - (module.params['activated'] == 'yes')) and - (module.params['running'] == 'yes') and - (desiredvdo not in runningvdolist)): + if (vdoactivatestatus == 'enabled' or module.params['activated']) and module.params['running'] and desiredvdo not in runningvdolist: start_vdo(module, desiredvdo, vdocmd) if not result['changed']: result['changed'] = True @@ -850,14 +742,12 @@ def run_module(): module.exit_json(**result) # Remove a desired VDO that currently exists. - if (desiredvdo in vdolist) and (state == 'absent'): - rc, out, err = module.run_command("%s remove --name=%s" - % (vdocmd, desiredvdo)) + if desiredvdo in vdolist and state == 'absent': + rc, out, err = module.run_command([vdocmd, "remove", "--name=%s" % desiredvdo]) if rc == 0: result['changed'] = True else: - module.fail_json(msg="Removing VDO %s failed." - % desiredvdo, rc=rc, err=err) + module.fail_json(msg="Removing VDO %s failed." % desiredvdo, rc=rc, err=err) # Print a post-run list of VDO volumes in the result object. vdolist = inventory_vdos(module, vdocmd) @@ -869,8 +759,7 @@ def run_module(): # not exist. Print a post-run list of VDO volumes in the result # object. 
vdolist = inventory_vdos(module, vdocmd) - module.log("received request to remove non-existent VDO volume %s" - % desiredvdo) + module.log("received request to remove non-existent VDO volume %s" % desiredvdo) module.exit_json(**result) From 8a4cdd2b8a23cbbbc3dd9beb22c53fe75aafaf76 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Mon, 16 Aug 2021 22:24:15 +1200 Subject: [PATCH 0513/3093] slack - minor refactoring and pythonifying (#3205) * slack - minor refactoring and pythonifying * added changelog fragment * Update changelogs/fragments/3205-slack-minor-refactor.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/3205-slack-minor-refactor.yaml | 2 ++ plugins/modules/notification/slack.py | 32 +++++++++---------- 2 files changed, 18 insertions(+), 16 deletions(-) create mode 100644 changelogs/fragments/3205-slack-minor-refactor.yaml diff --git a/changelogs/fragments/3205-slack-minor-refactor.yaml b/changelogs/fragments/3205-slack-minor-refactor.yaml new file mode 100644 index 0000000000..5337350f69 --- /dev/null +++ b/changelogs/fragments/3205-slack-minor-refactor.yaml @@ -0,0 +1,2 @@ +minor_changes: + - slack - minor refactoring (https://github.com/ansible-collections/community.general/pull/3205). 
diff --git a/plugins/modules/notification/slack.py b/plugins/modules/notification/slack.py index 197e5f9498..3023bd9d8a 100644 --- a/plugins/modules/notification/slack.py +++ b/plugins/modules/notification/slack.py @@ -264,12 +264,12 @@ def is_valid_hex_color(color_choice): def escape_quotes(text): - '''Backslash any quotes within text.''' + """Backslash any quotes within text.""" return "".join(escape_table.get(c, c) for c in text) def recursive_escape_quotes(obj, keys): - '''Recursively escape quotes inside supplied keys inside block kit objects''' + """Recursively escape quotes inside supplied keys inside block kit objects""" if isinstance(obj, dict): escaped = {} for k, v in obj.items(): @@ -284,7 +284,7 @@ def recursive_escape_quotes(obj, keys): return escaped -def build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names, +def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, parse, color, attachments, blocks, message_id): payload = {} if color == "normal" and text is not None: @@ -344,7 +344,7 @@ def build_payload_for_slack(module, text, channel, thread_id, username, icon_url return payload -def get_slack_message(module, domain, token, channel, ts): +def get_slack_message(module, token, channel, ts): headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', @@ -372,7 +372,7 @@ def do_notify_slack(module, domain, token, payload): use_webapi = False if token.count('/') >= 2: # New style webhook token - slack_uri = SLACK_INCOMING_WEBHOOK % (token) + slack_uri = SLACK_INCOMING_WEBHOOK % token elif re.match(r'^xox[abp]-\S+$', token): slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI use_webapi = True @@ -396,7 +396,7 @@ def do_notify_slack(module, domain, token, payload): if use_webapi: obscured_incoming_webhook = slack_uri else: - obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]') + 
obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % '[obscured]' module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg'])) # each API requires different handling @@ -409,21 +409,21 @@ def do_notify_slack(module, domain, token, payload): def main(): module = AnsibleModule( argument_spec=dict( - domain=dict(type='str', required=False, default=None), + domain=dict(type='str'), token=dict(type='str', required=True, no_log=True), - msg=dict(type='str', required=False, default=None), - channel=dict(type='str', default=None), - thread_id=dict(type='str', default=None), + msg=dict(type='str'), + channel=dict(type='str'), + thread_id=dict(type='str'), username=dict(type='str', default='Ansible'), icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'), - icon_emoji=dict(type='str', default=None), + icon_emoji=dict(type='str'), link_names=dict(type='int', default=1, choices=[0, 1]), - parse=dict(type='str', default=None, choices=['none', 'full']), + parse=dict(type='str', choices=['none', 'full']), validate_certs=dict(default=True, type='bool'), color=dict(type='str', default='normal'), - attachments=dict(type='list', elements='dict', required=False, default=None), + attachments=dict(type='list', elements='dict'), blocks=dict(type='list', elements='dict'), - message_id=dict(type='str', default=None), + message_id=dict(type='str'), ), supports_check_mode=True, ) @@ -453,7 +453,7 @@ def main(): # if updating an existing message, we can check if there's anything to update if message_id is not None: changed = False - msg = get_slack_message(module, domain, token, channel, message_id) + msg = get_slack_message(module, token, channel, message_id) for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'): if msg.get(key) != module.params.get(key): changed = True @@ -465,7 +465,7 @@ def main(): elif module.check_mode: module.exit_json(changed=changed) - payload = build_payload_for_slack(module, 
text, channel, thread_id, username, icon_url, icon_emoji, link_names, + payload = build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names, parse, color, attachments, blocks, message_id) slack_response = do_notify_slack(module, domain, token, payload) From fccae19177152817b21b87ebef223decbc83e3e8 Mon Sep 17 00:00:00 2001 From: Kellin Date: Tue, 17 Aug 2021 01:05:02 -0400 Subject: [PATCH 0514/3093] Linode inventory plugin typo fixes (#3218) - Fix a typo in the Linode inventory plugin unit tests - Fix some style issues in descriptions where punctuation was missing Signed-off-by: Kellin --- plugins/inventory/linode.py | 2 +- tests/unit/plugins/inventory/test_linode.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 177bd0a42a..5af9effd52 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -23,7 +23,7 @@ DOCUMENTATION = r''' - constructed options: plugin: - description: marks this as an instance of the 'linode' plugin + description: Marks this as an instance of the 'linode' plugin. required: true choices: ['linode', 'community.general.linode'] access_token: diff --git a/tests/unit/plugins/inventory/test_linode.py b/tests/unit/plugins/inventory/test_linode.py index ab75c6c9fc..f2627d850d 100644 --- a/tests/unit/plugins/inventory/test_linode.py +++ b/tests/unit/plugins/inventory/test_linode.py @@ -62,7 +62,7 @@ def test_empty_config_query_options(inventory): assert regions == types == tags == [] -def test_conig_query_options(inventory): +def test_config_query_options(inventory): regions, types, tags = inventory._get_query_options({ 'regions': ['eu-west', 'us-east'], 'types': ['g5-standard-2', 'g6-standard-2'], From f19e191467bdb62d35636f5989e36221a1be3503 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 17 Aug 2021 07:32:02 +0200 Subject: [PATCH 0515/3093] Temporarily disable datadog_downtime unit tests. 
(#3222) --- ...test_datadog_downtime.py => test_datadog_downtime.py.disabled} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/unit/plugins/modules/monitoring/{test_datadog_downtime.py => test_datadog_downtime.py.disabled} (100%) diff --git a/tests/unit/plugins/modules/monitoring/test_datadog_downtime.py b/tests/unit/plugins/modules/monitoring/test_datadog_downtime.py.disabled similarity index 100% rename from tests/unit/plugins/modules/monitoring/test_datadog_downtime.py rename to tests/unit/plugins/modules/monitoring/test_datadog_downtime.py.disabled From 41101e55a09c618fae5ed16c005cfff64ebe5c0c Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 17 Aug 2021 20:43:18 +1200 Subject: [PATCH 0516/3093] module_helper - implemented classmethod to start the module (#3206) * module_helper - implemented classmethod to start the module plus minor change * rolled back the __changed__() method * added changelog fragment * Update plugins/module_utils/mh/base.py Co-authored-by: Sviatoslav Sydorenko * no capt Piccards allowed in the base class * removed extra piccards Co-authored-by: Sviatoslav Sydorenko --- changelogs/fragments/3206-mh-classmethod.yaml | 2 ++ plugins/module_utils/mh/base.py | 4 ++++ plugins/modules/packaging/language/cpanm.py | 3 +-- plugins/modules/system/xfconf.py | 3 +-- 4 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/3206-mh-classmethod.yaml diff --git a/changelogs/fragments/3206-mh-classmethod.yaml b/changelogs/fragments/3206-mh-classmethod.yaml new file mode 100644 index 0000000000..19cd8a6739 --- /dev/null +++ b/changelogs/fragments/3206-mh-classmethod.yaml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper module_utils - added classmethod to trigger the execution of MH modules (https://github.com/ansible-collections/community.general/pull/3206). 
diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py index a120c2556e..90c228b306 100644 --- a/plugins/module_utils/mh/base.py +++ b/plugins/module_utils/mh/base.py @@ -63,3 +63,7 @@ class ModuleHelperBase(object): if 'failed' not in output: output['failed'] = False self.module.exit_json(changed=self.has_changed(), **output) + + @classmethod + def execute(cls, module=None): + cls(module).run() diff --git a/plugins/modules/packaging/language/cpanm.py b/plugins/modules/packaging/language/cpanm.py index b8ab7e1a2f..d2c4d5a2ec 100644 --- a/plugins/modules/packaging/language/cpanm.py +++ b/plugins/modules/packaging/language/cpanm.py @@ -248,8 +248,7 @@ class CPANMinus(CmdMixin, ModuleHelper): def main(): - cpanm = CPANMinus() - cpanm.run() + CPANMinus.execute() if __name__ == '__main__': diff --git a/plugins/modules/system/xfconf.py b/plugins/modules/system/xfconf.py index 001613fc23..baf6bdd494 100644 --- a/plugins/modules/system/xfconf.py +++ b/plugins/modules/system/xfconf.py @@ -277,8 +277,7 @@ class XFConfProperty(CmdMixin, StateMixin, ModuleHelper): def main(): - xfconf = XFConfProperty() - xfconf.run() + XFConfProperty.execute() if __name__ == '__main__': From 6ac410b3f617074bb7af86050d8adfa4d495a3e6 Mon Sep 17 00:00:00 2001 From: Ricky White Date: Wed, 18 Aug 2021 03:26:44 -0400 Subject: [PATCH 0517/3093] tss: added fix for bug report in issue #3192 (#3199) * Added fix for bug report in issue #3192 * Added changelog fragment * Typo fix * Added Importerror to exception - as req by linters * Moved the conditional import statement to try/except block --- ...gin-bugfix-for-backwards-compatibility.yml | 3 +++ plugins/lookup/tss.py | 26 ++++++++++++++----- 2 files changed, 22 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml diff --git a/changelogs/fragments/3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml 
b/changelogs/fragments/3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml new file mode 100644 index 0000000000..3909286487 --- /dev/null +++ b/changelogs/fragments/3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml @@ -0,0 +1,3 @@ +bugfixes: + - tss lookup plugin - fixed backwards compatibility issue with ``python-tss-sdk`` version <=0.0.5 + (https://github.com/ansible-collections/community.general/issues/3192, https://github.com/ansible-collections/community.general/pull/3199). diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index d5e6ea6dcd..65f8b114f6 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -118,15 +118,23 @@ from ansible.errors import AnsibleError, AnsibleOptionsError sdk_is_missing = False try: - from thycotic import __version__ as sdk_version - from thycotic.secrets.server import ( - SecretServer, - SecretServerError, - PasswordGrantAuthorizer, - ) + from thycotic.secrets.server import SecretServer, SecretServerError except ImportError: sdk_is_missing = True +# Added for backwards compatability - See issue #3192 +# https://github.com/ansible-collections/community.general/issues/3192 +try: + from thycotic import __version__ as sdk_version +except ImportError: + sdk_version = "0.0.5" + +try: + from thycotic.secrets.server import PasswordGrantAuthorizer + sdK_version_below_v1 = False +except ImportError: + sdK_version_below_v1 = True + from ansible.utils.display import Display from ansible.plugins.lookup import LookupBase @@ -138,9 +146,13 @@ class LookupModule(LookupBase): @staticmethod def Client(server_parameters): - if LooseVersion(sdk_version) < LooseVersion('1.0.0'): + if LooseVersion(sdk_version) < LooseVersion('1.0.0') or sdK_version_below_v1: return SecretServer(**server_parameters) else: + # The Password Authorizer became available in v1.0.0 and beyond. + # Import only if sdk_version requires it. 
+ # from thycotic.secrets.server import PasswordGrantAuthorizer + authorizer = PasswordGrantAuthorizer( server_parameters["base_url"], server_parameters["username"], From c7fccb2c0168ed6d95de9cafe831e2b5bd4b0c9b Mon Sep 17 00:00:00 2001 From: Jacob Date: Thu, 19 Aug 2021 15:13:10 -0400 Subject: [PATCH 0518/3093] redfish_info: Include Status property for GetChassisThermals (#3233) * redfish_info: Include Status property for GetChassisThermals Include Status property for Thermal objects when querying Thermal properties via GetChassisThermals command. FIXES #3232 * fixup for rename of fragments file * Update changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml Co-authored-by: Ajpantuso Co-authored-by: Ajpantuso --- .../3233-include-thermal-sensor-status-via-redfish_info.yaml | 2 ++ plugins/module_utils/redfish_utils.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml diff --git a/changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml b/changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml new file mode 100644 index 0000000000..baed989fbf --- /dev/null +++ b/changelogs/fragments/3233-include-thermal-sensor-status-via-redfish_info.yaml @@ -0,0 +1,2 @@ +minor_changes: + - redfish_info - include ``Status`` property for Thermal objects when querying Thermal properties via ``GetChassisThermals`` command (https://github.com/ansible-collections/community.general/issues/3232). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index c861820edf..0f8e6630ba 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1887,7 +1887,7 @@ class RedfishUtils(object): 'LowerThresholdCritical', 'LowerThresholdFatal', 'LowerThresholdNonCritical', 'MaxReadingRangeTemp', 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem', - 'SensorNumber'] + 'SensorNumber', 'Status'] # Go through list for chassis_uri in self.chassis_uris: From bcccf4e388b573f68bc9f93572e3679675788473 Mon Sep 17 00:00:00 2001 From: Martin Vician Date: Fri, 20 Aug 2021 12:54:29 +0100 Subject: [PATCH 0519/3093] Add option for domain authorization (#3228) Use DomainPasswordGrantAuthorizer if parameter `domain` is used. --- .../3228-tss-domain-authorization.yml | 3 ++ plugins/lookup/tss.py | 45 +++++++++++++++---- 2 files changed, 39 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/3228-tss-domain-authorization.yml diff --git a/changelogs/fragments/3228-tss-domain-authorization.yml b/changelogs/fragments/3228-tss-domain-authorization.yml new file mode 100644 index 0000000000..0a80b3dd8e --- /dev/null +++ b/changelogs/fragments/3228-tss-domain-authorization.yml @@ -0,0 +1,3 @@ +minor_changes: + - tss lookup plugin - added new parameter for domain authorization + (https://github.com/ansible-collections/community.general/pull/3228). diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index 65f8b114f6..ecc3fd6c8b 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -45,6 +45,16 @@ options: - section: tss_lookup key: password required: true + domain: + default: "" + description: The domain with which to request the OAuth2 Access Grant. 
+ env: + - name: TSS_DOMAIN + ini: + - section: tss_lookup + key: domain + required: false + version_added: 3.6.0 api_path_uri: default: /api/v1 description: The path to append to the base URL to form a valid REST @@ -130,7 +140,8 @@ except ImportError: sdk_version = "0.0.5" try: - from thycotic.secrets.server import PasswordGrantAuthorizer + from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer + sdK_version_below_v1 = False except ImportError: sdK_version_below_v1 = True @@ -138,7 +149,6 @@ except ImportError: from ansible.utils.display import Display from ansible.plugins.lookup import LookupBase - display = Display() @@ -147,18 +157,34 @@ class LookupModule(LookupBase): def Client(server_parameters): if LooseVersion(sdk_version) < LooseVersion('1.0.0') or sdK_version_below_v1: - return SecretServer(**server_parameters) - else: - # The Password Authorizer became available in v1.0.0 and beyond. - # Import only if sdk_version requires it. - # from thycotic.secrets.server import PasswordGrantAuthorizer - - authorizer = PasswordGrantAuthorizer( + return SecretServer( server_parameters["base_url"], server_parameters["username"], server_parameters["password"], + server_parameters["api_path_uri"], server_parameters["token_path_uri"], ) + else: + # The Password Authorizer and Domain Password Authorizer + # became available in v1.0.0 and beyond. + # Import only if sdk_version requires it. 
+ # from thycotic.secrets.server import PasswordGrantAuthorizer + + if server_parameters["domain"]: + authorizer = DomainPasswordGrantAuthorizer( + server_parameters["base_url"], + server_parameters["username"], + server_parameters["domain"], + server_parameters["password"], + server_parameters["token_path_uri"], + ) + else: + authorizer = PasswordGrantAuthorizer( + server_parameters["base_url"], + server_parameters["username"], + server_parameters["password"], + server_parameters["token_path_uri"], + ) return SecretServer( server_parameters["base_url"], authorizer, server_parameters["api_path_uri"] @@ -175,6 +201,7 @@ class LookupModule(LookupBase): "base_url": self.get_option("base_url"), "username": self.get_option("username"), "password": self.get_option("password"), + "domain": self.get_option("domain"), "api_path_uri": self.get_option("api_path_uri"), "token_path_uri": self.get_option("token_path_uri"), } From 8a62b79ef2e902116575f1fe266bbce5def6f9e8 Mon Sep 17 00:00:00 2001 From: David Hummel <6109326+hummeltech@users.noreply.github.com> Date: Fri, 20 Aug 2021 12:45:30 -0700 Subject: [PATCH 0520/3093] nmcli: Disallow Wi-Fi options not supported by nmcli (#3141) * nmcli: Disallow Wi-Fi options not supported by nmcli By querying nmcli directly * Added changelog fragment * Added tests * Simplify `get_available_options()` * Update changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml Co-authored-by: Felix Fontein * Remove redundant `802-11-wireless` settings from test show outputs * Update `mocked_wireless_create(mocker)` * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Ajpantuso * Address comment re. creating function & use nmcli naming conventions I.E. `setting`.`property` = `value` ``` nmcli> help set set [. ] :: set property value This command sets property value. 
Example: nmcli> set con.id My connection ``` * Added `ignore_unsupported_suboptions` option & improved `wifi(_sec)` doc * Corrected pep8 issues ``` ERROR: Found 2 pep8 issue(s) which need to be resolved: ERROR: plugins/modules/net_tools/nmcli.py:342:161: E501: line too long (236 > 160 characters) ERROR: plugins/modules/net_tools/nmcli.py:359:161: E501: line too long (237 > 160 characters) ``` * Fixed remaining sanity check issues and added even more docs * No need to split Note * Update plugins/modules/net_tools/nmcli.py 3.5.0 has already been released. Co-authored-by: Felix Fontein * Followed uniformity guideline for format macros from Ansible's dev guide * Addressed comment https://github.com/ansible-collections/community.general/pull/3141#discussion_r689098383 * Documentation cleanup continuation * Replace `NM_SETTING_*`s having a description with their numeric value * Splitting up long paragraphs. Also removed `wifi`.`seen-bssids` as it "`is only meant for reading`" * Addressed remaining comments and clarified `wake-on-lan` note * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Finishing addressing documentation comments. 
* Update plugins/modules/net_tools/nmcli.py Co-authored-by: Ajpantuso * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Ajpantuso * Update nmcli.py * Added wifi-related `list` type options to `settings_type` method * Moved `edit_commands` `execution` logic into its own method * Move `unsupported_property` deletion into `main` function * Missing `.items()` * Resolved missing proper `nmcli conn edit` arguments * Resolve pylint issue `dangerous-default-value` Co-authored-by: Felix Fontein Co-authored-by: Ajpantuso Co-authored-by: David Hummel --- ...-disallow-options-unsupported-by-nmcli.yml | 3 + plugins/modules/net_tools/nmcli.py | 378 +++++++++++++++++- .../plugins/modules/net_tools/test_nmcli.py | 190 ++++++++- 3 files changed, 546 insertions(+), 25 deletions(-) create mode 100644 changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml diff --git a/changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml b/changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml new file mode 100644 index 0000000000..e6c15c8786 --- /dev/null +++ b/changelogs/fragments/3141-disallow-options-unsupported-by-nmcli.yml @@ -0,0 +1,3 @@ +minor_changes: + - nmcli - query ``nmcli`` directly to determine available WiFi options + (https://github.com/ansible-collections/community.general/pull/3141). diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 06b868dace..0a7d78b681 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -332,11 +332,141 @@ options: version_added: 2.0.0 wifi_sec: description: - - 'The security configuration of the WiFi connection. The valid attributes are listed on: + - The security configuration of the WiFi connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. 
+ - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).' - 'For instance to use common WPA-PSK auth with a password: C({key-mgmt: wpa-psk, psk: my_password}).' type: dict + suboptions: + auth-alg: + description: + - When WEP is used (that is, if I(key-mgmt) = C(none) or C(ieee8021x)) indicate the 802.11 authentication algorithm required by the AP here. + - One of C(open) for Open System, C(shared) for Shared Key, or C(leap) for Cisco LEAP. + - When using Cisco LEAP (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)) the I(leap-username) and I(leap-password) properties + must be specified. + type: str + choices: [ open, shared, leap ] + fils: + description: + - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection. + - One of C(0) (use global default value), C(1) (disable FILS), C(2) (enable FILS if the supplicant and the access point support it) or C(3) + (enable FILS and fail if not supported). + - When set to C(0) and no global default is set, FILS will be optionally enabled. + type: int + choices: [ 0, 1, 2, 3 ] + default: 0 + group: + description: + - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in + the list. + - For maximum compatibility leave this property empty. + type: list + elements: str + choices: [ wep40, wep104, tkip, ccmp ] + key-mgmt: + description: + - Key management used for the connection. + - One of C(none) (WEP or no password protection), C(ieee8021x) (Dynamic WEP), C(owe) (Opportunistic Wireless Encryption), C(wpa-psk) (WPA2 + + WPA3 personal), C(sae) (WPA3 personal only), C(wpa-eap) (WPA2 + WPA3 enterprise) or C(wpa-eap-suite-b-192) (WPA3 enterprise only). + - This property must be set for any Wi-Fi connection that uses security. 
+ type: str + choices: [ none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192 ] + leap-password-flags: + description: Flags indicating how to handle the I(leap-password) property. + type: list + elements: int + leap-password: + description: The login password for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)). + type: str + leap-username: + description: The login username for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)). + type: str + pairwise: + description: + - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in the + list. + - For maximum compatibility leave this property empty. + type: list + elements: str + choices: [ tkip, ccmp ] + pmf: + description: + - Indicates whether Protected Management Frames (802.11w) must be enabled for the connection. + - One of C(0) (use global default value), C(1) (disable PMF), C(2) (enable PMF if the supplicant and the access point support it) or C(3) + (enable PMF and fail if not supported). + - When set to C(0) and no global default is set, PMF will be optionally enabled. + type: int + choices: [ 0, 1, 2, 3 ] + default: 0 + proto: + description: + - List of strings specifying the allowed WPA protocol versions to use. + - Each element may be C(wpa) (allow WPA) or C(rsn) (allow WPA2/RSN). + - If not specified, both WPA and RSN connections are allowed. + type: list + elements: str + choices: [ wpa, rsn ] + psk-flags: + description: Flags indicating how to handle the I(psk) property. + type: list + elements: int + psk: + description: + - Pre-Shared-Key for WPA networks. + - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) hashed to derive the + actual key, or the key in form of 64 hexadecimal character. + - The WPA3-Personal networks use a passphrase of any length for SAE authentication. 
+ type: str + wep-key-flags: + description: Flags indicating how to handle the I(wep-key0), I(wep-key1), I(wep-key2), and I(wep-key3) properties. + type: list + elements: int + wep-key-type: + description: + - Controls the interpretation of WEP keys. + - Allowed values are C(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII + password; or C(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the + actual WEP key. + type: int + choices: [ 1, 2 ] + wep-key0: + description: + - Index 0 WEP key. This is the WEP key used in most networks. + - See the I(wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key1: + description: + - Index 1 WEP key. This WEP index is not used by most networks. + - See the I(wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key2: + description: + - Index 2 WEP key. This WEP index is not used by most networks. + - See the I(wep-key-type) property for a description of how this key is interpreted. + type: str + wep-key3: + description: + - Index 3 WEP key. This WEP index is not used by most networks. + - See the I(wep-key-type) property for a description of how this key is interpreted. + type: str + wep-tx-keyidx: + description: + - When static WEP is used (that is, if I(key-mgmt=none)) and a non-default WEP key index is used by the AP, put that WEP key index here. + - Valid values are C(0) (default key) through C(3). + - Note that some consumer access points (like the Linksys WRT54G) number the keys C(1) - C(4). + type: int + choices: [ 0, 1, 2, 3 ] + default: 0 + wps-method: + description: + - Flags indicating which mode of WPS is to be used if any. + - There is little point in changing the default setting as NetworkManager will automatically determine whether it is feasible to start WPS + enrollment from the Access Point capabilities. 
+ - WPS can be disabled by setting this property to a value of C(1). + type: int + default: 0 version_added: 3.0.0 ssid: description: @@ -345,12 +475,162 @@ options: version_added: 3.0.0 wifi: description: - - 'The configuration of the WiFi connection. The valid attributes are listed on: + - The configuration of the WiFi connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. + - 'An up-to-date list of supported attributes can be found here: U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).' - 'For instance to create a hidden AP mode WiFi connection: C({hidden: true, mode: ap}).' type: dict + suboptions: + ap-isolation: + description: + - Configures AP isolation, which prevents communication between wireless devices connected to this AP. + - This property can be set to a value different from C(-1) only when the interface is configured in AP mode. + - If set to C(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks + from other clients in the network. At the same time, it prevents devices to access resources on the same wireless networks as file + shares, printers, etc. + - If set to C(0), devices can talk to each other. + - When set to C(-1), the global default is used; in case the global default is unspecified it is assumed to be C(0). + type: int + choices: [ -1, 0, 1 ] + default: -1 + assigned-mac-address: + description: + - The new field for the cloned MAC address. + - It can be either a hardware address in ASCII representation, or one of the special values C(preserve), C(permanent), C(random) or + C(stable). + - This field replaces the deprecated I(cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses. + - Note that this property only exists in D-Bus API. libnm and nmcli continue to call this property I(cloned-mac-address). 
+ type: str + band: + description: + - 802.11 frequency band of the network. + - One of C(a) for 5GHz 802.11a or C(bg) for 2.4GHz 802.11. + - This will lock associations to the Wi-Fi network to the specific band, so for example, if C(a) is specified, the device will not + associate with the same network in the 2.4GHz band even if the network's settings are compatible. + - This setting depends on specific driver capability and may not work with all drivers. + type: str + choices: [ a, bg ] + bssid: + description: + - If specified, directs the device to only associate with the given access point. + - This capability is highly driver dependent and not supported by all devices. + - Note this property does not control the BSSID used when creating an Ad-Hoc network and is unlikely to in the future. + type: str + channel: + description: + - Wireless channel to use for the Wi-Fi connection. + - The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel. + - Because channel numbers overlap between bands, this property also requires the I(band) property to be set. + type: int + default: 0 + cloned-mac-address: + description: + - This D-Bus field is deprecated in favor of I(assigned-mac-address) which is more flexible and allows specifying special variants like + C(random). + - For libnm and nmcli, this field is called I(cloned-mac-address). + type: str + generate-mac-address-mask: + description: + - With I(cloned-mac-address) setting C(random) or C(stable), by default all bits of the MAC address are scrambled and a + locally-administered, unicast MAC address is created. This property allows to specify that certain bits are fixed. + - Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address. + - If the property is C(null), it is eligible to be overwritten by a default connection setting. 
+ - If the value is still c(null) or an empty string, the default is to create a locally-administered, unicast MAC address. + - If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled with the current MAC + address of the device, while the unset bits are subject to randomization. + - Setting C(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the + C(random) or C(stable) algorithm. + - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC address to fill the bits + that shall not be randomized. + - For example, a value of C(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits are + randomized. + - A value of C(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address. + - If the value contains more than one additional MAC addresses, one of them is chosen randomly. For example, + C(02:00:00:00:00:00 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally + administered. + type: str + hidden: + description: + - If C(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP mode. + - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as probe-scanning the SSID. + However, these workarounds expose inherent insecurities with hidden SSID networks, and thus hidden SSID networks should be used with + caution. + - In AP mode, the created network does not broadcast its SSID. + - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations (in AP mode), as the + explicit probe-scans are distinctly recognizable on the air. 
+ type: bool + default: false + mac-address-blacklist: + description: + - A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply. + - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, C(00:11:22:33:44:55)). + type: list + elements: str + mac-address-randomization: + description: + - One of C(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), C(1) + (never randomize the MAC address), or C(2) (always randomize the MAC address). + - This property is deprecated for I(cloned-mac-address). + type: int + default: 0 + choices: [ 0, 1, 2 ] + mac-address: + description: + - If specified, this connection will only apply to the Wi-Fi device whose permanent MAC address matches. + - This property does not change the MAC address of the device (for example for MAC spoofing). + type: str + mode: + description: Wi-Fi network mode. If blank, C(infrastructure) is assumed. + type: str + choices: [ infrastructure, mesh, adhoc, ap ] + default: infrastructure + mtu: + description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. + type: int + default: 0 + powersave: + description: + - One of C(2) (disable Wi-Fi power saving), C(3) (enable Wi-Fi power saving), C(1) (don't touch currently configure setting) or C(0) (use + the globally configured value). + - All other values are reserved. + type: int + default: 0 + choices: [ 0, 1, 2, 3 ] + rate: + description: + - If non-zero, directs the device to only use the specified bitrate for communication with the access point. + - Units are in Kb/s, so for example C(5500) = 5.5 Mbit/s. + - This property is highly driver dependent and not all devices support setting a static bitrate. + type: int + default: 0 + tx-power: + description: + - If non-zero, directs the device to use the specified transmit power. + - Units are dBm. 
+ - This property is highly driver dependent and not all devices support setting a static transmit power. + type: int + default: 0 + wake-on-wlan: + description: + - The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options. + - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (C(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (C(0x4)), + C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (C(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (C(0x10)), + C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (C(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (C(0x40)), + C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (C(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (C(0x100)) or the special values + C(0x1) (to use global settings) and C(0x8000) (to disable management of Wake-on-LAN in NetworkManager). + - Note the option values' sum must be specified in order to combine multiple options. + type: int + default: 1 version_added: 3.5.0 + ignore_unsupported_suboptions: + description: + - Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host. + - Only I(wifi) and I(wifi_sec) options are currently affected. 
+ type: bool + default: false + version_added: 3.6.0 ''' EXAMPLES = r''' @@ -699,6 +979,7 @@ class Nmcli(object): A subclass may wish to override the following action methods:- - create_connection() - delete_connection() + - edit_connection() - modify_connection() - show_connection() - up_connection() @@ -721,6 +1002,7 @@ class Nmcli(object): def __init__(self, module): self.module = module self.state = module.params['state'] + self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions'] self.autoconnect = module.params['autoconnect'] self.conn_name = module.params['conn_name'] self.master = module.params['master'] @@ -810,6 +1092,12 @@ class Nmcli(object): cmd = to_text(cmd) return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) + def execute_edit_commands(self, commands, arguments): + arguments = arguments or [] + cmd = [self.nmcli_bin, 'con', 'edit'] + arguments + data = "\n".join(commands) + return self.execute_command(cmd, data=data) + def connection_options(self, detect_change=False): # Options common to multiple connection types. 
options = { @@ -920,9 +1208,6 @@ class Nmcli(object): }) if self.wifi: for name, value in self.wifi.items(): - # Disregard 'ssid' via 'wifi.ssid' - if name == 'ssid': - continue options.update({ '802-11-wireless.%s' % name: value }) @@ -1039,7 +1324,14 @@ class Nmcli(object): 'ipv4.routes', 'ipv4.route-metric' 'ipv6.dns', - 'ipv6.dns-search'): + 'ipv6.dns-search', + '802-11-wireless-security.group', + '802-11-wireless-security.leap-password-flags', + '802-11-wireless-security.pairwise', + '802-11-wireless-security.proto', + '802-11-wireless-security.psk-flags', + '802-11-wireless-security.wep-key-flags', + '802-11-wireless.mac-address-blacklist'): return list return str @@ -1127,9 +1419,8 @@ class Nmcli(object): return status def edit_connection(self): - data = "\n".join(self.edit_commands + ['save', 'quit']) - cmd = [self.nmcli_bin, 'con', 'edit', self.conn_name] - return self.execute_command(cmd, data=data) + commands = self.edit_commands + ['save', 'quit'] + return self.execute_edit_commands(commands, arguments=[self.conn_name]) def show_connection(self): cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name] @@ -1173,6 +1464,60 @@ class Nmcli(object): return conn_info + def get_supported_properties(self, setting): + properties = [] + + if setting == '802-11-wireless-security': + set_property = 'psk' + set_value = 'FAKEVALUE' + commands = ['set %s.%s %s' % (setting, set_property, set_value)] + else: + commands = [] + + commands += ['print %s' % setting, 'quit', 'yes'] + + (rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type]) + + if rc != 0: + raise NmcliModuleError(err) + + for line in out.splitlines(): + prefix = '%s.' 
% setting + if (line.startswith(prefix)): + pair = line.split(':', 1) + property = pair[0].strip().replace(prefix, '') + properties.append(property) + + return properties + + def check_for_unsupported_properties(self, setting): + if setting == '802-11-wireless': + setting_key = 'wifi' + elif setting == '802-11-wireless-security': + setting_key = 'wifi_sec' + else: + setting_key = setting + + supported_properties = self.get_supported_properties(setting) + unsupported_properties = [] + + for property, value in getattr(self, setting_key).items(): + if property not in supported_properties: + unsupported_properties.append(property) + + if unsupported_properties: + msg_options = [] + for property in unsupported_properties: + msg_options.append('%s.%s' % (setting_key, property)) + + msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options) + if self.ignore_unsupported_suboptions: + self.module.warn(msg) + else: + self.module.fail_json(msg=msg) + + return unsupported_properties + def _compare_conn_params(self, conn_info, options): changed = False diff_before = dict() @@ -1230,6 +1575,7 @@ def main(): # Parsing argument file module = AnsibleModule( argument_spec=dict( + ignore_unsupported_suboptions=dict(type='bool', default=False), autoconnect=dict(type='bool', default=True), state=dict(type='str', required=True, choices=['absent', 'present']), conn_name=dict(type='str', required=True), @@ -1315,6 +1661,7 @@ def main(): ip_tunnel_dev=dict(type='str'), ip_tunnel_local=dict(type='str'), ip_tunnel_remote=dict(type='str'), + # 802-11-wireless* specific vars ssid=dict(type='str'), wifi=dict(type='dict'), wifi_sec=dict(type='dict', no_log=True), @@ -1343,6 +1690,19 @@ def main(): nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type) if nmcli.ifname is None: nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type) + if nmcli.type == 'wifi': + unsupported_properties = {} 
+ if nmcli.wifi: + if 'ssid' in nmcli.wifi: + module.warn("Ignoring option 'wifi.ssid', it must be specified with option 'ssid'") + del nmcli.wifi['ssid'] + unsupported_properties['wifi'] = nmcli.check_for_unsupported_properties('802-11-wireless') + if nmcli.wifi_sec: + unsupported_properties['wifi_sec'] = nmcli.check_for_unsupported_properties('802-11-wireless-security') + if nmcli.ignore_unsupported_suboptions and unsupported_properties: + for setting_key, properties in unsupported_properties.items(): + for property in properties: + del getattr(nmcli, setting_key)[property] try: if nmcli.state == 'absent': diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 9f131c3873..ca83044201 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -507,6 +507,51 @@ TESTCASE_SECURE_WIRELESS = [ } ] +TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT = """\ +802-11-wireless.ssid: -- +802-11-wireless.mode: infrastructure +802-11-wireless.band: -- +802-11-wireless.channel: 0 +802-11-wireless.bssid: -- +802-11-wireless.rate: 0 +802-11-wireless.tx-power: 0 +802-11-wireless.mac-address: -- +802-11-wireless.cloned-mac-address: -- +802-11-wireless.generate-mac-address-mask:-- +802-11-wireless.mac-address-blacklist: -- +802-11-wireless.mac-address-randomization:default +802-11-wireless.mtu: auto +802-11-wireless.seen-bssids: -- +802-11-wireless.hidden: no +802-11-wireless.powersave: 0 (default) +802-11-wireless.wake-on-wlan: 0x1 (default) +802-11-wireless.ap-isolation: -1 (default) +""" + +TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT = \ + TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT + """\ +802-11-wireless-security.key-mgmt: -- +802-11-wireless-security.wep-tx-keyidx: 0 +802-11-wireless-security.auth-alg: -- +802-11-wireless-security.proto: -- +802-11-wireless-security.pairwise: -- +802-11-wireless-security.group: -- +802-11-wireless-security.pmf: 0 (default) 
+802-11-wireless-security.leap-username: -- +802-11-wireless-security.wep-key0: -- +802-11-wireless-security.wep-key1: -- +802-11-wireless-security.wep-key2: -- +802-11-wireless-security.wep-key3: -- +802-11-wireless-security.wep-key-flags: 0 (none) +802-11-wireless-security.wep-key-type: unknown +802-11-wireless-security.psk: testingtestingtesting +802-11-wireless-security.psk-flags: 0 (none) +802-11-wireless-security.leap-password: -- +802-11-wireless-security.leap-password-flags:0 (none) +802-11-wireless-security.wps-method: 0x0 (default) +802-11-wireless-security.fils: 0 (default) +""" + TESTCASE_DUMMY_STATIC = [ { 'type': 'dummy', @@ -697,10 +742,48 @@ def mocked_ethernet_connection_dhcp_to_static(mocker): )) +@pytest.fixture +def mocked_wireless_create(mocker): + mocker_set(mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT, ""), + (0, "", ""), + )) + + +@pytest.fixture +def mocked_secure_wireless_create(mocker): + mocker_set(mocker, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), + (0, "", ""), + (0, "", ""), + )) + + @pytest.fixture def mocked_secure_wireless_create_failure(mocker): mocker_set(mocker, - execute_return=(1, "", "")) + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), + (1, "", ""), + )) + + +@pytest.fixture +def mocked_secure_wireless_modify(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=None, + execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), + (0, "", ""), + (0, "", ""), + (0, "", ""), + )) @pytest.fixture @@ -709,6 +792,7 @@ def mocked_secure_wireless_modify_failure(mocker): connection_exists=True, execute_return=None, execute_side_effect=( + (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""), (0, "", ""), (1, "", ""), )) @@ -1629,7 +1713,7 @@ def test_ethernet_connection_static_unchanged(mocked_ethernet_connection_static_ 
@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIRELESS, indirect=['patch_ansible_module']) -def test_create_wireless(mocked_generic_connection_create, capfd): +def test_create_wireless(mocked_wireless_create, capfd): """ Test : Create wireless connection """ @@ -1637,10 +1721,22 @@ def test_create_wireless(mocked_generic_connection_create, capfd): with pytest.raises(SystemExit): nmcli.main() - assert nmcli.Nmcli.execute_command.call_count == 1 + assert nmcli.Nmcli.execute_command.call_count == 2 arg_list = nmcli.Nmcli.execute_command.call_args_list - add_args, add_kw = arg_list[0] + get_available_options_args, get_available_options_kw = arg_list[0] + assert get_available_options_args[0][0] == '/usr/bin/nmcli' + assert get_available_options_args[0][1] == 'con' + assert get_available_options_args[0][2] == 'edit' + assert get_available_options_args[0][3] == 'type' + assert get_available_options_args[0][4] == 'wifi' + + get_available_options_data = get_available_options_kw['data'].split() + for param in ['print', '802-11-wireless', + 'quit', 'yes']: + assert param in get_available_options_data + + add_args, add_kw = arg_list[1] assert add_args[0][0] == '/usr/bin/nmcli' assert add_args[0][1] == 'con' assert add_args[0][2] == 'add' @@ -1664,7 +1760,7 @@ def test_create_wireless(mocked_generic_connection_create, capfd): @pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) -def test_create_secure_wireless(mocked_generic_connection_create, capfd): +def test_create_secure_wireless(mocked_secure_wireless_create, capfd): """ Test : Create secure wireless connection """ @@ -1672,10 +1768,22 @@ def test_create_secure_wireless(mocked_generic_connection_create, capfd): with pytest.raises(SystemExit): nmcli.main() - assert nmcli.Nmcli.execute_command.call_count == 2 + assert nmcli.Nmcli.execute_command.call_count == 3 arg_list = nmcli.Nmcli.execute_command.call_args_list - add_args, add_kw = arg_list[0] + 
get_available_options_args, get_available_options_kw = arg_list[0] + assert get_available_options_args[0][0] == '/usr/bin/nmcli' + assert get_available_options_args[0][1] == 'con' + assert get_available_options_args[0][2] == 'edit' + assert get_available_options_args[0][3] == 'type' + assert get_available_options_args[0][4] == 'wifi' + + get_available_options_data = get_available_options_kw['data'].split() + for param in ['print', '802-11-wireless-security', + 'quit', 'yes']: + assert param in get_available_options_data + + add_args, add_kw = arg_list[1] assert add_args[0][0] == '/usr/bin/nmcli' assert add_args[0][1] == 'con' assert add_args[0][2] == 'add' @@ -1691,7 +1799,7 @@ def test_create_secure_wireless(mocked_generic_connection_create, capfd): '802-11-wireless-security.key-mgmt', 'wpa-psk']: assert param in add_args_text - edit_args, edit_kw = arg_list[1] + edit_args, edit_kw = arg_list[2] assert edit_args[0][0] == '/usr/bin/nmcli' assert edit_args[0][1] == 'con' assert edit_args[0][2] == 'edit' @@ -1718,10 +1826,22 @@ def test_create_secure_wireless_failure(mocked_secure_wireless_create_failure, c with pytest.raises(SystemExit): nmcli.main() - assert nmcli.Nmcli.execute_command.call_count == 1 + assert nmcli.Nmcli.execute_command.call_count == 2 arg_list = nmcli.Nmcli.execute_command.call_args_list - add_args, add_kw = arg_list[0] + get_available_options_args, get_available_options_kw = arg_list[0] + assert get_available_options_args[0][0] == '/usr/bin/nmcli' + assert get_available_options_args[0][1] == 'con' + assert get_available_options_args[0][2] == 'edit' + assert get_available_options_args[0][3] == 'type' + assert get_available_options_args[0][4] == 'wifi' + + get_available_options_data = get_available_options_kw['data'].split() + for param in ['print', '802-11-wireless-security', + 'quit', 'yes']: + assert param in get_available_options_data + + add_args, add_kw = arg_list[1] assert add_args[0][0] == '/usr/bin/nmcli' assert add_args[0][1] == 'con' 
assert add_args[0][2] == 'add' @@ -1744,17 +1864,36 @@ def test_create_secure_wireless_failure(mocked_secure_wireless_create_failure, c @pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module']) -def test_modify_secure_wireless(mocked_generic_connection_modify, capfd): +def test_modify_secure_wireless(mocked_secure_wireless_modify, capfd): """ Test : Modify secure wireless connection """ with pytest.raises(SystemExit): nmcli.main() - assert nmcli.Nmcli.execute_command.call_count == 2 + assert nmcli.Nmcli.execute_command.call_count == 4 arg_list = nmcli.Nmcli.execute_command.call_args_list - add_args, add_kw = arg_list[0] + get_available_options_args, get_available_options_kw = arg_list[0] + assert get_available_options_args[0][0] == '/usr/bin/nmcli' + assert get_available_options_args[0][1] == 'con' + assert get_available_options_args[0][2] == 'edit' + assert get_available_options_args[0][3] == 'type' + assert get_available_options_args[0][4] == 'wifi' + + get_available_options_data = get_available_options_kw['data'].split() + for param in ['print', '802-11-wireless-security', + 'quit', 'yes']: + assert param in get_available_options_data + + show_args, show_kw = arg_list[1] + assert show_args[0][0] == '/usr/bin/nmcli' + assert show_args[0][1] == '--show-secrets' + assert show_args[0][2] == 'con' + assert show_args[0][3] == 'show' + assert show_args[0][4] == 'non_existent_nw_device' + + add_args, add_kw = arg_list[2] assert add_args[0][0] == '/usr/bin/nmcli' assert add_args[0][1] == 'con' assert add_args[0][2] == 'modify' @@ -1767,7 +1906,7 @@ def test_modify_secure_wireless(mocked_generic_connection_modify, capfd): '802-11-wireless-security.key-mgmt', 'wpa-psk']: assert param in add_args_text - edit_args, edit_kw = arg_list[1] + edit_args, edit_kw = arg_list[3] assert edit_args[0][0] == '/usr/bin/nmcli' assert edit_args[0][1] == 'con' assert edit_args[0][2] == 'edit' @@ -1794,10 +1933,29 @@ def 
test_modify_secure_wireless_failure(mocked_secure_wireless_modify_failure, c with pytest.raises(SystemExit): nmcli.main() - assert nmcli.Nmcli.execute_command.call_count == 2 + assert nmcli.Nmcli.execute_command.call_count == 3 arg_list = nmcli.Nmcli.execute_command.call_args_list - add_args, add_kw = arg_list[1] + get_available_options_args, get_available_options_kw = arg_list[0] + assert get_available_options_args[0][0] == '/usr/bin/nmcli' + assert get_available_options_args[0][1] == 'con' + assert get_available_options_args[0][2] == 'edit' + assert get_available_options_args[0][3] == 'type' + assert get_available_options_args[0][4] == 'wifi' + + get_available_options_data = get_available_options_kw['data'].split() + for param in ['print', '802-11-wireless-security', + 'quit', 'yes']: + assert param in get_available_options_data + + show_args, show_kw = arg_list[1] + assert show_args[0][0] == '/usr/bin/nmcli' + assert show_args[0][1] == '--show-secrets' + assert show_args[0][2] == 'con' + assert show_args[0][3] == 'show' + assert show_args[0][4] == 'non_existent_nw_device' + + add_args, add_kw = arg_list[2] assert add_args[0][0] == '/usr/bin/nmcli' assert add_args[0][1] == 'con' assert add_args[0][2] == 'modify' From 23e7ef025529b7c79fab284121f0b4b5045e45fa Mon Sep 17 00:00:00 2001 From: Matt 'Archer' Vaughn Date: Sat, 21 Aug 2021 15:57:28 -0400 Subject: [PATCH 0521/3093] Add option for retry_servfail (#3247) * Add option for retry_servfail cf. https://dnspython.readthedocs.io/en/latest/resolver-class.html#dns.resolver.Resolver.retry_servfail Setting this option to `True` allows for the possibility of the lookup plugin to retry and thereby recover from potentially transient lookup failures, which would otherwise cause the task or play to bail with an unrecoverable exception. 
* Create 3247-retry_servfail-for-dig * documentation for `retry_servfail` option * Rename 3247-retry_servfail-for-dig to 3247-retry_servfail-for-dig.yaml * fix whitespace * Update plugins/lookup/dig.py Co-authored-by: Ajpantuso * Update plugins/lookup/dig.py Co-authored-by: Ajpantuso * rm try/except block Co-authored-by: Ajpantuso --- changelogs/fragments/3247-retry_servfail-for-dig.yaml | 3 +++ plugins/lookup/dig.py | 11 +++++++++++ 2 files changed, 14 insertions(+) create mode 100644 changelogs/fragments/3247-retry_servfail-for-dig.yaml diff --git a/changelogs/fragments/3247-retry_servfail-for-dig.yaml b/changelogs/fragments/3247-retry_servfail-for-dig.yaml new file mode 100644 index 0000000000..1e4a00384f --- /dev/null +++ b/changelogs/fragments/3247-retry_servfail-for-dig.yaml @@ -0,0 +1,3 @@ +--- +minor_changes: + - dig lookup plugin - add ``retry_servfail`` option (https://github.com/ansible-collections/community.general/pull/3247). diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index f5156b4d1e..19ded61de7 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -35,6 +35,11 @@ DOCUMENTATION = ''' flat: description: If 0 each record is returned as a dictionary, otherwise a string default: 1 + retry_servfail: + description: Retry a nameserver if it returns SERVFAIL. + default: false + type: bool + version_added: 3.6.0 notes: - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary. - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary. @@ -73,6 +78,10 @@ EXAMPLES = """ - ansible.builtin.debug: msg: "XMPP service for gmail.com. 
is available at {{ item.target }} on port {{ item.port }}" with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}" + +- name: Retry nameservers that return SERVFAIL + ansible.builtin.debug: + msg: "{{ lookup('community.general.dig', 'example.org./A', 'retry_servfail=True') }}" """ RETURN = """ @@ -300,6 +309,8 @@ class LookupModule(LookupBase): rdclass = dns.rdataclass.from_text(arg) except Exception as e: raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e)) + elif opt == 'retry_servfail': + myres.retry_servfail = bool(arg) continue From 1ca9c350109966f7f901f447b7b9c483bd5006cf Mon Sep 17 00:00:00 2001 From: zerotens Date: Mon, 23 Aug 2021 06:24:05 +0200 Subject: [PATCH 0522/3093] nmcli: allow IPv4/IPv6 configuration on ipip and sit devices (#3239) * Allow IPv4/IPv6 configuration on mode "sit" tunnel devices * Update Unit Test for Allow IPv4/IPv6 configuration on mode "sit" tunnel devices * Add changelog for Allow IPv4/IPv6 configuration on mode "sit" tunnel devices * Update changelogs/fragments/3239-nmcli-sit-ip-config-bugfix.yaml Co-authored-by: Ajpantuso * Added ip4/ip6 configuration arguments for ipip tunnels Co-authored-by: Ajpantuso --- .../fragments/3239-nmcli-sit-ipip-config-bugfix.yaml | 2 ++ plugins/modules/net_tools/nmcli.py | 2 ++ tests/unit/plugins/modules/net_tools/test_nmcli.py | 12 ++++++++++++ 3 files changed, 16 insertions(+) create mode 100644 changelogs/fragments/3239-nmcli-sit-ipip-config-bugfix.yaml diff --git a/changelogs/fragments/3239-nmcli-sit-ipip-config-bugfix.yaml b/changelogs/fragments/3239-nmcli-sit-ipip-config-bugfix.yaml new file mode 100644 index 0000000000..78a172342e --- /dev/null +++ b/changelogs/fragments/3239-nmcli-sit-ipip-config-bugfix.yaml @@ -0,0 +1,2 @@ +bugfixes: + - "nmcli - added ip4/ip6 configuration arguments for ``sit`` and ``ipip`` tunnels (https://github.com/ansible-collections/community.general/issues/3238, 
https://github.com/ansible-collections/community.general/pull/3239)." diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 0a7d78b681..cce9e44ee4 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -1248,6 +1248,8 @@ class Nmcli(object): 'ethernet', 'generic', 'infiniband', + 'ipip', + 'sit', 'team', 'vlan', 'wifi' diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index ca83044201..f81b636a81 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -388,6 +388,12 @@ TESTCASE_IPIP_SHOW_OUTPUT = """\ connection.id: non_existent_nw_device connection.interface-name: ipip-existent_nw_device connection.autoconnect: yes +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no ip-tunnel.mode: ipip ip-tunnel.parent: non_existent_ipip_device ip-tunnel.local: 192.168.225.5 @@ -411,6 +417,12 @@ TESTCASE_SIT_SHOW_OUTPUT = """\ connection.id: non_existent_nw_device connection.interface-name: sit-existent_nw_device connection.autoconnect: yes +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no ip-tunnel.mode: sit ip-tunnel.parent: non_existent_sit_device ip-tunnel.local: 192.168.225.5 From f2fa56b485bab467250185c0dd3ee6b777ee0044 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 25 Aug 2021 06:36:19 +0200 Subject: [PATCH 0523/3093] Fix apache2_module a2enmod/a2dismod detection and error message if not found. 
(#3258) --- changelogs/fragments/3258-apache2_module.yml | 2 ++ plugins/modules/web_infrastructure/apache2_module.py | 10 ++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/3258-apache2_module.yml diff --git a/changelogs/fragments/3258-apache2_module.yml b/changelogs/fragments/3258-apache2_module.yml new file mode 100644 index 0000000000..a60f2125a4 --- /dev/null +++ b/changelogs/fragments/3258-apache2_module.yml @@ -0,0 +1,2 @@ +bugfixes: +- "apache2_module - fix ``a2enmod``/``a2dismod`` detection, and error message when not found (https://github.com/ansible-collections/community.general/issues/3253)." diff --git a/plugins/modules/web_infrastructure/apache2_module.py b/plugins/modules/web_infrastructure/apache2_module.py index c75dc1c30c..44327fe13c 100644 --- a/plugins/modules/web_infrastructure/apache2_module.py +++ b/plugins/modules/web_infrastructure/apache2_module.py @@ -202,15 +202,17 @@ def _set_state(module, state): result=success_msg, warnings=module.warnings) - a2mod_binary = [module.get_bin_path(a2mod_binary)] - if a2mod_binary is None: + a2mod_binary_path = module.get_bin_path(a2mod_binary) + if a2mod_binary_path is None: module.fail_json(msg="%s not found. 
Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary)) + a2mod_binary_cmd = [a2mod_binary_path] + if not want_enabled and force: # force exists only for a2dismod on debian - a2mod_binary.append('-f') + a2mod_binary_cmd.append('-f') - result, stdout, stderr = module.run_command(a2mod_binary + [name]) + result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name]) if _module_is_enabled(module) == want_enabled: module.exit_json(changed=True, From cbcb942b0efacde22d7310b1d66205face469b75 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Wed, 25 Aug 2021 00:41:05 -0400 Subject: [PATCH 0524/3093] tss_lookup_plugin - Refactor and decoupling (#3252) * Initial commit * Adding changelog fragment * Applying initial review suggestions * Increasing unit coverage * Removed unneccessary constant * Improving test readability * Cleanup constants --- .../3252-tss_lookup_plugin-refactor.yml | 4 + plugins/lookup/tss.py | 159 ++++++++++-------- tests/unit/plugins/lookup/test_tss.py | 104 ++++++++++-- 3 files changed, 187 insertions(+), 80 deletions(-) create mode 100644 changelogs/fragments/3252-tss_lookup_plugin-refactor.yml diff --git a/changelogs/fragments/3252-tss_lookup_plugin-refactor.yml b/changelogs/fragments/3252-tss_lookup_plugin-refactor.yml new file mode 100644 index 0000000000..6e8ccb29f8 --- /dev/null +++ b/changelogs/fragments/3252-tss_lookup_plugin-refactor.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - tss lookup plugin - refactored to decouple the supporting third-party library (``python-tss-sdk``) + (https://github.com/ansible-collections/community.general/pull/3252). diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index ecc3fd6c8b..fe6042e130 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -47,7 +47,9 @@ options: required: true domain: default: "" - description: The domain with which to request the OAuth2 Access Grant. + description: + - The domain with which to request the OAuth2 Access Grant. 
+ - Requires C(python-tss-sdk) version 1.0.0 or greater. env: - name: TSS_DOMAIN ini: @@ -122,100 +124,125 @@ EXAMPLES = r""" - ansible.builtin.debug: msg: the password is {{ secret_password }} """ -from distutils.version import LooseVersion -from ansible.errors import AnsibleError, AnsibleOptionsError -sdk_is_missing = False +import abc + +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.module_utils import six +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display try: from thycotic.secrets.server import SecretServer, SecretServerError -except ImportError: - sdk_is_missing = True -# Added for backwards compatability - See issue #3192 -# https://github.com/ansible-collections/community.general/issues/3192 -try: - from thycotic import __version__ as sdk_version + HAS_TSS_SDK = True except ImportError: - sdk_version = "0.0.5" + SecretServer = None + SecretServerError = None + HAS_TSS_SDK = False try: from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer - sdK_version_below_v1 = False + HAS_TSS_AUTHORIZER = True except ImportError: - sdK_version_below_v1 = True + PasswordGrantAuthorizer = None + DomainPasswordGrantAuthorizer = None + HAS_TSS_AUTHORIZER = False -from ansible.utils.display import Display -from ansible.plugins.lookup import LookupBase display = Display() -class LookupModule(LookupBase): - @staticmethod - def Client(server_parameters): +@six.add_metaclass(abc.ABCMeta) +class TSSClient(object): + def __init__(self): + self._client = None - if LooseVersion(sdk_version) < LooseVersion('1.0.0') or sdK_version_below_v1: - return SecretServer( + @staticmethod + def from_params(**server_parameters): + if HAS_TSS_AUTHORIZER: + return TSSClientV1(**server_parameters) + else: + return TSSClientV0(**server_parameters) + + def get_secret(self, term): + display.debug("tss_lookup term: %s" % term) + + secret_id = self._term_to_secret_id(term) + display.vvv(u"Secret Server 
lookup of Secret with ID %d" % secret_id) + + return self._client.get_secret_json(secret_id) + + @staticmethod + def _term_to_secret_id(term): + try: + return int(term) + except ValueError: + raise AnsibleOptionsError("Secret ID must be an integer") + + +class TSSClientV0(TSSClient): + def __init__(self, **server_parameters): + super(TSSClientV0, self).__init__() + + if server_parameters.get("domain"): + raise AnsibleError("The 'domain' option requires 'python-tss-sdk' version 1.0.0 or greater") + + self._client = SecretServer( + server_parameters["base_url"], + server_parameters["username"], + server_parameters["password"], + server_parameters["api_path_uri"], + server_parameters["token_path_uri"], + ) + + +class TSSClientV1(TSSClient): + def __init__(self, **server_parameters): + super(TSSClientV1, self).__init__() + + authorizer = self._get_authorizer(**server_parameters) + self._client = SecretServer( + server_parameters["base_url"], authorizer, server_parameters["api_path_uri"] + ) + + @staticmethod + def _get_authorizer(**server_parameters): + if server_parameters.get("domain"): + return DomainPasswordGrantAuthorizer( server_parameters["base_url"], server_parameters["username"], + server_parameters["domain"], server_parameters["password"], - server_parameters["api_path_uri"], server_parameters["token_path_uri"], ) - else: - # The Password Authorizer and Domain Password Authorizer - # became available in v1.0.0 and beyond. - # Import only if sdk_version requires it. 
- # from thycotic.secrets.server import PasswordGrantAuthorizer - if server_parameters["domain"]: - authorizer = DomainPasswordGrantAuthorizer( - server_parameters["base_url"], - server_parameters["username"], - server_parameters["domain"], - server_parameters["password"], - server_parameters["token_path_uri"], - ) - else: - authorizer = PasswordGrantAuthorizer( - server_parameters["base_url"], - server_parameters["username"], - server_parameters["password"], - server_parameters["token_path_uri"], - ) + return PasswordGrantAuthorizer( + server_parameters["base_url"], + server_parameters["username"], + server_parameters["password"], + server_parameters["token_path_uri"], + ) - return SecretServer( - server_parameters["base_url"], authorizer, server_parameters["api_path_uri"] - ) +class LookupModule(LookupBase): def run(self, terms, variables, **kwargs): - if sdk_is_missing: + if not HAS_TSS_SDK: raise AnsibleError("python-tss-sdk must be installed to use this plugin") self.set_options(var_options=variables, direct=kwargs) - secret_server = LookupModule.Client( - { - "base_url": self.get_option("base_url"), - "username": self.get_option("username"), - "password": self.get_option("password"), - "domain": self.get_option("domain"), - "api_path_uri": self.get_option("api_path_uri"), - "token_path_uri": self.get_option("token_path_uri"), - } + tss = TSSClient.from_params( + base_url=self.get_option("base_url"), + username=self.get_option("username"), + password=self.get_option("password"), + domain=self.get_option("domain"), + api_path_uri=self.get_option("api_path_uri"), + token_path_uri=self.get_option("token_path_uri"), ) - result = [] - for term in terms: - display.debug("tss_lookup term: %s" % term) - try: - id = int(term) - display.vvv(u"Secret Server lookup of Secret with ID %d" % id) - result.append(secret_server.get_secret_json(id)) - except ValueError: - raise AnsibleOptionsError("Secret ID must be an integer") - except SecretServerError as error: - raise 
AnsibleError("Secret Server lookup failure: %s" % error.message) - return result + try: + return [tss.get_secret(term) for term in terms] + except SecretServerError as error: + raise AnsibleError("Secret Server lookup failure: %s" % error.message) diff --git a/tests/unit/plugins/lookup/test_tss.py b/tests/unit/plugins/lookup/test_tss.py index cca2f6ff5f..97073d34be 100644 --- a/tests/unit/plugins/lookup/test_tss.py +++ b/tests/unit/plugins/lookup/test_tss.py @@ -10,12 +10,25 @@ __metaclass__ = type from ansible_collections.community.general.tests.unit.compat.unittest import TestCase from ansible_collections.community.general.tests.unit.compat.mock import ( patch, + DEFAULT, MagicMock, ) from ansible_collections.community.general.plugins.lookup import tss from ansible.plugins.loader import lookup_loader +TSS_IMPORT_PATH = 'ansible_collections.community.general.plugins.lookup.tss' + + +def make_absolute(name): + return '.'.join([TSS_IMPORT_PATH, name]) + + +class SecretServerError(Exception): + def __init__(self): + self.message = '' + + class MockSecretServer(MagicMock): RESPONSE = '{"foo": "bar"}' @@ -23,21 +36,84 @@ class MockSecretServer(MagicMock): return self.RESPONSE -class TestLookupModule(TestCase): +class MockFaultySecretServer(MagicMock): + def get_secret_json(self, path): + raise SecretServerError + + +@patch(make_absolute('SecretServer'), MockSecretServer()) +class TestTSSClient(TestCase): + def setUp(self): + self.server_params = { + 'base_url': '', + 'username': '', + 'domain': '', + 'password': '', + 'api_path_uri': '', + 'token_path_uri': '', + } + + def test_from_params(self): + with patch(make_absolute('HAS_TSS_AUTHORIZER'), False): + self.assert_client_version('v0') + + with patch.dict(self.server_params, {'domain': 'foo'}): + with self.assertRaises(tss.AnsibleError): + self._get_client() + + with patch.multiple(TSS_IMPORT_PATH, + HAS_TSS_AUTHORIZER=True, + PasswordGrantAuthorizer=DEFAULT, + DomainPasswordGrantAuthorizer=DEFAULT): + + 
self.assert_client_version('v1') + + with patch.dict(self.server_params, {'domain': 'foo'}): + self.assert_client_version('v1') + + def assert_client_version(self, version): + version_to_class = { + 'v0': tss.TSSClientV0, + 'v1': tss.TSSClientV1 + } + + client = self._get_client() + self.assertIsInstance(client, version_to_class[version]) + + def _get_client(self): + return tss.TSSClient.from_params(**self.server_params) + + +class TestLookupModule(TestCase): + VALID_TERMS = [1] + INVALID_TERMS = ['foo'] + def setUp(self): - tss.sdk_is_missing = False self.lookup = lookup_loader.get("community.general.tss") - @patch( - "ansible_collections.community.general.plugins.lookup.tss.LookupModule.Client", - MockSecretServer(), - ) + @patch.multiple(TSS_IMPORT_PATH, + HAS_TSS_SDK=False, + SecretServer=MockSecretServer) + def test_missing_sdk(self): + with self.assertRaises(tss.AnsibleError): + self._run_lookup(self.VALID_TERMS) + + @patch.multiple(TSS_IMPORT_PATH, + HAS_TSS_SDK=True, + SecretServerError=SecretServerError) def test_get_secret_json(self): - self.assertListEqual( - [MockSecretServer.RESPONSE], - self.lookup.run( - [1], - [], - **{"base_url": "dummy", "username": "dummy", "password": "dummy", } - ), - ) + with patch(make_absolute('SecretServer'), MockSecretServer): + self.assertListEqual([MockSecretServer.RESPONSE], self._run_lookup(self.VALID_TERMS)) + + with self.assertRaises(tss.AnsibleOptionsError): + self._run_lookup(self.INVALID_TERMS) + + with patch(make_absolute('SecretServer'), MockFaultySecretServer): + with self.assertRaises(tss.AnsibleError): + self._run_lookup(self.VALID_TERMS) + + def _run_lookup(self, terms, variables=None, **kwargs): + variables = variables or [] + kwargs = kwargs or {"base_url": "dummy", "username": "dummy", "password": "dummy"} + + return self.lookup.run(terms, variables, **kwargs) From e40aa69e77752fe3571ec500ce749c67f7a50a13 Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Thu, 26 Aug 2021 08:09:26 +0200 Subject: [PATCH 
0525/3093] Stop notifications for apache2_module for me (#3261) --- .github/BOTMETA.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 1e982296d6..6055224145 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1101,7 +1101,8 @@ files: $modules/web_infrastructure/apache2_mod_proxy.py: maintainers: oboukili $modules/web_infrastructure/apache2_module.py: - maintainers: berendt n0trax robinro + maintainers: berendt n0trax + ignore: robinro $modules/web_infrastructure/deploy_helper.py: maintainers: ramondelafuente $modules/web_infrastructure/django_manage.py: From b8a081b9b23ae6b858115b8890ad5f5e8c0a0e11 Mon Sep 17 00:00:00 2001 From: zerotens Date: Thu, 26 Aug 2021 08:16:36 +0200 Subject: [PATCH 0526/3093] nmcli: Support gre tunnels (#3262) * Add gre tunnel support * Add gre tunnel support * Fix Blank Lines * Fix unit test Add changelog fragment * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Felix Fontein * Update Docs * Update plugins/modules/net_tools/nmcli.py Co-authored-by: Ajpantuso * Update Docs Co-authored-by: Felix Fontein Co-authored-by: Ajpantuso --- .../3262-nmcli-add-gre-tunnel-support.yaml | 2 + plugins/modules/net_tools/nmcli.py | 41 +++++- .../plugins/modules/net_tools/test_nmcli.py | 123 ++++++++++++++++++ 3 files changed, 162 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/3262-nmcli-add-gre-tunnel-support.yaml diff --git a/changelogs/fragments/3262-nmcli-add-gre-tunnel-support.yaml b/changelogs/fragments/3262-nmcli-add-gre-tunnel-support.yaml new file mode 100644 index 0000000000..e3f6bef7bc --- /dev/null +++ b/changelogs/fragments/3262-nmcli-add-gre-tunnel-support.yaml @@ -0,0 +1,2 @@ +minor_changes: + - "nmcli - add ``gre`` tunnel support (https://github.com/ansible-collections/community.general/issues/3105, https://github.com/ansible-collections/community.general/pull/3262)." 
diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index cce9e44ee4..7bc8a6b775 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -55,7 +55,7 @@ options: - Type C(generic) is added in Ansible 2.5. - Type C(infiniband) is added in community.general 2.0.0. type: str - choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] + choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] mode: description: - This is the type of device or network connection that you wish to create for a bond or bridge. @@ -314,16 +314,28 @@ options: type: str ip_tunnel_dev: description: - - This is used with IPIP/SIT - parent device this IPIP/SIT tunnel, can use ifname. + - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname. type: str ip_tunnel_remote: description: - - This is used with IPIP/SIT - IPIP/SIT destination IP address. + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address. type: str ip_tunnel_local: description: - - This is used with IPIP/SIT - IPIP/SIT local IP address. + - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address. type: str + ip_tunnel_input_key: + description: + - The key used for tunnel input packets. + - Only used when I(type=gre). + type: str + version_added: 3.6.0 + ip_tunnel_output_key: + description: + - The key used for tunnel output packets. + - Only used when I(type=gre). + type: str + version_added: 3.6.0 zone: description: - The trust level of the connection. 
@@ -896,6 +908,14 @@ EXAMPLES = r''' vxlan_local: 192.168.1.2 vxlan_remote: 192.168.1.5 + - name: Add gre + community.general.nmcli: + type: gre + conn_name: gre_test1 + ip_tunnel_dev: eth0 + ip_tunnel_local: 192.168.1.2 + ip_tunnel_remote: 192.168.1.5 + - name: Add ipip community.general.nmcli: type: ipip @@ -1058,6 +1078,8 @@ class Nmcli(object): self.ip_tunnel_dev = module.params['ip_tunnel_dev'] self.ip_tunnel_local = module.params['ip_tunnel_local'] self.ip_tunnel_remote = module.params['ip_tunnel_remote'] + self.ip_tunnel_input_key = module.params['ip_tunnel_input_key'] + self.ip_tunnel_output_key = module.params['ip_tunnel_output_key'] self.nmcli_bin = self.module.get_bin_path('nmcli', True) self.dhcp_client_id = module.params['dhcp_client_id'] self.zone = module.params['zone'] @@ -1190,6 +1212,11 @@ class Nmcli(object): 'ip-tunnel.parent': self.ip_tunnel_dev, 'ip-tunnel.remote': self.ip_tunnel_remote, }) + if self.type == 'gre': + options.update({ + 'ip-tunnel.input-key': self.ip_tunnel_input_key, + 'ip-tunnel.output-key': self.ip_tunnel_output_key + }) elif self.type == 'vlan': options.update({ 'vlan.id': self.vlanid, @@ -1247,6 +1274,7 @@ class Nmcli(object): 'dummy', 'ethernet', 'generic', + 'gre', 'infiniband', 'ipip', 'sit', @@ -1293,6 +1321,7 @@ class Nmcli(object): @property def tunnel_conn_type(self): return self.type in ( + 'gre', 'ipip', 'sit', ) @@ -1592,6 +1621,7 @@ def main(): 'dummy', 'ethernet', 'generic', + 'gre', 'infiniband', 'ipip', 'sit', @@ -1663,6 +1693,9 @@ def main(): ip_tunnel_dev=dict(type='str'), ip_tunnel_local=dict(type='str'), ip_tunnel_remote=dict(type='str'), + # ip-tunnel type gre specific vars + ip_tunnel_input_key=dict(type='str', no_log=True), + ip_tunnel_output_key=dict(type='str', no_log=True), # 802-11-wireless* specific vars ssid=dict(type='str'), wifi=dict(type='dict'), diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index f81b636a81..9277bd5fb6 
100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -62,6 +62,12 @@ TESTCASE_CONNECTION = [ 'state': 'absent', '_ansible_check_mode': True, }, + { + 'type': 'gre', + 'conn_name': 'non_existent_nw_device', + 'state': 'absent', + '_ansible_check_mode': True, + }, { 'type': 'ipip', 'conn_name': 'non_existent_nw_device', @@ -371,6 +377,39 @@ vxlan.local: 192.168.225.5 vxlan.remote: 192.168.225.6 """ +TESTCASE_GRE = [ + { + 'type': 'gre', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'gre-existent_nw_device', + 'ip_tunnel_dev': 'non_existent_gre_device', + 'ip_tunnel_local': '192.168.225.5', + 'ip_tunnel_remote': '192.168.225.6', + 'ip_tunnel_input_key': '1', + 'ip_tunnel_output_key': '2', + 'state': 'present', + '_ansible_check_mode': False, + } +] + +TESTCASE_GRE_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_device +connection.interface-name: gre-existent_nw_device +connection.autoconnect: yes +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no +ip-tunnel.mode: gre +ip-tunnel.parent: non_existent_gre_device +ip-tunnel.local: 192.168.225.5 +ip-tunnel.remote: 192.168.225.6 +ip-tunnel.input-key: 1 +ip-tunnel.output-key: 2 +""" + TESTCASE_IPIP = [ { 'type': 'ipip', @@ -708,6 +747,13 @@ def mocked_vxlan_connection_unchanged(mocker): execute_return=(0, TESTCASE_VXLAN_SHOW_OUTPUT, "")) +@pytest.fixture +def mocked_gre_connection_unchanged(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=(0, TESTCASE_GRE_SHOW_OUTPUT, "")) + + @pytest.fixture def mocked_ipip_connection_unchanged(mocker): mocker_set(mocker, @@ -1630,6 +1676,83 @@ def test_eth_dhcp_client_id_con_create(mocked_generic_connection_create, capfd): assert results['changed'] +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module']) +def 
test_create_gre(mocked_generic_connection_create, capfd): + """ + Test if gre created + """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'add' + assert args[0][3] == 'type' + assert args[0][4] == 'ip-tunnel' + assert args[0][5] == 'con-name' + assert args[0][6] == 'non_existent_nw_device' + + args_text = list(map(to_text, args[0])) + for param in ['connection.interface-name', 'gre-existent_nw_device', + 'ip-tunnel.local', '192.168.225.5', + 'ip-tunnel.mode', 'gre', + 'ip-tunnel.parent', 'non_existent_gre_device', + 'ip-tunnel.remote', '192.168.225.6', + 'ip-tunnel.input-key', '1', + 'ip-tunnel.output-key', '2']: + assert param in args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module']) +def test_gre_mod(mocked_generic_connection_modify, capfd): + """ + Test if gre modified + """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'modify' + assert args[0][3] == 'non_existent_nw_device' + + args_text = list(map(to_text, args[0])) + for param in ['ip-tunnel.local', '192.168.225.5', 'ip-tunnel.remote', '192.168.225.6']: + assert param in args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module']) +def 
test_gre_connection_unchanged(mocked_gre_connection_unchanged, capfd): + """ + Test : GRE connection unchanged + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert not results['changed'] + + @pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_DHCP, indirect=['patch_ansible_module']) def test_ethernet_connection_dhcp_unchanged(mocked_ethernet_connection_dhcp_unchanged, capfd): """ From cc458f7c376d59455bc4028c25d20a850cb7fd82 Mon Sep 17 00:00:00 2001 From: Nicolas Karolak Date: Fri, 27 Aug 2021 06:08:54 +0200 Subject: [PATCH 0527/3093] parse scw-cli config file for oauth_token (#3250) If `api_token` is not set and config file exists, it will try to fetch the value from the activated profile and fallback on default. This should not break existing workflows. --- .../fragments/3250-parse-scw-config.yml | 2 + plugins/inventory/scaleway.py | 41 ++++++++++++++++++- 2 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3250-parse-scw-config.yml diff --git a/changelogs/fragments/3250-parse-scw-config.yml b/changelogs/fragments/3250-parse-scw-config.yml new file mode 100644 index 0000000000..8c96c55e47 --- /dev/null +++ b/changelogs/fragments/3250-parse-scw-config.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway plugin inventory - parse scw-cli config file for ``oauth_token`` (https://github.com/ansible-collections/community.general/pull/3250). diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 86140124c5..fa65eae321 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -13,6 +13,8 @@ DOCUMENTATION = r''' short_description: Scaleway inventory source description: - Get inventory hosts from Scaleway. + requirements: + - PyYAML options: plugin: description: Token that ensures this is a source file for the 'scaleway' plugin. 
@@ -30,9 +32,10 @@ DOCUMENTATION = r''' description: Filter results on a specific tag. type: list oauth_token: - required: True description: - Scaleway OAuth token. + - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file + (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)). - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). env: # in order of precedence @@ -95,13 +98,22 @@ variables: ansible_user: "'admin'" ''' +import os import json +try: + import yaml +except ImportError as exc: + YAML_IMPORT_ERROR = exc +else: + YAML_IMPORT_ERROR = None + from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link from ansible.module_utils.urls import open_url from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.six import raise_from import ansible.module_utils.six.moves.urllib.parse as urllib_parse @@ -278,13 +290,38 @@ class InventoryModule(BaseInventoryPlugin, Constructable): # Composed variables self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False) + def get_oauth_token(self): + oauth_token = self.get_option('oauth_token') + + if 'SCW_CONFIG_PATH' in os.environ: + scw_config_path = os.getenv('SCW_CONFIG_PATH') + elif 'XDG_CONFIG_HOME' in os.environ: + scw_config_path = os.path.join(os.getenv('XDG_CONFIG_HOME'), 'scw', 'config.yaml') + else: + scw_config_path = os.path.join(os.path.expanduser('~'), '.config', 'scw', 'config.yaml') + + if not oauth_token and os.path.exists(scw_config_path): + with open(scw_config_path) as fh: + scw_config = yaml.safe_load(fh) + active_profile = scw_config.get('active_profile', 'default') + if active_profile == 'default': + oauth_token = 
scw_config.get('secret_key') + else: + oauth_token = scw_config['profiles'][active_profile].get('secret_key') + + return oauth_token + def parse(self, inventory, loader, path, cache=True): + if YAML_IMPORT_ERROR: + raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR) super(InventoryModule, self).parse(inventory, loader, path) self._read_config_data(path=path) config_zones = self.get_option("regions") tags = self.get_option("tags") - token = self.get_option("oauth_token") + token = self.get_oauth_token() + if not token: + raise AnsibleError("'oauth_token' value is null, you must configure it either in inventory, envvars or scaleway-cli config.") hostname_preference = self.get_option("hostnames") for zone in self._get_zones(config_zones): From 825e17c1cfc33571b273984f195166f268b0850c Mon Sep 17 00:00:00 2001 From: Laurent Paumier <30328363+laurpaum@users.noreply.github.com> Date: Fri, 27 Aug 2021 06:17:04 +0200 Subject: [PATCH 0528/3093] Fix keycloak_realm module (#3231) * fix events_listeners element type add events_enabled parameter * Update plugins/modules/identity/keycloak/keycloak_realm.py Co-authored-by: Felix Fontein * add changelog * Update changelogs/fragments/3231-fix-keycloak-realm-events.yml Co-authored-by: Felix Fontein * Update changelogs/fragments/3231-fix-keycloak-realm-events.yml Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_realm.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/3231-fix-keycloak-realm-events.yml | 5 +++++ plugins/modules/identity/keycloak/keycloak_realm.py | 12 ++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3231-fix-keycloak-realm-events.yml diff --git a/changelogs/fragments/3231-fix-keycloak-realm-events.yml b/changelogs/fragments/3231-fix-keycloak-realm-events.yml new file mode 100644 index 0000000000..9950ed2c59 --- /dev/null +++ b/changelogs/fragments/3231-fix-keycloak-realm-events.yml 
@@ -0,0 +1,5 @@ +bugfixes: + - keycloak_realm - element type for ``events_listeners`` parameter should be ``string`` instead of ``dict`` (https://github.com/ansible-collections/community.general/pull/3231). +minor_changes: + - keycloak_realm - add ``events_enabled`` parameter to allow activation or deactivation of login events (https://github.com/ansible-collections/community.general/pull/3231). + \ No newline at end of file diff --git a/plugins/modules/identity/keycloak/keycloak_realm.py b/plugins/modules/identity/keycloak/keycloak_realm.py index 95f79704ef..da37fa2723 100644 --- a/plugins/modules/identity/keycloak/keycloak_realm.py +++ b/plugins/modules/identity/keycloak/keycloak_realm.py @@ -242,6 +242,13 @@ options: - enabledEventTypes type: list elements: str + events_enabled: + description: + - Enables or disables login events for this realm. + aliases: + - eventsEnabled + type: bool + version_added: 3.6.0 events_expiration: description: - The realm events expiration. @@ -254,7 +261,7 @@ options: aliases: - eventsListeners type: list - elements: dict + elements: str failure_factor: description: - The realm failure factor. 
@@ -626,8 +633,9 @@ def main(): email_theme=dict(type='str', aliases=['emailTheme']), enabled=dict(type='bool'), enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']), + events_enabled=dict(type='bool', aliases=['eventsEnabled']), events_expiration=dict(type='int', aliases=['eventsExpiration']), - events_listeners=dict(type='list', elements='dict', aliases=['eventsListeners']), + events_listeners=dict(type='list', elements='str', aliases=['eventsListeners']), failure_factor=dict(type='int', aliases=['failureFactor']), internationalization_enabled=dict(type='bool', aliases=['internationalizationEnabled']), login_theme=dict(type='str', aliases=['loginTheme']), From e77adff0b76e13ce932dde6ab26ea320335d7476 Mon Sep 17 00:00:00 2001 From: Kellin Date: Fri, 27 Aug 2021 00:20:04 -0400 Subject: [PATCH 0529/3093] Linode Inventory can use full IP data from APIv4 (#3203) * Linode Inventory can use full IP data from APIv4 - The Linode dynamic inventory module does not currently distinguish between private and public IP addresses even though the Linode APIv4 contains this information. This change keeps the current behavior as the default and adds an option to set `ip_style: api`. When set, this option allows administrators to differentiate between private, public, slaac, local_link, and pool network addresses providing a more nuanced and granular view of the remote host's network information. 
Signed-off-by: Kellin * Review - amend changelog details - Adds a link back to this pull request - Uses markdown styles for easier to read publishing in the changelogs - Amends the wording style to match the existing changelog styles Co-authored-by: Felix Fontein * Add scope to example invocation - Adds the `community.general` scope to invocation example Co-authored-by: Felix Fontein * Convert lamda to list comprehension - Change the ip type filter from a lambda to a list comprehension Co-authored-by: Felix Fontein * Add punctuation to description sentence - Adds a period to the end of the description sentence Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- ...node-inventory-return-full-api-ip-data.yml | 2 + plugins/inventory/linode.py | 46 +++++++++++++++++++ tests/unit/plugins/inventory/test_linode.py | 1 + 3 files changed, 49 insertions(+) create mode 100644 changelogs/fragments/3203-linode-inventory-return-full-api-ip-data.yml diff --git a/changelogs/fragments/3203-linode-inventory-return-full-api-ip-data.yml b/changelogs/fragments/3203-linode-inventory-return-full-api-ip-data.yml new file mode 100644 index 0000000000..fa7581e820 --- /dev/null +++ b/changelogs/fragments/3203-linode-inventory-return-full-api-ip-data.yml @@ -0,0 +1,2 @@ +minor_changes: + - "linode inventory plugin - adds the ``ip_style`` configuration key. Set to ``api`` to get more detailed network details back from the remote Linode host (https://github.com/ansible-collections/community.general/pull/3203)." diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 5af9effd52..0ce510852a 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -26,6 +26,15 @@ DOCUMENTATION = r''' description: Marks this as an instance of the 'linode' plugin. required: true choices: ['linode', 'community.general.linode'] + ip_style: + description: Populate hostvars with all information available from the Linode APIv4. 
+ type: string + default: + - plain + choices: + - plain + - api + version_added: 3.6.0 access_token: description: The Linode account personal access token. required: true @@ -83,6 +92,13 @@ compose: # replace it with the first IPv4 address of the linode as follows: ansible_ssh_host: ipv4[0] ansible_port: 2222 + +# Example where control traffic limited to internal network +plugin: community.general.linode +access_token: foobar +ip_style: api +compose: + ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first" ''' import os @@ -170,14 +186,44 @@ class InventoryModule(BaseInventoryPlugin, Constructable): def _add_hostvars_for_instances(self): """Add hostvars for instances in the dynamic inventory.""" + ip_style = self.get_option('ip_style') for instance in self.instances: hostvars = instance._raw_json for hostvar_key in hostvars: + if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']: + continue self.inventory.set_variable( instance.label, hostvar_key, hostvars[hostvar_key] ) + if ip_style == 'api': + ips = instance.ips.ipv4.public + instance.ips.ipv4.private + ips += [instance.ips.ipv6.slaac, instance.ips.ipv6.link_local] + ips += instance.ips.ipv6.pools + + for ip_type in set(ip.type for ip in ips): + self.inventory.set_variable( + instance.label, + ip_type, + self._ip_data([ip for ip in ips if ip.type == ip_type]) + ) + + def _ip_data(self, ip_list): + data = [] + for ip in list(ip_list): + data.append( + { + 'address': ip.address, + 'subnet_mask': ip.subnet_mask, + 'gateway': ip.gateway, + 'public': ip.public, + 'prefix': ip.prefix, + 'rdns': ip.rdns, + 'type': ip.type + } + ) + return data def _validate_option(self, name, desired_type, option_value): """Validate user specified configuration data against types.""" diff --git a/tests/unit/plugins/inventory/test_linode.py b/tests/unit/plugins/inventory/test_linode.py index f2627d850d..501f95b1f2 100644 --- a/tests/unit/plugins/inventory/test_linode.py +++ 
b/tests/unit/plugins/inventory/test_linode.py @@ -49,6 +49,7 @@ def test_access_token_lookup(inventory): def test_validate_option(inventory): assert ['eu-west'] == inventory._validate_option('regions', list, 'eu-west') assert ['eu-west'] == inventory._validate_option('regions', list, ['eu-west']) + assert 'api' == inventory._validate_option('ip_style', str, 'api') def test_validation_option_bad_option(inventory): From 4e2d4e3c68c078e1b04ef69bc9a4a5f8588f3b7a Mon Sep 17 00:00:00 2001 From: Atlas974 <43972908+Atlas974@users.noreply.github.com> Date: Fri, 27 Aug 2021 18:48:32 +0200 Subject: [PATCH 0530/3093] Fixed incorrect VMID: cloning to an existing VM (#3266) * Fixed incorrect VMID: cloning to an existing VM During a cloning operation, if the destination VM already exists the VMID returned is not correct. The VMID returned should be that of the destination VM and not that of the source VM (consistent with line 1230). A playbook that relies on the returned VMID, for example, to perform other operations on the destination VM, will not work properly if it is unexpectedly interrupted. * Add files via upload * moved 3266-vmid-existing-target-clone.yml to changelogs/fragments/ replaced line separator CRLF -> LF * storing vmid list in variable to avoid multiple API calls --- changelogs/fragments/3266-vmid-existing-target-clone.yml | 3 +++ plugins/modules/cloud/misc/proxmox_kvm.py | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3266-vmid-existing-target-clone.yml diff --git a/changelogs/fragments/3266-vmid-existing-target-clone.yml b/changelogs/fragments/3266-vmid-existing-target-clone.yml new file mode 100644 index 0000000000..5ff59f5311 --- /dev/null +++ b/changelogs/fragments/3266-vmid-existing-target-clone.yml @@ -0,0 +1,3 @@ +bugfixes: + - proxmox_kvm - clone operation should return the VMID of the target VM and not that of the source VM. 
+ This was failing when the target VM with the chosen name already existed (https://github.com/ansible-collections/community.general/pull/3266). \ No newline at end of file diff --git a/plugins/modules/cloud/misc/proxmox_kvm.py b/plugins/modules/cloud/misc/proxmox_kvm.py index 159968ce6e..25b29b369b 100644 --- a/plugins/modules/cloud/misc/proxmox_kvm.py +++ b/plugins/modules/cloud/misc/proxmox_kvm.py @@ -1201,8 +1201,9 @@ def main(): module.fail_json(vmid=vmid, msg='VM with vmid = %s does not exist in cluster' % vmid) # Ensure the choosen VM name doesn't already exist when cloning - if get_vmid(proxmox, name): - module.exit_json(changed=False, vmid=vmid, msg="VM with name <%s> already exists" % name) + existing_vmid = get_vmid(proxmox, name) + if existing_vmid: + module.exit_json(changed=False, vmid=existing_vmid[0], msg="VM with name <%s> already exists" % name) # Ensure the choosen VM id doesn't already exist when cloning if get_vm(proxmox, newid): From 69641d36e18c5a62d966dc24db28d2a0d28b9bda Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 29 Aug 2021 07:50:09 +1200 Subject: [PATCH 0531/3093] openwrt_init - improvements (#3284) * improvements on openwrt_init * added changelog fragment --- .../3284-openwrt_init-improvements.yaml | 4 +++ plugins/modules/system/openwrt_init.py | 35 ++++++------------- 2 files changed, 14 insertions(+), 25 deletions(-) create mode 100644 changelogs/fragments/3284-openwrt_init-improvements.yaml diff --git a/changelogs/fragments/3284-openwrt_init-improvements.yaml b/changelogs/fragments/3284-openwrt_init-improvements.yaml new file mode 100644 index 0000000000..99a60dfce8 --- /dev/null +++ b/changelogs/fragments/3284-openwrt_init-improvements.yaml @@ -0,0 +1,4 @@ +minor_changes: + - openwrt_init - minor refactoring (https://github.com/ansible-collections/community.general/pull/3284). 
+bugfixes: + - openwrt_init - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3284). diff --git a/plugins/modules/system/openwrt_init.py b/plugins/modules/system/openwrt_init.py index afc3c3a956..fa9488ecb2 100644 --- a/plugins/modules/system/openwrt_init.py +++ b/plugins/modules/system/openwrt_init.py @@ -70,9 +70,7 @@ RETURN = ''' ''' import os -import glob from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.text.converters import to_bytes, to_native module = None init_script = None @@ -81,15 +79,12 @@ init_script = None # =============================== # Check if service is enabled def is_enabled(): - (rc, out, err) = module.run_command("%s enabled" % init_script) - if rc == 0: - return True - return False + rc, dummy, dummy = module.run_command([init_script, 'enabled']) + return rc == 0 # =========================================== # Main control flow - def main(): global module, init_script # init @@ -98,22 +93,19 @@ def main(): name=dict(required=True, type='str', aliases=['service']), state=dict(type='str', choices=['started', 'stopped', 'restarted', 'reloaded']), enabled=dict(type='bool'), - pattern=dict(type='str', required=False, default=None), + pattern=dict(type='str'), ), supports_check_mode=True, - required_one_of=[['state', 'enabled']], + required_one_of=[('state', 'enabled')], ) # initialize service = module.params['name'] init_script = '/etc/init.d/' + service - rc = 0 - out = err = '' result = { 'name': service, 'changed': False, } - # check if service exists if not os.path.exists(init_script): module.fail_json(msg='service %s does not exist' % service) @@ -129,13 +121,10 @@ def main(): # Change enable/disable if needed if enabled != module.params['enabled']: result['changed'] = True - if module.params['enabled']: - action = 'enable' - else: - action = 'disable' + action = 'enable' if module.params['enabled'] else 'disable' if 
not module.check_mode: - (rc, out, err) = module.run_command("%s %s" % (init_script, action)) + rc, dummy, err = module.run_command([init_script, action]) # openwrt init scripts can return a non-zero exit code on a successful 'enable' # command if the init script doesn't contain a STOP value, so we ignore the exit # code and explicitly check if the service is now in the desired state @@ -153,17 +142,13 @@ def main(): psbin = module.get_bin_path('ps', True) # this should be busybox ps, so we only want/need to the 'w' option - (rc, psout, pserr) = module.run_command('%s w' % psbin) + rc, psout, dummy = module.run_command([psbin, 'w']) # If rc is 0, set running as appropriate if rc == 0: lines = psout.split("\n") - for line in lines: - if module.params['pattern'] in line and "pattern=" not in line: - # so as to not confuse ./hacking/test-module.py - running = True - break + running = any((module.params['pattern'] in line and "pattern=" not in line) for line in lines) else: - (rc, out, err) = module.run_command("%s running" % init_script) + rc, dummy, dummy = module.run_command([init_script, 'running']) if rc == 0: running = True @@ -187,7 +172,7 @@ def main(): if action: if not module.check_mode: - (rc, out, err) = module.run_command("%s %s" % (init_script, action)) + rc, dummy, err = module.run_command([init_script, action]) if rc != 0: module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err)) From a91eb6ae4f9b2ca3143c953ed65303e94c806a1d Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 29 Aug 2021 07:54:39 +1200 Subject: [PATCH 0532/3093] snap - improved error handling (#3211) * snap - improved error handling * added changelog fragment * removed experiments left overs * rolled back the smaller list of params for commands other than install --- .../fragments/3211-snap-error-handling.yml | 2 + plugins/modules/packaging/os/snap.py | 70 ++++--------------- 2 files changed, 17 insertions(+), 55 
deletions(-) create mode 100644 changelogs/fragments/3211-snap-error-handling.yml diff --git a/changelogs/fragments/3211-snap-error-handling.yml b/changelogs/fragments/3211-snap-error-handling.yml new file mode 100644 index 0000000000..d361b99f01 --- /dev/null +++ b/changelogs/fragments/3211-snap-error-handling.yml @@ -0,0 +1,2 @@ +minor_changes: + - snap - improved module error handling, especially for the case when snap server is down (https://github.com/ansible-collections/community.general/issues/2970). diff --git a/plugins/modules/packaging/os/snap.py b/plugins/modules/packaging/os/snap.py index de6fedccdc..a62be76425 100644 --- a/plugins/modules/packaging/os/snap.py +++ b/plugins/modules/packaging/os/snap.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright: (c) 2021, Alexei Znamensky (russoz) # Copyright: (c) 2018, Stanislas Lange (angristan) # Copyright: (c) 2018, Victor Carceler @@ -12,17 +13,13 @@ __metaclass__ = type DOCUMENTATION = ''' --- module: snap - short_description: Manages snaps - - description: - "Manages snaps packages." - options: name: description: - - Name of the snap to install or remove. Can be a list of snaps. + - Name of the snaps. 
required: true type: list elements: str @@ -117,10 +114,10 @@ from ansible_collections.community.general.plugins.module_utils.module_helper im __state_map = dict( present='install', absent='remove', - info='info', # not public - list='list', # not public enabled='enable', disabled='disable', + info='info', # not public + list='list', # not public ) @@ -171,9 +168,6 @@ class Snap(CmdStateModuleHelper): '\n'.join(results[3]), ] - def snap_exists(self, snap_name): - return 0 == self.run_command(params=[{'state': 'info'}, {'name': snap_name}])[0] - def is_snap_installed(self, snap_name): return 0 == self.run_command(params=[{'state': 'list'}, {'name': snap_name}])[0] @@ -188,14 +182,7 @@ class Snap(CmdStateModuleHelper): notes = match.group('notes') return "disabled" not in notes.split(',') - def validate_input_snaps(self): - """Ensure that all exist.""" - for snap_name in self.vars.name: - if not self.snap_exists(snap_name): - raise ModuleHelperException(msg="No snap matching '%s' available." 
% snap_name) - def state_present(self): - self.validate_input_snaps() # if snap doesnt exist, it will explode when trying to install self.vars.meta('classic').set(output=True) self.vars.meta('channel').set(output=True) actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)] @@ -227,59 +214,32 @@ class Snap(CmdStateModuleHelper): "error output for more details.".format(cmd=self.vars.cmd) raise ModuleHelperException(msg=msg) - def state_absent(self): - self.validate_input_snaps() # if snap doesnt exist, it will be absent by definition - actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)] + def _generic_state_action(self, actionable_func, actionable_var, params=None): + actionable_snaps = [s for s in self.vars.name if actionable_func(s)] if not actionable_snaps: return self.changed = True - self.vars.snaps_removed = actionable_snaps + self.vars[actionable_var] = actionable_snaps if self.module.check_mode: return - params = ['classic', 'channel', 'state'] # get base cmd parts + if params is None: + params = ['state'] commands = [params + [{'actionable_snaps': actionable_snaps}]] self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) if rc == 0: return - msg = "Ooops! Snap removal failed while executing '{cmd}', please examine logs and " \ + msg = "Ooops! 
Snap operation failed while executing '{cmd}', please examine logs and " \ "error output for more details.".format(cmd=self.vars.cmd) raise ModuleHelperException(msg=msg) + def state_absent(self): + self._generic_state_action(self.is_snap_installed, "snaps_removed", ['classic', 'channel', 'state']) + def state_enabled(self): - self.validate_input_snaps() - actionable_snaps = [s for s in self.vars.name if self.is_snap_enabled(s) is False] - if not actionable_snaps: - return - self.changed = True - self.vars.snaps_enabled = actionable_snaps - if self.module.check_mode: - return - params = ['classic', 'channel', 'state'] # get base cmd parts - commands = [params + [{'actionable_snaps': actionable_snaps}]] - self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) - if rc == 0: - return - msg = "Ooops! Snap enabling failed while executing '{cmd}', please examine logs and " \ - "error output for more details.".format(cmd=self.vars.cmd) - raise ModuleHelperException(msg=msg) + self._generic_state_action(lambda s: not self.is_snap_enabled(s), "snaps_enabled", ['classic', 'channel', 'state']) def state_disabled(self): - self.validate_input_snaps() - actionable_snaps = [s for s in self.vars.name if self.is_snap_enabled(s) is True] - if not actionable_snaps: - return - self.changed = True - self.vars.snaps_enabled = actionable_snaps - if self.module.check_mode: - return - params = ['classic', 'channel', 'state'] # get base cmd parts - commands = [params + [{'actionable_snaps': actionable_snaps}]] - self.vars.cmd, rc, out, err = self._run_multiple_commands(commands) - if rc == 0: - return - msg = "Ooops! 
Snap disabling failed while executing '{cmd}', please examine logs and " \ - "error output for more details.".format(cmd=self.vars.cmd) - raise ModuleHelperException(msg=msg) + self._generic_state_action(self.is_snap_enabled, "snaps_disabled", ['classic', 'channel', 'state']) def main(): From cf433567535d2a854335a4e252aa6320752208b3 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Sun, 29 Aug 2021 10:15:34 +0200 Subject: [PATCH 0533/3093] Fix unit tests (#3289) * Force new enough requests version. * Revert "Force new enough requests version." This reverts commit 339d40bef7d10e19b4d8beb885eb7e414b5c7354. * Make sure we don't install a too new python-gitlab for Ansible 2.10. * Change requirement instead of appending new one. * Fix quoting. * Try to skip if import fails. * Revert "Try to skip if import fails." This reverts commit 254bbd8548c08be4d49aca2e2fcedf23e1d23436. * Make other Python versions happy... * Update tests/utils/shippable/units.sh Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> Co-authored-by: Alexei Znamensky <103110+russoz@users.noreply.github.com> --- tests/utils/shippable/shippable.sh | 2 +- tests/utils/shippable/units.sh | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/utils/shippable/shippable.sh b/tests/utils/shippable/shippable.sh index 3a00812f12..5f94d9fff5 100755 --- a/tests/utils/shippable/shippable.sh +++ b/tests/utils/shippable/shippable.sh @@ -232,4 +232,4 @@ fi ansible-test env --dump --show --timeout "${timeout}" --color -v if [ "${SHIPPABLE_BUILD_ID:-}" ]; then "tests/utils/shippable/check_matrix.py"; fi -"tests/utils/shippable/${script}.sh" "${test}" +"tests/utils/shippable/${script}.sh" "${test}" "${ansible_version}" diff --git a/tests/utils/shippable/units.sh b/tests/utils/shippable/units.sh index 38e79935e7..88db336d26 100755 --- a/tests/utils/shippable/units.sh +++ b/tests/utils/shippable/units.sh @@ -22,6 +22,11 @@ esac ansible-test env --timeout "${timeout}" --color -v 
+if [ "$2" == "2.10" ]; then + sed -i -E 's/^python-gitlab($| .*)/python-gitlab < 2.10.1 ; python_version >= '\'3.6\''/g' tests/unit/requirements.txt + echo "python-gitlab ; python_version < '3.6'" >> tests/unit/requirements.txt +fi + # shellcheck disable=SC2086 ansible-test units --color -v --docker default --python "${version}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \ "${options[@]:+${options[@]}}" \ From df8fdcda7901a40d7d48c17ec9ce35c3d43ecbfe Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 29 Aug 2021 23:03:15 +1200 Subject: [PATCH 0534/3093] mh CmdMixin - added ArgFormat.BOOLEAN_NOT and logic (#3290) * mh CmdMixin - added ArgFormat.BOOLEAN_NOT and logic * added changelog fragment --- changelogs/fragments/3290-mh-cmd-boolean-not.yaml | 2 ++ plugins/module_utils/mh/mixins/cmd.py | 5 ++++- tests/unit/plugins/module_utils/test_module_helper.py | 10 ++++++++-- 3 files changed, 14 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/3290-mh-cmd-boolean-not.yaml diff --git a/changelogs/fragments/3290-mh-cmd-boolean-not.yaml b/changelogs/fragments/3290-mh-cmd-boolean-not.yaml new file mode 100644 index 0000000000..ab34539f15 --- /dev/null +++ b/changelogs/fragments/3290-mh-cmd-boolean-not.yaml @@ -0,0 +1,2 @@ +minor_changes: + - module_helper cmd module utils - added the ``ArgFormat`` style ``BOOLEAN_NOT``, to add CLI parameters when the module argument is false-ish (https://github.com/ansible-collections/community.general/pull/3290). 
diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py index aed4174c4f..51e5ae9873 100644 --- a/plugins/module_utils/mh/mixins/cmd.py +++ b/plugins/module_utils/mh/mixins/cmd.py @@ -16,6 +16,7 @@ class ArgFormat(object): BOOLEAN = 0 PRINTF = 1 FORMAT = 2 + BOOLEAN_NOT = 3 @staticmethod def stars_deco(num): @@ -50,12 +51,14 @@ class ArgFormat(object): _fmts = { ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []), + ArgFormat.BOOLEAN_NOT: lambda _fmt, v: ([] if bool(v) else [_fmt]), ArgFormat.PRINTF: printf_fmt, ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)], } self.name = name self.stars = stars + self.style = style if fmt is None: fmt = "{0}" @@ -76,7 +79,7 @@ class ArgFormat(object): self.arg_format = (self.stars_deco(stars))(self.arg_format) def to_text(self, value): - if value is None: + if value is None and self.style != ArgFormat.BOOLEAN_NOT: return [] func = self.arg_format return [str(p) for p in func(value)] diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py index f40a0f10ee..00667fcea3 100644 --- a/tests/unit/plugins/module_utils/test_module_helper.py +++ b/tests/unit/plugins/module_utils/test_module_helper.py @@ -24,6 +24,12 @@ ARG_FORMATS = dict( False, []), simple_boolean_none=("--superflag", ArgFormat.BOOLEAN, 0, None, []), + simple_boolean_not_true=("--superflag", ArgFormat.BOOLEAN_NOT, 0, + True, []), + simple_boolean_not_false=("--superflag", ArgFormat.BOOLEAN_NOT, 0, + False, ["--superflag"]), + simple_boolean_not_none=("--superflag", ArgFormat.BOOLEAN_NOT, 0, + None, ["--superflag"]), single_printf=("--param=%s", ArgFormat.PRINTF, 0, "potatoes", ["--param=potatoes"]), single_printf_no_substitution=("--param", ArgFormat.PRINTF, 0, @@ -65,7 +71,7 @@ def test_arg_format(fmt, style, stars, value, expected): af = ArgFormat('name', fmt, style, stars) actual = af.to_text(value) print("formatted string = {0}".format(actual)) - assert 
actual == expected + assert actual == expected, "actual = {0}".format(actual) ARG_FORMATS_FAIL = dict( @@ -218,7 +224,7 @@ CAUSE_CHG_DECO_IDS = sorted(CAUSE_CHG_DECO.keys()) @pytest.mark.parametrize(CAUSE_CHG_DECO_PARAMS, [[CAUSE_CHG_DECO[tc][param] - for param in CAUSE_CHG_DECO_PARAMS] + for param in CAUSE_CHG_DECO_PARAMS] for tc in CAUSE_CHG_DECO_IDS], ids=CAUSE_CHG_DECO_IDS) def test_cause_changes_deco(method, expect_exception, expect_changed): From 1ce79db7633ce2a8caf71681222466a95e18de18 Mon Sep 17 00:00:00 2001 From: Reto Kupferschmid Date: Sun, 29 Aug 2021 13:20:46 +0200 Subject: [PATCH 0535/3093] add deprecation warning for python-dnsimple 1 (#3267) * add deprecation warning for python-dnsimple 1 * add changelog fragment * Update changelogs/fragments/3267-dnsimple1-deprecation.yml Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * fix typo Co-authored-by: Felix Fontein * Update plugins/modules/net_tools/dnsimple.py Co-authored-by: Felix Fontein * Update changelogs/fragments/3267-dnsimple1-deprecation.yml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/3267-dnsimple1-deprecation.yml | 3 +++ plugins/modules/net_tools/dnsimple.py | 7 +++++++ 2 files changed, 10 insertions(+) create mode 100644 changelogs/fragments/3267-dnsimple1-deprecation.yml diff --git a/changelogs/fragments/3267-dnsimple1-deprecation.yml b/changelogs/fragments/3267-dnsimple1-deprecation.yml new file mode 100644 index 0000000000..dadc1d2901 --- /dev/null +++ b/changelogs/fragments/3267-dnsimple1-deprecation.yml @@ -0,0 +1,3 @@ +--- +deprecated_features: + - "dnsimple - python-dnsimple < 2.0.0 is deprecated and support for it will be removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2946#discussion_r667624693)." 
diff --git a/plugins/modules/net_tools/dnsimple.py b/plugins/modules/net_tools/dnsimple.py index 188f9fd64a..3681348f4e 100644 --- a/plugins/modules/net_tools/dnsimple.py +++ b/plugins/modules/net_tools/dnsimple.py @@ -82,6 +82,8 @@ options: version_added: 3.5.0 requirements: - "dnsimple >= 1.0.0" +notes: + - "Support for C(dnsimple < 2) is deprecated and will be removed in community.general 5.0.0." author: "Alex Coomans (@drcapulet)" ''' @@ -402,6 +404,11 @@ def main(): if DNSIMPLE_MAJOR_VERSION > 1: ds = DNSimpleV2(account_email, account_api_token, sandbox, module) else: + module.deprecate( + 'Support for python-dnsimple < 2 is deprecated. ' + 'Update python-dnsimple to version >= 2.0.0', + version='5.0.0', collection_name='community.general' + ) ds = DNSimpleV1(account_email, account_api_token, sandbox, module) # Let's figure out what operation we want to do # No domain, return a list From d9dcdcbbe469d140de61cfb4b5643d5bddfadb4e Mon Sep 17 00:00:00 2001 From: Sebastian Damm Date: Mon, 30 Aug 2021 06:53:30 +0200 Subject: [PATCH 0536/3093] udm_dns_record: Fix handling of PTR records (#3244) (#3256) * udm_dns_record: Fix handling of PTR records (#3244) Before, it was not possible to manage PTR records in Univention DNS, due to broken zone lookups and improper used parameters of the object. This patch fixes the PTR handling, allowing both v4 and v6 entries. 
* udm_dns_record: [doc] add changelog fragment * udm_dns_record: [fix] validation errors * udm_dns_record: import ipaddress module conditionally (#3244) * udm_dns_record: fix sanity check error, improve doc (#3244) * udm_dns_record: Improve changes to meet community standards (#3244) --- ...256-fix-ptr-handling-in-udm_dns_record.yml | 2 + .../cloud/univention/udm_dns_record.py | 60 +++++++++++++++++-- 2 files changed, 57 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/3256-fix-ptr-handling-in-udm_dns_record.yml diff --git a/changelogs/fragments/3256-fix-ptr-handling-in-udm_dns_record.yml b/changelogs/fragments/3256-fix-ptr-handling-in-udm_dns_record.yml new file mode 100644 index 0000000000..141a31349f --- /dev/null +++ b/changelogs/fragments/3256-fix-ptr-handling-in-udm_dns_record.yml @@ -0,0 +1,2 @@ +bugfixes: + - udm_dns_record - fixed managing of PTR records, which can never have worked before (https://github.com/ansible-collections/community.general/pull/3256). diff --git a/plugins/modules/cloud/univention/udm_dns_record.py b/plugins/modules/cloud/univention/udm_dns_record.py index 0c56970dd3..4e7aa70b32 100644 --- a/plugins/modules/cloud/univention/udm_dns_record.py +++ b/plugins/modules/cloud/univention/udm_dns_record.py @@ -21,6 +21,7 @@ description: requirements: - Python >= 2.6 - Univention + - ipaddress (for I(type=ptr_record)) options: state: type: str @@ -34,11 +35,13 @@ options: description: - "Name of the record, this is also the DNS record. E.g. www for www.example.com." + - For PTR records this has to be the IP address. zone: type: str required: true description: - Corresponding DNS zone for this record, e.g. example.com. + - For PTR records this has to be the full reverse zone (for example C(1.1.192.in-addr.arpa)). 
type: type: str required: true @@ -66,12 +69,29 @@ EXAMPLES = ''' a: - 192.0.2.1 - 2001:0db8::42 + +- name: Create a DNS v4 PTR record on a UCS + community.general.udm_dns_record: + name: 192.0.2.1 + zone: 2.0.192.in-addr.arpa + type: ptr_record + data: + ptr_record: "www.example.com." + +- name: Create a DNS v6 PTR record on a UCS + community.general.udm_dns_record: + name: 2001:db8:0:0:0:ff00:42:8329 + zone: 2.4.0.0.0.0.f.f.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa + type: ptr_record + data: + ptr_record: "www.example.com." ''' RETURN = '''#''' HAVE_UNIVENTION = False +HAVE_IPADDRESS = False try: from univention.admin.handlers.dns import ( forward_zone, @@ -82,6 +102,7 @@ except ImportError: pass from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import missing_required_lib from ansible_collections.community.general.plugins.module_utils.univention_umc import ( umc_module_for_add, umc_module_for_edit, @@ -90,6 +111,11 @@ from ansible_collections.community.general.plugins.module_utils.univention_umc i config, uldap, ) +try: + import ipaddress + HAVE_IPADDRESS = True +except ImportError: + pass def main(): @@ -124,14 +150,30 @@ def main(): changed = False diff = None + workname = name + if type == 'ptr_record': + if not HAVE_IPADDRESS: + module.fail_json(msg=missing_required_lib('ipaddress')) + try: + if 'arpa' not in zone: + raise Exception("Zone must be reversed zone for ptr_record. (e.g. 
1.1.192.in-addr.arpa)") + ipaddr_rev = ipaddress.ip_address(name).reverse_pointer + subnet_offset = ipaddr_rev.find(zone) + if subnet_offset == -1: + raise Exception("reversed IP address {0} is not part of zone.".format(ipaddr_rev)) + workname = ipaddr_rev[0:subnet_offset - 1] + except Exception as e: + module.fail_json( + msg='handling PTR record for {0} in zone {1} failed: {2}'.format(name, zone, e) + ) + obj = list(ldap_search( - '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, name), + '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, workname), attr=['dNSZone'] )) - exists = bool(len(obj)) container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn()) - dn = 'relativeDomainName={0},{1}'.format(name, container) + dn = 'relativeDomainName={0},{1}'.format(workname, container) if state == 'present': try: @@ -144,13 +186,21 @@ def main(): ) or reverse_zone.lookup( config(), uldap(), - '(zone={0})'.format(zone), + '(zoneName={0})'.format(zone), scope='domain', ) + if len(so) == 0: + raise Exception("Did not find zone '{0}' in Univention".format(zone)) obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0]) else: obj = umc_module_for_edit('dns/{0}'.format(type), dn) - obj['name'] = name + + if type == 'ptr_record': + obj['ip'] = name + obj['address'] = workname + else: + obj['name'] = name + for k, v in data.items(): obj[k] = v diff = obj.diff() From 97e2c3dec9fee886a37d029b08a554228809eaa2 Mon Sep 17 00:00:00 2001 From: Laurent Paumier <30328363+laurpaum@users.noreply.github.com> Date: Tue, 31 Aug 2021 07:07:53 +0200 Subject: [PATCH 0537/3093] Keycloak: add identity providers management (#3210) * init new module * update * add mappers * improve mappers * tests * fix tests * fix tests * Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein 
* Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein * Update plugins/modules/identity/keycloak/keycloak_identity_provider.py Co-authored-by: Felix Fontein * fix typos * update botmeta * improve change detection * fix tests * add integration tests * remove updateProfileFirstLoginMode parameter Co-authored-by: Laurent PAUMIER Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + .../identity/keycloak/keycloak.py | 164 +++++ .../keycloak/keycloak_identity_provider.py | 608 ++++++++++++++++++ plugins/modules/keycloak_identity_provider.py | 1 + .../keycloak_identity_provider/aliases | 1 + .../keycloak_identity_provider/tasks/main.yml | 171 +++++ .../keycloak_identity_provider/vars/main.yml | 7 + .../test_keycloak_identity_provider.py | 495 ++++++++++++++ 8 files changed, 1449 insertions(+) create mode 100644 plugins/modules/identity/keycloak/keycloak_identity_provider.py create mode 120000 plugins/modules/keycloak_identity_provider.py create mode 100644 tests/integration/targets/keycloak_identity_provider/aliases create mode 100644 tests/integration/targets/keycloak_identity_provider/tasks/main.yml create mode 100644 tests/integration/targets/keycloak_identity_provider/vars/main.yml create mode 100644 tests/unit/plugins/modules/identity/keycloak/test_keycloak_identity_provider.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 6055224145..0d2922182b 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -506,6 +506,8 @@ files: maintainers: Gaetan2907 $modules/identity/keycloak/keycloak_group.py: maintainers: adamgoossens + $modules/identity/keycloak/keycloak_identity_provider.py: + maintainers: laurpaum $modules/identity/keycloak/keycloak_realm.py: maintainers: kris2kris 
$modules/identity/keycloak/keycloak_role.py: diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index d53a29ba10..5ddb1320b9 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -78,6 +78,11 @@ URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authen URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority" URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}" +URL_IDENTITY_PROVIDERS = "{url}/admin/realms/{realm}/identity-provider/instances" +URL_IDENTITY_PROVIDER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}" +URL_IDENTITY_PROVIDER_MAPPERS = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers" +URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers/{id}" + def keycloak_argument_spec(): """ @@ -1437,3 +1442,162 @@ class KeycloakAPI(object): except Exception as e: self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s' % (config["alias"], realm, str(e))) + + def get_identity_providers(self, realm='master'): + """ Fetch representations for identity providers in a realm + :param realm: realm to be queried + :return: list of representations for identity providers + """ + idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) + try: + return json.loads(to_native(open_url(idps_url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s' + % (realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain list of identity providers for realm %s: %s' + % (realm, str(e))) + + def 
get_identity_provider(self, alias, realm='master'): + """ Fetch identity provider representation from a realm using the idp's alias. + If the identity provider does not exist, None is returned. + :param alias: Alias of the identity provider to fetch. + :param realm: Realm in which the identity provider resides; default 'master'. + """ + idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) + try: + return json.loads(to_native(open_url(idp_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' + % (alias, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' + % (alias, realm, str(e))) + + def create_identity_provider(self, idprep, realm='master'): + """ Create an identity provider. + :param idprep: Identity provider representation of the idp to be created. + :param realm: Realm in which this identity provider resides, default "master". + :return: HTTPResponse object on success + """ + idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) + try: + return open_url(idps_url, method='POST', headers=self.restheaders, + data=json.dumps(idprep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not create identity provider %s in realm %s: %s' + % (idprep['alias'], realm, str(e))) + + def update_identity_provider(self, idprep, realm='master'): + """ Update an existing identity provider. + :param idprep: Identity provider representation of the idp to be updated. + :param realm: Realm in which this identity provider resides, default "master". 
+ :return HTTPResponse object on success + """ + idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias']) + try: + return open_url(idp_url, method='PUT', headers=self.restheaders, + data=json.dumps(idprep), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not update identity provider %s in realm %s: %s' + % (idprep['alias'], realm, str(e))) + + def delete_identity_provider(self, alias, realm='master'): + """ Delete an identity provider. + :param alias: Alias of the identity provider. + :param realm: Realm in which this identity provider resides, default "master". + """ + idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) + try: + return open_url(idp_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Unable to delete identity provider %s in realm %s: %s' + % (alias, realm, str(e))) + + def get_identity_provider_mappers(self, alias, realm='master'): + """ Fetch representations for identity provider mappers + :param alias: Alias of the identity provider. 
+ :param realm: realm to be queried + :return: list of representations for identity provider mappers + """ + mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) + try: + return json.loads(to_native(open_url(mappers_url, method='GET', headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s' + % (alias, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s' + % (alias, realm, str(e))) + + def get_identity_provider_mapper(self, mid, alias, realm='master'): + """ Fetch identity provider representation from a realm using the idp's alias. + If the identity provider does not exist, None is returned. + :param mid: Unique ID of the mapper to fetch. + :param alias: Alias of the identity provider. + :param realm: Realm in which the identity provider resides; default 'master'. + """ + mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) + try: + return json.loads(to_native(open_url(mapper_url, method="GET", headers=self.restheaders, + validate_certs=self.validate_certs).read())) + except HTTPError as e: + if e.code == 404: + return None + else: + self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) + except Exception as e: + self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) + + def create_identity_provider_mapper(self, mapper, alias, realm='master'): + """ Create an identity provider mapper. + :param mapper: IdentityProviderMapperRepresentation of the mapper to be created. + :param alias: Alias of the identity provider. 
+ :param realm: Realm in which this identity provider resides, default "master". + :return: HTTPResponse object on success + """ + mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) + try: + return open_url(mappers_url, method='POST', headers=self.restheaders, + data=json.dumps(mapper), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not create identity provider mapper %s for idp %s in realm %s: %s' + % (mapper['name'], alias, realm, str(e))) + + def update_identity_provider_mapper(self, mapper, alias, realm='master'): + """ Update an existing identity provider. + :param mapper: IdentityProviderMapperRepresentation of the mapper to be updated. + :param alias: Alias of the identity provider. + :param realm: Realm in which this identity provider resides, default "master". + :return HTTPResponse object on success + """ + mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id']) + try: + return open_url(mapper_url, method='PUT', headers=self.restheaders, + data=json.dumps(mapper), validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Could not update mapper %s for identity provider %s in realm %s: %s' + % (mapper['id'], alias, realm, str(e))) + + def delete_identity_provider_mapper(self, mid, alias, realm='master'): + """ Delete an identity provider. + :param mid: Unique ID of the mapper to delete. + :param alias: Alias of the identity provider. + :param realm: Realm in which this identity provider resides, default "master". 
+ """ + mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) + try: + return open_url(mapper_url, method='DELETE', headers=self.restheaders, + validate_certs=self.validate_certs) + except Exception as e: + self.module.fail_json(msg='Unable to delete mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) diff --git a/plugins/modules/identity/keycloak/keycloak_identity_provider.py b/plugins/modules/identity/keycloak/keycloak_identity_provider.py new file mode 100644 index 0000000000..f56aeb9067 --- /dev/null +++ b/plugins/modules/identity/keycloak/keycloak_identity_provider.py @@ -0,0 +1,608 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: keycloak_identity_provider + +short_description: Allows administration of Keycloak identity providers via Keycloak API + +version_added: 3.6.0 + +description: + - This module allows you to add, remove or modify Keycloak identity providers via the Keycloak REST API. + It requires access to the REST API via OpenID Connect; the user connecting and the client being + used must have the requisite access rights. In a default Keycloak installation, admin-cli + and an admin user would work, as would a separate client definition with the scope tailored + to your needs and a user having the expected roles. + + - The names of module options are snake_cased versions of the camelCase ones found in the + Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html). + + +options: + state: + description: + - State of the identity provider. + - On C(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide. 
+ - On C(absent), the identity provider will be removed if it exists. + default: 'present' + type: str + choices: + - present + - absent + + realm: + description: + - The Keycloak realm under which this identity provider resides. + default: 'master' + type: str + + alias: + description: + - The alias uniquely identifies an identity provider and it is also used to build the redirect URI. + required: true + type: str + + display_name: + description: + - Friendly name for identity provider. + aliases: + - displayName + type: str + + enabled: + description: + - Enable/disable this identity provider. + type: bool + + store_token: + description: + - Enable/disable whether tokens must be stored after authenticating users. + aliases: + - storeToken + type: bool + + add_read_token_role_on_create: + description: + - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role. + aliases: + - addReadTokenRoleOnCreate + type: bool + + trust_email: + description: + - If enabled, email provided by this provider is not verified even if verification is enabled for the realm. + aliases: + - trustEmail + type: bool + + link_only: + description: + - If true, users cannot log in through this provider. They can only link to this provider. + This is useful if you don't want to allow login from the provider, but want to integrate with a provider. + aliases: + - linkOnly + type: bool + + first_broker_login_flow_alias: + description: + - Alias of authentication flow, which is triggered after first login with this identity provider. + aliases: + - firstBrokerLoginFlowAlias + type: str + + post_broker_login_flow_alias: + description: + - Alias of authentication flow, which is triggered after each login with this identity provider. + aliases: + - postBrokerLoginFlowAlias + type: str + + authenticate_by_default: + description: + - Specifies if this identity provider should be used by default for authentication even before displaying login screen. 
+ aliases: + - authenticateByDefault + type: bool + + provider_id: + description: + - Protocol used by this provider (supported values are C(oidc) or C(saml)). + aliases: + - providerId + type: str + + config: + description: + - Dict specifying the configuration options for the provider; the contents differ depending on the value of I(providerId). + Examples are given below for C(oidc) and C(saml). It is easiest to obtain valid config values by dumping an already-existing + identity provider configuration through check-mode in the I(existing) field. + type: dict + suboptions: + hide_on_login_page: + description: + - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) parameter. + aliases: + - hideOnLoginPage + type: bool + + gui_order: + description: + - Number defining order of the provider in GUI (for example, on Login page). + aliases: + - guiOrder + type: int + + sync_mode: + description: + - Default sync mode for all mappers. The sync mode determines when user data will be synced using the mappers. + aliases: + - syncMode + type: str + + issuer: + description: + - The issuer identifier for the issuer of the response. If not provided, no validation will be performed. + type: str + + authorizationUrl: + description: + - The Authorization URL. + type: str + + tokenUrl: + description: + - The Token URL. + type: str + + logoutUrl: + description: + - End session endpoint to use to logout user from external IDP. + type: str + + userInfoUrl: + description: + - The User Info URL. + type: str + + clientAuthMethod: + description: + - The client authentication method. + type: str + + clientId: + description: + - The client or client identifier registered within the identity provider. + type: str + + clientSecret: + description: + - The client or client secret registered within the identity provider. + type: str + + defaultScope: + description: + - The scopes to be sent when asking for authorization. 
+ type: str + + validateSignature: + description: + - Enable/disable signature validation of external IDP signatures. + type: bool + + useJwksUrl: + description: + - If the switch is on, identity provider public keys will be downloaded from given JWKS URL. + type: bool + + jwksUrl: + description: + - URL where identity provider keys in JWK format are stored. See JWK specification for more details. + type: str + + entityId: + description: + - The Entity ID that will be used to uniquely identify this SAML Service Provider. + type: str + + singleSignOnServiceUrl: + description: + - The URL that must be used to send authentication requests (SAML AuthnRequest). + type: str + + singleLogoutServiceUrl: + description: + - The URL that must be used to send logout requests. + type: str + + backchannelSupported: + description: + - Does the external IDP support backchannel logout? + type: str + + nameIDPolicyFormat: + description: + - Specifies the URI reference corresponding to a name identifier format. + type: str + + principalType: + description: + - Way to identify and track external users from the assertion. + type: str + + mappers: + description: + - A list of dicts defining mappers associated with this Identity Provider. + type: list + elements: dict + suboptions: + id: + description: + - Unique ID of this mapper. + type: str + + name: + description: + - Name of the mapper. + type: str + + identityProviderAlias: + description: + - Alias of the identity provider for this mapper. + type: str + + identityProviderMapper: + description: + - Type of mapper. + type: str + + config: + description: + - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper). 
+ type: dict + +extends_documentation_fragment: +- community.general.keycloak + +author: + - Laurent Paumier (@laurpaum) +''' + +EXAMPLES = ''' +- name: Create OIDC identity provider, authentication with credentials + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + auth_password: admin + realm: myrealm + alias: oidc-idp + display_name: OpenID Connect IdP + enabled: true + provider_id: oidc + config: + issuer: https://idp.example.com + authorizationUrl: https://idp.example.com/auth + tokenUrl: https://idp.example.com/token + userInfoUrl: https://idp.example.com/userinfo + clientAuthMethod: client_secret_post + clientId: my-client + clientSecret: secret + +- name: Create SAML identity provider, authentication with credentials + community.general.keycloak_identity_provider: + state: present + auth_keycloak_url: https://auth.example.com/auth + auth_realm: master + auth_username: admin + auth_password: admin + realm: myrealm + alias: saml-idp + display_name: SAML IdP + enabled: true + provider_id: saml + config: + entityId: https://auth.example.com/auth/realms/myrealm + singleSignOnServiceUrl: https://idp.example.com/login + wantAuthnRequestsSigned: true + wantAssertionsSigned: true +''' + +RETURN = ''' +msg: + description: Message as to what action was taken + returned: always + type: str + sample: "Identity provider my-idp has been created" + +proposed: + description: Representation of proposed changes to identity provider + returned: always + type: dict + sample: { + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "secret", + "issuer": "https://idp.example.com", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "providerId": "oidc" + } + +existing: + 
description: Representation of existing identity provider + returned: always + type: dict + sample: { + "addReadTokenRoleOnCreate": false, + "alias": "my-idp", + "authenticateByDefault": false, + "config": { + "authorizationUrl": "https://old.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://old.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://old.example.com/token", + "userInfoUrl": "https://old.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": true, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", + "linkOnly": false, + "providerId": "oidc", + "storeToken": false, + "trustEmail": false, + } + +end_state: + description: Representation of identity provider after module execution + returned: always + type: dict + sample: { + "addReadTokenRoleOnCreate": false, + "alias": "my-idp", + "authenticateByDefault": false, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "**********", + "issuer": "https://idp.example.com", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": true, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c", + "linkOnly": false, + "providerId": "oidc", + "storeToken": false, + "trustEmail": false, + } + +''' + +from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \ + keycloak_argument_spec, get_token, KeycloakError +from ansible.module_utils.basic import AnsibleModule + + +def sanitize(idp): + result = idp.copy() + if 'config' in result: + result['config'] = sanitize(result['config']) + if 'clientSecret' in result: + 
result['clientSecret'] = '**********' + return result + + +def get_identity_provider_with_mappers(kc, alias, realm): + idp = kc.get_identity_provider(alias, realm) + if idp is not None: + idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name')) + if idp is None: + idp = dict() + return idp + + +def main(): + """ + Module execution + + :return: + """ + argument_spec = keycloak_argument_spec() + + mapper_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + identityProviderAlias=dict(type='str'), + identityProviderMapper=dict(type='str'), + config=dict(type='dict'), + ) + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + realm=dict(type='str', default='master'), + alias=dict(type='str', required=True), + add_read_token_role_on_create=dict(type='bool', aliases=['addReadTokenRoleOnCreate']), + authenticate_by_default=dict(type='bool', aliases=['authenticateByDefault']), + config=dict(type='dict'), + display_name=dict(type='str', aliases=['displayName']), + enabled=dict(type='bool'), + first_broker_login_flow_alias=dict(type='str', aliases=['firstBrokerLoginFlowAlias']), + link_only=dict(type='bool', aliases=['linkOnly']), + post_broker_login_flow_alias=dict(type='str', aliases=['postBrokerLoginFlowAlias']), + provider_id=dict(type='str', aliases=['providerId']), + store_token=dict(type='bool', aliases=['storeToken']), + trust_email=dict(type='bool', aliases=['trustEmail']), + mappers=dict(type='list', elements='dict', options=mapper_spec), + ) + + argument_spec.update(meta_args) + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]), + required_together=([['auth_realm', 'auth_username', 'auth_password']])) + + result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={}) + + # Obtain access token, initialize API + try: + connection_header 
= get_token(module.params) + except KeycloakError as e: + module.fail_json(msg=str(e)) + + kc = KeycloakAPI(module, connection_header) + + realm = module.params.get('realm') + alias = module.params.get('alias') + state = module.params.get('state') + + # convert module parameters to client representation parameters (if they belong in there) + idp_params = [x for x in module.params + if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and + module.params.get(x) is not None] + + # does the identity provider already exist? + before_idp = get_identity_provider_with_mappers(kc, alias, realm) + + # build a changeset + changeset = dict() + + for param in idp_params: + new_param_value = module.params.get(param) + old_value = before_idp[camel(param)] if camel(param) in before_idp else None + if new_param_value != old_value: + changeset[camel(param)] = new_param_value + + # special handling of mappers list to allow change detection + changeset['mappers'] = before_idp.get('mappers', list()) + if module.params.get('mappers') is not None: + for new_mapper in module.params.get('mappers'): + old_mapper = next((x for x in changeset['mappers'] if x['name'] == new_mapper['name']), None) + new_mapper = dict((k, v) for k, v in new_mapper.items() if new_mapper[k] is not None) + if old_mapper is not None: + old_mapper.update(new_mapper) + else: + changeset['mappers'].append(new_mapper) + # remove mappers if not present in module params + changeset['mappers'] = [x for x in changeset['mappers'] + if [y for y in module.params.get('mappers', []) if y['name'] == x['name']] != []] + + # prepare the new representation + updated_idp = before_idp.copy() + updated_idp.update(changeset) + + result['proposed'] = sanitize(changeset) + result['existing'] = sanitize(before_idp) + + # if before_idp is none, the identity provider doesn't exist. + if before_idp == dict(): + if state == 'absent': + # nothing to do. 
+ if module._diff: + result['diff'] = dict(before='', after='') + result['changed'] = False + result['end_state'] = dict() + result['msg'] = 'Identity provider does not exist; doing nothing.' + module.exit_json(**result) + + # for 'present', create a new identity provider. + result['changed'] = True + + if module._diff: + result['diff'] = dict(before='', after=sanitize(updated_idp)) + + if module.check_mode: + module.exit_json(**result) + + # do it for real! + updated_idp = updated_idp.copy() + mappers = updated_idp.pop('mappers', []) + kc.create_identity_provider(updated_idp, realm) + for mapper in mappers: + kc.create_identity_provider_mapper(mapper, alias, realm) + after_idp = get_identity_provider_with_mappers(kc, alias, realm) + + result['end_state'] = sanitize(after_idp) + + result['msg'] = 'Identity provider {alias} has been created'.format(alias=alias) + module.exit_json(**result) + + else: + if state == 'present': + # no changes + if updated_idp == before_idp: + result['changed'] = False + result['end_state'] = sanitize(updated_idp) + result['msg'] = "No changes required to identity provider {alias}.".format(alias=alias) + module.exit_json(**result) + + # update the existing role + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_idp), after=sanitize(updated_idp)) + + if module.check_mode: + module.exit_json(**result) + + # do the update + updated_idp = updated_idp.copy() + updated_mappers = updated_idp.pop('mappers', []) + kc.update_identity_provider(updated_idp, realm) + for mapper in updated_mappers: + if mapper.get('id') is not None: + kc.update_identity_provider_mapper(mapper, alias, realm) + else: + kc.create_identity_provider_mapper(mapper, alias, realm) + for mapper in [x for x in before_idp['mappers'] + if [y for y in updated_mappers if y["name"] == x['name']] == []]: + kc.delete_identity_provider_mapper(mapper['id'], alias, realm) + + after_idp = get_identity_provider_with_mappers(kc, alias, realm) + + 
result['end_state'] = sanitize(after_idp) + + result['msg'] = "Identity provider {alias} has been updated".format(alias=alias) + module.exit_json(**result) + + elif state == 'absent': + result['changed'] = True + + if module._diff: + result['diff'] = dict(before=sanitize(before_idp), after='') + + if module.check_mode: + module.exit_json(**result) + + # delete for real + kc.delete_identity_provider(alias, realm) + + result['end_state'] = dict() + + result['msg'] = "Identity provider {alias} has been deleted".format(alias=alias) + module.exit_json(**result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py new file mode 120000 index 0000000000..6beed321db --- /dev/null +++ b/plugins/modules/keycloak_identity_provider.py @@ -0,0 +1 @@ +./identity/keycloak/keycloak_identity_provider.py \ No newline at end of file diff --git a/tests/integration/targets/keycloak_identity_provider/aliases b/tests/integration/targets/keycloak_identity_provider/aliases new file mode 100644 index 0000000000..ad7ccf7ada --- /dev/null +++ b/tests/integration/targets/keycloak_identity_provider/aliases @@ -0,0 +1 @@ +unsupported diff --git a/tests/integration/targets/keycloak_identity_provider/tasks/main.yml b/tests/integration/targets/keycloak_identity_provider/tasks/main.yml new file mode 100644 index 0000000000..5bc0bc3fa0 --- /dev/null +++ b/tests/integration/targets/keycloak_identity_provider/tasks/main.yml @@ -0,0 +1,171 @@ +--- +- name: Create realm + community.general.keycloak_realm: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + id: "{{ realm }}" + realm: "{{ realm }}" + state: present + +- name: Create new identity provider + community.general.keycloak_identity_provider: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ 
admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + alias: "{{ idp }}" + display_name: OpenID Connect IdP + enabled: true + provider_id: oidc + config: + issuer: https://idp.example.com + authorizationUrl: https://idp.example.com/auth + tokenUrl: https://idp.example.com/token + userInfoUrl: https://idp.example.com/userinfo + clientAuthMethod: client_secret_post + clientId: clientid + clientSecret: clientsecret + syncMode: FORCE + mappers: + - name: "first_name" + identityProviderAlias: "oidc-idp" + identityProviderMapper: "oidc-user-attribute-idp-mapper" + config: + claim: "first_name" + user.attribute: "first_name" + syncMode: "INHERIT" + - name: "last_name" + identityProviderAlias: "oidc-idp" + identityProviderMapper: "oidc-user-attribute-idp-mapper" + config: + claim: "last_name" + user.attribute: "last_name" + syncMode: "INHERIT" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert identity provider created + assert: + that: + - result is changed + - result.existing == {} + - result.end_state.alias == "{{ idp }}" + - result.end_state.mappers != [] + +- name: Update existing identity provider (no change) + community.general.keycloak_identity_provider: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + alias: "{{ idp }}" + enabled: true + provider_id: oidc + config: + issuer: https://idp.example.com + authorizationUrl: https://idp.example.com/auth + tokenUrl: https://idp.example.com/token + userInfoUrl: https://idp.example.com/userinfo + clientAuthMethod: client_secret_post + clientId: clientid + clientSecret: "**********" + syncMode: FORCE + mappers: + - name: "first_name" + identityProviderAlias: "oidc-idp" + identityProviderMapper: "oidc-user-attribute-idp-mapper" + config: + claim: "first_name" + user.attribute: "first_name" + syncMode: "INHERIT" + - name: "last_name" + 
identityProviderAlias: "oidc-idp" + identityProviderMapper: "oidc-user-attribute-idp-mapper" + config: + claim: "last_name" + user.attribute: "last_name" + syncMode: "INHERIT" + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert identity provider unchanged + assert: + that: + - result is not changed + +- name: Update existing identity provider (with change) + community.general.keycloak_identity_provider: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + alias: "{{ idp }}" + enabled: false + state: present + register: result + +- name: Debug + debug: + var: result + +- name: Assert identity provider updated + assert: + that: + - result is changed + - result.existing.enabled == true + - result.end_state.enabled == false + +- name: Delete existing identity provider + community.general.keycloak_identity_provider: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + alias: "{{ idp }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert identity provider deleted + assert: + that: + - result is changed + - result.end_state == {} + +- name: Delete absent identity provider + community.general.keycloak_identity_provider: + auth_keycloak_url: "{{ url }}" + auth_realm: "{{ admin_realm }}" + auth_username: "{{ admin_user }}" + auth_password: "{{ admin_password }}" + realm: "{{ realm }}" + alias: "{{ idp }}" + state: absent + register: result + +- name: Debug + debug: + var: result + +- name: Assert identity provider unchanged + assert: + that: + - result is not changed + - result.end_state == {} diff --git a/tests/integration/targets/keycloak_identity_provider/vars/main.yml b/tests/integration/targets/keycloak_identity_provider/vars/main.yml new file mode 100644 index 
0000000000..bd37149b31 --- /dev/null +++ b/tests/integration/targets/keycloak_identity_provider/vars/main.yml @@ -0,0 +1,7 @@ +--- +url: http://localhost:8080/auth +admin_realm: master +admin_user: admin +admin_password: password +realm: myrealm +idp: myidp diff --git a/tests/unit/plugins/modules/identity/keycloak/test_keycloak_identity_provider.py b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_identity_provider.py new file mode 100644 index 0000000000..8666b61759 --- /dev/null +++ b/tests/unit/plugins/modules/identity/keycloak/test_keycloak_identity_provider.py @@ -0,0 +1,495 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from contextlib import contextmanager + +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import call, patch +from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args + +from ansible_collections.community.general.plugins.modules.identity.keycloak import keycloak_identity_provider + +from itertools import count + +from ansible.module_utils.six import StringIO + + +@contextmanager +def patch_keycloak_api(get_identity_provider, create_identity_provider=None, update_identity_provider=None, delete_identity_provider=None, + get_identity_provider_mappers=None, create_identity_provider_mapper=None, update_identity_provider_mapper=None, + delete_identity_provider_mapper=None): + """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server + + Patches the `login` and `_post_json` methods + + Keyword arguments are passed to the mock object that patches `_post_json` + + No arguments are passed to the mock object that 
patches `login` because no tests require it + + Example:: + + with patch_ipa(return_value={}) as (mock_login, mock_post): + ... + """ + + obj = keycloak_identity_provider.KeycloakAPI + with patch.object(obj, 'get_identity_provider', side_effect=get_identity_provider) \ + as mock_get_identity_provider: + with patch.object(obj, 'create_identity_provider', side_effect=create_identity_provider) \ + as mock_create_identity_provider: + with patch.object(obj, 'update_identity_provider', side_effect=update_identity_provider) \ + as mock_update_identity_provider: + with patch.object(obj, 'delete_identity_provider', side_effect=delete_identity_provider) \ + as mock_delete_identity_provider: + with patch.object(obj, 'get_identity_provider_mappers', side_effect=get_identity_provider_mappers) \ + as mock_get_identity_provider_mappers: + with patch.object(obj, 'create_identity_provider_mapper', side_effect=create_identity_provider_mapper) \ + as mock_create_identity_provider_mapper: + with patch.object(obj, 'update_identity_provider_mapper', side_effect=update_identity_provider_mapper) \ + as mock_update_identity_provider_mapper: + with patch.object(obj, 'delete_identity_provider_mapper', side_effect=delete_identity_provider_mapper) \ + as mock_delete_identity_provider_mapper: + yield mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, \ + mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, \ + mock_update_identity_provider_mapper, mock_delete_identity_provider_mapper + + +def get_response(object_with_future_response, method, get_id_call_count): + if callable(object_with_future_response): + return object_with_future_response() + if isinstance(object_with_future_response, dict): + return get_response( + object_with_future_response[method], method, get_id_call_count) + if isinstance(object_with_future_response, list): + call_number = next(get_id_call_count) + return get_response( + 
object_with_future_response[call_number], method, get_id_call_count) + return object_with_future_response + + +def build_mocked_request(get_id_user_count, response_dict): + def _mocked_requests(*args, **kwargs): + url = args[0] + method = kwargs['method'] + future_response = response_dict.get(url, None) + return get_response(future_response, method, get_id_user_count) + return _mocked_requests + + +def create_wrapper(text_as_string): + """Allow to mock many times a call to one address. + Without this function, the StringIO is empty for the second call. + """ + def _create_wrapper(): + return StringIO(text_as_string) + return _create_wrapper + + +def mock_good_connection(): + token_response = { + 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), } + return patch( + 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url', + side_effect=build_mocked_request(count(), token_response), + autospec=True + ) + + +class TestKeycloakIdentityProvider(ModuleTestCase): + def setUp(self): + super(TestKeycloakIdentityProvider, self).setUp() + self.module = keycloak_identity_provider + + def test_create_when_absent(self): + """Add a new identity provider""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'alias': 'oidc-idp', + 'display_name': 'OpenID Connect IdP', + 'enabled': True, + 'provider_id': 'oidc', + 'config': { + 'issuer': 'https://idp.example.com', + 'authorizationUrl': 'https://idp.example.com/auth', + 'tokenUrl': 'https://idp.example.com/token', + 'userInfoUrl': 'https://idp.example.com/userinfo', + 'clientAuthMethod': 'client_secret_post', + 'clientId': 'my-client', + 'clientSecret': 'secret', + 'syncMode': "FORCE", + }, + 'mappers': [{ + 'name': "first_name", + 
'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'config': { + 'claim': "first_name", + 'user.attribute': "first_name", + 'syncMode': "INHERIT", + } + }, { + 'name': "last_name", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'config': { + 'claim': "last_name", + 'user.attribute': "last_name", + 'syncMode': "INHERIT", + } + }] + } + return_value_idp_get = [ + None, + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "no_log", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + } + ] + return_value_mappers_get = [ + [{ + "config": { + "claim": "first_name", + "syncMode": "INHERIT", + "user.attribute": "first_name" + }, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "first_name" + }, { + "config": { + "claim": "last_name", + "syncMode": "INHERIT", + "user.attribute": "last_name" + }, + "id": "f00c61e0-34d9-4bed-82d1-7e45acfefc09", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "last_name" + }] + ] + return_value_idp_created = [None] + return_value_mapper_created = [None, None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with 
patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + create_identity_provider=return_value_idp_created, create_identity_provider_mapper=return_value_mapper_created) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_identity_provider.mock_calls), 2) + self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1) + self.assertEqual(len(mock_create_identity_provider.mock_calls), 1) + self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 2) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_create_when_present(self): + """Update existing identity provider""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'alias': 'oidc-idp', + 'display_name': 'OpenID Connect IdP', + 'enabled': True, + 'provider_id': 'oidc', + 'config': { + 'issuer': 'https://idp.example.com', + 'authorizationUrl': 'https://idp.example.com/auth', + 'tokenUrl': 'https://idp.example.com/token', + 'userInfoUrl': 'https://idp.example.com/userinfo', + 'clientAuthMethod': 'client_secret_post', + 'clientId': 'my-client', + 'clientSecret': 'secret', + 'syncMode': "FORCE" + }, + 'mappers': [{ + 'name': "first_name", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'config': { + 'claim': "first_name", + 'user.attribute': "first_name", + 
'syncMode': "INHERIT", + } + }, { + 'name': "last_name", + 'identityProviderAlias': "oidc-idp", + 'identityProviderMapper': "oidc-user-attribute-idp-mapper", + 'config': { + 'claim': "last_name", + 'user.attribute': "last_name", + 'syncMode': "INHERIT", + } + }] + } + return_value_idp_get = [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "no_log", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP changeme", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + }, + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "no_log", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": "https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + } + ] + return_value_mappers_get = [ + [{ + "config": { + "claim": "first_name_changeme", + "syncMode": "INHERIT", + "user.attribute": "first_name_changeme" + }, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", 
+ "name": "first_name" + }], + [{ + "config": { + "claim": "first_name", + "syncMode": "INHERIT", + "user.attribute": "first_name" + }, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "first_name" + }, { + "config": { + "claim": "last_name", + "syncMode": "INHERIT", + "user.attribute": "last_name" + }, + "id": "f00c61e0-34d9-4bed-82d1-7e45acfefc09", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "last_name" + }] + ] + return_value_idp_updated = [None] + return_value_mapper_updated = [None] + return_value_mapper_created = [None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated, + create_identity_provider_mapper=return_value_mapper_created) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_identity_provider.mock_calls), 2) + self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 2) + self.assertEqual(len(mock_update_identity_provider.mock_calls), 1) + self.assertEqual(len(mock_update_identity_provider_mapper.mock_calls), 1) + self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def 
test_delete_when_absent(self): + """Remove an absent identity provider""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'alias': 'oidc-idp', + 'state': 'absent', + } + return_value_idp_get = [None] + changed = False + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_identity_provider.mock_calls), 1) + self.assertEqual(len(mock_delete_identity_provider.mock_calls), 0) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + def test_delete_when_present(self): + """Remove an existing identity provider""" + + module_args = { + 'auth_keycloak_url': 'http://keycloak.url/auth', + 'auth_password': 'admin', + 'auth_realm': 'master', + 'auth_username': 'admin', + 'auth_client_id': 'admin-cli', + 'validate_certs': True, + 'realm': 'realm-name', + 'alias': 'oidc-idp', + 'state': 'absent', + } + return_value_idp_get = [ + { + "addReadTokenRoleOnCreate": False, + "alias": "oidc-idp", + "authenticateByDefault": False, + "config": { + "authorizationUrl": "https://idp.example.com/auth", + "clientAuthMethod": "client_secret_post", + "clientId": "my-client", + "clientSecret": "no_log", + "issuer": "https://idp.example.com", + "syncMode": "FORCE", + "tokenUrl": "https://idp.example.com/token", + "userInfoUrl": 
"https://idp.example.com/userinfo" + }, + "displayName": "OpenID Connect IdP", + "enabled": True, + "firstBrokerLoginFlowAlias": "first broker login", + "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6", + "linkOnly": False, + "providerId": "oidc", + "storeToken": False, + "trustEmail": False, + }, + None + ] + return_value_mappers_get = [ + [{ + "config": { + "claim": "email", + "syncMode": "INHERIT", + "user.attribute": "email" + }, + "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef", + "identityProviderAlias": "oidc-idp", + "identityProviderMapper": "oidc-user-attribute-idp-mapper", + "name": "email" + }] + ] + return_value_idp_deleted = [None] + changed = True + + set_module_args(module_args) + + # Run the module + + with mock_good_connection(): + with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get, + delete_identity_provider=return_value_idp_deleted) \ + as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider, + mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper, + mock_delete_identity_provider_mapper): + with self.assertRaises(AnsibleExitJson) as exec_info: + self.module.main() + + self.assertEqual(len(mock_get_identity_provider.mock_calls), 1) + self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1) + self.assertEqual(len(mock_delete_identity_provider.mock_calls), 1) + + # Verify that the module's changed status matches what is expected + self.assertIs(exec_info.exception.args[0]['changed'], changed) + + +if __name__ == '__main__': + unittest.main() From 58c6f6c95af9c7f27d9dac67c3d05c8c7f07d546 Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Tue, 31 Aug 2021 01:10:10 -0400 Subject: [PATCH 0538/3093] Initial commit (#3300) --- plugins/modules/cloud/misc/proxmox.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git 
a/plugins/modules/cloud/misc/proxmox.py b/plugins/modules/cloud/misc/proxmox.py index c777564186..136829d13f 100644 --- a/plugins/modules/cloud/misc/proxmox.py +++ b/plugins/modules/cloud/misc/proxmox.py @@ -32,7 +32,14 @@ options: type: str disk: description: - - hard disk size in GB for instance + - This option was previously described as "hard disk size in GB for instance" however several formats describing + a lxc mount are permitted. + - Older versions of Proxmox will accept a numeric value for size using the I(storage) parameter to automatically + choose which storage to allocate from, however new versions enforce the C(:) syntax. + - "Additional options are available by using some combination of the following key-value pairs as a + comma-delimited list C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>] + [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=])." + - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description. - If I(proxmox_default_behavior) is set to C(compatiblity) (the default value), this option has a default of C(3). Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0. From baa721ac2281f9e628821863798f030c9efd4c9d Mon Sep 17 00:00:00 2001 From: froebela <32922546+froebela@users.noreply.github.com> Date: Tue, 31 Aug 2021 07:11:58 +0200 Subject: [PATCH 0539/3093] zfs.py: treated received properties as local and added diff mode support (#502) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * zfs.py: treated received properties as local and added diff mode support If you use "zfs set" to explicitly set ZFS properties, they are marked as from source "local". If ZFS properties are implicitly set by using "zfs send" and "zfs receive", for example as part of a template based installation, they are marked as from source "received". 
But as there is no technical difference between both types of them, the “received” ZFS properties should also be considered “local”. Otherwise Ansible would detect changes, which aren’t actual changes. Therefore I changed line 202/207 to reflect this. For us it’s quite important, that Ansible modules support the diff mode in order to qualify changes. Therefore I added some code lines to address this. * added changelog fragment for PR #502 * fixed typos in changelog fragment for PR #502 * minor changes in changelog fragment for PR #502 * added link to pull request in changelog fragment for PR #502 * extended the diff data structure to always include the name of the zfs filesystem * added code to also maintain the diff data structure after a change * reverted back some code lines for better code readability * added an extra dict in the diff data structure to hold the zfs properties --- .../502-zfs_bugfix_and_diff_mode_support.yaml | 4 ++++ plugins/modules/storage/zfs/zfs.py | 22 +++++++++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/502-zfs_bugfix_and_diff_mode_support.yaml diff --git a/changelogs/fragments/502-zfs_bugfix_and_diff_mode_support.yaml b/changelogs/fragments/502-zfs_bugfix_and_diff_mode_support.yaml new file mode 100644 index 0000000000..1ba7727c7c --- /dev/null +++ b/changelogs/fragments/502-zfs_bugfix_and_diff_mode_support.yaml @@ -0,0 +1,4 @@ +bugfixes: + - zfs - treated received properties as local (https://github.com/ansible-collections/community.general/pull/502). +minor_changes: + - zfs - added diff mode support (https://github.com/ansible-collections/community.general/pull/502). 
diff --git a/plugins/modules/storage/zfs/zfs.py b/plugins/modules/storage/zfs/zfs.py index 2d5d4487dd..a804753a16 100644 --- a/plugins/modules/storage/zfs/zfs.py +++ b/plugins/modules/storage/zfs/zfs.py @@ -194,12 +194,16 @@ class Zfs(object): self.module.fail_json(msg=err) def set_properties_if_changed(self): + diff = {'before': {'extra_zfs_properties': {}}, 'after': {'extra_zfs_properties': {}}} current_properties = self.get_current_properties() for prop, value in self.properties.items(): - if current_properties.get(prop, None) != value: + current_value = current_properties.get(prop, None) + if current_value != value: self.set_property(prop, value) + diff['before']['extra_zfs_properties'][prop] = current_value + diff['after']['extra_zfs_properties'][prop] = value if self.module.check_mode: - return + return diff updated_properties = self.get_current_properties() for prop in self.properties: value = updated_properties.get(prop, None) @@ -207,6 +211,9 @@ class Zfs(object): self.module.fail_json(msg="zfsprop was not present after being successfully set: %s" % prop) if current_properties.get(prop, None) != value: self.changed = True + if prop in diff['after']['extra_zfs_properties']: + diff['after']['extra_zfs_properties'][prop] = value + return diff def get_current_properties(self): cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,value,source"] @@ -220,7 +227,7 @@ class Zfs(object): # include source '-' so that creation-only properties are not removed # to avoids errors when the dataset already exists and the property is not changed # this scenario is most likely when the same playbook is run more than once - if source == 'local' or source == '-': + if source == 'local' or source == 'received' or source == '-': properties[prop] = value # Add alias for enhanced sharing properties if self.enhanced_sharing: @@ -266,13 +273,20 @@ def main(): if state == 'present': if zfs.exists(): - zfs.set_properties_if_changed() + result['diff'] = 
zfs.set_properties_if_changed() else: zfs.create() + result['diff'] = {'before': {'state': 'absent'}, 'after': {'state': state}} elif state == 'absent': if zfs.exists(): zfs.destroy() + result['diff'] = {'before': {'state': 'present'}, 'after': {'state': state}} + else: + result['diff'] = {} + + result['diff']['before_header'] = name + result['diff']['after_header'] = name result.update(zfs.properties) result['changed'] = zfs.changed From b2bb7e3f9c2e4225ac8f6c6867ab4051c3538993 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 31 Aug 2021 17:14:08 +1200 Subject: [PATCH 0540/3093] django_manage - added splitting the command parameter for running (#3283) * added splitting the command parameter for running * added changelog fragment * refactored variable names for improved readability --- ...3-django_manage-fix-command-splitting.yaml | 2 + .../web_infrastructure/django_manage.py | 38 ++++++++++--------- .../simple_project/p1/p1/settings.py | 1 + .../targets/django_manage/tasks/main.yaml | 6 +++ 4 files changed, 29 insertions(+), 18 deletions(-) create mode 100644 changelogs/fragments/3283-django_manage-fix-command-splitting.yaml diff --git a/changelogs/fragments/3283-django_manage-fix-command-splitting.yaml b/changelogs/fragments/3283-django_manage-fix-command-splitting.yaml new file mode 100644 index 0000000000..ba8b4efd69 --- /dev/null +++ b/changelogs/fragments/3283-django_manage-fix-command-splitting.yaml @@ -0,0 +1,2 @@ +bugfixes: + - django_manage - argument ``command`` is being splitted again as it should (https://github.com/ansible-collections/community.general/issues/3215). 
diff --git a/plugins/modules/web_infrastructure/django_manage.py b/plugins/modules/web_infrastructure/django_manage.py index 98ffdc446b..0c8126c457 100644 --- a/plugins/modules/web_infrastructure/django_manage.py +++ b/plugins/modules/web_infrastructure/django_manage.py @@ -158,6 +158,7 @@ EXAMPLES = """ import os import sys +import shlex from ansible.module_utils.basic import AnsibleModule @@ -273,61 +274,62 @@ def main(): ), ) - command = module.params['command'] + command_split = shlex.split(module.params['command']) + command_bin = command_split[0] project_path = module.params['project_path'] virtualenv = module.params['virtualenv'] for param in specific_params: value = module.params[param] - if value and param not in command_allowed_param_map[command]: - module.fail_json(msg='%s param is incompatible with command=%s' % (param, command)) + if value and param not in command_allowed_param_map[command_bin]: + module.fail_json(msg='%s param is incompatible with command=%s' % (param, command_bin)) - for param in command_required_param_map.get(command, ()): + for param in command_required_param_map.get(command_bin, ()): if not module.params[param]: - module.fail_json(msg='%s param is required for command=%s' % (param, command)) + module.fail_json(msg='%s param is required for command=%s' % (param, command_bin)) _ensure_virtualenv(module) - cmd = ["./manage.py", command] + run_cmd_args = ["./manage.py"] + command_split - if command in noinput_commands: - cmd.append("--noinput") + if command_bin in noinput_commands and '--noinput' not in command_split: + run_cmd_args.append("--noinput") for param in general_params: if module.params[param]: - cmd.append('--%s=%s' % (param, module.params[param])) + run_cmd_args.append('--%s=%s' % (param, module.params[param])) for param in specific_boolean_params: if module.params[param]: - cmd.append('--%s' % param) + run_cmd_args.append('--%s' % param) # these params always get tacked on the end of the command for param in 
end_of_command_params: if module.params[param]: - cmd.append(module.params[param]) + run_cmd_args.append(module.params[param]) - rc, out, err = module.run_command(cmd, cwd=project_path) + rc, out, err = module.run_command(run_cmd_args, cwd=project_path) if rc != 0: - if command == 'createcachetable' and 'table' in err and 'already exists' in err: + if command_bin == 'createcachetable' and 'table' in err and 'already exists' in err: out = 'already exists.' else: if "Unknown command:" in err: - _fail(module, cmd, err, "Unknown django command: %s" % command) - _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path) + _fail(module, run_cmd_args, err, "Unknown django command: %s" % command_bin) + _fail(module, run_cmd_args, out, err, path=os.environ["PATH"], syspath=sys.path) changed = False lines = out.split('\n') - filt = globals().get(command + "_filter_output", None) + filt = globals().get(command_bin + "_filter_output", None) if filt: filtered_output = list(filter(filt, lines)) if len(filtered_output): changed = True - check_changed = globals().get("{0}_check_changed".format(command), None) + check_changed = globals().get("{0}_check_changed".format(command_bin), None) if check_changed: changed = check_changed(out) - module.exit_json(changed=changed, out=out, cmd=cmd, app_path=project_path, project_path=project_path, + module.exit_json(changed=changed, out=out, cmd=run_cmd_args, app_path=project_path, project_path=project_path, virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath']) diff --git a/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py b/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py index 0a11583aba..f2472c1fe8 100644 --- a/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py +++ b/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py @@ -121,3 
+121,4 @@ USE_TZ = True # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATIC_URL = '/static/' +STATIC_ROOT = '/tmp/django-static' diff --git a/tests/integration/targets/django_manage/tasks/main.yaml b/tests/integration/targets/django_manage/tasks/main.yaml index ed305ca96b..0421739acc 100644 --- a/tests/integration/targets/django_manage/tasks/main.yaml +++ b/tests/integration/targets/django_manage/tasks/main.yaml @@ -48,3 +48,9 @@ pythonpath: "{{ tmp_django_root.path }}/1045-single-app-project/" command: check virtualenv: "{{ tmp_django_root.path }}/venv" + +- name: Run collectstatic --noinput on simple project + community.general.django_manage: + project_path: "{{ tmp_django_root.path }}/simple_project/p1" + command: collectstatic --noinput + virtualenv: "{{ tmp_django_root.path }}/venv" From 2d6816e11e1672df5b2aa485e8af9eaa45d7c5be Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Tue, 31 Aug 2021 04:21:53 -0400 Subject: [PATCH 0541/3093] proxmox inventory plugin - Update examples documentation (#3299) * Initial commit * Update plugins/inventory/proxmox.py Co-authored-by: Felix Fontein --- plugins/inventory/proxmox.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 33a564f333..f52f0f1bb3 100644 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -88,13 +88,24 @@ DOCUMENTATION = ''' ''' EXAMPLES = ''' +# Minimal example which will not gather additional facts for QEMU/LXC guests +# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006 # my.proxmox.yml plugin: community.general.proxmox -url: http://localhost:8006 user: ansible@pve password: secure -validate_certs: no + +# More complete example demonstrating the use of 'want_facts' and the constructed options +# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true' +# my.proxmox.yml +plugin: 
community.general.proxmox +url: http://pve.domain.com:8006 +user: ansible@pve +password: secure +validate_certs: false +want_facts: true keyed_groups: + # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true' - key: proxmox_tags_parsed separator: "" prefix: group From edd7b84285dd944f8c3e736928ef6a56a563748b Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 31 Aug 2021 22:34:57 +1200 Subject: [PATCH 0542/3093] pamd - fixed issue+minor refactorings (#3285) * pamd - fixed issue+minor refactorings * added changelog fragment * added unit test suggested in issue * Update tests/integration/targets/pamd/tasks/main.yml * fixed per PR + additional adjustment Co-authored-by: Felix Fontein --- .../3285-pamd-updated-with-empty-args.yaml | 4 ++ plugins/modules/system/pamd.py | 46 +++++++++---------- tests/integration/targets/pamd/tasks/main.yml | 31 ++++++++++--- .../unit/plugins/modules/system/test_pamd.py | 8 ++++ 4 files changed, 59 insertions(+), 30 deletions(-) create mode 100644 changelogs/fragments/3285-pamd-updated-with-empty-args.yaml diff --git a/changelogs/fragments/3285-pamd-updated-with-empty-args.yaml b/changelogs/fragments/3285-pamd-updated-with-empty-args.yaml new file mode 100644 index 0000000000..1c176dfdc3 --- /dev/null +++ b/changelogs/fragments/3285-pamd-updated-with-empty-args.yaml @@ -0,0 +1,4 @@ +bugfixes: + - pamd - code for ``state=updated`` when dealing with the pam module arguments, made no distinction between ``None`` and an empty list (https://github.com/ansible-collections/community.general/issues/3260). +minor_changes: + - pamd - minor refactorings (https://github.com/ansible-collections/community.general/pull/3285). 
diff --git a/plugins/modules/system/pamd.py b/plugins/modules/system/pamd.py index 738a23ee43..dda504974d 100644 --- a/plugins/modules/system/pamd.py +++ b/plugins/modules/system/pamd.py @@ -274,8 +274,7 @@ RULE_REGEX = re.compile(r"""(?P-?(?:auth|account|session|password))\s (?P\[.*\]|\S*)\s+ (?P\S*)\s* (?P.*)\s*""", re.X) - -RULE_ARG_REGEX = re.compile(r"""(\[.*\]|\S*)""") +RULE_ARG_REGEX = re.compile(r"(\[.*\]|\S*)") VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session'] @@ -358,11 +357,9 @@ class PamdRule(PamdLine): # Method to check if a rule matches the type, control and path. def matches(self, rule_type, rule_control, rule_path, rule_args=None): - if (rule_type == self.rule_type and + return (rule_type == self.rule_type and rule_control == self.rule_control and - rule_path == self.rule_path): - return True - return False + rule_path == self.rule_path) @classmethod def rule_from_string(cls, line): @@ -507,25 +504,25 @@ class PamdService(object): # Get a list of rules we want to change rules_to_find = self.get(rule_type, rule_control, rule_path) - new_args = parse_module_arguments(new_args) + new_args = parse_module_arguments(new_args, return_none=True) changes = 0 for current_rule in rules_to_find: rule_changed = False if new_type: - if(current_rule.rule_type != new_type): + if current_rule.rule_type != new_type: rule_changed = True current_rule.rule_type = new_type if new_control: - if(current_rule.rule_control != new_control): + if current_rule.rule_control != new_control: rule_changed = True current_rule.rule_control = new_control if new_path: - if(current_rule.rule_path != new_path): + if current_rule.rule_path != new_path: rule_changed = True current_rule.rule_path = new_path - if new_args: - if(current_rule.rule_args != new_args): + if new_args is not None: + if current_rule.rule_args != new_args: rule_changed = True current_rule.rule_args = new_args @@ -724,8 +721,9 @@ class PamdService(object): 
current_line = self._head while current_line is not None: - if not current_line.validate()[0]: - return current_line.validate() + curr_validate = current_line.validate() + if not curr_validate[0]: + return curr_validate current_line = current_line.next return True, "Module is valid" @@ -750,22 +748,25 @@ class PamdService(object): return '\n'.join(lines) + '\n' -def parse_module_arguments(module_arguments): - # Return empty list if we have no args to parse - if not module_arguments: - return [] - elif isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]: +def parse_module_arguments(module_arguments, return_none=False): + # If args is None, return empty list by default. + # But if return_none is True, then return None + if module_arguments is None: + return None if return_none else [] + if isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]: return [] if not isinstance(module_arguments, list): module_arguments = [module_arguments] - parsed_args = list() + # From this point on, module_arguments is guaranteed to be a list, empty or not + parsed_args = [] + re_clear_spaces = re.compile(r"\s*=\s*") for arg in module_arguments: for item in filter(None, RULE_ARG_REGEX.findall(arg)): if not item.startswith("["): - re.sub("\\s*=\\s*", "=", item) + re_clear_spaces.sub("=", item) parsed_args.append(item) return parsed_args @@ -861,8 +862,7 @@ def main(): fd.write(str(service)) except IOError: - module.fail_json(msg='Unable to create temporary \ - file %s' % temp_file) + module.fail_json(msg='Unable to create temporary file %s' % temp_file) module.atomic_move(temp_file.name, os.path.realpath(fname)) diff --git a/tests/integration/targets/pamd/tasks/main.yml b/tests/integration/targets/pamd/tasks/main.yml index 3e0fb4ee32..3835ff9db0 100644 --- a/tests/integration/targets/pamd/tasks/main.yml +++ b/tests/integration/targets/pamd/tasks/main.yml @@ -5,11 +5,10 @@ set_fact: test_pamd_file: 
"/tmp/pamd_file" -- name: Copy temporary pam.d file +- name: Create temporary pam.d file copy: content: "session required pam_lastlog.so silent showfailed" dest: "{{ test_pamd_file }}" - - name: Test working on a single-line file works (2925) community.general.pamd: path: /tmp @@ -20,17 +19,37 @@ module_arguments: silent state: args_absent register: pamd_file_output - - name: Check if changes made assert: that: - pamd_file_output is changed -- name: Copy temporary pam.d file +- name: Test removing all arguments from an entry (3260) + community.general.pamd: + path: /tmp + name: pamd_file + type: session + control: required + module_path: pam_lastlog.so + module_arguments: "" + state: updated + register: pamd_file_output_noargs +- name: Read back the file (3260) + slurp: + src: "{{ test_pamd_file }}" + register: pamd_file_slurp_noargs +- name: Check if changes made (3260) + vars: + line_array: "{{ (pamd_file_slurp_noargs.content|b64decode).split('\n')[2].split() }}" + assert: + that: + - pamd_file_output_noargs is changed + - line_array == ['session', 'required', 'pam_lastlog.so'] + +- name: Create temporary pam.d file copy: content: "" dest: "{{ test_pamd_file }}" - # This test merely demonstrates that, as-is, module will not perform any changes on an empty file # All the existing values for "state" will first search for a rule matching type, control, module_path # and will not perform any change whatsoever if no existing rules match. 
@@ -43,12 +62,10 @@ module_path: pam_lastlog.so module_arguments: silent register: pamd_file_output_empty - - name: Read back the file slurp: src: "{{ test_pamd_file }}" register: pamd_file_slurp - - name: Check if changes made assert: that: diff --git a/tests/unit/plugins/modules/system/test_pamd.py b/tests/unit/plugins/modules/system/test_pamd.py index e7a6883564..19c9d7352a 100644 --- a/tests/unit/plugins/modules/system/test_pamd.py +++ b/tests/unit/plugins/modules/system/test_pamd.py @@ -218,6 +218,14 @@ auth required pam_deny.so test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass') self.assertNotIn(str(test_rule), str(self.pamd)) + def test_update_rule_remove_module_args(self): + self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so', new_args='')) + test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', '') + self.assertIn(str(test_rule), str(self.pamd)) + + test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass') + self.assertNotIn(str(test_rule), str(self.pamd)) + def test_update_first_three(self): self.assertTrue(self.pamd.update_rule('auth', 'required', 'pam_env.so', new_type='one', new_control='two', new_path='three')) From 1f5345881d1af3429573f53b07d2684537626089 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 31 Aug 2021 23:09:29 +1200 Subject: [PATCH 0543/3093] open_iscsi - minor refactoring (#3286) * open_iscsi - minor refactoring * added changelog fragment --- .../3286-open_iscsi-improvements.yaml | 4 + plugins/modules/system/open_iscsi.py | 109 +++++++----------- 2 files changed, 47 insertions(+), 66 deletions(-) create mode 100644 changelogs/fragments/3286-open_iscsi-improvements.yaml diff --git a/changelogs/fragments/3286-open_iscsi-improvements.yaml b/changelogs/fragments/3286-open_iscsi-improvements.yaml new file mode 100644 index 0000000000..860a5f7811 --- /dev/null +++ 
b/changelogs/fragments/3286-open_iscsi-improvements.yaml @@ -0,0 +1,4 @@ +minor_changes: + - open_iscsi - minor refactoring (https://github.com/ansible-collections/community.general/pull/3286). +bugfixes: + - open_iscsi - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3286). diff --git a/plugins/modules/system/open_iscsi.py b/plugins/modules/system/open_iscsi.py index 570925f6a4..2d255356e6 100644 --- a/plugins/modules/system/open_iscsi.py +++ b/plugins/modules/system/open_iscsi.py @@ -125,6 +125,7 @@ import time from ansible.module_utils.basic import AnsibleModule ISCSIADM = 'iscsiadm' +iscsiadm_cmd = None def compare_nodelists(l1, l2): @@ -134,12 +135,12 @@ def compare_nodelists(l1, l2): def iscsi_get_cached_nodes(module, portal=None): - cmd = '%s --mode node' % iscsiadm_cmd - (rc, out, err) = module.run_command(cmd) + cmd = [iscsiadm_cmd, '--mode', 'node'] + rc, out, err = module.run_command(cmd) + nodes = [] if rc == 0: lines = out.splitlines() - nodes = [] for line in lines: # line format is "ip:port,target_portal_group_tag targetname" parts = line.split() @@ -156,7 +157,7 @@ def iscsi_get_cached_nodes(module, portal=None): # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details # err can contain [N|n]o records... 
elif rc == 21 or (rc == 255 and "o records found" in err): - nodes = [] + pass else: module.fail_json(cmd=cmd, rc=rc, msg=err) @@ -164,16 +165,13 @@ def iscsi_get_cached_nodes(module, portal=None): def iscsi_discover(module, portal, port): - cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + cmd = [iscsiadm_cmd, '--mode', 'discovery', '--type', 'sendtargets', '--portal', '%s:%s' % (portal, port)] + module.run_command(cmd, check_rc=True) def target_loggedon(module, target, portal=None, port=None): - cmd = '%s --mode session' % iscsiadm_cmd - (rc, out, err) = module.run_command(cmd) + cmd = [iscsiadm_cmd, '--mode', 'session'] + rc, out, err = module.run_command(cmd) if portal is None: portal = "" @@ -199,30 +197,23 @@ def target_login(module, target, portal=None, port=None): ('node.session.auth.username', node_user), ('node.session.auth.password', node_pass)] for (name, value) in params: - cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value) - (rc, out, err) = module.run_command(cmd) - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value] + module.run_command(cmd, check_rc=True) - cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login'] if portal is not None and port is not None: - cmd += ' --portal %s:%s' % (portal, port) + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + module.run_command(cmd, check_rc=True) def target_logout(module, target): - cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target) - (rc, out, err) = 
module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--logout'] + module.run_command(cmd, check_rc=True) -def target_device_node(module, target): +def target_device_node(target): # if anyone know a better way to find out which devicenodes get created for # a given target... @@ -239,51 +230,39 @@ def target_device_node(module, target): def target_isauto(module, target, portal=None, port=None): - cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target] - if portal is not None: - if port is not None: - portal = '%s:%s' % (portal, port) - cmd = '%s --portal %s' % (cmd, portal) + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) - (rc, out, err) = module.run_command(cmd) + dummy, out, dummy = module.run_command(cmd, check_rc=True) - if rc == 0: - lines = out.splitlines() - for line in lines: - if 'node.startup' in line: - return 'automatic' in line - return False - else: - module.fail_json(cmd=cmd, rc=rc, msg=err) + lines = out.splitlines() + for line in lines: + if 'node.startup' in line: + return 'automatic' in line + return False def target_setauto(module, target, portal=None, port=None): - cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'automatic'] - if portal is not None: - if port is not None: - portal = '%s:%s' % (portal, port) - cmd = '%s --portal %s' % (cmd, portal) + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + module.run_command(cmd, check_rc=True) def target_setmanual(module, target, 
portal=None, port=None): - cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target) + cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'manual'] - if portal is not None: - if port is not None: - portal = '%s:%s' % (portal, port) - cmd = '%s --portal %s' % (cmd, portal) + if portal is not None and port is not None: + cmd.append('--portal') + cmd.append('%s:%s' % (portal, port)) - (rc, out, err) = module.run_command(cmd) - - if rc > 0: - module.fail_json(cmd=cmd, rc=rc, msg=err) + module.run_command(cmd, check_rc=True) def main(): @@ -308,6 +287,7 @@ def main(): ), required_together=[['node_user', 'node_pass']], + required_if=[('discover', True, ['portal'])], supports_check_mode=True, ) @@ -335,13 +315,10 @@ def main(): cached = iscsi_get_cached_nodes(module, portal) # return json dict - result = {} - result['changed'] = False + result = {'changed': False} if discover: - if portal is None: - module.fail_json(msg="Need to specify at least the portal (ip) to discover") - elif check: + if check: nodes = cached else: iscsi_discover(module, portal, port) @@ -376,13 +353,13 @@ def main(): if (login and loggedon) or (not login and not loggedon): result['changed'] |= False if login: - result['devicenodes'] = target_device_node(module, target) + result['devicenodes'] = target_device_node(target) elif not check: if login: target_login(module, target, portal, port) # give udev some time time.sleep(1) - result['devicenodes'] = target_device_node(module, target) + result['devicenodes'] = target_device_node(target) else: target_logout(module, target) result['changed'] |= True From fce562ad6dee6d43ce9db070a8908000ffefc23d Mon Sep 17 00:00:00 2001 From: Max Bidlingmaier Date: Tue, 31 Aug 2021 15:07:52 +0200 Subject: [PATCH 0544/3093] Enhancement to gitlab_group_members to accept user lists as input (#3047) * - fix to issue 3041 - add func to work with user lists - 
add func to set members to the ones give * Added version_added to new parameter * fixed elements in definition of gitlab_users nad wrong import in gitlab_users * linter issues fixed * added list elelements to argument_spec * More whitspeaces for the linter * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * adapted changelog * removed bugfix (other PR), changes due to review * changed input handling according to review * Fixed test findings * Added list of dict to allow for specifying user/access_level tuples * corrected doc section * fixed parameter definitions * removed strange additional import * Update changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * fixed required if * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Added suggestions from PR * fixed identation problem * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Zainab Alsaffar * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Zainab Alsaffar * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Recommended changes from discussionst * Fixed issues from automatic tests * added missing metaclass due to test finding * added integration tests * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix 
Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * fixed optimization for only one user * Reverted gitlab_project_members to original version - changes will be done in a separate branch * added examples for new functionality * - fixed changelog after reverting gitlab_project_memebers - fully reverted gitlab_project_members * Fixed error handling: when single users are not updateable in bulk mode the exception should not stop the code flow but document the problem in the result. 
* Better error handling * on error give username, not gitlab numeric userid * Fixed broken check_mode * Update plugins/modules/source_control/gitlab/gitlab_group_members.py Co-authored-by: Felix Fontein * Change from review Co-authored-by: Max Bidlingmaier Co-authored-by: Felix Fontein Co-authored-by: Zainab Alsaffar --- ...1-gitlab_x_members_fix_and_enhancement.yml | 3 + .../gitlab/gitlab_group_members.py | 330 ++++++++++++++---- .../gitlab_group_members/tasks/main.yml | 47 ++- .../gitlab_group_members/vars/main.yml | 10 +- 4 files changed, 311 insertions(+), 79 deletions(-) create mode 100644 changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml diff --git a/changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml b/changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml new file mode 100644 index 0000000000..ce558e1f84 --- /dev/null +++ b/changelogs/fragments/3041-gitlab_x_members_fix_and_enhancement.yml @@ -0,0 +1,3 @@ +minor_changes: +- gitlab_group_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3047). +- gitlab_group_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3047). diff --git a/plugins/modules/source_control/gitlab/gitlab_group_members.py b/plugins/modules/source_control/gitlab/gitlab_group_members.py index 50779e6445..b526873d30 100644 --- a/plugins/modules/source_control/gitlab/gitlab_group_members.py +++ b/plugins/modules/source_control/gitlab/gitlab_group_members.py @@ -32,15 +32,38 @@ options: type: str gitlab_user: description: - - The username of the member to add to/remove from the GitLab group. - required: true - type: str + - A username or a list of usernames to add to/remove from the GitLab group. + - Mutually exclusive with I(gitlab_users_access). + type: list + elements: str access_level: description: - The access level for the user. 
- Required if I(state=present), user state is set to present. + - Mutually exclusive with I(gitlab_users_access). type: str choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] + gitlab_users_access: + description: + - Provide a list of user to access level mappings. + - Every dictionary in this list specifies a user (by username) and the access level the user should have. + - Mutually exclusive with I(gitlab_user) and I(access_level). + - Use together with I(purge_users) to remove all users not specified here from the group. + type: list + elements: dict + suboptions: + name: + description: A username or a list of usernames to add to/remove from the GitLab group. + type: str + required: true + access_level: + description: + - The access level for the user. + - Required if I(state=present), user state is set to present. + type: str + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] + required: true + version_added: 3.6.0 state: description: - State of the member in the group. @@ -49,6 +72,15 @@ options: choices: ['present', 'absent'] default: 'present' type: str + purge_users: + description: + - Adds/remove users of the given access_level to match the given gitlab_user/gitlab_users_access list. + If omitted do not purge orphaned members. + - Is only used when I(state=present). + type: list + elements: str + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] + version_added: 3.6.0 notes: - Supports C(check_mode). 
''' @@ -70,6 +102,51 @@ EXAMPLES = r''' gitlab_group: groupname gitlab_user: username state: absent + +- name: Add a list of Users to A GitLab Group + gitlab_group_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + gitlab_group: groupname + gitlab_user: + - user1 + - user2 + access_level: developer + state: present + +- name: Add a list of Users with Dedicated Access Levels to A GitLab Group + gitlab_group_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + gitlab_group: groupname + gitlab_users_access: + - name: user1 + access_level: developer + - name: user2 + access_level: maintainer + state: present + +- name: Add a user, remove all others which might be on this access level + gitlab_group_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + gitlab_group: groupname + gitlab_user: username + access_level: developer + purge_users: developer + state: present + +- name: Remove a list of Users with Dedicated Access Levels to A GitLab Group + gitlab_group_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + gitlab_group: groupname + gitlab_users_access: + - name: user1 + access_level: developer + - name: user2 + access_level: maintainer + state: absent ''' RETURN = r''' # ''' @@ -111,6 +188,17 @@ class GitLabGroup(object): group = self._gitlab.groups.get(gitlab_group_id) return group.members.list(all=True) + # get single member in a group by user name + def get_member_in_a_group(self, gitlab_group_id, gitlab_user_id): + member = None + group = self._gitlab.groups.get(gitlab_group_id) + try: + member = group.members.get(gitlab_user_id) + if member: + return member + except gitlab.exceptions.GitlabGetError as e: + return None + # check if the user is a member of the group def is_user_a_member(self, members, gitlab_user_id): for member in members: @@ -120,27 +208,14 @@ # add user to a group def
add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level): - try: - group = self._gitlab.groups.get(gitlab_group_id) - add_member = group.members.create( - {'user_id': gitlab_user_id, 'access_level': access_level}) - - if add_member: - return add_member.username - - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json( - msg="Failed to add member to the Group, Group ID %s: %s" % (gitlab_group_id, e)) + group = self._gitlab.groups.get(gitlab_group_id) + add_member = group.members.create( + {'user_id': gitlab_user_id, 'access_level': access_level}) # remove user from a group def remove_user_from_group(self, gitlab_user_id, gitlab_group_id): - try: - group = self._gitlab.groups.get(gitlab_group_id) - group.members.delete(gitlab_user_id) - - except (gitlab.exceptions.GitlabDeleteError) as e: - self._module.fail_json( - msg="Failed to remove member from GitLab group, ID %s: %s" % (gitlab_group_id, e)) + group = self._gitlab.groups.get(gitlab_group_id) + group.members.delete(gitlab_user_id) # get user's access level def get_user_access_level(self, members, gitlab_user_id): @@ -152,12 +227,8 @@ class GitLabGroup(object): def update_user_access_level(self, members, gitlab_user_id, access_level): for member in members: if member.id == gitlab_user_id: - try: - member.access_level = access_level - member.save() - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json( - msg="Failed to update the access level for the member, %s: %s" % (gitlab_user_id, e)) + member.access_level = access_level + member.save() def main(): @@ -165,9 +236,18 @@ def main(): argument_spec.update(dict( api_token=dict(type='str', required=True, no_log=True), gitlab_group=dict(type='str', required=True), - gitlab_user=dict(type='str', required=True), + gitlab_user=dict(type='list', elements='str'), state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', required=False, choices=['guest', 
'reporter', 'developer', 'maintainer', 'owner']) + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), + purge_users=dict(type='list', elements='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), + gitlab_users_access=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str', required=True), + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True), + ) + ), )) module = AnsibleModule( @@ -175,15 +255,19 @@ def main(): mutually_exclusive=[ ['api_username', 'api_token'], ['api_password', 'api_token'], + ['gitlab_user', 'gitlab_users_access'], + ['access_level', 'gitlab_users_access'], ], required_together=[ ['api_username', 'api_password'], + ['gitlab_user', 'access_level'], ], required_one_of=[ ['api_username', 'api_token'], + ['gitlab_user', 'gitlab_users_access'], ], required_if=[ - ['state', 'present', ['access_level']], + ['state', 'present', ['access_level', 'gitlab_users_access'], True], ], supports_check_mode=True, ) @@ -191,72 +275,166 @@ def main(): if not HAS_PY_GITLAB: module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR) + access_level_int = { + 'guest': gitlab.GUEST_ACCESS, + 'reporter': gitlab.REPORTER_ACCESS, + 'developer': gitlab.DEVELOPER_ACCESS, + 'maintainer': gitlab.MAINTAINER_ACCESS, + 'owner': gitlab.OWNER_ACCESS + } + gitlab_group = module.params['gitlab_group'] - gitlab_user = module.params['gitlab_user'] state = module.params['state'] access_level = module.params['access_level'] + purge_users = module.params['purge_users'] - # convert access level string input to int - if access_level: - access_level_int = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, - 'owner': gitlab.OWNER_ACCESS - } - - access_level = 
access_level_int[access_level] + if purge_users: + purge_users = [access_level_int[level] for level in purge_users] # connect to gitlab server gl = gitlabAuthentication(module) group = GitLabGroup(module, gl) - gitlab_user_id = group.get_user_id(gitlab_user) gitlab_group_id = group.get_group_id(gitlab_group) # group doesn't exist if not gitlab_group_id: module.fail_json(msg="group '%s' not found." % gitlab_group) - # user doesn't exist - if not gitlab_user_id: - if state == 'absent': - module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the group" % gitlab_user) - else: - module.fail_json(msg="user '%s' not found." % gitlab_user) + members = [] + if module.params['gitlab_user'] is not None: + gitlab_users_access = [] + gitlab_users = module.params['gitlab_user'] + for gl_user in gitlab_users: + gitlab_users_access.append({'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None}) + elif module.params['gitlab_users_access'] is not None: + gitlab_users_access = module.params['gitlab_users_access'] + for user_level in gitlab_users_access: + user_level['access_level'] = access_level_int[user_level['access_level']] - members = group.get_members_in_a_group(gitlab_group_id) - is_user_a_member = group.is_user_a_member(members, gitlab_user_id) - - # check if the user is a member in the group - if not is_user_a_member: - if state == 'present': - # add user to the group - if not module.check_mode: - group.add_member_to_group(gitlab_user_id, gitlab_group_id, access_level) - module.exit_json(changed=True, result="Successfully added user '%s' to the group." % gitlab_user) - # state as absent - else: - module.exit_json(changed=False, result="User, '%s', is not a member in the group. 
No change to report" % gitlab_user) - # in case that a user is a member + if len(gitlab_users_access) == 1 and not purge_users: + # only single user given + members = [group.get_member_in_a_group(gitlab_group_id, group.get_user_id(gitlab_users_access[0]['name']))] + if members[0] is None: + members = [] + elif len(gitlab_users_access) > 1 or purge_users: + # list of users given + members = group.get_members_in_a_group(gitlab_group_id) else: - if state == 'present': - # compare the access level - user_access_level = group.get_user_access_level(members, gitlab_user_id) - if user_access_level == access_level: - module.exit_json(changed=False, result="User, '%s', is already a member in the group. No change to report" % gitlab_user) + module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.", + result_data=[]) + + changed = False + error = False + changed_users = [] + changed_data = [] + + for gitlab_user in gitlab_users_access: + gitlab_user_id = group.get_user_id(gitlab_user['name']) + + # user doesn't exist + if not gitlab_user_id: + if state == 'absent': + changed_users.append("user '%s' not found, and thus also not part of the group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "user '%s' not found, and thus also not part of the group" % gitlab_user['name']}) else: - # update the access level for the user - if not module.check_mode: - group.update_user_access_level(members, gitlab_user_id, access_level) - module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user) + error = True + changed_users.append("user '%s' not found." % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "user '%s' not found." 
% gitlab_user['name']}) + continue + + is_user_a_member = group.is_user_a_member(members, gitlab_user_id) + + # check if the user is a member in the group + if not is_user_a_member: + if state == 'present': + # add user to the group + try: + if not module.check_mode: + group.add_member_to_group(gitlab_user_id, gitlab_group_id, gitlab_user['access_level']) + changed = True + changed_users.append("Successfully added user '%s' to group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully added user '%s' to group" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabCreateError) as e: + error = True + changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)}) + # state as absent + else: + changed_users.append("User, '%s', is not a member in the group. No change to report" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "User, '%s', is not a member in the group. No change to report" % gitlab_user['name']}) + # in case that a user is a member else: - # remove the user from the group - if not module.check_mode: - group.remove_user_from_group(gitlab_user_id, gitlab_group_id) - module.exit_json(changed=True, result="Successfully removed user, '%s', from the group" % gitlab_user) + if state == 'present': + # compare the access level + user_access_level = group.get_user_access_level(members, gitlab_user_id) + if user_access_level == gitlab_user['access_level']: + changed_users.append("User, '%s', is already a member in the group. No change to report" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "User, '%s', is already a member in the group. 
No change to report" % gitlab_user['name']}) + else: + # update the access level for the user + try: + if not module.check_mode: + group.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level']) + changed = True + changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabUpdateError) as e: + error = True + changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)}) + else: + # remove the user from the group + try: + if not module.check_mode: + group.remove_user_from_group(gitlab_user_id, gitlab_group_id) + changed = True + changed_users.append("Successfully removed user, '%s', from the group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully removed user, '%s', from the group" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabDeleteError) as e: + error = True + changed_users.append("Failed to removed user, '%s', from the group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)}) + + # if state = present and purge_users set delete users which are in members having give access level but not in gitlab_users + if state == 'present' and purge_users: + uppercase_names_in_gitlab_users_access = [] + for name in gitlab_users_access: + uppercase_names_in_gitlab_users_access.append(name['name'].upper()) + + for member in members: + if member.access_level in purge_users and 
member.username.upper() not in uppercase_names_in_gitlab_users_access: + try: + if not module.check_mode: + group.remove_user_from_group(member.id, gitlab_group_id) + changed = True + changed_users.append("Successfully removed user '%s', from group. Was not in given list" % member.username) + changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED', + 'msg': "Successfully removed user '%s', from group. Was not in given list" % member.username}) + except (gitlab.exceptions.GitlabDeleteError) as e: + error = True + changed_users.append("Failed to removed user, '%s', from the group" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)}) + + if len(gitlab_users_access) == 1 and error: + # if single user given and an error occurred return error for list errors will be per user + module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data) + elif error: + module.fail_json(msg='FAILED: At least one given user/permission could not be set', result_data=changed_data) + + module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data) if __name__ == '__main__': diff --git a/tests/integration/targets/gitlab_group_members/tasks/main.yml b/tests/integration/targets/gitlab_group_members/tasks/main.yml index 4d4f1168d0..109a0f2bdb 100644 --- a/tests/integration/targets/gitlab_group_members/tasks/main.yml +++ b/tests/integration/targets/gitlab_group_members/tasks/main.yml @@ -13,7 +13,7 @@ state: present - name: Add a User to A GitLab Group - gitlab_group_members: + gitlab_group_members: api_url: '{{ gitlab_server_url }}' api_token: '{{ gitlab_api_access_token }}' gitlab_group: '{{ gitlab_group_name }}' @@ -27,4 +27,47 @@ api_token: '{{ gitlab_api_access_token }}' gitlab_group: '{{ gitlab_group_name }}' gitlab_user: '{{ username }}' - state: absent 
\ No newline at end of file + state: absent + +- name: Add a list of Users to A GitLab Group + gitlab_group_members: + api_url: '{{ gitlab_server_url }}' + api_token: '{{ gitlab_api_access_token }}' + gitlab_group: '{{ gitlab_group_name }}' + gitlab_user: '{{ userlist }}' + access_level: '{{ gitlab_access_level }}' + state: present + +- name: Remove a list of Users to A GitLab Group + gitlab_group_members: + api_url: '{{ gitlab_server_url }}' + api_token: '{{ gitlab_api_access_token }}' + gitlab_group: '{{ gitlab_group_name }}' + gitlab_user: '{{ userlist }}' + state: absent + +- name: Add a list of Users with Dedicated Access Levels to A GitLab Group + gitlab_group_members: + api_url: '{{ gitlab_server_url }}' + api_token: '{{ gitlab_api_access_token }}' + gitlab_group: '{{ gitlab_group_name }}' + gitlab_users_access: '{{ dedicated_access_users }}' + state: present + +- name: Remove a list of Users with Dedicated Access Levels to A GitLab Group + gitlab_group_members: + api_url: '{{ gitlab_server_url }}' + api_token: '{{ gitlab_api_access_token }}' + gitlab_group: '{{ gitlab_group_name }}' + gitlab_users_access: '{{ dedicated_access_users }}' + state: absent + +- name: Add a user, remove all others which might be on this access level + gitlab_group_members: + api_url: '{{ gitlab_server_url }}' + api_token: '{{ gitlab_api_access_token }}' + gitlab_group: '{{ gitlab_group_name }}' + gitlab_user: '{{ username }}' + access_level: '{{ gitlab_access_level }}' + purge_users: '{{ gitlab_access_level }}' + state: present diff --git a/tests/integration/targets/gitlab_group_members/vars/main.yml b/tests/integration/targets/gitlab_group_members/vars/main.yml index 7f68893cf9..6a6b17319d 100644 --- a/tests/integration/targets/gitlab_group_members/vars/main.yml +++ b/tests/integration/targets/gitlab_group_members/vars/main.yml @@ -2,4 +2,12 @@ gitlab_server_url: https://gitlabserver.example.com gitlab_api_access_token: 126hngbscx890cv09b gitlab_group_name: groupname1 username:
username1 -gitlab_access_level: developer \ No newline at end of file +gitlab_access_level: developer +userlist: + - username1 + - username2 +dedicated_access_users: + - name: username1 + access_level: "developer" + - name: username2 + access_level: "maintainer" From 135faf44216febd34f19a1b39b941c967a7375fe Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Tue, 31 Aug 2021 12:19:29 -0400 Subject: [PATCH 0545/3093] django_manage: Remove scottanderson42 and tastychutney as maintainers. (#3314) Note: tastychutney is another github account of mine that was also added as a maintainer. --- .github/BOTMETA.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 0d2922182b..b07f95e8cc 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1108,7 +1108,8 @@ files: $modules/web_infrastructure/deploy_helper.py: maintainers: ramondelafuente $modules/web_infrastructure/django_manage.py: - maintainers: scottanderson42 russoz tastychutney + maintainers: russoz + ignore: scottanderson42 tastychutney labels: django_manage $modules/web_infrastructure/ejabberd_user.py: maintainers: privateip From bf8df21d27b81dd8e0c406cde63ffbf108529a8f Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Tue, 31 Aug 2021 18:22:08 +0200 Subject: [PATCH 0546/3093] Next expected release is 3.7.0. --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index 724e76110d..5b08ca814a 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: general -version: 3.6.0 +version: 3.7.0 readme: README.md authors: - Ansible (https://github.com/ansible) From c121e8685fec9bb47e1cd744213d5e1bc9e3d7b4 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 1 Sep 2021 07:33:22 +0200 Subject: [PATCH 0547/3093] Fix documentation bugs. 
(#3321) --- .../source_control/gitlab/gitlab_group_members.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/modules/source_control/gitlab/gitlab_group_members.py b/plugins/modules/source_control/gitlab/gitlab_group_members.py index b526873d30..d11e72d92f 100644 --- a/plugins/modules/source_control/gitlab/gitlab_group_members.py +++ b/plugins/modules/source_control/gitlab/gitlab_group_members.py @@ -74,7 +74,7 @@ options: type: str purge_users: description: - - Adds/remove users of the given access_level to match the given gitlab_user/gitlab_users_access list. + - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. If omitted do not purge orphaned members. - Is only used when I(state=present). type: list @@ -104,7 +104,7 @@ EXAMPLES = r''' state: absent - name: Add a list of Users to A GitLab Group - gitlab_group_members: + community.general.gitlab_group_members: api_url: 'https://gitlab.example.com' api_token: 'Your-Private-Token' gitlab_group: groupname @@ -115,7 +115,7 @@ EXAMPLES = r''' state: present - name: Add a list of Users with Dedicated Access Levels to A GitLab Group - gitlab_group_members: + community.general.gitlab_group_members: api_url: 'https://gitlab.example.com' api_token: 'Your-Private-Token' gitlab_group: groupname @@ -127,7 +127,7 @@ EXAMPLES = r''' state: present - name: Add a user, remove all others which might be on this access level - gitlab_group_members: + community.general.gitlab_group_members: api_url: 'https://gitlab.example.com' api_token: 'Your-Private-Token' gitlab_group: groupname @@ -137,7 +137,7 @@ EXAMPLES = r''' state: present - name: Remove a list of Users with Dedicated Access Levels to A GitLab Group - gitlab_group_members: + community.general.gitlab_group_members: api_url: 'https://gitlab.example.com' api_token: 'Your-Private-Token' gitlab_group: groupname From 7c493eb4e5eef63832762eac4978909d93a61808 Mon Sep 17 00:00:00 2001 From: 
Silvie Chlupova <33493796+schlupov@users.noreply.github.com> Date: Wed, 1 Sep 2021 22:58:10 +0200 Subject: [PATCH 0548/3093] Fix copr integration tests (#3237) Fixes: #2084 --- changelogs/fragments/3237-copr-fix_chroot_naming.yml | 2 ++ plugins/modules/packaging/os/copr.py | 11 ++++++----- tests/integration/targets/copr/aliases | 1 - tests/integration/targets/copr/tasks/main.yml | 8 ++++---- 4 files changed, 12 insertions(+), 10 deletions(-) create mode 100644 changelogs/fragments/3237-copr-fix_chroot_naming.yml diff --git a/changelogs/fragments/3237-copr-fix_chroot_naming.yml b/changelogs/fragments/3237-copr-fix_chroot_naming.yml new file mode 100644 index 0000000000..7a942bc94e --- /dev/null +++ b/changelogs/fragments/3237-copr-fix_chroot_naming.yml @@ -0,0 +1,2 @@ +bugfixes: + - copr - fix chroot naming issues, ``centos-stream`` changed naming to ``centos-stream-`` (for example ``centos-stream-8``) (https://github.com/ansible-collections/community.general/issues/2084, https://github.com/ansible-collections/community.general/pull/3237). \ No newline at end of file diff --git a/plugins/modules/packaging/os/copr.py b/plugins/modules/packaging/os/copr.py index 4bf665e045..cb31e8c9fb 100644 --- a/plugins/modules/packaging/os/copr.py +++ b/plugins/modules/packaging/os/copr.py @@ -120,8 +120,7 @@ class CoprModule(object): @property def short_chroot(self): """str: Chroot (distribution-version-architecture) shorten to distribution-version.""" - chroot_parts = self.chroot.split("-") - return "{0}-{1}".format(chroot_parts[0], chroot_parts[1]) + return self.chroot.rsplit('-', 1)[0] @property def arch(self): @@ -193,18 +192,20 @@ class CoprModule(object): Returns: Information about the repository.
""" - distribution, version = self.short_chroot.split("-") + distribution, version = self.short_chroot.split('-', 1) chroot = self.short_chroot while True: repo_info, status_code = self._get(chroot) if repo_info: return repo_info if distribution == "rhel": - chroot = "centos-stream" + chroot = "centos-stream-8" distribution = "centos" elif distribution == "centos": - if version == "stream": + if version == "stream-8": version = "8" + elif version == "stream-9": + version = "9" chroot = "epel-{0}".format(version) distribution = "epel" else: diff --git a/tests/integration/targets/copr/aliases b/tests/integration/targets/copr/aliases index fbe7da85db..0ad5e1c80c 100644 --- a/tests/integration/targets/copr/aliases +++ b/tests/integration/targets/copr/aliases @@ -3,4 +3,3 @@ needs/root skip/macos skip/osx skip/freebsd -disabled # FIXME diff --git a/tests/integration/targets/copr/tasks/main.yml b/tests/integration/targets/copr/tasks/main.yml index 32ce67208d..1c8afd992f 100644 --- a/tests/integration/targets/copr/tasks/main.yml +++ b/tests/integration/targets/copr/tasks/main.yml @@ -6,7 +6,7 @@ host: copr.fedorainfracloud.org state: enabled name: '@copr/integration_tests' - chroot: centos-stream-x86_64 + chroot: fedora-rawhide-x86_64 register: result - name: assert that the copr project was enabled @@ -21,7 +21,7 @@ copr: state: enabled name: '@copr/integration_tests' - chroot: centos-stream-x86_64 + chroot: fedora-rawhide-x86_64 register: result - name: assert that the copr project was enabled @@ -46,7 +46,7 @@ copr: state: disabled name: '@copr/integration_tests' - chroot: centos-stream-x86_64 + chroot: fedora-rawhide-x86_64 register: result - name: assert that the copr project was disabled @@ -61,4 +61,4 @@ host: copr.fedorainfracloud.org state: absent name: '@copr/integration_tests' - chroot: centos-stream-x86_64 + chroot: fedora-rawhide-x86_64 From 3502f3b48690500c8d60942ca85bb3527f856e4e Mon Sep 17 00:00:00 2001 From: Kyle Williams 
<36274986+kyle-williams-1@users.noreply.github.com> Date: Wed, 1 Sep 2021 14:59:27 -0600 Subject: [PATCH 0549/3093] redfish: clean etag of quotes before patch (#3296) * Some vendors surround header etag with quotes, which need to be cleaned before sending a patch * Minor change fragment * Add etag strip quote option * Rebase * Cleanup fragment * Apply suggestions from code review Co-authored-by: Felix Fontein * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein * Description update * Update plugins/modules/remote_management/redfish/redfish_config.py Co-authored-by: Felix Fontein Co-authored-by: Kyle Williams Co-authored-by: Felix Fontein --- changelogs/fragments/3296-clean-etag.yaml | 2 ++ plugins/module_utils/redfish_utils.py | 5 ++++- .../redfish/redfish_command.py | 17 +++++++++++++++-- .../remote_management/redfish/redfish_config.py | 17 +++++++++++++++-- 4 files changed, 36 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/3296-clean-etag.yaml diff --git a/changelogs/fragments/3296-clean-etag.yaml b/changelogs/fragments/3296-clean-etag.yaml new file mode 100644 index 0000000000..317772cb15 --- /dev/null +++ b/changelogs/fragments/3296-clean-etag.yaml @@ -0,0 +1,2 @@ +minor_changes: + - "redfish_command and redfish_config and redfish_utils module utils - add parameter to strip etag of quotes before patch, since some vendors do not properly ``If-Match`` etag with quotes (https://github.com/ansible-collections/community.general/pull/3296)." 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index 0f8e6630ba..b4d0dba015 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -29,7 +29,7 @@ FAIL_MSG = 'Issuing a data modification command without specifying the '\ class RedfishUtils(object): def __init__(self, creds, root_uri, timeout, module, resource_id=None, - data_modification=False): + data_modification=False, strip_etag_quotes=False): self.root_uri = root_uri self.creds = creds self.timeout = timeout @@ -37,6 +37,7 @@ class RedfishUtils(object): self.service_root = '/redfish/v1/' self.resource_id = resource_id self.data_modification = data_modification + self.strip_etag_quotes = strip_etag_quotes self._init_session() def _auth_params(self, headers): @@ -121,6 +122,8 @@ class RedfishUtils(object): if not etag: etag = r['data'].get('@odata.etag') if etag: + if self.strip_etag_quotes: + etag = etag.strip('"') req_headers['If-Match'] = etag username, password, basic_auth = self._auth_params(req_headers) try: diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index 72392ec9f3..e79308f2d7 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -207,6 +207,15 @@ options: description: - The transfer method to use with the image type: str + strip_etag_quotes: + description: + - Removes surrounding quotes of etag used in C(If-Match) header + of C(PATCH) requests. + - Only use this option to resolve bad vendor implementation where + C(If-Match) only matches the unquoted etag string. 
+ type: bool + default: false + version_added: 3.7.0 author: "Jose Delarosa (@jose-delarosa)" ''' @@ -631,7 +640,8 @@ def main(): transfer_protocol_type=dict(), transfer_method=dict(), ) - ) + ), + strip_etag_quotes=dict(type='bool', default=False), ), required_together=[ ('username', 'password'), @@ -686,10 +696,13 @@ def main(): # VirtualMedia options virtual_media = module.params['virtual_media'] + # Etag options + strip_etag_quotes = module.params['strip_etag_quotes'] + # Build root URI root_uri = "https://" + module.params['baseuri'] rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True) + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: diff --git a/plugins/modules/remote_management/redfish/redfish_config.py b/plugins/modules/remote_management/redfish/redfish_config.py index 9b15a3e63e..ff4b15487e 100644 --- a/plugins/modules/remote_management/redfish/redfish_config.py +++ b/plugins/modules/remote_management/redfish/redfish_config.py @@ -91,6 +91,15 @@ options: - setting dict of EthernetInterface on OOB controller type: dict version_added: '0.2.0' + strip_etag_quotes: + description: + - Removes surrounding quotes of etag used in C(If-Match) header + of C(PATCH) requests. + - Only use this option to resolve bad vendor implementation where + C(If-Match) only matches the unquoted etag string. 
+ type: bool + default: false + version_added: 3.7.0 author: "Jose Delarosa (@jose-delarosa)" ''' @@ -237,7 +246,8 @@ def main(): nic_config=dict( type='dict', default={} - ) + ), + strip_etag_quotes=dict(type='bool', default=False), ), required_together=[ ('username', 'password'), @@ -275,10 +285,13 @@ def main(): nic_addr = module.params['nic_addr'] nic_config = module.params['nic_config'] + # Etag options + strip_etag_quotes = module.params['strip_etag_quotes'] + # Build root URI root_uri = "https://" + module.params['baseuri'] rf_utils = RedfishUtils(creds, root_uri, timeout, module, - resource_id=resource_id, data_modification=True) + resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes) # Check that Category is valid if category not in CATEGORY_COMMANDS_ALL: From 76317d1f6473a82490927095f14f6df5422e3bed Mon Sep 17 00:00:00 2001 From: Manuel Gayer <85677493+nm-mga@users.noreply.github.com> Date: Sun, 5 Sep 2021 18:28:04 +0200 Subject: [PATCH 0550/3093] nmcli: Support GSM connections (#3313) * nmcli: Support GSM connections * Add GSM support * Add GSM unit test * nmcli: Add changelog fragment * nmcli: Fix GSM unit test * Fix copy-paste error in test_gsm_mod * nmcli: Fix yaml formatting * nmcli: Fix yaml formatting * nmcli: Fix typeerror * type must be str not string * nmcli: Fix gsm_show_output * gsm.username did not match input * nmcli: Fix gsm_show_output * doublechecked generated output with test-client * nmcli: GSM fix unit test * Removed `mocked_gsm_connection_unchanged` * Revert "nmcli: GSM fix unit test" This reverts commit 2d112b779aab03865731377919c509b8e88ad56a. 
* nmcli: gsm fix unit test * Add needed output to `TESTCASE_GSM_SHOW_OUTPUT` * Move `mocked_gsm_connection_unchanged`to sort correctly * nmcli: gsm fix _compare_conn_params * Strip double-qoutes of gsm.apn if exist * nmcli: GSM apply suggestions from code review Co-authored-by: Ajpantuso * nmcli: GSM: Fix documentation * Shorten too long lines * nmcli: GSM apply suggestions from code review Co-authored-by: Ajpantuso * nmcli: GSM add version Co-authored-by: Felix Fontein Co-authored-by: Ajpantuso Co-authored-by: Felix Fontein --- .../fragments/3313-nmcli-add_gsm_support.yml | 2 + plugins/modules/net_tools/nmcli.py | 129 ++++++++++++++++- .../plugins/modules/net_tools/test_nmcli.py | 137 ++++++++++++++++++ 3 files changed, 265 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/3313-nmcli-add_gsm_support.yml diff --git a/changelogs/fragments/3313-nmcli-add_gsm_support.yml b/changelogs/fragments/3313-nmcli-add_gsm_support.yml new file mode 100644 index 0000000000..9986bca675 --- /dev/null +++ b/changelogs/fragments/3313-nmcli-add_gsm_support.yml @@ -0,0 +1,2 @@ +minor_changes: + - "nmcli - add ``gsm`` support (https://github.com/ansible-collections/community.general/pull/3313)." diff --git a/plugins/modules/net_tools/nmcli.py b/plugins/modules/net_tools/nmcli.py index 7bc8a6b775..843e8bd8ef 100644 --- a/plugins/modules/net_tools/nmcli.py +++ b/plugins/modules/net_tools/nmcli.py @@ -54,8 +54,9 @@ options: - Type C(dummy) is added in community.general 3.5.0. - Type C(generic) is added in Ansible 2.5. - Type C(infiniband) is added in community.general 2.0.0. + - Type C(gsm) is added in community.general 3.7.0. 
type: str - choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi ] + choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, sit, team, team-slave, vlan, vxlan, wifi, gsm ] mode: description: - This is the type of device or network connection that you wish to create for a bond or bridge. @@ -183,7 +184,7 @@ options: mtu: description: - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created. - - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband) + - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, gsm, pppoe, infiniband) - This parameter defaults to C(1500) when unset. type: int dhcp_client_id: @@ -643,6 +644,101 @@ options: type: bool default: false version_added: 3.6.0 + gsm: + description: + - The configuration of the GSM connection. + - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host. + - 'An up-to-date list of supported attributes can be found here: + U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).' + - 'For instance to use apn, pin, username and password: + C({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).' + type: dict + version_added: 3.7.0 + suboptions: + apn: + description: + - The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network. + - The APN often determines how the user will be billed for their network usage and whether the user has access to the Internet or + just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile broadband plan. + - The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9. 
+ type: str + auto-config: + description: When C(true), the settings such as I(gsm.apn), I(gsm.username), or I(gsm.password) will default to values that match the network + the modem will register to in the Mobile Broadband Provider database. + type: bool + default: false + device-id: + description: + - The device unique identifier (as given by the C(WWAN) management service) which this connection applies to. + - If given, the connection will only apply to the specified device. + type: str + home-only: + description: + - When C(true), only connections to the home network will be allowed. + - Connections to roaming networks will not be made. + type: bool + default: false + mtu: + description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames. + type: int + default: 0 + network-id: + description: + - The Network ID (GSM LAI format, ie MCC-MNC) to force specific network registration. + - If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified network. + - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise possible. + type: str + number: + description: Legacy setting that used to help establishing PPP data sessions for GSM-based modems. + type: str + password: + description: + - The password used to authenticate with the network, if required. + - Many providers do not require a password, or accept any password. + - But if a password is required, it is specified here. + type: str + password-flags: + description: + - NMSettingSecretFlags indicating how to handle the I(password) property. 
+ - 'Following choices are allowed: + C(0) B(NONE): The system is responsible for providing and storing this secret (default), + C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be + asked to retrieve it + C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed + C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required + (some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.' + type: int + choices: [ 0, 1, 2 , 4 ] + default: 0 + pin: + description: + - If the SIM is locked with a PIN it must be unlocked before any other operations are requested. + - Specify the PIN here to allow operation of the device. + type: str + pin-flags: + description: + - NMSettingSecretFlags indicating how to handle the I(gsm.pin) property. + - See I(gsm.password-flags) for NMSettingSecretFlags choices. + type: int + choices: [ 0, 1, 2 , 4 ] + default: 0 + sim-id: + description: + - The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to. + - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) which contains a SIM card matching + the given identifier.' + type: str + sim-operator-id: + description: + - A MCC/MNC string like C(310260) or C(21601I) identifying the specific mobile network operator which this connection applies to. + - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) and I(gsm.sim-id) which contains a SIM card + provisioned by the given operator.' + type: str + username: + description: + - The username used to authenticate with the network, if required. + - Many providers do not require a username, or accept any username. + - But if a username is required, it is specified here. 
''' EXAMPLES = r''' @@ -979,6 +1075,19 @@ EXAMPLES = r''' autoconnect: true state: present +- name: Create a gsm connection + community.general.nmcli: + type: gsm + conn_name: my-gsm-provider + ifname: cdc-wdm0 + gsm: + apn: my.provider.apn + username: my-provider-username + password: my-provider-password + pin: my-sim-pin + autoconnect: true + state: present + ''' RETURN = r"""# @@ -1086,6 +1195,7 @@ class Nmcli(object): self.ssid = module.params['ssid'] self.wifi = module.params['wifi'] self.wifi_sec = module.params['wifi_sec'] + self.gsm = module.params['gsm'] if self.method4: self.ipv4_method = self.method4 @@ -1243,6 +1353,12 @@ class Nmcli(object): options.update({ '802-11-wireless-security.%s' % name: value }) + elif self.type == 'gsm': + if self.gsm: + for name, value in self.gsm.items(): + options.update({ + 'gsm.%s' % name: value, + }) # Convert settings values based on the situation. for setting, value in options.items(): setting_type = self.settings_type(setting) @@ -1280,7 +1396,8 @@ class Nmcli(object): 'sit', 'team', 'vlan', - 'wifi' + 'wifi', + 'gsm', ) @property @@ -1573,6 +1690,10 @@ class Nmcli(object): value = value.upper() # ensure current_value is also converted to uppercase in case nmcli changes behaviour current_value = current_value.upper() + if key == 'gsm.apn': + # Depending on version nmcli adds double-qoutes to gsm.apn + # Need to strip them in order to compare both + current_value = current_value.strip('"') else: # parameter does not exist current_value = None @@ -1630,6 +1751,7 @@ def main(): 'vlan', 'vxlan', 'wifi', + 'gsm', ]), ip4=dict(type='str'), gw4=dict(type='str'), @@ -1700,6 +1822,7 @@ def main(): ssid=dict(type='str'), wifi=dict(type='dict'), wifi_sec=dict(type='dict', no_log=True), + gsm=dict(type='dict'), ), mutually_exclusive=[['never_default4', 'gw4']], required_if=[("type", "wifi", [("ssid")])], diff --git a/tests/unit/plugins/modules/net_tools/test_nmcli.py b/tests/unit/plugins/modules/net_tools/test_nmcli.py index 
9277bd5fb6..bf2977e81d 100644 --- a/tests/unit/plugins/modules/net_tools/test_nmcli.py +++ b/tests/unit/plugins/modules/net_tools/test_nmcli.py @@ -86,6 +86,12 @@ TESTCASE_CONNECTION = [ 'state': 'absent', '_ansible_check_mode': True, }, + { + 'type': 'gsm', + 'conn_name': 'non_existent_nw_device', + 'state': 'absent', + '_ansible_check_mode': True, + }, ] TESTCASE_GENERIC = [ @@ -603,6 +609,7 @@ TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT = \ 802-11-wireless-security.fils: 0 (default) """ + TESTCASE_DUMMY_STATIC = [ { 'type': 'dummy', @@ -638,6 +645,53 @@ ipv6.addresses: 2001:db8::1/128 """ +TESTCASE_GSM = [ + { + 'type': 'gsm', + 'conn_name': 'non_existent_nw_device', + 'ifname': 'gsm_non_existant', + 'gsm': { + 'apn': 'internet.telekom', + 'username': 't-mobile', + 'password': 'tm', + 'pin': '1234', + }, + 'method4': 'auto', + 'state': 'present', + '_ansible_check_mode': False, + } +] + +TESTCASE_GSM_SHOW_OUTPUT = """\ +connection.id: non_existent_nw_device +connection.type: gsm +connection.interface-name: gsm_non_existant +connection.autoconnect: yes +ipv4.method: auto +ipv4.ignore-auto-dns: no +ipv4.ignore-auto-routes: no +ipv4.never-default: no +ipv4.may-fail: yes +ipv6.method: auto +ipv6.ignore-auto-dns: no +ipv6.ignore-auto-routes: no +gsm.auto-config: no +gsm.number: -- +gsm.username: t-mobile +gsm.password: tm +gsm.password-flags: 0 (none) +gsm.apn: "internet.telekom" +gsm.network-id: -- +gsm.pin: 1234 +gsm.pin-flags: 0 (none) +gsm.home-only: no +gsm.device-id: -- +gsm.sim-id: -- +gsm.sim-operator-id: -- +gsm.mtu: auto +""" + + def mocker_set(mocker, connection_exists=False, execute_return=(0, "", ""), @@ -863,6 +917,13 @@ def mocked_dummy_connection_static_unchanged(mocker): execute_return=(0, TESTCASE_DUMMY_STATIC_SHOW_OUTPUT, "")) +@pytest.fixture +def mocked_gsm_connection_unchanged(mocker): + mocker_set(mocker, + connection_exists=True, + execute_return=(0, TESTCASE_GSM_SHOW_OUTPUT, "")) + + @pytest.mark.parametrize('patch_ansible_module', 
TESTCASE_BOND, indirect=['patch_ansible_module']) def test_bond_connection_create(mocked_generic_connection_create, capfd): """ @@ -2162,3 +2223,79 @@ def test_dummy_connection_static_unchanged(mocked_dummy_connection_static_unchan results = json.loads(out) assert not results.get('failed') assert not results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module']) +def test_create_gsm(mocked_generic_connection_create, capfd): + """ + Test if gsm created + """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'add' + assert args[0][3] == 'type' + assert args[0][4] == 'gsm' + assert args[0][5] == 'con-name' + assert args[0][6] == 'non_existent_nw_device' + + args_text = list(map(to_text, args[0])) + for param in ['connection.interface-name', 'gsm_non_existant', + 'gsm.apn', 'internet.telekom', + 'gsm.username', 't-mobile', + 'gsm.password', 'tm', + 'gsm.pin', '1234']: + assert param in args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module']) +def test_gsm_mod(mocked_generic_connection_modify, capfd): + """ + Test if gsm modified + """ + with pytest.raises(SystemExit): + nmcli.main() + + assert nmcli.Nmcli.execute_command.call_count == 1 + arg_list = nmcli.Nmcli.execute_command.call_args_list + args, kwargs = arg_list[0] + + assert args[0][0] == '/usr/bin/nmcli' + assert args[0][1] == 'con' + assert args[0][2] == 'modify' + assert args[0][3] == 'non_existent_nw_device' + + args_text = list(map(to_text, args[0])) + for param in ['gsm.username', 't-mobile', + 'gsm.password', 'tm']: + assert param in 
args_text + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert results['changed'] + + +@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module']) +def test_gsm_connection_unchanged(mocked_gsm_connection_unchanged, capfd): + """ + Test if gsm connection unchanged + """ + with pytest.raises(SystemExit): + nmcli.main() + + out, err = capfd.readouterr() + results = json.loads(out) + assert not results.get('failed') + assert not results['changed'] From 0f9311c3d9d1d1f6eeff43ff40b5d524555e0fe6 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 7 Sep 2021 05:23:33 +1200 Subject: [PATCH 0551/3093] zpool_facts - pythonification (#3332) * pythonification * added changelog fragment * adjustment per PR --- .../fragments/3332-zpool_facts-pythonify.yaml | 2 + plugins/modules/storage/zfs/zpool_facts.py | 46 +++++++------------ 2 files changed, 19 insertions(+), 29 deletions(-) create mode 100644 changelogs/fragments/3332-zpool_facts-pythonify.yaml diff --git a/changelogs/fragments/3332-zpool_facts-pythonify.yaml b/changelogs/fragments/3332-zpool_facts-pythonify.yaml new file mode 100644 index 0000000000..ddb29b9efb --- /dev/null +++ b/changelogs/fragments/3332-zpool_facts-pythonify.yaml @@ -0,0 +1,2 @@ +minor_changes: + - zpool_facts - minor refactoring (https://github.com/ansible-collections/community.general/pull/3332). 
diff --git a/plugins/modules/storage/zfs/zpool_facts.py b/plugins/modules/storage/zfs/zpool_facts.py index ed3d6cf965..b7a66255c6 100644 --- a/plugins/modules/storage/zfs/zpool_facts.py +++ b/plugins/modules/storage/zfs/zpool_facts.py @@ -125,23 +125,16 @@ class ZPoolFacts(object): def __init__(self, module): self.module = module - self.name = module.params['name'] self.parsable = module.params['parsable'] self.properties = module.params['properties'] - self._pools = defaultdict(dict) self.facts = [] def pool_exists(self): cmd = [self.module.get_bin_path('zpool'), 'list', self.name] - - (rc, out, err) = self.module.run_command(cmd) - - if rc == 0: - return True - else: - return False + rc, dummy, dummy = self.module.run_command(cmd) + return rc == 0 def get_facts(self): cmd = [self.module.get_bin_path('zpool'), 'get', '-H'] @@ -153,41 +146,36 @@ class ZPoolFacts(object): if self.name: cmd.append(self.name) - (rc, out, err) = self.module.run_command(cmd) + rc, out, err = self.module.run_command(cmd, check_rc=True) - if rc == 0: - for line in out.splitlines(): - pool, property, value = line.split('\t') + for line in out.splitlines(): + pool, prop, value = line.split('\t') - self._pools[pool].update({property: value}) + self._pools[pool].update({prop: value}) - for k, v in iteritems(self._pools): - v.update({'name': k}) - self.facts.append(v) + for k, v in iteritems(self._pools): + v.update({'name': k}) + self.facts.append(v) - return {'ansible_zfs_pools': self.facts} - else: - self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name, - stderr=err, - rc=rc) + return {'ansible_zfs_pools': self.facts} def main(): module = AnsibleModule( argument_spec=dict( - name=dict(required=False, aliases=['pool', 'zpool'], type='str'), - parsable=dict(required=False, default=False, type='bool'), - properties=dict(required=False, default='all', type='str'), + name=dict(aliases=['pool', 'zpool'], type='str'), + parsable=dict(default=False, 
type='bool'), + properties=dict(default='all', type='str'), ), supports_check_mode=True ) zpool_facts = ZPoolFacts(module) - result = {} - result['changed'] = False - result['name'] = zpool_facts.name - + result = { + 'changed': False, + 'name': zpool_facts.name, + } if zpool_facts.parsable: result['parsable'] = zpool_facts.parsable From a20862797ecea1e4d90ceccbdbe2cd656776dc71 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 6 Sep 2021 22:37:10 +0200 Subject: [PATCH 0552/3093] Fix default value of new option. (#3338) --- changelogs/fragments/3337-linode-fix.yml | 2 ++ plugins/inventory/linode.py | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3337-linode-fix.yml diff --git a/changelogs/fragments/3337-linode-fix.yml b/changelogs/fragments/3337-linode-fix.yml new file mode 100644 index 0000000000..06887b1901 --- /dev/null +++ b/changelogs/fragments/3337-linode-fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - "linode inventory plugin - fix default value of new option ``ip_style`` (https://github.com/ansible-collections/community.general/issues/3337)." diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index 0ce510852a..4bbd79a303 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -29,8 +29,7 @@ DOCUMENTATION = r''' ip_style: description: Populate hostvars with all information available from the Linode APIv4. 
type: string - default: - - plain + default: plain choices: - plain - api From dd25c0d3bfcd2a41a8ec6970180c16caa04087b9 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Tue, 7 Sep 2021 16:22:46 +1200 Subject: [PATCH 0553/3093] django_manage - split params (#3334) * django_manage - fix fixures * docs formatting adjustments * param apps also in need of splitting * oops, the splitted version was not being properly added to the command args * added changelog fragment * check for None * moving to shlex.split() * Update changelogs/fragments/3334-django_manage-split-params.yaml Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- .../fragments/3334-django_manage-split-params.yaml | 2 ++ plugins/modules/web_infrastructure/django_manage.py | 11 +++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/3334-django_manage-split-params.yaml diff --git a/changelogs/fragments/3334-django_manage-split-params.yaml b/changelogs/fragments/3334-django_manage-split-params.yaml new file mode 100644 index 0000000000..38ec68a532 --- /dev/null +++ b/changelogs/fragments/3334-django_manage-split-params.yaml @@ -0,0 +1,2 @@ +bugfixes: + - django_manage - parameters ``apps`` and ``fixtures`` are now splitted instead of being used as a single argument (https://github.com/ansible-collections/community.general/issues/3333). diff --git a/plugins/modules/web_infrastructure/django_manage.py b/plugins/modules/web_infrastructure/django_manage.py index 0c8126c457..4ced7452bb 100644 --- a/plugins/modules/web_infrastructure/django_manage.py +++ b/plugins/modules/web_infrastructure/django_manage.py @@ -62,7 +62,7 @@ options: clear: description: - Clear the existing files before trying to copy or link the original file. - - Used only with the 'collectstatic' command. The C(--noinput) argument will be added automatically. + - Used only with the C(collectstatic) command. 
The C(--noinput) argument will be added automatically. required: false default: no type: bool @@ -109,9 +109,9 @@ options: required: false aliases: [test_runner] notes: - - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter + - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the I(virtualenv) parameter is specified. - - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already + - This module will create a virtualenv if the I(virtualenv) parameter is specified and a virtual environment does not already exist at the given location. - This module assumes English error messages for the C(createcachetable) command to detect table existence, unfortunately. @@ -306,7 +306,10 @@ def main(): # these params always get tacked on the end of the command for param in end_of_command_params: if module.params[param]: - run_cmd_args.append(module.params[param]) + if param in ('fixtures', 'apps'): + run_cmd_args.extend(shlex.split(module.params[param])) + else: + run_cmd_args.append(module.params[param]) rc, out, err = module.run_command(run_cmd_args, cwd=project_path) if rc != 0: From 6b207bce4ce4929a8979bf22f3f3543c597e5ef5 Mon Sep 17 00:00:00 2001 From: Andreas Botzner Date: Wed, 8 Sep 2021 07:14:37 +0200 Subject: [PATCH 0554/3093] Adds redis_data_info module (#3227) * Added redis_data_info module Added: - redis_data_info module and suggested 'exists' return flag. - module_utils for redis with a base class that handles database connections. 
- inhereited unit tests and added some new ones for the exit flag * Docfix and sanity * typo * Suggested doc changes and ssl option * TLS and validate_certs fix * Set support_check_mode for info plugin * Docfix and import errors * Redis versioning Fix * version bump and append fixes --- .github/BOTMETA.yml | 2 + plugins/doc_fragments/redis.py | 57 +++++++++ plugins/module_utils/redis.py | 93 ++++++++++++++ .../modules/database/misc/redis_data_info.py | 111 +++++++++++++++++ plugins/modules/redis_data_info.py | 1 + .../database/misc/test_redis_data_info.py | 113 ++++++++++++++++++ 6 files changed, 377 insertions(+) create mode 100644 plugins/doc_fragments/redis.py create mode 100644 plugins/module_utils/redis.py create mode 100644 plugins/modules/database/misc/redis_data_info.py create mode 120000 plugins/modules/redis_data_info.py create mode 100644 tests/unit/plugins/modules/database/misc/test_redis_data_info.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index b07f95e8cc..5b55449a67 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -458,6 +458,8 @@ files: maintainers: slok $modules/database/misc/redis_info.py: maintainers: levonet + $modules/database/misc/redis_data_info.py: + maintainers: paginabianca $modules/database/misc/riak.py: maintainers: drewkerrigan jsmartin $modules/database/mssql/mssql_db.py: diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py new file mode 100644 index 0000000000..e7af25ec8f --- /dev/null +++ b/plugins/doc_fragments/redis.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + # Common parameters for Redis modules + DOCUMENTATION = r''' +options: + login_host: + description: + - Specify the target host running the database. 
+ default: localhost + type: str + login_port: + description: + - Specify the port to connect to. + default: 6379 + type: int + login_user: + description: + - Specify the user to authenticate with. + - Requires L(redis,https://pypi.org/project/redis) >= 3.4.0. + type: str + login_password: + description: + - Specify the password to authenticate with. + - Usually not used when target is localhost. + type: str + tls: + description: + - Specify whether or not to use TLS for the connection. + type: bool + default: true + validate_certs: + description: + - Specify whether or not to validate TLS certificates. + - This should only be turned off for personally controlled sites or with + C(localhost) as target. + type: bool + default: true + ca_certs: + description: + - Path to root certificates file. If not set and I(tls) is + set to C(true), certifi ca-certificates will be used. + type: str +requirements: [ "redis", "certifi" ] + +notes: + - Requires the C(redis) Python package on the remote host. You can + install it with pip (C(pip install redis)) or with a package manager. + Information on the library can be found at U(https://github.com/andymccurdy/redis-py). 
+''' diff --git a/plugins/module_utils/redis.py b/plugins/module_utils/redis.py new file mode 100644 index 0000000000..9d55aecad0 --- /dev/null +++ b/plugins/module_utils/redis.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +from ansible.module_utils.basic import missing_required_lib +__metaclass__ = type + +import traceback + +REDIS_IMP_ERR = None +try: + from redis import Redis + from redis import __version__ as redis_version + HAS_REDIS_PACKAGE = True +except ImportError: + REDIS_IMP_ERR = traceback.format_exc() + HAS_REDIS_PACKAGE = False + +try: + import certifi + HAS_CERTIFI_PACKAGE = True +except ImportError: + CERTIFI_IMPORT_ERROR = traceback.format_exc() + HAS_CERTIFI_PACKAGE = False + + +def fail_imports(module): + errors = [] + traceback = [] + if not HAS_REDIS_PACKAGE: + errors.append(missing_required_lib('redis')) + traceback.append(REDIS_IMP_ERR) + if not HAS_CERTIFI_PACKAGE: + errors.append(missing_required_lib('certifi')) + traceback.append(CERTIFI_IMPORT_ERROR) + if errors: + module.fail_json(errors=errors, traceback='\n'.join(traceback)) + + +def redis_auth_argument_spec(): + return dict( + login_host=dict(type='str', + default='localhost',), + login_user=dict(type='str'), + login_password=dict(type='str', + no_log=True + ), + login_port=dict(type='int', default=6379), + tls=dict(type='bool', + default=True), + validate_certs=dict(type='bool', + default=True + ), + ca_certs=dict(type='str') + ) + + +class RedisAnsible(object): + '''Base class for Redis module''' + + def __init__(self, module): + self.module = module + self.connection = self._connect() + + def _connect(self): + login_host = self.module.params['login_host'] + login_user = self.module.params['login_user'] + login_password = self.module.params['login_password'] + login_port = 
self.module.params['login_port'] + tls = self.module.params['tls'] + validate_certs = 'required' if self.module.params['validate_certs'] else None + ca_certs = self.module.params['ca_certs'] + if tls and ca_certs is None: + ca_certs = str(certifi.where()) + if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None: + self.module.fail_json( + msg='The option `username` in only supported with redis >= 3.4.0.') + params = {'host': login_host, + 'port': login_port, + 'password': login_password, + 'ssl_ca_certs': ca_certs, + 'ssl_cert_reqs': validate_certs, + 'ssl': tls} + if login_user is not None: + params['username'] = login_user + try: + return Redis(**params) + except Exception as e: + self.module.fail_json(msg='{0}'.format(str(e))) + return None diff --git a/plugins/modules/database/misc/redis_data_info.py b/plugins/modules/database/misc/redis_data_info.py new file mode 100644 index 0000000000..866bda62d1 --- /dev/null +++ b/plugins/modules/database/misc/redis_data_info.py @@ -0,0 +1,111 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: redis_data_info +short_description: Get value of key in Redis database +version_added: 3.7.0 +description: + - Get value of keys in Redis database. +author: "Andreas Botzner (@paginabianca)" +options: + key: + description: + - Database key. 
+ type: str + required: true + +extends_documentation_fragment: + - community.general.redis + +seealso: + - module: community.general.redis_info + - module: community.general.redis +''' + +EXAMPLES = ''' +- name: Get key foo=bar from loalhost with no username + community.general.redis_data_info: + login_host: localhost + login_password: supersecret + key: foo + +- name: Get key foo=bar on redishost with custom ca-cert file + community.general.redis_data_info: + login_host: redishost + login_password: supersecret + login_user: somuser + validate_certs: true + ssl_ca_certs: /path/to/ca/certs + key: foo +''' + +RETURN = ''' +exists: + description: If they key exists in the database. + returned: on success + type: bool +value: + description: Value key was set to. + returned: if existing + type: str + sample: 'value_of_some_key' +msg: + description: A short message. + returned: always + type: str + sample: 'Got key: foo with value: bar' +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.redis import ( + fail_imports, redis_auth_argument_spec, RedisAnsible) + + +def main(): + redis_auth_args = redis_auth_argument_spec() + module_args = dict( + key=dict(type='str', required=True, no_log=False), + ) + module_args.update(redis_auth_args) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + fail_imports(module) + + redis = RedisAnsible(module) + + key = module.params['key'] + result = {'changed': False} + + value = None + try: + value = redis.connection.get(key) + except Exception as e: + msg = 'Failed to get value of key "{0}" with exception: {1}'.format( + key, str(e)) + result['msg'] = msg + module.fail_json(**result) + + if value is None: + msg = 'Key "{0}" does not exist in database'.format(key) + result['exists'] = False + else: + msg = 'Got key "{0}"'.format(key) + result['value'] = value + result['exists'] = True + result['msg'] = msg + 
module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/redis_data_info.py b/plugins/modules/redis_data_info.py new file mode 120000 index 0000000000..14c54fb2d3 --- /dev/null +++ b/plugins/modules/redis_data_info.py @@ -0,0 +1 @@ +database/misc/redis_data_info.py \ No newline at end of file diff --git a/tests/unit/plugins/modules/database/misc/test_redis_data_info.py b/tests/unit/plugins/modules/database/misc/test_redis_data_info.py new file mode 100644 index 0000000000..808c583e37 --- /dev/null +++ b/tests/unit/plugins/modules/database/misc/test_redis_data_info.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +# +# Copyright: (c) 2021, Andreas Botzner +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import pytest +import json +from redis import __version__ + +from ansible_collections.community.general.plugins.modules.database.misc import ( + redis_data_info) +from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args + + +HAS_REDIS_USERNAME_OPTION = True +if tuple(map(int, __version__.split('.'))) < (3, 4, 0): + HAS_REDIS_USERNAME_OPTION = False + + +def test_redis_data_info_without_arguments(capfd): + set_module_args({}) + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + assert not err + assert json.loads(out)['failed'] + + +@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") +def test_redis_data_info_existing_key(capfd, mocker): + set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False}) + mocker.patch('redis.Redis.get', return_value='bar') + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + print(out) + assert not err + assert 
json.loads(out)['exists'] + assert json.loads(out)['value'] == 'bar' + + +@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0") +def test_redis_data_info_absent_key(capfd, mocker): + set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False}) + mocker.patch('redis.Redis.get', return_value=None) + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + print(out) + assert not err + assert not json.loads(out)['exists'] + assert 'value' not in json.loads(out) + + +@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") +def test_redis_data_fail_username(capfd, mocker): + set_module_args({'login_host': 'localhost', + 'login_user': 'root', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False}) + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + print(out) + assert not err + assert json.loads(out)['failed'] + assert json.loads( + out)['msg'] == 'The option `username` in only supported with redis >= 3.4.0.' 
+ + +@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") +def test_redis_data_info_absent_key_no_username(capfd, mocker): + set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False}) + mocker.patch('redis.Redis.get', return_value=None) + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + print(out) + assert not err + assert not json.loads(out)['exists'] + assert 'value' not in json.loads(out) + + +@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version > 3.4.0") +def test_redis_data_info_existing_key_no_username(capfd, mocker): + set_module_args({'login_host': 'localhost', + 'login_password': 'secret', + 'key': 'foo', + '_ansible_check_mode': False}) + mocker.patch('redis.Redis.get', return_value='bar') + with pytest.raises(SystemExit): + redis_data_info.main() + out, err = capfd.readouterr() + print(out) + assert not err + assert json.loads(out)['exists'] + assert json.loads(out)['value'] == 'bar' From 7c43cc3faa51bfb873e5c2a6336478f9594f14de Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Thu, 9 Sep 2021 07:31:44 +0200 Subject: [PATCH 0555/3093] Improve CI (#3348) * Remove superfluous test. * Use remote_temp_dir instead of output_dir on remote. * Read certificate from correct place. * Adjust more places. * Fix boolean. * Improve cryptography setup. * Fix java_keystore changes. * Need to copy binary from remote. * Use correct Python for serve script. * Sleep before downloading. * Use correct Python interpreter. * Avoid failing shebang test. * Fix permission error with macOS 11.1. * Avoid shebang trouble. 
--- .../ansible_galaxy_install/meta/main.yml | 2 + .../ansible_galaxy_install/tasks/main.yml | 2 +- .../integration/targets/archive/meta/main.yml | 1 + .../targets/archive/tasks/main.yml | 2 +- .../targets/archive/tests/broken-link.yml | 12 +- .../targets/archive/tests/core.yml | 58 ++++---- .../targets/archive/tests/exclusions.yml | 18 +-- .../targets/archive/tests/idempotency.yml | 56 ++++---- .../targets/archive/tests/remove.yml | 70 +++++----- .../integration/targets/consul/meta/main.yml | 1 + .../integration/targets/consul/tasks/main.yml | 28 ++-- .../targets/deploy_helper/meta/main.yml | 2 + .../targets/deploy_helper/tasks/main.yml | 2 +- .../targets/filter_random_mac/meta/main.yml | 2 + .../targets/filter_random_mac/tasks/main.yml | 3 - .../targets/flatpak/tasks/setup.yml | 2 +- tests/integration/targets/gem/meta/main.yml | 1 + tests/integration/targets/gem/tasks/main.yml | 18 +-- .../targets/git_config/meta/main.yml | 2 + .../tasks/get_set_state_present_file.yml | 5 +- .../git_config/tasks/setup_no_value.yml | 3 +- .../targets/git_config/tasks/setup_value.yml | 3 +- tests/integration/targets/hg/meta/main.yml | 1 + .../targets/hg/tasks/run-tests.yml | 6 +- .../targets/iso_create/meta/main.yml | 1 + .../targets/iso_create/tasks/main.yml | 54 ++++---- .../iso_create/tasks/prepare_dest_dir.yml | 4 +- .../targets/iso_extract/meta/main.yml | 1 + .../targets/iso_extract/tasks/main.yml | 2 +- .../targets/iso_extract/tasks/prepare.yml | 6 +- .../targets/iso_extract/tasks/tests.yml | 8 +- .../targets/java_cert/defaults/main.yml | 18 +-- .../targets/java_cert/meta/main.yml | 1 + .../targets/java_cert/tasks/main.yml | 22 +-- .../targets/java_cert/tasks/state_change.yml | 8 +- .../targets/java_keystore/meta/main.yml | 1 + .../targets/java_keystore/tasks/prepare.yml | 14 +- .../targets/java_keystore/tasks/tests.yml | 128 ++++++++++++++---- tests/integration/targets/mail/meta/main.yml | 2 + tests/integration/targets/mail/tasks/main.yml | 4 +- 
tests/integration/targets/nomad/meta/main.yml | 1 + .../integration/targets/nomad/tasks/main.yml | 22 +-- tests/integration/targets/npm/meta/main.yml | 1 + tests/integration/targets/npm/tasks/main.yml | 2 +- tests/integration/targets/npm/tasks/setup.yml | 4 +- tests/integration/targets/pids/meta/main.yml | 2 + tests/integration/targets/pids/tasks/main.yml | 18 ++- .../targets/setup_openssl/tasks/main.yml | 21 +++ .../targets/setup_openssl/vars/Debian.yml | 2 + .../targets/setup_openssl/vars/FreeBSD.yml | 2 + .../targets/setup_openssl/vars/RedHat.yml | 2 + .../targets/setup_openssl/vars/Suse.yml | 2 + .../targets/ssh_config/meta/main.yml | 1 + .../targets/ssh_config/tasks/main.yml | 8 +- .../targets/supervisorctl/meta/main.yml | 1 + .../targets/supervisorctl/tasks/main.yml | 2 +- .../targets/synchronize-buildah/aliases | 3 - .../targets/synchronize-buildah/inventory | 1 - .../files/normal_file.txt | 1 - .../test_buildah_synchronize/tasks/main.yml | 71 ---------- .../targets/synchronize-buildah/runme.sh | 15 -- .../test_synchronize_buildah.yml | 8 -- .../targets/xattr/defaults/main.yml | 2 +- tests/integration/targets/xattr/meta/main.yml | 1 + tests/integration/targets/yarn/meta/main.yml | 1 + tests/integration/targets/yarn/tasks/run.yml | 28 ++-- .../integration/targets/zypper/meta/main.yml | 2 + .../targets/zypper/tasks/zypper.yml | 36 ++--- 68 files changed, 440 insertions(+), 394 deletions(-) create mode 100644 tests/integration/targets/ansible_galaxy_install/meta/main.yml create mode 100644 tests/integration/targets/deploy_helper/meta/main.yml create mode 100644 tests/integration/targets/filter_random_mac/meta/main.yml create mode 100644 tests/integration/targets/git_config/meta/main.yml create mode 100644 tests/integration/targets/mail/meta/main.yml create mode 100644 tests/integration/targets/pids/meta/main.yml delete mode 100644 tests/integration/targets/synchronize-buildah/aliases delete mode 100644 tests/integration/targets/synchronize-buildah/inventory 
delete mode 100644 tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt delete mode 100644 tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml delete mode 100644 tests/integration/targets/synchronize-buildah/runme.sh delete mode 100644 tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml create mode 100644 tests/integration/targets/zypper/meta/main.yml diff --git a/tests/integration/targets/ansible_galaxy_install/meta/main.yml b/tests/integration/targets/ansible_galaxy_install/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/ansible_galaxy_install/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml index 232c96aff5..276dab3a30 100644 --- a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml +++ b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml @@ -50,7 +50,7 @@ ################################################### - name: set_fact: - reqs_file: '{{ output_dir }}/reqs.yaml' + reqs_file: '{{ remote_tmp_dir }}/reqs.yaml' - name: Copy requirements file copy: diff --git a/tests/integration/targets/archive/meta/main.yml b/tests/integration/targets/archive/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/archive/meta/main.yml +++ b/tests/integration/targets/archive/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/archive/tasks/main.yml b/tests/integration/targets/archive/tasks/main.yml index 1e2c9f9c27..e7b6c44175 100644 --- a/tests/integration/targets/archive/tasks/main.yml +++ b/tests/integration/targets/archive/tasks/main.yml @@ -75,7 +75,7 @@ register: backports_lzma_pip - name: prep our files - copy: src={{ item 
}} dest={{output_dir}}/{{ item }} + copy: src={{ item }} dest={{remote_tmp_dir}}/{{ item }} with_items: - foo.txt - bar.txt diff --git a/tests/integration/targets/archive/tests/broken-link.yml b/tests/integration/targets/archive/tests/broken-link.yml index cc1e07aaf1..677ebe0bf7 100644 --- a/tests/integration/targets/archive/tests/broken-link.yml +++ b/tests/integration/targets/archive/tests/broken-link.yml @@ -3,29 +3,29 @@ - name: Create link - broken link ({{ format }}) file: src: /nowhere - dest: "{{ output_dir }}/nowhere.txt" + dest: "{{ remote_tmp_dir }}/nowhere.txt" state: link force: yes - name: Archive - broken link ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_broken_link.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}" format: "{{ format }}" - name: Verify archive exists - broken link ({{ format }}) file: - path: "{{ output_dir }}/archive_broken_link.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}" state: file - name: Remove archive - broken link ({{ format }}) file: - path: "{{ output_dir }}/archive_broken_link.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}" state: absent - name: Remove link - broken link ({{ format }}) file: - path: "{{ output_dir }}/nowhere.txt" + path: "{{ remote_tmp_dir }}/nowhere.txt" state: absent # 'zip' does not support symlink's when: format != 'zip' diff --git a/tests/integration/targets/archive/tests/core.yml b/tests/integration/targets/archive/tests/core.yml index d008e9c122..f3ae906429 100644 --- a/tests/integration/targets/archive/tests/core.yml +++ b/tests/integration/targets/archive/tests/core.yml @@ -25,14 +25,14 @@ # Core functionality tests - name: Archive - no options ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_no_opts.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir 
}}/archive_no_opts.{{ format }}" format: "{{ format }}" register: archive_no_options - name: Verify that archive exists - no options ({{ format }}) file: - path: "{{output_dir}}/archive_no_opts.{{ format }}" + path: "{{remote_tmp_dir}}/archive_no_opts.{{ format }}" state: file - name: Verify that archive result is changed and includes all files - no options ({{ format }}) @@ -44,20 +44,20 @@ - name: Remove the archive - no options ({{ format }}) file: - path: "{{ output_dir }}/archive_no_options.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_no_options.{{ format }}" state: absent - name: Archive - file options ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_file_options.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_file_options.{{ format }}" format: "{{ format }}" mode: "u+rwX,g-rwx,o-rwx" register: archive_file_options - name: Retrieve archive file information - file options ({{ format }}) stat: - path: "{{ output_dir }}/archive_file_options.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_file_options.{{ format }}" register: archive_file_options_stat - name: Test that the file modes were changed @@ -69,19 +69,19 @@ - name: Remove the archive - file options ({{ format }}) file: - path: "{{ output_dir }}/archive_file_options.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_file_options.{{ format }}" state: absent - name: Archive - non-ascii ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_nonascii_くらとみ.{{ format }}" format: "{{ format }}" register: archive_nonascii - name: Retrieve archive file information - non-ascii ({{ format }}) stat: - path: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_nonascii_くらとみ.{{ format }}" register: archive_nonascii_stat - name: Test that archive exists 
- non-ascii ({{ format }}) @@ -92,13 +92,13 @@ - name: Remove the archive - non-ascii ({{ format }}) file: - path: "{{ output_dir }}/archive_nonascii_くらとみ.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_nonascii_くらとみ.{{ format }}" state: absent - name: Archive - single target ({{ format }}) archive: - path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/archive_single_target.{{ format }}" + path: "{{ remote_tmp_dir }}/foo.txt" + dest: "{{ remote_tmp_dir }}/archive_single_target.{{ format }}" format: "{{ format }}" register: archive_single_target @@ -117,7 +117,7 @@ - block: - name: Retrieve contents of archive - single target ({{ format }}) ansible.builtin.unarchive: - src: "{{ output_dir }}/archive_single_target.{{ format }}" + src: "{{ remote_tmp_dir }}/archive_single_target.{{ format }}" dest: . list_files: true check_mode: true @@ -135,22 +135,22 @@ - name: Remove archive - single target ({{ format }}) file: - path: "{{ output_dir }}/archive_single_target.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_single_target.{{ format }}" state: absent - name: Archive - path list ({{ format }}) archive: path: - - "{{ output_dir }}/empty.txt" - - "{{ output_dir }}/foo.txt" - - "{{ output_dir }}/bar.txt" - dest: "{{ output_dir }}/archive_path_list.{{ format }}" + - "{{ remote_tmp_dir }}/empty.txt" + - "{{ remote_tmp_dir }}/foo.txt" + - "{{ remote_tmp_dir }}/bar.txt" + dest: "{{ remote_tmp_dir }}/archive_path_list.{{ format }}" format: "{{ format }}" register: archive_path_list - name: Verify that archive exists - path list ({{ format }}) file: - path: "{{output_dir}}/archive_path_list.{{ format }}" + path: "{{remote_tmp_dir}}/archive_path_list.{{ format }}" state: file - name: Assert that archive contains all files - path list ({{ format }}) @@ -161,16 +161,16 @@ - name: Remove archive - path list ({{ format }}) file: - path: "{{ output_dir }}/archive_path_list.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_path_list.{{ format }}" state: absent - name: 
Archive - missing paths ({{ format }}) archive: path: - - "{{ output_dir }}/*.txt" - - "{{ output_dir }}/dne.txt" - exclude_path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/archive_missing_paths.{{ format }}" + - "{{ remote_tmp_dir }}/*.txt" + - "{{ remote_tmp_dir }}/dne.txt" + exclude_path: "{{ remote_tmp_dir }}/foo.txt" + dest: "{{ remote_tmp_dir }}/archive_missing_paths.{{ format }}" format: "{{ format }}" register: archive_missing_paths @@ -179,10 +179,10 @@ that: - archive_missing_paths is changed - "archive_missing_paths.dest_state == 'incomplete'" - - "'{{ output_dir }}/dne.txt' in archive_missing_paths.missing" - - "'{{ output_dir }}/foo.txt' not in archive_missing_paths.missing" + - "'{{ remote_tmp_dir }}/dne.txt' in archive_missing_paths.missing" + - "'{{ remote_tmp_dir }}/foo.txt' not in archive_missing_paths.missing" - name: Remove archive - missing paths ({{ format }}) file: - path: "{{ output_dir }}/archive_missing_paths.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_missing_paths.{{ format }}" state: absent diff --git a/tests/integration/targets/archive/tests/exclusions.yml b/tests/integration/targets/archive/tests/exclusions.yml index 0b65f85851..b2a8c7b890 100644 --- a/tests/integration/targets/archive/tests/exclusions.yml +++ b/tests/integration/targets/archive/tests/exclusions.yml @@ -1,8 +1,8 @@ --- - name: Archive - exclusion patterns ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_exclusion_patterns.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_exclusion_patterns.{{ format }}" format: "{{ format }}" exclusion_patterns: b?r.* register: archive_exclusion_patterns @@ -15,26 +15,26 @@ - name: Remove archive - exclusion patterns ({{ format }}) file: - path: "{{ output_dir }}/archive_exclusion_patterns.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_exclusion_patterns.{{ format }}" state: absent - name: Archive - exclude path ({{ format }}) 
archive: path: - - "{{ output_dir }}/sub/subfile.txt" - - "{{ output_dir }}" + - "{{ remote_tmp_dir }}/sub/subfile.txt" + - "{{ remote_tmp_dir }}" exclude_path: - - "{{ output_dir }}" - dest: "{{ output_dir }}/archive_exclude_paths.{{ format }}" + - "{{ remote_tmp_dir }}" + dest: "{{ remote_tmp_dir }}/archive_exclude_paths.{{ format }}" format: "{{ format }}" register: archive_excluded_paths - name: Assert that excluded paths do not influence archive root - exclude path ({{ format }}) assert: that: - - archive_excluded_paths.arcroot != output_dir + - archive_excluded_paths.arcroot != remote_tmp_dir - name: Remove archive - exclude path ({{ format }}) file: - path: "{{ output_dir }}/archive_exclude_paths.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_exclude_paths.{{ format }}" state: absent diff --git a/tests/integration/targets/archive/tests/idempotency.yml b/tests/integration/targets/archive/tests/idempotency.yml index 9262601572..5a44922adb 100644 --- a/tests/integration/targets/archive/tests/idempotency.yml +++ b/tests/integration/targets/archive/tests/idempotency.yml @@ -1,8 +1,8 @@ --- - name: Archive - file content idempotency ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_file_content_idempotency.{{ format }}" format: "{{ format }}" register: file_content_idempotency_before @@ -10,12 +10,12 @@ lineinfile: line: bar.txt regexp: "^foo.txt$" - path: "{{ output_dir }}/foo.txt" + path: "{{ remote_tmp_dir }}/foo.txt" - name: Archive second time - file content idempotency ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_file_content_idempotency.{{ format }}" format: "{{ format }}" register: file_content_idempotency_after @@ -28,29 +28,29 @@ - name: 
Remove archive - file content idempotency ({{ format }}) file: - path: "{{ output_dir }}/archive_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_file_content_idempotency.{{ format }}" state: absent - name: Modify file back - file content idempotency ({{ format }}) lineinfile: line: foo.txt regexp: "^bar.txt$" - path: "{{ output_dir }}/foo.txt" + path: "{{ remote_tmp_dir }}/foo.txt" - name: Archive - file name idempotency ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_file_name_idempotency.{{ format }}" format: "{{ format }}" register: file_name_idempotency_before - name: Rename file - file name idempotency ({{ format }}) - command: "mv {{ output_dir}}/foo.txt {{ output_dir }}/fii.txt" + command: "mv {{ remote_tmp_dir }}/foo.txt {{ remote_tmp_dir }}/fii.txt" - name: Archive again - file name idempotency ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_file_name_idempotency.{{ format }}" format: "{{ format }}" register: file_name_idempotency_after @@ -61,16 +61,16 @@ - name: Remove archive - file name idempotency ({{ format }}) file: - path: "{{ output_dir }}/archive_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_file_name_idempotency.{{ format }}" state: absent - name: Rename file back - file name idempotency ({{ format }}) - command: "mv {{ output_dir }}/fii.txt {{ output_dir }}/foo.txt" + command: "mv {{ remote_tmp_dir }}/fii.txt {{ remote_tmp_dir }}/foo.txt" - name: Archive - single file content idempotency ({{ format }}) archive: - path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/foo.txt" + dest: "{{ 
remote_tmp_dir }}/archive_single_file_content_idempotency.{{ format }}" format: "{{ format }}" register: single_file_content_idempotency_before @@ -78,12 +78,12 @@ lineinfile: line: bar.txt regexp: "^foo.txt$" - path: "{{ output_dir }}/foo.txt" + path: "{{ remote_tmp_dir }}/foo.txt" - name: Archive second time - single file content idempotency ({{ format }}) archive: - path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/foo.txt" + dest: "{{ remote_tmp_dir }}/archive_single_file_content_idempotency.{{ format }}" format: "{{ format }}" register: single_file_content_idempotency_after @@ -96,29 +96,29 @@ - name: Remove archive - single file content idempotency ({{ format }}) file: - path: "{{ output_dir }}/archive_single_file_content_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_single_file_content_idempotency.{{ format }}" state: absent - name: Modify file back - single file content idempotency ({{ format }}) lineinfile: line: foo.txt regexp: "^bar.txt$" - path: "{{ output_dir }}/foo.txt" + path: "{{ remote_tmp_dir }}/foo.txt" - name: Archive - single file name idempotency ({{ format }}) archive: - path: "{{ output_dir }}/foo.txt" - dest: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/foo.txt" + dest: "{{ remote_tmp_dir }}/archive_single_file_name_idempotency.{{ format }}" format: "{{ format }}" register: single_file_name_idempotency_before - name: Rename file - single file name idempotency ({{ format }}) - command: "mv {{ output_dir}}/foo.txt {{ output_dir }}/fii.txt" + command: "mv {{ remote_tmp_dir }}/foo.txt {{ remote_tmp_dir }}/fii.txt" - name: Archive again - single file name idempotency ({{ format }}) archive: - path: "{{ output_dir }}/fii.txt" - dest: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/fii.txt" + dest: "{{ remote_tmp_dir 
}}/archive_single_file_name_idempotency.{{ format }}" format: "{{ format }}" register: single_file_name_idempotency_after @@ -133,8 +133,8 @@ - name: Remove archive - single file name idempotency ({{ format }}) file: - path: "{{ output_dir }}/archive_single_file_name_idempotency.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_single_file_name_idempotency.{{ format }}" state: absent - name: Rename file back - single file name idempotency ({{ format }}) - command: "mv {{ output_dir }}/fii.txt {{ output_dir }}/foo.txt" + command: "mv {{ remote_tmp_dir }}/fii.txt {{ remote_tmp_dir }}/foo.txt" diff --git a/tests/integration/targets/archive/tests/remove.yml b/tests/integration/targets/archive/tests/remove.yml index 26849ac850..08f16e98da 100644 --- a/tests/integration/targets/archive/tests/remove.yml +++ b/tests/integration/targets/archive/tests/remove.yml @@ -1,15 +1,15 @@ --- - name: Archive - remove source files ({{ format }}) archive: - path: "{{ output_dir }}/*.txt" - dest: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + path: "{{ remote_tmp_dir }}/*.txt" + dest: "{{ remote_tmp_dir }}/archive_remove_source_files.{{ format }}" format: "{{ format }}" remove: yes register: archive_remove_source_files - name: Verify archive exists - remove source files ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_files.{{ format }}" state: file - name: Verify all files were archived - remove source files ({{ format }}) @@ -20,13 +20,13 @@ - name: Remove Archive - remove source files ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_files.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_files.{{ format }}" state: absent - name: Assert that source files were removed - remove source files ({{ format }}) assert: that: - - "'{{ output_dir }}/{{ item }}' is not exists" + - "'{{ remote_tmp_dir }}/{{ item }}' is not exists" with_items: - foo.txt - 
bar.txt @@ -35,7 +35,7 @@ - name: Copy source files - remove source directory ({{ format }}) copy: src: "{{ item }}" - dest: "{{ output_dir }}/{{ item }}" + dest: "{{ remote_tmp_dir }}/{{ item }}" with_items: - foo.txt - bar.txt @@ -43,13 +43,13 @@ - name: Create temporary directory - remove source directory ({{ format }}) file: - path: "{{ output_dir }}/tmpdir" + path: "{{ remote_tmp_dir }}/tmpdir" state: directory - name: Copy source files to temporary directory - remove source directory ({{ format }}) copy: src: "{{ item }}" - dest: "{{ output_dir }}/tmpdir/{{ item }}" + dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}" with_items: - foo.txt - bar.txt @@ -57,15 +57,15 @@ - name: Archive - remove source directory ({{ format }}) archive: - path: "{{ output_dir }}/tmpdir" - dest: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + path: "{{ remote_tmp_dir }}/tmpdir" + dest: "{{ remote_tmp_dir }}/archive_remove_source_directory.{{ format }}" format: "{{ format }}" remove: yes register: archive_remove_source_directory - name: Verify archive exists - remove source directory ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_directory.{{ format }}" state: file - name: Verify archive contains all files - remove source directory ({{ format }}) @@ -76,23 +76,23 @@ - name: Remove archive - remove source directory ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_directory.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_directory.{{ format }}" state: absent - name: Verify source directory was removed - remove source directory ({{ format }}) assert: that: - - "'{{ output_dir }}/tmpdir' is not exists" + - "'{{ remote_tmp_dir }}/tmpdir' is not exists" - name: Create temporary directory - remove source excluding path ({{ format }}) file: - path: "{{ output_dir }}/tmpdir" + path: "{{ remote_tmp_dir }}/tmpdir" state: directory - name: Copy 
source files to temporary directory - remove source excluding path ({{ format }}) copy: src: "{{ item }}" - dest: "{{ output_dir }}/tmpdir/{{ item }}" + dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}" with_items: - foo.txt - bar.txt @@ -100,16 +100,16 @@ - name: Archive - remove source excluding path ({{ format }}) archive: - path: "{{ output_dir }}/tmpdir/*" - dest: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + path: "{{ remote_tmp_dir }}/tmpdir/*" + dest: "{{ remote_tmp_dir }}/archive_remove_source_excluding_path.{{ format }}" format: "{{ format }}" remove: yes - exclude_path: "{{ output_dir }}/tmpdir/empty.txt" + exclude_path: "{{ remote_tmp_dir }}/tmpdir/empty.txt" register: archive_remove_source_excluding_path - name: Verify archive exists - remove source excluding path ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_path.{{ format }}" state: file - name: Verify all files except excluded are archived - remove source excluding path ({{ format }}) @@ -120,18 +120,18 @@ - name: Remove archive - remove source excluding path ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_excluding_path.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_path.{{ format }}" state: absent - name: Verify that excluded file still exists - remove source excluding path ({{ format }}) file: - path: "{{ output_dir }}/tmpdir/empty.txt" + path: "{{ remote_tmp_dir }}/tmpdir/empty.txt" state: file - name: Copy source files to temporary directory - remove source excluding sub path ({{ format }}) copy: src: "{{ item }}" - dest: "{{ output_dir }}/tmpdir/{{ item }}" + dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}" with_items: - foo.txt - bar.txt @@ -142,33 +142,33 @@ - name: Archive - remove source excluding sub path ({{ format }}) archive: path: - - "{{ output_dir }}/tmpdir/*.txt" - - "{{ output_dir }}/tmpdir/sub/*" - 
dest: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + - "{{ remote_tmp_dir }}/tmpdir/*.txt" + - "{{ remote_tmp_dir }}/tmpdir/sub/*" + dest: "{{ remote_tmp_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" format: "{{ format }}" remove: yes - exclude_path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + exclude_path: "{{ remote_tmp_dir }}/tmpdir/sub/subfile.txt" register: archive_remove_source_excluding_sub_path - name: Verify archive exists - remove source excluding sub path ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" state: file - name: Remove archive - remove source excluding sub path ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_sub_path.{{ format }}" state: absent - name: Verify that sub path still exists - remove source excluding sub path ({{ format }}) file: - path: "{{ output_dir }}/tmpdir/sub/subfile.txt" + path: "{{ remote_tmp_dir }}/tmpdir/sub/subfile.txt" state: file - name: Copy source files to temporary directory - remove source with nested paths ({{ format }}) copy: src: "{{ item }}" - dest: "{{ output_dir }}/tmpdir/{{ item }}" + dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}" with_items: - foo.txt - bar.txt @@ -178,20 +178,20 @@ - name: Archive - remove source with nested paths ({{ format }}) archive: - path: "{{ output_dir }}/tmpdir/" - dest: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + path: "{{ remote_tmp_dir }}/tmpdir/" + dest: "{{ remote_tmp_dir }}/archive_remove_source_nested_paths.{{ format }}" format: "{{ format }}" remove: yes register: archive_remove_nested_paths - name: Verify archive exists - remove source with nested paths ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + path: 
"{{ remote_tmp_dir }}/archive_remove_source_nested_paths.{{ format }}" state: file - name: Verify source files were removed - remove source with nested paths ({{ format }}) file: - path: "{{ output_dir }}/tmpdir" + path: "{{ remote_tmp_dir }}/tmpdir" state: absent register: archive_remove_nested_paths_status @@ -203,5 +203,5 @@ - name: Remove archive - remove source with nested paths ({{ format }}) file: - path: "{{ output_dir }}/archive_remove_source_nested_paths.{{ format }}" + path: "{{ remote_tmp_dir }}/archive_remove_source_nested_paths.{{ format }}" state: absent diff --git a/tests/integration/targets/consul/meta/main.yml b/tests/integration/targets/consul/meta/main.yml index f4c99a2ad7..f9bb8406a4 100644 --- a/tests/integration/targets/consul/meta/main.yml +++ b/tests/integration/targets/consul/meta/main.yml @@ -2,3 +2,4 @@ dependencies: - setup_pkg_mgr - setup_openssl + - setup_remote_tmp_dir diff --git a/tests/integration/targets/consul/tasks/main.yml b/tests/integration/targets/consul/tasks/main.yml index 4de2d332e5..7f216f81f0 100644 --- a/tests/integration/targets/consul/tasks/main.yml +++ b/tests/integration/targets/consul/tasks/main.yml @@ -7,7 +7,7 @@ vars: consul_version: 1.5.0 consul_uri: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/consul/consul_{{ consul_version }}_{{ ansible_system | lower }}_{{ consul_arch }}.zip - consul_cmd: '{{ output_dir }}/consul' + consul_cmd: '{{ remote_tmp_dir }}/consul' block: - name: register pyOpenSSL version command: '{{ ansible_python_interpreter }} -c ''import OpenSSL; print(OpenSSL.__version__)''' @@ -27,19 +27,19 @@ block: - name: Generate privatekey community.crypto.openssl_privatekey: - path: '{{ output_dir }}/privatekey.pem' + path: '{{ remote_tmp_dir }}/privatekey.pem' - name: Generate CSR community.crypto.openssl_csr: - path: '{{ output_dir }}/csr.csr' - privatekey_path: '{{ output_dir }}/privatekey.pem' + path: '{{ remote_tmp_dir }}/csr.csr' + privatekey_path: '{{ remote_tmp_dir 
}}/privatekey.pem' subject: commonName: localhost - name: Generate selfsigned certificate register: selfsigned_certificate community.crypto.openssl_certificate: - path: '{{ output_dir }}/cert.pem' - csr_path: '{{ output_dir }}/csr.csr' - privatekey_path: '{{ output_dir }}/privatekey.pem' + path: '{{ remote_tmp_dir }}/cert.pem' + csr_path: '{{ remote_tmp_dir }}/csr.csr' + privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem' provider: selfsigned selfsigned_digest: sha256 - name: Install unzip @@ -59,21 +59,21 @@ - name: Download consul binary unarchive: src: '{{ consul_uri }}' - dest: '{{ output_dir }}' + dest: '{{ remote_tmp_dir }}' remote_src: true register: result until: result is success - vars: - remote_dir: '{{ echo_output_dir.stdout }}' + remote_dir: '{{ echo_remote_tmp_dir.stdout }}' block: - - command: echo {{ output_dir }} - register: echo_output_dir + - command: echo {{ remote_tmp_dir }} + register: echo_remote_tmp_dir - name: Create configuration file template: src: consul_config.hcl.j2 - dest: '{{ output_dir }}/consul_config.hcl' + dest: '{{ remote_tmp_dir }}/consul_config.hcl' - name: Start Consul (dev mode enabled) - shell: nohup {{ consul_cmd }} agent -dev -config-file {{ output_dir }}/consul_config.hcl /dev/null 2>&1 & + shell: nohup {{ consul_cmd }} agent -dev -config-file {{ remote_tmp_dir }}/consul_config.hcl /dev/null 2>&1 & - name: Create some data command: '{{ consul_cmd }} kv put data/value{{ item }} foo{{ item }}' loop: @@ -83,5 +83,5 @@ - import_tasks: consul_session.yml always: - name: Kill consul process - shell: kill $(cat {{ output_dir }}/consul.pid) + shell: kill $(cat {{ remote_tmp_dir }}/consul.pid) ignore_errors: true diff --git a/tests/integration/targets/deploy_helper/meta/main.yml b/tests/integration/targets/deploy_helper/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/deploy_helper/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git 
a/tests/integration/targets/deploy_helper/tasks/main.yml b/tests/integration/targets/deploy_helper/tasks/main.yml index a61ab2a075..6d03b8da0e 100644 --- a/tests/integration/targets/deploy_helper/tasks/main.yml +++ b/tests/integration/targets/deploy_helper/tasks/main.yml @@ -5,7 +5,7 @@ #################################################################### - name: record the output directory - set_fact: deploy_helper_test_root={{output_dir}}/deploy_helper_test_root + set_fact: deploy_helper_test_root={{remote_tmp_dir}}/deploy_helper_test_root - name: State=query with default parameters deploy_helper: path={{ deploy_helper_test_root }} state=query diff --git a/tests/integration/targets/filter_random_mac/meta/main.yml b/tests/integration/targets/filter_random_mac/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/filter_random_mac/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/filter_random_mac/tasks/main.yml b/tests/integration/targets/filter_random_mac/tasks/main.yml index 782b6e5c95..e09017c6fb 100644 --- a/tests/integration/targets/filter_random_mac/tasks/main.yml +++ b/tests/integration/targets/filter_random_mac/tasks/main.yml @@ -8,9 +8,6 @@ # Copyright: (c) 2019, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -- set_fact: - output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}" - - name: Test random_mac filter bad argument type debug: var: "0 | community.general.random_mac" diff --git a/tests/integration/targets/flatpak/tasks/setup.yml b/tests/integration/targets/flatpak/tasks/setup.yml index 8fc0a23566..decf20d166 100644 --- a/tests/integration/targets/flatpak/tasks/setup.yml +++ b/tests/integration/targets/flatpak/tasks/setup.yml @@ -57,7 +57,7 @@ mode: '0755' - name: Start HTTP server - command: '{{ remote_tmp_dir }}/serve.py 127.0.0.1 8000 /tmp/flatpak/' + command: '{{ 
ansible_python.executable }} {{ remote_tmp_dir }}/serve.py 127.0.0.1 8000 /tmp/flatpak/' async: 120 poll: 0 register: webserver_status diff --git a/tests/integration/targets/gem/meta/main.yml b/tests/integration/targets/gem/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/gem/meta/main.yml +++ b/tests/integration/targets/gem/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/gem/tasks/main.yml b/tests/integration/targets/gem/tasks/main.yml index 499057775c..4674fb1075 100644 --- a/tests/integration/targets/gem/tasks/main.yml +++ b/tests/integration/targets/gem/tasks/main.yml @@ -122,7 +122,7 @@ gem: name: gist state: present - install_dir: "{{ output_dir }}/gems" + install_dir: "{{ remote_tmp_dir }}/gems" ignore_errors: yes register: install_gem_fail_result @@ -141,12 +141,12 @@ name: gist state: present user_install: no - install_dir: "{{ output_dir }}/gems" + install_dir: "{{ remote_tmp_dir }}/gems" register: install_gem_result - name: Find gems in custom directory find: - paths: "{{ output_dir }}/gems/gems" + paths: "{{ remote_tmp_dir }}/gems/gems" file_type: directory contains: gist register: gem_search @@ -163,12 +163,12 @@ name: gist state: absent user_install: no - install_dir: "{{ output_dir }}/gems" + install_dir: "{{ remote_tmp_dir }}/gems" register: install_gem_result - name: Find gems in custom directory find: - paths: "{{ output_dir }}/gems/gems" + paths: "{{ remote_tmp_dir }}/gems/gems" file_type: directory contains: gist register: gem_search @@ -184,14 +184,14 @@ gem: name: gist state: present - bindir: "{{ output_dir }}/custom_bindir" + bindir: "{{ remote_tmp_dir }}/custom_bindir" norc: yes user_install: no # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL register: install_gem_result - name: Get stats of gem executable stat: - path: "{{ output_dir }}/custom_bindir/gist" + path: "{{ 
remote_tmp_dir }}/custom_bindir/gist" register: gem_bindir_stat - name: Ensure gem executable was installed in custom directory @@ -204,14 +204,14 @@ gem: name: gist state: absent - bindir: "{{ output_dir }}/custom_bindir" + bindir: "{{ remote_tmp_dir }}/custom_bindir" norc: yes user_install: no # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL register: install_gem_result - name: Get stats of gem executable stat: - path: "{{ output_dir }}/custom_bindir/gist" + path: "{{ remote_tmp_dir }}/custom_bindir/gist" register: gem_bindir_stat - name: Ensure gem executable was removed from custom directory diff --git a/tests/integration/targets/git_config/meta/main.yml b/tests/integration/targets/git_config/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/git_config/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml b/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml index 20946ac393..5d46ed35c5 100644 --- a/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml +++ b/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml @@ -6,7 +6,7 @@ name: "{{ option_name }}" value: "{{ option_value }}" scope: "file" - file: "{{ output_dir }}/gitconfig_file" + file: "{{ remote_tmp_dir }}/gitconfig_file" state: present register: result @@ -14,7 +14,7 @@ git_config: name: "{{ option_name }}" scope: "file" - file: "{{ output_dir }}/gitconfig_file" + file: "{{ remote_tmp_dir }}/gitconfig_file" state: present register: get_result @@ -26,4 +26,3 @@ - set_result.diff.after == option_value + "\n" - get_result is not changed - get_result.config_value == option_value -... 
diff --git a/tests/integration/targets/git_config/tasks/setup_no_value.yml b/tests/integration/targets/git_config/tasks/setup_no_value.yml index d5552450cf..7bccfc0368 100644 --- a/tests/integration/targets/git_config/tasks/setup_no_value.yml +++ b/tests/integration/targets/git_config/tasks/setup_no_value.yml @@ -8,6 +8,5 @@ - name: set up without value (file) file: - path: "{{ output_dir }}/gitconfig_file" + path: "{{ remote_tmp_dir }}/gitconfig_file" state: absent -... diff --git a/tests/integration/targets/git_config/tasks/setup_value.yml b/tests/integration/targets/git_config/tasks/setup_value.yml index 3eff9c423a..748e838b3d 100644 --- a/tests/integration/targets/git_config/tasks/setup_value.yml +++ b/tests/integration/targets/git_config/tasks/setup_value.yml @@ -9,5 +9,4 @@ - name: set up with value (file) copy: src: gitconfig - dest: "{{ output_dir }}/gitconfig_file" -... + dest: "{{ remote_tmp_dir }}/gitconfig_file" diff --git a/tests/integration/targets/hg/meta/main.yml b/tests/integration/targets/hg/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/hg/meta/main.yml +++ b/tests/integration/targets/hg/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/hg/tasks/run-tests.yml b/tests/integration/targets/hg/tasks/run-tests.yml index 775b297817..0818f4f466 100644 --- a/tests/integration/targets/hg/tasks/run-tests.yml +++ b/tests/integration/targets/hg/tasks/run-tests.yml @@ -6,14 +6,14 @@ - name: set where to extract the repo set_fact: - checkout_dir: "{{ output_dir }}/hg_project_test" + checkout_dir: "{{ remote_tmp_dir }}/hg_project_test" - name: set what repo to use set_fact: repo: "http://hg.pf.osdn.net/view/a/ak/akasurde/hg_project_test" -- name: clean out the output_dir - shell: rm -rf {{ output_dir }}/* +- name: clean out the remote_tmp_dir + shell: rm -rf {{ remote_tmp_dir }}/* - name: verify that mercurial is installed so this test can continue 
shell: which hg diff --git a/tests/integration/targets/iso_create/meta/main.yml b/tests/integration/targets/iso_create/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/iso_create/meta/main.yml +++ b/tests/integration/targets/iso_create/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/iso_create/tasks/main.yml b/tests/integration/targets/iso_create/tasks/main.yml index 4a0df3b818..0e21e01aef 100644 --- a/tests/integration/targets/iso_create/tasks/main.yml +++ b/tests/integration/targets/iso_create/tasks/main.yml @@ -14,15 +14,23 @@ - debug: var=install_pycdlib - set_fact: - output_dir_test: '{{ output_dir }}/test_iso_create' + output_test_dir: '{{ remote_tmp_dir }}/test_iso_create' # - include_tasks: prepare_dest_dir.yml +- name: Copy files and directories + copy: + src: '{{ item }}' + dest: '{{ remote_tmp_dir }}/{{ item }}' + loop: + - test1.cfg + - test_dir + - name: Test check mode iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test.iso" interchange_level: 3 register: iso_result check_mode: yes @@ -30,7 +38,7 @@ - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test.iso" + path: "{{ output_test_dir }}/test.iso" register: iso_file - debug: var=iso_file - assert: @@ -41,15 +49,15 @@ - name: Create iso file with a specified file iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test.iso" interchange_level: 3 register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test.iso" + path: "{{ output_test_dir }}/test.iso" register: iso_file - assert: @@ -60,16 +68,16 @@ - name: Create iso file with a specified file and folder 
iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - - "{{ role_path }}/files/test_dir" - dest_iso: "{{ output_dir_test }}/test1.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + - "{{ remote_tmp_dir }}/test_dir" + dest_iso: "{{ output_test_dir }}/test1.iso" interchange_level: 3 register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test1.iso" + path: "{{ output_test_dir }}/test1.iso" register: iso_file - assert: @@ -80,15 +88,15 @@ - name: Create iso file with volume identification string iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test2.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test2.iso" vol_ident: "OEMDRV" register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test2.iso" + path: "{{ output_test_dir }}/test2.iso" register: iso_file - assert: @@ -99,15 +107,15 @@ - name: Create iso file with Rock Ridge extention iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test3.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test3.iso" rock_ridge: "1.09" register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test3.iso" + path: "{{ output_test_dir }}/test3.iso" register: iso_file - assert: @@ -118,15 +126,15 @@ - name: Create iso file with Joliet extention iso_create: src_files: - - "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test4.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test4.iso" joliet: 3 register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test4.iso" + path: "{{ output_test_dir }}/test4.iso" register: iso_file - assert: @@ -137,15 +145,15 @@ - name: Create iso file with UDF enabled iso_create: src_files: - 
- "{{ role_path }}/files/test1.cfg" - dest_iso: "{{ output_dir_test }}/test5.iso" + - "{{ remote_tmp_dir }}/test1.cfg" + dest_iso: "{{ output_test_dir }}/test5.iso" udf: True register: iso_result - debug: var=iso_result - name: Check if iso file created stat: - path: "{{ output_dir_test }}/test5.iso" + path: "{{ output_test_dir }}/test5.iso" register: iso_file - assert: diff --git a/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml b/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml index 94c529d52a..8320c3942e 100644 --- a/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml +++ b/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml @@ -3,10 +3,10 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - name: Make sure our testing sub-directory does not exist file: - path: '{{ output_dir_test }}' + path: '{{ output_test_dir }}' state: absent - name: Create our testing sub-directory file: - path: '{{ output_dir_test }}' + path: '{{ output_test_dir }}' state: directory diff --git a/tests/integration/targets/iso_extract/meta/main.yml b/tests/integration/targets/iso_extract/meta/main.yml index 0e51c36ebd..07990bd4ef 100644 --- a/tests/integration/targets/iso_extract/meta/main.yml +++ b/tests/integration/targets/iso_extract/meta/main.yml @@ -1,3 +1,4 @@ dependencies: - setup_pkg_mgr - setup_epel + - setup_remote_tmp_dir diff --git a/tests/integration/targets/iso_extract/tasks/main.yml b/tests/integration/targets/iso_extract/tasks/main.yml index 1eb279a3d2..18fd9b37a9 100644 --- a/tests/integration/targets/iso_extract/tasks/main.yml +++ b/tests/integration/targets/iso_extract/tasks/main.yml @@ -23,7 +23,7 @@ # along with Ansible. If not, see . 
- set_fact: - output_dir_test: '{{ output_dir }}/test_iso_extract' + output_test_dir: '{{ remote_tmp_dir }}/test_iso_extract' - name: Install 7zip import_tasks: 7zip.yml diff --git a/tests/integration/targets/iso_extract/tasks/prepare.yml b/tests/integration/targets/iso_extract/tasks/prepare.yml index 78c06ad52c..4e240caca6 100644 --- a/tests/integration/targets/iso_extract/tasks/prepare.yml +++ b/tests/integration/targets/iso_extract/tasks/prepare.yml @@ -19,15 +19,15 @@ - name: Make sure our testing sub-directory does not exist file: - path: '{{ output_dir_test }}' + path: '{{ output_test_dir }}' state: absent - name: Create our testing sub-directory file: - path: '{{ output_dir_test }}' + path: '{{ output_test_dir }}' state: directory - name: copy the iso to the test dir copy: src: test.iso - dest: '{{ output_dir_test }}' + dest: '{{ output_test_dir }}' diff --git a/tests/integration/targets/iso_extract/tasks/tests.yml b/tests/integration/targets/iso_extract/tasks/tests.yml index 18f22422ce..1475027adf 100644 --- a/tests/integration/targets/iso_extract/tasks/tests.yml +++ b/tests/integration/targets/iso_extract/tasks/tests.yml @@ -19,8 +19,8 @@ - name: Extract the iso iso_extract: - image: '{{ output_dir_test }}/test.iso' - dest: '{{ output_dir_test }}' + image: '{{ output_test_dir }}/test.iso' + dest: '{{ output_test_dir }}' files: - 1.txt - 2.txt @@ -32,8 +32,8 @@ - name: Extract the iso again iso_extract: - image: '{{ output_dir_test }}/test.iso' - dest: '{{ output_dir_test }}' + image: '{{ output_test_dir }}/test.iso' + dest: '{{ output_test_dir }}' files: - 1.txt - 2.txt diff --git a/tests/integration/targets/java_cert/defaults/main.yml b/tests/integration/targets/java_cert/defaults/main.yml index 8e63493600..b391eeff2d 100644 --- a/tests/integration/targets/java_cert/defaults/main.yml +++ b/tests/integration/targets/java_cert/defaults/main.yml @@ -1,15 +1,15 @@ --- test_pkcs12_path: testpkcs.p12 test_keystore_path: keystore.jks -test_keystore2_path: "{{ 
output_dir }}/keystore2.jks" +test_keystore2_path: "{{ remote_tmp_dir }}/keystore2.jks" test_keystore2_password: changeit -test_cert_path: "{{ output_dir }}/cert.pem" -test_key_path: "{{ output_dir }}/key.pem" -test_csr_path: "{{ output_dir }}/req.csr" -test_cert2_path: "{{ output_dir }}/cert2.pem" -test_key2_path: "{{ output_dir }}/key2.pem" -test_csr2_path: "{{ output_dir }}/req2.csr" -test_pkcs_path: "{{ output_dir }}/cert.p12" -test_pkcs2_path: "{{ output_dir }}/cert2.p12" +test_cert_path: "{{ remote_tmp_dir }}/cert.pem" +test_key_path: "{{ remote_tmp_dir }}/key.pem" +test_csr_path: "{{ remote_tmp_dir }}/req.csr" +test_cert2_path: "{{ remote_tmp_dir }}/cert2.pem" +test_key2_path: "{{ remote_tmp_dir }}/key2.pem" +test_csr2_path: "{{ remote_tmp_dir }}/req2.csr" +test_pkcs_path: "{{ remote_tmp_dir }}/cert.p12" +test_pkcs2_path: "{{ remote_tmp_dir }}/cert2.p12" test_ssl: setupSSLServer.py test_ssl_port: 21500 diff --git a/tests/integration/targets/java_cert/meta/main.yml b/tests/integration/targets/java_cert/meta/main.yml index 9bc23ac67f..1d78393199 100644 --- a/tests/integration/targets/java_cert/meta/main.yml +++ b/tests/integration/targets/java_cert/meta/main.yml @@ -1,3 +1,4 @@ dependencies: - setup_java_keytool - setup_openssl + - setup_remote_tmp_dir diff --git a/tests/integration/targets/java_cert/tasks/main.yml b/tests/integration/targets/java_cert/tasks/main.yml index 20550740da..2088e3bfda 100644 --- a/tests/integration/targets/java_cert/tasks/main.yml +++ b/tests/integration/targets/java_cert/tasks/main.yml @@ -9,15 +9,15 @@ - name: prep pkcs12 file ansible.builtin.copy: src: "{{ test_pkcs12_path }}" - dest: "{{ output_dir }}/{{ test_pkcs12_path }}" + dest: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}" - name: import pkcs12 community.general.java_cert: - pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" + pkcs12_path: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}" pkcs12_password: changeit pkcs12_alias: default cert_alias: default - keystore_path: 
"{{ output_dir }}/{{ test_keystore_path }}" + keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present @@ -30,11 +30,11 @@ - name: import pkcs12 with wrong password community.general.java_cert: - pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" + pkcs12_path: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}" pkcs12_password: wrong_pass pkcs12_alias: default cert_alias: default_new - keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" + keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present @@ -49,9 +49,9 @@ - name: test fail on mutually exclusive params community.general.java_cert: cert_path: ca.crt - pkcs12_path: "{{ output_dir }}/{{ test_pkcs12_path }}" + pkcs12_path: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}" cert_alias: default - keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" + keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore keystore_create: yes state: present @@ -65,7 +65,7 @@ - name: test fail on missing required params community.general.java_cert: - keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" + keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore state: absent ignore_errors: true @@ -78,7 +78,7 @@ - name: delete object based on cert_alias parameter community.general.java_cert: - keystore_path: "{{ output_dir }}/{{ test_keystore_path }}" + keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}" keystore_pass: changeme_keystore cert_alias: default state: absent @@ -98,8 +98,8 @@ path: "{{ item }}" state: absent loop: - - "{{ output_dir }}/{{ test_pkcs12_path }}" - - "{{ output_dir }}/{{ test_keystore_path }}" + - "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}" + - "{{ remote_tmp_dir }}/{{ test_keystore_path }}" - "{{ test_keystore2_path }}" - "{{ test_cert_path }}" - "{{ 
test_key_path }}" diff --git a/tests/integration/targets/java_cert/tasks/state_change.yml b/tests/integration/targets/java_cert/tasks/state_change.yml index 38ef62cd0f..c0b92c8d2a 100644 --- a/tests/integration/targets/java_cert/tasks/state_change.yml +++ b/tests/integration/targets/java_cert/tasks/state_change.yml @@ -239,13 +239,17 @@ - name: Copy the ssl server script copy: src: "setupSSLServer.py" - dest: "{{ output_dir }}" + dest: "{{ remote_tmp_dir }}" - name: Create an SSL server that we will use for testing URL imports - command: python {{ output_dir }}/setupSSLServer.py {{ output_dir }} {{ test_ssl_port }} + command: "{{ ansible_python.executable }} {{ remote_tmp_dir }}/setupSSLServer.py {{ remote_tmp_dir }} {{ test_ssl_port }}" async: 10 poll: 0 +- name: "Wait for one second to make sure that the serve script has actually been started" + pause: + seconds: 1 + - name: | Download the original cert.pem from our temporary server. The current cert should contain cert2.pem. Importing this cert should return a status of changed diff --git a/tests/integration/targets/java_keystore/meta/main.yml b/tests/integration/targets/java_keystore/meta/main.yml index 9bc23ac67f..1d78393199 100644 --- a/tests/integration/targets/java_keystore/meta/main.yml +++ b/tests/integration/targets/java_keystore/meta/main.yml @@ -1,3 +1,4 @@ dependencies: - setup_java_keytool - setup_openssl + - setup_remote_tmp_dir diff --git a/tests/integration/targets/java_keystore/tasks/prepare.yml b/tests/integration/targets/java_keystore/tasks/prepare.yml index f8811c03ed..04b7cbd9d8 100644 --- a/tests/integration/targets/java_keystore/tasks/prepare.yml +++ b/tests/integration/targets/java_keystore/tasks/prepare.yml @@ -1,12 +1,12 @@ --- - name: Create test directory ansible.builtin.file: - path: "{{ output_dir }}" + path: "{{ remote_tmp_dir }}" state: directory - name: Create private keys community.crypto.openssl_privatekey: - path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ 
'.key' }}" + path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" size: 2048 # this should work everywhere # The following is more efficient, but might not work everywhere: # type: ECC @@ -17,17 +17,17 @@ - name: Create CSRs community.crypto.openssl_csr: - path: "{{ output_dir ~ '/' ~ item.name ~ '.csr' }}" - privatekey_path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" + path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.csr' }}" + privatekey_path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" privatekey_passphrase: "{{ item.passphrase | default(omit) }}" commonName: "{{ item.commonName }}" loop: "{{ java_keystore_certs + java_keystore_new_certs }}" - name: Create certificates community.crypto.x509_certificate: - path: "{{ output_dir ~ '/' ~ item.name ~ '.pem' }}" - csr_path: "{{ output_dir ~ '/' ~ item.name ~ '.csr' }}" - privatekey_path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" + path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" + csr_path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.csr' }}" + privatekey_path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}" privatekey_passphrase: "{{ item.passphrase | default(omit) }}" provider: selfsigned loop: "{{ java_keystore_certs + java_keystore_new_certs }}" diff --git a/tests/integration/targets/java_keystore/tasks/tests.yml b/tests/integration/targets/java_keystore/tasks/tests.yml index 8510a64165..07b30ad97d 100644 --- a/tests/integration/targets/java_keystore/tasks/tests.yml +++ b/tests/integration/targets/java_keystore/tasks/tests.yml @@ -1,199 +1,273 @@ --- - name: Create test directory ansible.builtin.file: - path: "{{ output_dir }}" + path: "{{ remote_tmp_dir }}" state: directory - name: Ensure the Java keystore does not exist (cleanup between tests) ansible.builtin.file: - path: "{{ output_dir ~ '/' ~ item.name ~ '.jks' }}" + path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ 
'.jks' }}" state: absent loop: "{{ java_keystore_certs }}" loop_control: - label: "{{ output_dir ~ '/' ~ item.name ~ '.jks' }}" + label: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.jks' }}" +- name: Read certificates + slurp: + src: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" + loop: "{{ java_keystore_certs }}" + when: not remote_cert + register: certificates + +- name: Read certificate keys + slurp: + src: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" + loop: "{{ java_keystore_certs }}" + when: not remote_cert + register: certificate_keys + - name: Create a Java keystore for the given ({{ 'remote' if remote_cert else 'local' }}) certificates (check mode) community.general.java_keystore: &java_keystore_params name: example - dest: "{{ output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" - certificate: "{{ omit if remote_cert else lookup('file', output_dir ~ '/' ~ item.name ~ '.pem') }}" - private_key: "{{ omit if remote_cert else lookup('file', output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key') }}" - certificate_path: "{{ omit if not remote_cert else output_dir ~ '/' ~ item.name ~ '.pem' }}" - private_key_path: "{{ omit if not remote_cert else output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" + dest: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + certificate: "{{ omit if remote_cert else (certificates.results[loop_index].content | b64decode) }}" + private_key: "{{ omit if remote_cert else (certificate_keys.results[loop_index].content | b64decode) }}" + certificate_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" + private_key_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" private_key_passphrase: "{{ item.passphrase | d(omit) }}" password: changeit ssl_backend: "{{ ssl_backend }}" keystore_type: "{{ item.keystore_type | d(omit) }}" loop: "{{ java_keystore_certs }}" + loop_control: + index_var: 
loop_index check_mode: yes register: result_check - name: Create a Java keystore for the given certificates community.general.java_keystore: *java_keystore_params loop: "{{ java_keystore_certs }}" + loop_control: + index_var: loop_index register: result - name: Create a Java keystore for the given certificates (idempotency, check mode) community.general.java_keystore: *java_keystore_params loop: "{{ java_keystore_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_idem_check - name: Create a Java keystore for the given certificates (idempotency) community.general.java_keystore: *java_keystore_params loop: "{{ java_keystore_certs }}" + loop_control: + index_var: loop_index register: result_idem -- name: Create a Java keystore for the given certificates (certificate changed, check mode) - community.general.java_keystore: *java_keystore_params +- name: Read certificates (new) + slurp: + src: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" loop: "{{ java_keystore_new_certs }}" + when: not remote_cert + register: certificates_new + +- name: Read certificate keys (new) + slurp: + src: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" + loop: "{{ java_keystore_new_certs }}" + when: not remote_cert + register: certificate_keys_new + +- name: Create a Java keystore for the given certificates (certificate changed, check mode) + community.general.java_keystore: &java_keystore_params_new_certs + name: example + dest: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + certificate: "{{ omit if remote_cert else (certificates_new.results[loop_index].content | b64decode) }}" + private_key: "{{ omit if remote_cert else (certificate_keys_new.results[loop_index].content | b64decode) }}" + certificate_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" + private_key_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}" + 
private_key_passphrase: "{{ item.passphrase | d(omit) }}" + password: changeit + ssl_backend: "{{ ssl_backend }}" + keystore_type: "{{ item.keystore_type | d(omit) }}" + loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_change_check - name: Create a Java keystore for the given certificates (certificate changed) - community.general.java_keystore: *java_keystore_params + community.general.java_keystore: *java_keystore_params_new_certs loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_change - name: Create a Java keystore for the given certificates (alias changed, check mode) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_alias_change_check - name: Create a Java keystore for the given certificates (alias changed) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_alias_change - name: Create a Java keystore for the given certificates (password changed, check mode) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_pw_change_check - name: Create a Java keystore for the given certificates (password changed) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_pw_change - name: Create a Java keystore for the given certificates (force keystore type pkcs12, check mode) 
community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 keystore_type: pkcs12 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_type_pkcs12_check - name: Create a Java keystore for the given certificates (force keystore type jks, check mode) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 keystore_type: jks loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_type_jks_check - name: Create a Java keystore for the given certificates (force keystore type jks) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 keystore_type: jks loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_type_jks - name: Stat keystore (before failure) ansible.builtin.stat: - path: "{{ output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" loop: "{{ java_keystore_new_certs }}" register: result_stat_before - name: Fail to create a Java keystore for the given certificates (password too short) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: short keystore_type: jks loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_fail_jks ignore_errors: true - name: Stat keystore (after failure) ansible.builtin.stat: - path: "{{ output_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" + path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}" loop: "{{ java_keystore_new_certs }}" register: result_stat_after - name: Create a Java keystore for the given certificates (keystore type changed, check mode) 
community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 keystore_type: pkcs12 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_type_change_check - name: Create a Java keystore for the given certificates (keystore type changed) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 keystore_type: pkcs12 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_type_change - name: Create a Java keystore for the given certificates (omit keystore type, check mode) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index check_mode: yes register: result_type_omit_check - name: Create a Java keystore for the given certificates (omit keystore type) community.general.java_keystore: - <<: *java_keystore_params + <<: *java_keystore_params_new_certs name: foobar password: hunter2 loop: "{{ java_keystore_new_certs }}" + loop_control: + index_var: loop_index register: result_type_omit - name: Check that the remote certificates have not been removed ansible.builtin.file: - path: "{{ output_dir ~ '/' ~ item.name ~ '.pem' }}" + path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}" state: file loop: "{{ java_keystore_certs + java_keystore_new_certs }}" when: remote_cert - name: Check that the remote private keys have not been removed ansible.builtin.file: - path: "{{ output_dir ~ '/' ~ item.name ~ '.key' }}" + path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.key' }}" state: file loop: "{{ java_keystore_certs }}" when: remote_cert diff --git a/tests/integration/targets/mail/meta/main.yml b/tests/integration/targets/mail/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- 
/dev/null +++ b/tests/integration/targets/mail/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/mail/tasks/main.yml b/tests/integration/targets/mail/tasks/main.yml index 714b662dfd..dbde6743d2 100644 --- a/tests/integration/targets/mail/tasks/main.yml +++ b/tests/integration/targets/mail/tasks/main.yml @@ -16,7 +16,7 @@ - name: Install test smtpserver copy: src: '{{ item }}' - dest: '{{ output_dir }}/{{ item }}' + dest: '{{ remote_tmp_dir }}/{{ item }}' loop: - smtpserver.py - smtpserver.crt @@ -25,7 +25,7 @@ # FIXME: Verify the mail after it was send would be nice # This would require either dumping the content, or registering async task output - name: Start test smtpserver - shell: '{{ ansible_python.executable }} {{ output_dir }}/smtpserver.py 10025:10465' + shell: '{{ ansible_python.executable }} {{ remote_tmp_dir }}/smtpserver.py 10025:10465' async: 30 poll: 0 register: smtpserver diff --git a/tests/integration/targets/nomad/meta/main.yml b/tests/integration/targets/nomad/meta/main.yml index f4c99a2ad7..f9bb8406a4 100644 --- a/tests/integration/targets/nomad/meta/main.yml +++ b/tests/integration/targets/nomad/meta/main.yml @@ -2,3 +2,4 @@ dependencies: - setup_pkg_mgr - setup_openssl + - setup_remote_tmp_dir diff --git a/tests/integration/targets/nomad/tasks/main.yml b/tests/integration/targets/nomad/tasks/main.yml index 1e42e7b2f6..81833684f0 100644 --- a/tests/integration/targets/nomad/tasks/main.yml +++ b/tests/integration/targets/nomad/tasks/main.yml @@ -6,7 +6,7 @@ vars: nomad_version: 0.12.4 nomad_uri: https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_{{ ansible_system | lower }}_{{ nomad_arch }}.zip - nomad_cmd: '{{ output_dir }}/nomad' + nomad_cmd: '{{ remote_tmp_dir }}/nomad' block: - name: register pyOpenSSL version @@ -36,21 +36,21 @@ block: - name: Generate privatekey community.crypto.openssl_privatekey: - path: '{{ output_dir }}/privatekey.pem' + path: '{{ 
remote_tmp_dir }}/privatekey.pem' - name: Generate CSR community.crypto.openssl_csr: - path: '{{ output_dir }}/csr.csr' - privatekey_path: '{{ output_dir }}/privatekey.pem' + path: '{{ remote_tmp_dir }}/csr.csr' + privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem' subject: commonName: localhost - name: Generate selfsigned certificate register: selfsigned_certificate community.crypto.openssl_certificate: - path: '{{ output_dir }}/cert.pem' - csr_path: '{{ output_dir }}/csr.csr' - privatekey_path: '{{ output_dir }}/privatekey.pem' + path: '{{ remote_tmp_dir }}/cert.pem' + csr_path: '{{ remote_tmp_dir }}/csr.csr' + privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem' provider: selfsigned selfsigned_digest: sha256 @@ -75,17 +75,17 @@ - name: Download nomad binary unarchive: src: '{{ nomad_uri }}' - dest: '{{ output_dir }}' + dest: '{{ remote_tmp_dir }}' remote_src: true register: result until: result is success - vars: - remote_dir: '{{ echo_output_dir.stdout }}' + remote_dir: '{{ echo_remote_tmp_dir.stdout }}' block: - - command: echo {{ output_dir }} - register: echo_output_dir + - command: echo {{ remote_tmp_dir }} + register: echo_remote_tmp_dir - name: Run tests integration block: diff --git a/tests/integration/targets/npm/meta/main.yml b/tests/integration/targets/npm/meta/main.yml index 392c359035..230548b160 100644 --- a/tests/integration/targets/npm/meta/main.yml +++ b/tests/integration/targets/npm/meta/main.yml @@ -1,3 +1,4 @@ dependencies: - setup_pkg_mgr - setup_gnutar + - setup_remote_tmp_dir diff --git a/tests/integration/targets/npm/tasks/main.yml b/tests/integration/targets/npm/tasks/main.yml index ed5a16a624..c3971fd91d 100644 --- a/tests/integration/targets/npm/tasks/main.yml +++ b/tests/integration/targets/npm/tasks/main.yml @@ -25,7 +25,7 @@ # Setup steps # expand remote path -- command: 'echo {{ output_dir }}' +- command: 'echo {{ remote_tmp_dir }}' register: echo - set_fact: remote_dir: '{{ echo.stdout }}' diff --git 
a/tests/integration/targets/npm/tasks/setup.yml b/tests/integration/targets/npm/tasks/setup.yml index 4e0d908e33..a463b1f8b7 100644 --- a/tests/integration/targets/npm/tasks/setup.yml +++ b/tests/integration/targets/npm/tasks/setup.yml @@ -1,6 +1,6 @@ - name: 'Download NPM' unarchive: src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/npm/{{ nodejs_path }}.tar.gz' - dest: '{{ output_dir }}' + dest: '{{ remote_tmp_dir }}' remote_src: yes - creates: '{{ output_dir }}/{{ nodejs_path }}.tar.gz' + creates: '{{ remote_tmp_dir }}/{{ nodejs_path }}.tar.gz' diff --git a/tests/integration/targets/pids/meta/main.yml b/tests/integration/targets/pids/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/pids/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/pids/tasks/main.yml b/tests/integration/targets/pids/tasks/main.yml index b56093cf0c..823d588561 100644 --- a/tests/integration/targets/pids/tasks/main.yml +++ b/tests/integration/targets/pids/tasks/main.yml @@ -31,13 +31,21 @@ register: find_sleep - name: "Copying 'sleep' binary" + command: cp {{ find_sleep.stdout }} {{ remote_tmp_dir }}/{{ random_name }} + # The following does not work on macOS 11.1 (it uses shutil.copystat, and that will die with a PermissionError): + # copy: + # src: "{{ find_sleep.stdout }}" + # dest: "{{ remote_tmp_dir }}/{{ random_name }}" + # mode: "0777" + # remote_src: true + +- name: Copy helper script copy: - src: "{{ find_sleep.stdout }}" - dest: "{{ output_dir }}/{{ random_name }}" - mode: "0777" + src: obtainpid.sh + dest: "{{ remote_tmp_dir }}/obtainpid.sh" - name: "Running the copy of 'sleep' binary" - command: "sh {{ role_path }}/files/obtainpid.sh '{{ output_dir }}/{{ random_name }}' '{{ output_dir }}/obtainpid.txt'" + command: "sh {{ remote_tmp_dir }}/obtainpid.sh '{{ remote_tmp_dir }}/{{ random_name }}' '{{ remote_tmp_dir }}/obtainpid.txt'" async: 100 
poll: 0 @@ -74,7 +82,7 @@ - name: "Reading pid from the file" slurp: - src: "{{ output_dir }}/obtainpid.txt" + src: "{{ remote_tmp_dir }}/obtainpid.txt" register: newpid - name: "Verify that the Process IDs (PIDs) returned is not empty and also equal to the PIDs obtained in console" diff --git a/tests/integration/targets/setup_openssl/tasks/main.yml b/tests/integration/targets/setup_openssl/tasks/main.yml index 62df7dd5f6..27d485a83f 100644 --- a/tests/integration/targets/setup_openssl/tasks/main.yml +++ b/tests/integration/targets/setup_openssl/tasks/main.yml @@ -33,6 +33,27 @@ extra_args: "-c {{ remote_constraints }}" when: ansible_os_family == 'Darwin' +- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6'] + block: + - name: Install cryptography (Python 3) + become: true + package: + name: '{{ cryptography_package_name_python3 }}' + when: not ansible_os_family == 'Darwin' and ansible_python_version is version('3.0', '>=') + + - name: Install cryptography (Python 2) + become: true + package: + name: '{{ cryptography_package_name }}' + when: not ansible_os_family == 'Darwin' and ansible_python_version is version('3.0', '<') + + - name: Install cryptography (Darwin) + become: true + pip: + name: cryptography>=3.3 + extra_args: "-c {{ remote_constraints }}" + when: ansible_os_family == 'Darwin' + - name: register pyOpenSSL version command: "{{ ansible_python.executable }} -c 'import OpenSSL; print(OpenSSL.__version__)'" register: pyopenssl_version diff --git a/tests/integration/targets/setup_openssl/vars/Debian.yml b/tests/integration/targets/setup_openssl/vars/Debian.yml index 755c7a083c..7254d00a5f 100644 --- a/tests/integration/targets/setup_openssl/vars/Debian.yml +++ b/tests/integration/targets/setup_openssl/vars/Debian.yml @@ -1,3 +1,5 @@ +cryptography_package_name: python-cryptography +cryptography_package_name_python3: python3-cryptography pyopenssl_package_name: python-openssl 
pyopenssl_package_name_python3: python3-openssl openssl_package_name: openssl diff --git a/tests/integration/targets/setup_openssl/vars/FreeBSD.yml b/tests/integration/targets/setup_openssl/vars/FreeBSD.yml index 4fef270602..c34b3646f4 100644 --- a/tests/integration/targets/setup_openssl/vars/FreeBSD.yml +++ b/tests/integration/targets/setup_openssl/vars/FreeBSD.yml @@ -1,3 +1,5 @@ +cryptography_package_name: py27-cryptography +cryptography_package_name_python3: "py{{ ansible_python.version.major }}{{ ansible_python.version.minor }}-cryptography" pyopenssl_package_name: py27-openssl pyopenssl_package_name_python3: "py{{ ansible_python.version.major }}{{ ansible_python.version.minor }}-openssl" openssl_package_name: openssl diff --git a/tests/integration/targets/setup_openssl/vars/RedHat.yml b/tests/integration/targets/setup_openssl/vars/RedHat.yml index 2959932cd7..5e077d732f 100644 --- a/tests/integration/targets/setup_openssl/vars/RedHat.yml +++ b/tests/integration/targets/setup_openssl/vars/RedHat.yml @@ -1,3 +1,5 @@ +cryptography_package_name: python-cryptography +cryptography_package_name_python3: python3-cryptography pyopenssl_package_name: pyOpenSSL pyopenssl_package_name_python3: python3-pyOpenSSL openssl_package_name: openssl diff --git a/tests/integration/targets/setup_openssl/vars/Suse.yml b/tests/integration/targets/setup_openssl/vars/Suse.yml index 2d5200f341..ec2c556bee 100644 --- a/tests/integration/targets/setup_openssl/vars/Suse.yml +++ b/tests/integration/targets/setup_openssl/vars/Suse.yml @@ -1,3 +1,5 @@ +cryptography_package_name: python-cryptography +cryptography_package_name_python3: python3-cryptography pyopenssl_package_name: python-pyOpenSSL pyopenssl_package_name_python3: python3-pyOpenSSL openssl_package_name: openssl diff --git a/tests/integration/targets/ssh_config/meta/main.yml b/tests/integration/targets/ssh_config/meta/main.yml index 91a63627f6..4c6838dbe1 100644 --- a/tests/integration/targets/ssh_config/meta/main.yml +++ 
b/tests/integration/targets/ssh_config/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_remote_constraints + - setup_remote_tmp_dir diff --git a/tests/integration/targets/ssh_config/tasks/main.yml b/tests/integration/targets/ssh_config/tasks/main.yml index bd5acc9e04..74a6f02fd2 100644 --- a/tests/integration/targets/ssh_config/tasks/main.yml +++ b/tests/integration/targets/ssh_config/tasks/main.yml @@ -9,15 +9,15 @@ extra_args: "-c {{ remote_constraints }}" - set_fact: - output_dir_test: '{{ output_dir }}/test_ssh_config' + output_test_dir: '{{ remote_tmp_dir }}/test_ssh_config' - set_fact: - ssh_config_test: '{{ output_dir_test }}/ssh_config_test' - ssh_private_key: '{{ output_dir_test }}/fake_id_rsa' + ssh_config_test: '{{ output_test_dir }}/ssh_config_test' + ssh_private_key: '{{ output_test_dir }}/fake_id_rsa' - name: create a temporary directory file: - path: "{{ output_dir_test }}" + path: "{{ output_test_dir }}" state: directory - name: Copy sample config file diff --git a/tests/integration/targets/supervisorctl/meta/main.yml b/tests/integration/targets/supervisorctl/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/supervisorctl/meta/main.yml +++ b/tests/integration/targets/supervisorctl/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/supervisorctl/tasks/main.yml b/tests/integration/targets/supervisorctl/tasks/main.yml index 2a7ecdcfc0..0c3dd31b76 100644 --- a/tests/integration/targets/supervisorctl/tasks/main.yml +++ b/tests/integration/targets/supervisorctl/tasks/main.yml @@ -9,7 +9,7 @@ suffix: supervisorctl-tests register: supervisord_sock_path - - command: 'echo {{ output_dir }}' + - command: 'echo {{ remote_tmp_dir }}' register: echo - set_fact: remote_dir: '{{ echo.stdout }}' diff --git a/tests/integration/targets/synchronize-buildah/aliases b/tests/integration/targets/synchronize-buildah/aliases deleted file mode 100644 index 
30b10b7ccb..0000000000 --- a/tests/integration/targets/synchronize-buildah/aliases +++ /dev/null @@ -1,3 +0,0 @@ -non_local -needs/root -unsupported diff --git a/tests/integration/targets/synchronize-buildah/inventory b/tests/integration/targets/synchronize-buildah/inventory deleted file mode 100644 index 2eeaf31350..0000000000 --- a/tests/integration/targets/synchronize-buildah/inventory +++ /dev/null @@ -1 +0,0 @@ -buildah-container ansible_host=buildah-container ansible_connection=buildah diff --git a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt b/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt deleted file mode 100644 index 33257a92c0..0000000000 --- a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt +++ /dev/null @@ -1 +0,0 @@ -abnormal content diff --git a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml b/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml deleted file mode 100644 index a80e218921..0000000000 --- a/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml +++ /dev/null @@ -1,71 +0,0 @@ -#################################################################### -# WARNING: These are designed specifically for Ansible tests # -# and should not be used as examples of how to write Ansible roles # -#################################################################### - -# test code for the synchronize module -# (c) 2014, James Tanner - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -- name: cleanup old files - file: - path: '{{ output_dir }}' - state: absent - -- name: ensure the target directory exists - file: - path: '{{ output_dir }}' - state: directory - -- name: synchronize file to new filename - synchronize: - src: normal_file.txt - dest: '{{ output_dir }}/remote_file.txt' - register: sync_result - -- assert: - that: - - "'changed' in sync_result" - - sync_result is changed - - "'cmd' in sync_result" - - "'rsync' in sync_result.cmd" - - "'msg' in sync_result" - - "sync_result.msg.startswith('/dev/null 2>/dev/null - -set -e - -buildah from --name $CONTAINER_NAME docker.io/library/centos:7 -trap '{ buildah rm $CONTAINER_NAME; }' EXIT -buildah run $CONTAINER_NAME -- yum install -y rsync - -ansible-playbook test_synchronize_buildah.yml -c buildah -i inventory -vv diff --git a/tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml b/tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml deleted file mode 100644 index e1cc96657e..0000000000 --- a/tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- hosts: buildah-container - connection: buildah - gather_facts: no - vars: - output_dir: /tmp/ansible_test_synchronize_buildah - roles: - - test_buildah_synchronize diff --git a/tests/integration/targets/xattr/defaults/main.yml b/tests/integration/targets/xattr/defaults/main.yml index af18fb8474..c208bf6fb9 100644 --- a/tests/integration/targets/xattr/defaults/main.yml +++ b/tests/integration/targets/xattr/defaults/main.yml @@ -1 +1 @@ -test_file: "{{ output_dir }}/foo.txt" +test_file: "{{ remote_tmp_dir 
}}/foo.txt" diff --git a/tests/integration/targets/xattr/meta/main.yml b/tests/integration/targets/xattr/meta/main.yml index 5438ced5c3..56bc554611 100644 --- a/tests/integration/targets/xattr/meta/main.yml +++ b/tests/integration/targets/xattr/meta/main.yml @@ -1,2 +1,3 @@ dependencies: - setup_pkg_mgr + - setup_remote_tmp_dir diff --git a/tests/integration/targets/yarn/meta/main.yml b/tests/integration/targets/yarn/meta/main.yml index 392c359035..230548b160 100644 --- a/tests/integration/targets/yarn/meta/main.yml +++ b/tests/integration/targets/yarn/meta/main.yml @@ -1,3 +1,4 @@ dependencies: - setup_pkg_mgr - setup_gnutar + - setup_remote_tmp_dir diff --git a/tests/integration/targets/yarn/tasks/run.yml b/tests/integration/targets/yarn/tasks/run.yml index bd17d7ffeb..906880797f 100644 --- a/tests/integration/targets/yarn/tasks/run.yml +++ b/tests/integration/targets/yarn/tasks/run.yml @@ -6,31 +6,31 @@ - name: 'Download Nodejs' unarchive: src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yarn/{{ nodejs_path }}.tar.gz' - dest: '{{ output_dir }}' + dest: '{{ remote_tmp_dir }}' remote_src: yes - creates: '{{ output_dir }}/{{ nodejs_path }}.tar.gz' + creates: '{{ remote_tmp_dir }}/{{ nodejs_path }}.tar.gz' - name: 'Download Yarn' unarchive: src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yarn/yarn-v{{yarn_version}}.tar.gz' - dest: '{{ output_dir }}' + dest: '{{ remote_tmp_dir }}' remote_src: yes - creates: '{{ output_dir }}/yarn-v{{yarn_version}}_pkg.tar.gz' + creates: '{{ remote_tmp_dir }}/yarn-v{{yarn_version}}_pkg.tar.gz' - name: 'Copy node to directory created earlier' - command: "mv {{ output_dir }}/{{ nodejs_path }} /usr/local/lib/nodejs/{{nodejs_path}}" + command: "mv {{ remote_tmp_dir }}/{{ nodejs_path }} /usr/local/lib/nodejs/{{nodejs_path}}" # Clean up before running tests - name: Remove any previous Nodejs modules file: - path: '{{output_dir}}/node_modules' + path: '{{remote_tmp_dir}}/node_modules' state: 
absent # Set vars for our test harness - vars: #node_bin_path: "/usr/local/lib/nodejs/node-v{{nodejs_version}}/bin" node_bin_path: "/usr/local/lib/nodejs/{{ nodejs_path }}/bin" - yarn_bin_path: "{{ output_dir }}/yarn-v{{ yarn_version }}/bin" + yarn_bin_path: "{{ remote_tmp_dir }}/yarn-v{{ yarn_version }}/bin" package: 'iconv-lite' environment: PATH: "{{ node_bin_path }}:{{ansible_env.PATH}}" @@ -45,11 +45,11 @@ - name: 'Create dummy package.json' copy: src: templates/package.j2 - dest: '{{ output_dir }}/package.json' + dest: '{{ remote_tmp_dir }}/package.json' - name: 'Install all packages.' yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' state: present environment: @@ -57,7 +57,7 @@ - name: 'Install the same package from package.json again.' yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' name: '{{ package }}' state: present @@ -71,7 +71,7 @@ - name: 'Install all packages in check mode.' 
yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' state: present environment: @@ -89,7 +89,7 @@ - name: 'Install package with explicit version (older version of package)' yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' name: left-pad version: 1.1.0 @@ -104,7 +104,7 @@ - name: 'Upgrade old package' yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' name: left-pad state: latest @@ -118,7 +118,7 @@ - name: 'Remove a package' yarn: - path: '{{ output_dir }}' + path: '{{ remote_tmp_dir }}' executable: '{{ yarn_bin_path }}/yarn' name: '{{ package }}' state: absent diff --git a/tests/integration/targets/zypper/meta/main.yml b/tests/integration/targets/zypper/meta/main.yml new file mode 100644 index 0000000000..1810d4bec9 --- /dev/null +++ b/tests/integration/targets/zypper/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_remote_tmp_dir diff --git a/tests/integration/targets/zypper/tasks/zypper.yml b/tests/integration/targets/zypper/tasks/zypper.yml index eed27ca3b2..30c4f991bc 100644 --- a/tests/integration/targets/zypper/tasks/zypper.yml +++ b/tests/integration/targets/zypper/tasks/zypper.yml @@ -150,17 +150,17 @@ # INSTALL broken local package - name: create directory file: - path: "{{output_dir | expanduser}}/zypper1" + path: "{{remote_tmp_dir | expanduser}}/zypper1" state: directory - name: fake rpm package file: - path: "{{output_dir | expanduser}}/zypper1/broken.rpm" + path: "{{remote_tmp_dir | expanduser}}/zypper1/broken.rpm" state: touch - name: install broken rpm zypper: - name: "{{output_dir | expanduser}}/zypper1/broken.rpm" + name: "{{remote_tmp_dir | expanduser}}/zypper1/broken.rpm" state: present register: zypper_result ignore_errors: yes @@ -191,29 +191,29 @@ - name: create directory file: - path: "{{output_dir | expanduser}}/zypper2" + path: "{{remote_tmp_dir | expanduser}}/zypper2" state: 
directory - name: copy spec file copy: src: empty.spec - dest: "{{ output_dir | expanduser }}/zypper2/empty.spec" + dest: "{{ remote_tmp_dir | expanduser }}/zypper2/empty.spec" - name: build rpm command: | rpmbuild -bb \ - --define "_topdir {{output_dir | expanduser }}/zypper2/rpm-build" + --define "_topdir {{remote_tmp_dir | expanduser }}/zypper2/rpm-build" --define "_builddir %{_topdir}" \ --define "_rpmdir %{_topdir}" \ --define "_srcrpmdir %{_topdir}" \ - --define "_specdir {{output_dir | expanduser}}/zypper2" \ + --define "_specdir {{remote_tmp_dir | expanduser}}/zypper2" \ --define "_sourcedir %{_topdir}" \ - {{ output_dir }}/zypper2/empty.spec + {{ remote_tmp_dir }}/zypper2/empty.spec register: rpm_build_result - name: install empty rpm zypper: - name: "{{ output_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm" + name: "{{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm" disable_gpg_check: yes register: zypper_result @@ -236,13 +236,13 @@ - name: extract from rpm zypper: - name: "{{ output_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm" + name: "{{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm" state: installed disable_gpg_check: yes - extra_args_precommand: --root {{ output_dir | expanduser }}/testdir/ + extra_args_precommand: --root {{ remote_tmp_dir | expanduser }}/testdir/ - name: check that dir var is exist - stat: path={{ output_dir | expanduser }}/testdir/var + stat: path={{ remote_tmp_dir | expanduser }}/testdir/var register: stat_result - name: check that we extract rpm package in testdir folder and folder var is exist @@ -458,25 +458,25 @@ - name: Deploy spec files to build 2 packages with duplicate files. 
template: src: duplicate.spec.j2 - dest: "{{ output_dir | expanduser }}/zypper2/duplicate{{ item }}.spec" + dest: "{{ remote_tmp_dir | expanduser }}/zypper2/duplicate{{ item }}.spec" loop: "{{ looplist }}" - name: build rpms with duplicate files command: | rpmbuild -bb \ - --define "_topdir {{output_dir | expanduser }}/zypper2/rpm-build" + --define "_topdir {{remote_tmp_dir | expanduser }}/zypper2/rpm-build" --define "_builddir %{_topdir}" \ --define "_rpmdir %{_topdir}" \ --define "_srcrpmdir %{_topdir}" \ - --define "_specdir {{output_dir | expanduser}}/zypper2" \ + --define "_specdir {{remote_tmp_dir | expanduser}}/zypper2" \ --define "_sourcedir %{_topdir}" \ - {{ output_dir | expanduser }}/zypper2/duplicate{{ item }}.spec + {{ remote_tmp_dir | expanduser }}/zypper2/duplicate{{ item }}.spec loop: "{{ looplist }}" - name: install duplicate rpms zypper: name: >- - {{ output_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm + {{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm disable_gpg_check: true ignore_errors: true register: zypper_duplicate_result @@ -499,7 +499,7 @@ - name: install duplicate rpms zypper: name: >- - {{ output_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm + {{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm disable_gpg_check: true replacefiles: true ignore_errors: true From 05fe587a3ea2643bacd4dea07112d18464eae6bf Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Thu, 9 Sep 2021 07:58:21 -0400 Subject: [PATCH 0556/3093] Adding new maintainer (#3349) --- .github/BOTMETA.yml | 2 +- commit-rights.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 5b55449a67..3dfca22e73 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -1,7 +1,7 @@ automerge: true files: plugins/: - supershipit: quidame Ajpantuso + supershipit: quidame 
changelogs/: {} changelogs/fragments/: support: community diff --git a/commit-rights.md b/commit-rights.md index 58743e5048..43836350c5 100644 --- a/commit-rights.md +++ b/commit-rights.md @@ -69,5 +69,6 @@ Individuals who have been asked to become a part of this group have generally be | ------------------- | -------------------- | ------------------ | -------------------- | | Alexei Znamensky | russoz | russoz | | | Andrew Klychkov | andersson007 | andersson007_ | | +| Andrew Pantuso | Ajpantuso | ajpantuso | | | Felix Fontein | felixfontein | felixfontein | | | John R Barker | gundalow | gundalow | | From 58d018ebbd60e2cebcd8050cd61b0a80a6e4b1a1 Mon Sep 17 00:00:00 2001 From: mkschuel <77283980+mkschuel@users.noreply.github.com> Date: Fri, 10 Sep 2021 23:38:26 +0200 Subject: [PATCH 0557/3093] Adds few more gitlab group options (#3248) * Adds few more gitlab group options * Update plugins/modules/source_control/gitlab/gitlab_group.py Co-authored-by: Felix Fontein * Removes default for new string options * Removes default from argument_spec * Adds changelog fragment * Update plugins/modules/source_control/gitlab/gitlab_group.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_group.py Co-authored-by: Felix Fontein Co-authored-by: Maik Schueller Co-authored-by: Felix Fontein --- ...248-adds-few-more-gitlab-group-options.yml | 2 + .../source_control/gitlab/gitlab_group.py | 53 +++++++++++++++++-- .../targets/gitlab_group/tasks/main.yml | 25 +++++++++ .../modules/source_control/gitlab/gitlab.py | 7 ++- .../gitlab/test_gitlab_group.py | 17 ++++-- 5 files changed, 96 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/3248-adds-few-more-gitlab-group-options.yml diff --git a/changelogs/fragments/3248-adds-few-more-gitlab-group-options.yml b/changelogs/fragments/3248-adds-few-more-gitlab-group-options.yml new file mode 100644 
index 0000000000..f565fea565 --- /dev/null +++ b/changelogs/fragments/3248-adds-few-more-gitlab-group-options.yml @@ -0,0 +1,2 @@ +minor_changes: + - gitlab_group - add new options ``project_creation_level``, ``auto_devops_enabled``, ``subgroup_creation_level`` (https://github.com/ansible-collections/community.general/pull/3248). diff --git a/plugins/modules/source_control/gitlab/gitlab_group.py b/plugins/modules/source_control/gitlab/gitlab_group.py index 42e1801a81..cdf0f41b65 100644 --- a/plugins/modules/source_control/gitlab/gitlab_group.py +++ b/plugins/modules/source_control/gitlab/gitlab_group.py @@ -61,6 +61,23 @@ options: choices: ["private", "internal", "public"] default: private type: str + project_creation_level: + description: + - Determine if developers can create projects in the group. + choices: ["developer", "maintainer", "noone"] + type: str + version_added: 3.7.0 + auto_devops_enabled: + description: + - Default to Auto DevOps pipeline for all projects within this group. + type: bool + version_added: 3.7.0 + subgroup_creation_level: + description: + - Allowed to create subgroups. 
+ choices: ["maintainer", "owner"] + type: str + version_added: 3.7.0 ''' EXAMPLES = ''' @@ -93,6 +110,20 @@ EXAMPLES = ''' path: my_first_group state: present parent: "super_parent/parent" + +# Other group which only allows sub-groups - no projects +- name: "Create GitLab Group for SubGroups only" + community.general.gitlab_group: + api_url: https://gitlab.example.com/ + validate_certs: True + api_username: dj-wasabi + api_password: "MySecretPassword" + name: my_main_group + path: my_main_group + state: present + project_creation_level: noone + auto_devops_enabled: false + subgroup_creation_level: maintainer ''' RETURN = ''' @@ -166,7 +197,10 @@ class GitLabGroup(object): 'name': name, 'path': options['path'], 'parent_id': parent_id, - 'visibility': options['visibility'] + 'visibility': options['visibility'], + 'project_creation_level': options['project_creation_level'], + 'auto_devops_enabled': options['auto_devops_enabled'], + 'subgroup_creation_level': options['subgroup_creation_level'], } if options.get('description'): payload['description'] = options['description'] @@ -176,7 +210,11 @@ class GitLabGroup(object): changed, group = self.updateGroup(self.groupObject, { 'name': name, 'description': options['description'], - 'visibility': options['visibility']}) + 'visibility': options['visibility'], + 'project_creation_level': options['project_creation_level'], + 'auto_devops_enabled': options['auto_devops_enabled'], + 'subgroup_creation_level': options['subgroup_creation_level'], + }) self.groupObject = group if changed: @@ -258,6 +296,9 @@ def main(): state=dict(type='str', default="present", choices=["absent", "present"]), parent=dict(type='str'), visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), + project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), + auto_devops_enabled=dict(type='bool'), + subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), )) module = 
AnsibleModule( @@ -281,6 +322,9 @@ def main(): state = module.params['state'] parent_identifier = module.params['parent'] group_visibility = module.params['visibility'] + project_creation_level = module.params['project_creation_level'] + auto_devops_enabled = module.params['auto_devops_enabled'] + subgroup_creation_level = module.params['subgroup_creation_level'] if not HAS_GITLAB_PACKAGE: module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) @@ -314,7 +358,10 @@ def main(): if gitlab_group.createOrUpdateGroup(group_name, parent_group, { "path": group_path, "description": description, - "visibility": group_visibility}): + "visibility": group_visibility, + "project_creation_level": project_creation_level, + "auto_devops_enabled": auto_devops_enabled, + "subgroup_creation_level": subgroup_creation_level}): module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.groupObject._attrs) else: module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.groupObject._attrs) diff --git a/tests/integration/targets/gitlab_group/tasks/main.yml b/tests/integration/targets/gitlab_group/tasks/main.yml index 34444134c2..fbf8de29a0 100644 --- a/tests/integration/targets/gitlab_group/tasks/main.yml +++ b/tests/integration/targets/gitlab_group/tasks/main.yml @@ -72,3 +72,28 @@ assert: that: - gitlab_group_state_desc.group.description == "My Test Group" + +- name: Cleanup GitLab Group for project_creation_level Test + gitlab_group: + api_url: "{{ gitlab_host }}" + validate_certs: false + api_token: "{{ gitlab_login_token }}" + name: ansible_test_group + path: ansible_test_group + state: absent + +- name: Create GitLab Group for project_creation_level Test + gitlab_group: + api_url: "{{ gitlab_host }}" + validate_certs: false + api_token: "{{ gitlab_login_token }}" + name: ansible_test_group + path: ansible_test_group + project_creation_level: noone + state: 
present + register: gitlab_group_state_pcl + +- name: Test group created with project_creation_level + assert: + that: + - gitlab_group_state_pcl.group.project_creation_level == "noone" diff --git a/tests/unit/plugins/modules/source_control/gitlab/gitlab.py b/tests/unit/plugins/modules/source_control/gitlab/gitlab.py index 5feff78b43..cca9ab5ae6 100644 --- a/tests/unit/plugins/modules/source_control/gitlab/gitlab.py +++ b/tests/unit/plugins/modules/source_control/gitlab/gitlab.py @@ -194,6 +194,7 @@ def resp_get_group(url, request): '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' '"full_name": "Foobar Group", "full_path": "foo-bar",' + '"project_creation_level": "maintainer", "subgroup_creation_level": "maintainer",' '"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",' '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",' '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",' @@ -225,7 +226,8 @@ def resp_create_group(url, request): '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",' '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,' '"full_name": "Foobar Group", "full_path": "foo-bar",' - '"file_template_project_id": 1, "parent_id": null}') + '"file_template_project_id": 1, "parent_id": null,' + '"project_creation_level": "developer", "subgroup_creation_level": "maintainer"}') content = content.encode("utf-8") return response(200, content, headers, None, 5, request) @@ -238,7 +240,8 @@ def resp_create_subgroup(url, request): '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",' '"web_url": "http://localhost:3000/groups/foo-bar/bar-foo", "request_access_enabled": false,' '"full_name": "BarFoo Group", "full_path": 
"foo-bar/bar-foo",' - '"file_template_project_id": 1, "parent_id": 1}') + '"file_template_project_id": 1, "parent_id": 1,' + '"project_creation_level": "noone"}') content = content.encode("utf-8") return response(200, content, headers, None, 5, request) diff --git a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py index abf49860f9..0b05f8a7ff 100644 --- a/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py +++ b/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py @@ -68,32 +68,43 @@ class TestGitlabGroup(GitlabModuleTestCase): def test_create_group(self): group = self.moduleUtil.createGroup({'name': "Foobar Group", 'path': "foo-bar", - 'description': "An interesting group"}) + 'description': "An interesting group", + 'project_creation_level': "developer", + 'subgroup_creation_level': "maintainer"}) self.assertEqual(type(group), Group) self.assertEqual(group.name, "Foobar Group") self.assertEqual(group.path, "foo-bar") self.assertEqual(group.description, "An interesting group") + self.assertEqual(group.project_creation_level, "developer") + self.assertEqual(group.subgroup_creation_level, "maintainer") self.assertEqual(group.id, 1) @with_httmock(resp_create_subgroup) def test_create_subgroup(self): - group = self.moduleUtil.createGroup({'name': "BarFoo Group", 'path': "bar-foo", "parent_id": 1}) + group = self.moduleUtil.createGroup({'name': "BarFoo Group", + 'path': "bar-foo", + 'parent_id': 1, + 'project_creation_level': "noone"}) self.assertEqual(type(group), Group) self.assertEqual(group.name, "BarFoo Group") self.assertEqual(group.full_path, "foo-bar/bar-foo") + self.assertEqual(group.project_creation_level, "noone") self.assertEqual(group.id, 2) self.assertEqual(group.parent_id, 1) @with_httmock(resp_get_group) def test_update_group(self): group = self.gitlab_instance.groups.get(1) - changed, newGroup = 
self.moduleUtil.updateGroup(group, {'name': "BarFoo Group", "visibility": "private"}) + changed, newGroup = self.moduleUtil.updateGroup(group, {'name': "BarFoo Group", + 'visibility': "private", + 'project_creation_level': "maintainer"}) self.assertEqual(changed, True) self.assertEqual(newGroup.name, "BarFoo Group") self.assertEqual(newGroup.visibility, "private") + self.assertEqual(newGroup.project_creation_level, "maintainer") changed, newGroup = self.moduleUtil.updateGroup(group, {'name': "BarFoo Group"}) From 612543919e13e5cf64da92c3342ce7cc7a7453e1 Mon Sep 17 00:00:00 2001 From: Roy Lenferink Date: Sun, 12 Sep 2021 13:46:17 +0200 Subject: [PATCH 0558/3093] Add ipaselinuxusermaporder option to the ipa_config module (#3178) --- ...linuxusermaporder-to-ipa-config-module.yml | 3 +++ plugins/modules/identity/ipa/ipa_config.py | 27 +++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml diff --git a/changelogs/fragments/3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml b/changelogs/fragments/3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml new file mode 100644 index 0000000000..9057be911c --- /dev/null +++ b/changelogs/fragments/3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml @@ -0,0 +1,3 @@ +minor_changes: + - ipa_config - add ``ipaselinuxusermaporder`` option to set the SELinux user map order + (https://github.com/ansible-collections/community.general/pull/3178). diff --git a/plugins/modules/identity/ipa/ipa_config.py b/plugins/modules/identity/ipa/ipa_config.py index e8ee073d6e..2b41dfb098 100644 --- a/plugins/modules/identity/ipa/ipa_config.py +++ b/plugins/modules/identity/ipa/ipa_config.py @@ -72,6 +72,12 @@ options: aliases: ["searchtimelimit"] type: int version_added: '2.5.0' + ipaselinuxusermaporder: + description: The SELinux user map order (order in increasing priority of SELinux users). 
+ aliases: ["selinuxusermaporder"] + type: list + elements: str + version_added: '3.7.0' ipauserauthtype: description: The authentication type to use by default. aliases: ["userauthtype"] @@ -181,6 +187,18 @@ EXAMPLES = r''' ipa_host: localhost ipa_user: admin ipa_pass: supersecret + +- name: Ensure the SELinux user map order is set + community.general.ipa_config: + ipaselinuxusermaporder: + - "guest_u:s0" + - "xguest_u:s0" + - "user_u:s0" + - "staff_u:s0-s0:c0.c1023" + - "unconfined_u:s0-s0:c0.c1023" + ipa_host: localhost + ipa_user: admin + ipa_pass: supersecret ''' RETURN = r''' @@ -213,8 +231,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, ipagroupsearchfields=None, ipahomesrootdir=None, ipakrbauthzdata=None, ipamaxusernamelength=None, ipapwdexpadvnotify=None, ipasearchrecordslimit=None, - ipasearchtimelimit=None, ipauserauthtype=None, - ipausersearchfields=None): + ipasearchtimelimit=None, ipaselinuxusermaporder=None, + ipauserauthtype=None, ipausersearchfields=None): config = {} if ipaconfigstring is not None: config['ipaconfigstring'] = ipaconfigstring @@ -238,6 +256,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None, config['ipasearchrecordslimit'] = str(ipasearchrecordslimit) if ipasearchtimelimit is not None: config['ipasearchtimelimit'] = str(ipasearchtimelimit) + if ipaselinuxusermaporder is not None: + config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder) if ipauserauthtype is not None: config['ipauserauthtype'] = ipauserauthtype if ipausersearchfields is not None: @@ -263,6 +283,7 @@ def ensure(module, client): ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'), ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'), ipasearchtimelimit=module.params.get('ipasearchtimelimit'), + ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'), ipauserauthtype=module.params.get('ipauserauthtype'), ipausersearchfields=module.params.get('ipausersearchfields'), ) @@ -304,6 
+325,8 @@ def main(): ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']), ipasearchrecordslimit=dict(type='int', aliases=['searchrecordslimit']), ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']), + ipaselinuxusermaporder=dict(type='list', elements='str', + aliases=['selinuxusermaporder']), ipauserauthtype=dict(type='list', elements='str', aliases=['userauthtype'], choices=["password", "radius", "otp", "pkinit", From 29e4066944686f09c911778e8419027909c8802b Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Sun, 12 Sep 2021 07:46:53 -0400 Subject: [PATCH 0559/3093] New filter plugin - unicode_normalization (#3359) * Initial commit * Adding maintainer in BOTMETA * Adding changelog fragment * Updating filter_guide * Applying initial review suggestions --- .github/BOTMETA.yml | 2 + .../3359-add-unicode_normalize-filter.yml | 4 ++ docs/docsite/rst/filter_guide.rst | 31 ++++++++++++++ plugins/filter/unicode_normalize.py | 40 +++++++++++++++++++ .../targets/filter_unicode_normalize/aliases | 2 + .../filter_unicode_normalize/tasks/main.yml | 39 ++++++++++++++++++ .../filter_unicode_normalize/vars/main.yml | 4 ++ 7 files changed, 122 insertions(+) create mode 100644 changelogs/fragments/3359-add-unicode_normalize-filter.yml create mode 100644 plugins/filter/unicode_normalize.py create mode 100644 tests/integration/targets/filter_unicode_normalize/aliases create mode 100644 tests/integration/targets/filter_unicode_normalize/tasks/main.yml create mode 100644 tests/integration/targets/filter_unicode_normalize/vars/main.yml diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 3dfca22e73..df2520e263 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -131,6 +131,8 @@ files: $filters/random_mac.py: {} $filters/time.py: maintainers: resmo + $filters/unicode_normalize.py: + maintainers: Ajpantuso $filters/version_sort.py: maintainers: ericzolf $inventories/: diff --git a/changelogs/fragments/3359-add-unicode_normalize-filter.yml 
b/changelogs/fragments/3359-add-unicode_normalize-filter.yml new file mode 100644 index 0000000000..33aa06dc92 --- /dev/null +++ b/changelogs/fragments/3359-add-unicode_normalize-filter.yml @@ -0,0 +1,4 @@ +--- +add plugin.filter: + - name: unicode_normalize + description: Normalizes unicode strings to facilitate comparison of characters with normalized forms diff --git a/docs/docsite/rst/filter_guide.rst b/docs/docsite/rst/filter_guide.rst index 201b275aae..dab8464439 100644 --- a/docs/docsite/rst/filter_guide.rst +++ b/docs/docsite/rst/filter_guide.rst @@ -751,3 +751,34 @@ To extract ports from all clusters with name containing 'server1': server_name_query: "domain.server[?contains(name,'server1')].port" .. note:: while using ``starts_with`` and ``contains``, you have to use `` to_json | from_json `` filter for correct parsing of data structure. + +Working with Unicode +--------------------- + +`Unicode `_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this ``Unicode`` defines `normalization forms `_ which avoid these distinctions by choosing a unique character sequence for a given visual representation. + +You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks. + +.. code-block:: yaml+jinja + + - name: Compare Unicode representations + debug: + msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}" + vars: + with_combining_character: "{{ 'Mayagu\u0308ez' }}" + without_combining_character: Mayagüez + +This produces: + +.. code-block:: ansible-output + + TASK [Compare Unicode representations] ******************************************************** + ok: [localhost] => { + "msg": true + } + +The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string. 
+ +:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference `_ for more information. + +.. versionadded:: 3.7.0 diff --git a/plugins/filter/unicode_normalize.py b/plugins/filter/unicode_normalize.py new file mode 100644 index 0000000000..9afbf29e3f --- /dev/null +++ b/plugins/filter/unicode_normalize.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from unicodedata import normalize + +from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError +from ansible.module_utils.six import text_type + + +def unicode_normalize(data, form='NFC'): + """Applies normalization to 'unicode' strings. + + Args: + data: A unicode string piped into the Jinja filter + form: One of ('NFC', 'NFD', 'NFKC', 'NFKD'). + See https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize for more information. + + Returns: + A normalized unicode string of the specified 'form'. 
+ """ + + if not isinstance(data, text_type): + raise AnsibleFilterTypeError("%s is not a valid input type" % type(data)) + + if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'): + raise AnsibleFilterError("%s is not a valid form" % form) + + return normalize(form, data) + + +class FilterModule(object): + def filters(self): + return { + 'unicode_normalize': unicode_normalize, + } diff --git a/tests/integration/targets/filter_unicode_normalize/aliases b/tests/integration/targets/filter_unicode_normalize/aliases new file mode 100644 index 0000000000..f04737b845 --- /dev/null +++ b/tests/integration/targets/filter_unicode_normalize/aliases @@ -0,0 +1,2 @@ +shippable/posix/group2 +skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller diff --git a/tests/integration/targets/filter_unicode_normalize/tasks/main.yml b/tests/integration/targets/filter_unicode_normalize/tasks/main.yml new file mode 100644 index 0000000000..948ca74b4b --- /dev/null +++ b/tests/integration/targets/filter_unicode_normalize/tasks/main.yml @@ -0,0 +1,39 @@ +#################################################################### +# WARNING: These are designed specifically for Ansible tests # +# and should not be used as examples of how to write Ansible roles # +#################################################################### + +- name: Test 'NFC' normalization + assert: + that: + - u_umlaut != u_umlaut_combining + - u_umlaut_combining != (u_umlaut_combining | community.general.unicode_normalize) + - u_umlaut == (u_umlaut_combining | community.general.unicode_normalize) + +- name: Test 'NFKC' normalization + assert: + that: + - latin_capital_i != roman_numeral_one + - latin_capital_i == (roman_numeral_one | community.general.unicode_normalize(form='NFKC')) + +- name: Register invalid input type + debug: + msg: "{{ 1 | community.general.unicode_normalize }}" + ignore_errors: true + register: invalid_input_type + +- name: Assert an invalid input type causes 
failure + assert: + that: + - invalid_input_type is failed + +- name: Register invalid form selection + debug: + msg: "{{ 'arbitrary text' | community.general.unicode_normalize(form='invalid') }}" + ignore_errors: true + register: invalid_form_selection + +- name: Assert invalid form selection causes failure + assert: + that: + - invalid_form_selection is failed diff --git a/tests/integration/targets/filter_unicode_normalize/vars/main.yml b/tests/integration/targets/filter_unicode_normalize/vars/main.yml new file mode 100644 index 0000000000..88d19b20db --- /dev/null +++ b/tests/integration/targets/filter_unicode_normalize/vars/main.yml @@ -0,0 +1,4 @@ +u_umlaut: "{{ '\u00fc' }}" +u_umlaut_combining: "{{ 'u' + '\u0308' }}" +roman_numeral_one: "{{ '\u2160' }}" +latin_capital_i: "{{ '\u0049' }}" From 0a5db85ad52115d535f9f80d330c81eb91b20b80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rapha=C3=ABl=20Droz?= Date: Mon, 13 Sep 2021 02:16:06 -0300 Subject: [PATCH 0560/3093] gitlab_runner: Support project-scoped runners registration (#2971) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * support project-scoped gitlab.com runners registration * rename glproject variable to gitlab_project * update version * Update plugins/modules/source_control/gitlab/gitlab_runner.py Co-authored-by: Raphaël Droz Co-authored-by: Felix Fontein --- .../fragments/634-gitlab_project_runners.yaml | 2 + .../source_control/gitlab/gitlab_runner.py | 41 +++++++++++++++---- 2 files changed, 36 insertions(+), 7 deletions(-) create mode 100644 changelogs/fragments/634-gitlab_project_runners.yaml diff --git a/changelogs/fragments/634-gitlab_project_runners.yaml b/changelogs/fragments/634-gitlab_project_runners.yaml new file mode 100644 index 0000000000..0a3a733624 --- /dev/null +++ b/changelogs/fragments/634-gitlab_project_runners.yaml @@ -0,0 +1,2 @@ +minor_changes: +- gitlab_runner - support project-scoped gitlab.com runners registration 
(https://github.com/ansible-collections/community.general/pull/634). diff --git a/plugins/modules/source_control/gitlab/gitlab_runner.py b/plugins/modules/source_control/gitlab/gitlab_runner.py index 25490b00dd..34471b01d4 100644 --- a/plugins/modules/source_control/gitlab/gitlab_runner.py +++ b/plugins/modules/source_control/gitlab/gitlab_runner.py @@ -1,6 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# Copyright: (c) 2021, Raphaël Droz (raphael.droz@gmail.com) # Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # Copyright: (c) 2018, Samy Coenen # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -38,6 +39,11 @@ options: description: - Your private token to interact with the GitLab API. type: str + project: + description: + - ID or full path of the project in the form of group/name. + type: str + version_added: '3.7.0' description: description: - The unique name of the runner. @@ -131,6 +137,15 @@ EXAMPLES = ''' description: Docker Machine t1 owned: yes state: absent + +- name: Register runner for a specific project + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + registration_token: 4gfdsg345 + description: MyProject runner + state: present + project: mygroup/mysubgroup/myproject ''' RETURN = ''' @@ -181,9 +196,13 @@ except NameError: class GitLabRunner(object): - def __init__(self, module, gitlab_instance): + def __init__(self, module, gitlab_instance, project=None): self._module = module self._gitlab = gitlab_instance + # Whether to operate on GitLab-instance-wide or project-wide runners + # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774 + # for group runner token access + self._runners_endpoint = project.runners if project else gitlab_instance.runners self.runnerObject = None def createOrUpdateRunner(self, description, options): @@ -230,7 +249,7 @@ class GitLabRunner(object): return True try: - runner = 
self._gitlab.runners.create(arguments) + runner = self._runners_endpoint.create(arguments) except (gitlab.exceptions.GitlabCreateError) as e: self._module.fail_json(msg="Failed to create runner: %s " % to_native(e)) @@ -265,19 +284,19 @@ class GitLabRunner(object): ''' def findRunner(self, description, owned=False): if owned: - runners = self._gitlab.runners.list(as_list=False) + runners = self._runners_endpoint.list(as_list=False) else: - runners = self._gitlab.runners.all(as_list=False) + runners = self._runners_endpoint.all(as_list=False) for runner in runners: # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner # object, so we need to handle both if hasattr(runner, "description"): if (runner.description == description): - return self._gitlab.runners.get(runner.id) + return self._runners_endpoint.get(runner.id) else: if (runner['description'] == description): - return self._gitlab.runners.get(runner['id']) + return self._runners_endpoint.get(runner['id']) ''' @param description Description of the runner @@ -313,6 +332,7 @@ def main(): access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]), maximum_timeout=dict(type='int', default=3600), registration_token=dict(type='str', no_log=True), + project=dict(type='str'), state=dict(type='str', default="present", choices=["absent", "present"]), )) @@ -344,13 +364,20 @@ def main(): access_level = module.params['access_level'] maximum_timeout = module.params['maximum_timeout'] registration_token = module.params['registration_token'] + project = module.params['project'] if not HAS_GITLAB_PACKAGE: module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) gitlab_instance = gitlabAuthentication(module) + gitlab_project = None + if project: + try: + gitlab_project = gitlab_instance.projects.get(project) + except gitlab.exceptions.GitlabGetError as e: + module.fail_json(msg='No such a project %s' % project, 
exception=to_native(e)) - gitlab_runner = GitLabRunner(module, gitlab_instance) + gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_project) runner_exists = gitlab_runner.existsRunner(runner_description, owned) if state == 'absent': From 118c040879c742cf3d97c452653efd6f91a4f91b Mon Sep 17 00:00:00 2001 From: Ajpantuso Date: Mon, 13 Sep 2021 01:16:49 -0400 Subject: [PATCH 0561/3093] pids - refactor module to make version-based behavior consistent (#3315) * Initial commit * Adding changelog fragment * Further refactoring * Fixing bad copy/paste and adding task for psutil >= 5.7.0 install * Inverting psutil installation order to reduce duplication * Optimizing regex compilation --- changelogs/fragments/3315-pids-refactor.yml | 4 + plugins/modules/system/pids.py | 168 +++++++++++++----- tests/integration/targets/pids/tasks/main.yml | 12 +- 3 files changed, 137 insertions(+), 47 deletions(-) create mode 100644 changelogs/fragments/3315-pids-refactor.yml diff --git a/changelogs/fragments/3315-pids-refactor.yml b/changelogs/fragments/3315-pids-refactor.yml new file mode 100644 index 0000000000..53a36c2cad --- /dev/null +++ b/changelogs/fragments/3315-pids-refactor.yml @@ -0,0 +1,4 @@ +--- +minor_changes: + - pids - refactor to add support for older ``psutil`` versions to the ``pattern`` option + (https://github.com/ansible-collections/community.general/pull/3315). 
diff --git a/plugins/modules/system/pids.py b/plugins/modules/system/pids.py index 622bec2500..9745c31449 100644 --- a/plugins/modules/system/pids.py +++ b/plugins/modules/system/pids.py @@ -54,9 +54,12 @@ pids: sample: [100,200] ''' +import abc import re +from distutils.version import LooseVersion from os.path import basename +from ansible.module_utils import six from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_native @@ -68,6 +71,100 @@ except ImportError: HAS_PSUTIL = False +class PSAdapterError(Exception): + pass + + +@six.add_metaclass(abc.ABCMeta) +class PSAdapter(object): + NAME_ATTRS = ('name', 'cmdline') + PATTERN_ATTRS = ('name', 'exe', 'cmdline') + + def __init__(self, psutil): + self._psutil = psutil + + @staticmethod + def from_package(psutil): + version = LooseVersion(psutil.__version__) + if version < LooseVersion('2.0.0'): + return PSAdapter100(psutil) + elif version < LooseVersion('5.3.0'): + return PSAdapter200(psutil) + else: + return PSAdapter530(psutil) + + def get_pids_by_name(self, name): + return [p.pid for p in self._process_iter(*self.NAME_ATTRS) if self._has_name(p, name)] + + def _process_iter(self, *attrs): + return self._psutil.process_iter() + + def _has_name(self, proc, name): + attributes = self._get_proc_attributes(proc, *self.NAME_ATTRS) + return (compare_lower(attributes['name'], name) or + attributes['cmdline'] and compare_lower(attributes['cmdline'][0], name)) + + def _get_proc_attributes(self, proc, *attributes): + return dict((attribute, self._get_attribute_from_proc(proc, attribute)) for attribute in attributes) + + @staticmethod + @abc.abstractmethod + def _get_attribute_from_proc(proc, attribute): + pass + + def get_pids_by_pattern(self, pattern, ignore_case): + flags = 0 + if ignore_case: + flags |= re.I + + try: + regex = re.compile(pattern, flags) + except re.error as e: + raise PSAdapterError("'%s' is not a valid regular expression: 
%s" % (pattern, to_native(e))) + + return [p.pid for p in self._process_iter(*self.PATTERN_ATTRS) if self._matches_regex(p, regex)] + + def _matches_regex(self, proc, regex): + # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information + attributes = self._get_proc_attributes(proc, *self.PATTERN_ATTRS) + matches_name = regex.search(to_native(attributes['name'])) + matches_exe = attributes['exe'] and regex.search(basename(to_native(attributes['exe']))) + matches_cmd = attributes['cmdline'] and regex.search(to_native(' '.join(attributes['cmdline']))) + + return any([matches_name, matches_exe, matches_cmd]) + + +class PSAdapter100(PSAdapter): + def __init__(self, psutil): + super(PSAdapter100, self).__init__(psutil) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + return getattr(proc, attribute) + + +class PSAdapter200(PSAdapter): + def __init__(self, psutil): + super(PSAdapter200, self).__init__(psutil) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + method = getattr(proc, attribute) + return method() + + +class PSAdapter530(PSAdapter): + def __init__(self, psutil): + super(PSAdapter530, self).__init__(psutil) + + def _process_iter(self, *attrs): + return self._psutil.process_iter(attrs=attrs) + + @staticmethod + def _get_attribute_from_proc(proc, attribute): + return proc.info[attribute] + + def compare_lower(a, b): if a is None or b is None: # this could just be "return False" but would lead to surprising behavior if both a and b are None @@ -76,38 +173,36 @@ def compare_lower(a, b): return a.lower() == b.lower() -def get_pid(name): - pids = [] +class Pids(object): + def __init__(self, module): + if not HAS_PSUTIL: + module.fail_json(msg=missing_required_lib('psutil')) - try: - for proc in psutil.process_iter(attrs=['name', 'cmdline']): - if compare_lower(proc.info['name'], name) or \ - proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name): - pids.append(proc.pid) - except 
TypeError: # EL6, EL7: process_iter() takes no arguments (1 given) - for proc in psutil.process_iter(): - try: # EL7 - proc_name, proc_cmdline = proc.name(), proc.cmdline() - except TypeError: # EL6: 'str' object is not callable - proc_name, proc_cmdline = proc.name, proc.cmdline - if compare_lower(proc_name, name) or \ - proc_cmdline and compare_lower(proc_cmdline[0], name): - pids.append(proc.pid) - return pids + self._ps = PSAdapter.from_package(psutil) + self._module = module + self._name = module.params['name'] + self._pattern = module.params['pattern'] + self._ignore_case = module.params['ignore_case'] -def get_matching_command_pids(pattern, ignore_case): - flags = 0 - if ignore_case: - flags |= re.I + self._pids = [] - regex = re.compile(pattern, flags) - # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information - return [p.pid for p in psutil.process_iter(["name", "exe", "cmdline"]) - if regex.search(to_native(p.info["name"])) - or (p.info["exe"] and regex.search(basename(to_native(p.info["exe"])))) - or (p.info["cmdline"] and regex.search(to_native(' '.join(p.cmdline())))) - ] + def execute(self): + if self._name: + self._pids = self._ps.get_pids_by_name(self._name) + else: + try: + self._pids = self._ps.get_pids_by_pattern(self._pattern, self._ignore_case) + except PSAdapterError as e: + self._module.fail_json(msg=to_native(e)) + + return self._module.exit_json(**self.result) + + @property + def result(self): + return { + 'pids': self._pids, + } def main(): @@ -126,22 +221,7 @@ def main(): supports_check_mode=True, ) - if not HAS_PSUTIL: - module.fail_json(msg=missing_required_lib('psutil')) - - name = module.params["name"] - pattern = module.params["pattern"] - ignore_case = module.params["ignore_case"] - - if name: - response = dict(pids=get_pid(name)) - else: - try: - response = dict(pids=get_matching_command_pids(pattern, ignore_case)) - except re.error as e: - module.fail_json(msg="'%s' is not a valid regular 
expression: %s" % (pattern, to_native(e))) - - module.exit_json(**response) + Pids(module).execute() if __name__ == '__main__': diff --git a/tests/integration/targets/pids/tasks/main.yml b/tests/integration/targets/pids/tasks/main.yml index 823d588561..a43b923e25 100644 --- a/tests/integration/targets/pids/tasks/main.yml +++ b/tests/integration/targets/pids/tasks/main.yml @@ -6,12 +6,18 @@ # Test code for the pids module # Copyright: (c) 2019, Saranya Sridharan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -- name: "Installing the psutil module" +- name: Attempt installation of latest 'psutil' version + pip: + name: psutil + ignore_errors: true + register: psutil_latest_install + +- name: Install greatest 'psutil' version which will work with all pip versions pip: name: psutil < 5.7.0 - # Version 5.7.0 breaks on older pip versions. See https://github.com/ansible/ansible/pull/70667 + when: psutil_latest_install is failed -- name: "Checking the empty result" +- name: "Checking the empty result" pids: name: "blahblah" register: emptypids From 4e39a4b8251c6f3671a89e215bb25596f2e64da4 Mon Sep 17 00:00:00 2001 From: Ricky White Date: Mon, 13 Sep 2021 14:05:49 -0400 Subject: [PATCH 0562/3093] Added additional maintainer for the dsv and tss plugins (#3368) --- .github/BOTMETA.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index df2520e263..96c191db8b 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -175,7 +175,7 @@ files: $lookups/dnstxt.py: maintainers: jpmens $lookups/dsv.py: - maintainers: amigus + maintainers: amigus endlesstrax $lookups/etcd3.py: maintainers: eric-belhomme $lookups/etcd.py: @@ -211,7 +211,7 @@ files: maintainers: $team_ansible_core jpmens $lookups/shelvefile.py: {} $lookups/tss.py: - maintainers: amigus + maintainers: amigus endlesstrax $module_utils/: labels: module_utils $module_utils/gitlab.py: From 
bd63da680d2d12e6084cbff63ee007fc088c05a5 Mon Sep 17 00:00:00 2001 From: John Losito Date: Mon, 13 Sep 2021 15:52:30 -0400 Subject: [PATCH 0563/3093] Allow dependabot to check github actions (#1604) --- .github/dependabot.yml | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..1cd413055f --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,6 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + interval: + schedule: "weekly" From dc8d076a251e213687f9ff1ff3d3ecc7010103f2 Mon Sep 17 00:00:00 2001 From: Martin Vician Date: Tue, 14 Sep 2021 12:34:59 +0100 Subject: [PATCH 0564/3093] tss: add option for token authorization (#3327) * Added token parameter for AccessTokenAuthorizer Parameters username and password are not required anymore because of this. * Added changelog fragments * Apply suggestions from code review Co-authored-by: Ajpantuso * token authorizer is prioritized token authorizer is prioritized when token parameter is set * Apply suggestions from code review Co-authored-by: Felix Fontein * domain optional if token not provided * Updated examples - `base_url` is required everywhere - examples for user, name + domain authorization included - token authorization included * Update 3327-tss-token-authorization.yml * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Ajpantuso Co-authored-by: Felix Fontein --- .../3327-tss-token-authorization.yml | 4 ++ plugins/lookup/tss.py | 70 ++++++++++++++----- 2 files changed, 57 insertions(+), 17 deletions(-) create mode 100644 changelogs/fragments/3327-tss-token-authorization.yml diff --git a/changelogs/fragments/3327-tss-token-authorization.yml b/changelogs/fragments/3327-tss-token-authorization.yml new file mode 100644 index 0000000000..5d9f56cb72 --- /dev/null +++ b/changelogs/fragments/3327-tss-token-authorization.yml @@ -0,0 
+1,4 @@ +minor_changes: + - tss lookup plugin - added ``token`` parameter for token authorization; + ``username`` and ``password`` are optional when ``token`` is provided + (https://github.com/ansible-collections/community.general/pull/3327). diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index fe6042e130..3b561e94fc 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -36,19 +36,20 @@ options: ini: - section: tss_lookup key: username - required: true password: - description: The password associated with the supplied username. + description: + - The password associated with the supplied username. + - Required when I(token) is not provided. env: - name: TSS_PASSWORD ini: - section: tss_lookup key: password - required: true domain: default: "" description: - The domain with which to request the OAuth2 Access Grant. + - Optional when I(token) is not provided. - Requires C(python-tss-sdk) version 1.0.0 or greater. env: - name: TSS_DOMAIN @@ -57,6 +58,17 @@ options: key: domain required: false version_added: 3.6.0 + token: + description: + - Existing token for Thycotic authorizer. + - If provided, I(username) and I(password) are not needed. + - Requires C(python-tss-sdk) version 1.0.0 or greater. 
+ env: + - name: TSS_TOKEN + ini: + - section: tss_lookup + key: token + version_added: 3.7.0 api_path_uri: default: /api/v1 description: The path to append to the base URL to form a valid REST @@ -83,18 +95,6 @@ _list: """ EXAMPLES = r""" -- hosts: localhost - vars: - secret: "{{ lookup('community.general.tss', 1) }}" - tasks: - - ansible.builtin.debug: - msg: > - the password is {{ - (secret['items'] - | items2dict(key_name='slug', - value_name='itemValue'))['password'] - }} - - hosts: localhost vars: secret: >- @@ -116,10 +116,39 @@ EXAMPLES = r""" value_name='itemValue'))['password'] }} +- hosts: localhost + vars: + secret: >- + {{ + lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + username='user.name', + password='password', + domain='domain' + ) + }} + tasks: + - ansible.builtin.debug: + msg: > + the password is {{ + (secret['items'] + | items2dict(key_name='slug', + value_name='itemValue'))['password'] + }} + - hosts: localhost vars: secret_password: >- - {{ ((lookup('community.general.tss', 1) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] }}" + {{ + ((lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + token='thycotic_access_token', + ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] + }} tasks: - ansible.builtin.debug: msg: the password is {{ secret_password }} @@ -142,12 +171,13 @@ except ImportError: HAS_TSS_SDK = False try: - from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer + from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer HAS_TSS_AUTHORIZER = True except ImportError: PasswordGrantAuthorizer = None DomainPasswordGrantAuthorizer = None + AccessTokenAuthorizer = None HAS_TSS_AUTHORIZER = False @@ -209,6 +239,11 @@ class TSSClientV1(TSSClient): 
@staticmethod def _get_authorizer(**server_parameters): + if server_parameters.get("token"): + return AccessTokenAuthorizer( + server_parameters["token"], + ) + if server_parameters.get("domain"): return DomainPasswordGrantAuthorizer( server_parameters["base_url"], @@ -238,6 +273,7 @@ class LookupModule(LookupBase): username=self.get_option("username"), password=self.get_option("password"), domain=self.get_option("domain"), + token=self.get_option("token"), api_path_uri=self.get_option("api_path_uri"), token_path_uri=self.get_option("token_path_uri"), ) From 517570a64fcbbfdad5704f88c63eabb4cf74a84d Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Tue, 14 Sep 2021 20:05:02 +0100 Subject: [PATCH 0565/3093] Add opentelemetry callback plugin (#3091) * Add opentelemetry callback plugin * Apply suggestions from code review Co-authored-by: Felix Fontein * Formatting (text), booleans and renamed env variables * This should be done in a future release * Remove insecure in favour of the OTEL env variable. 
Add descriptions * Use OpenTelemetrySource * Move generate_distributed_traces * Move update_span_data and set_span_attribute * Move finish_task * Move start_task * Refactor to support UTs * Add first UT * Fix codestyle * opentelemetry callback entry in the botmeta * Fix linting * Fix signature * Mock methods * Use MagicMock * Mock the methods * UT for transform_to_boolean_or_default * Fix linting * Set test data * Mock _time_ns * Exclude tests for python <= 3.6 * Remove obsoleted setup task type configuration * Remove unused docs * Apply suggestions from code review Co-authored-by: Felix Fontein * Fix docs * unrequired logic that was originally took from https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/callback/junit.py\#L226 * Use raise_from for the required dependencies * Fix linting * Add requirements for the UTs * add missing dependency for the opentelemetry plugin in the UTs * Add ANSIBLE_ prefix for the ansible specific options * Add more context in the docs and remove duplicated docs * As suggested in the code review * Verify if the OTEL env variables for the endpoint were set * Fix docs typo * Fix linting * Revert "Fix linting" This reverts commit 3a54c827c5472553a6baf5598bc76a0f63f020c1. * Revert "Verify if the OTEL env variables for the endpoint were set" This reverts commit cab9d8648899c28c0345745690c4ec7a41f7e680. 
* Remove console_output as suggested * Apply suggestions from code review Co-authored-by: flowerysong * Delegate the definition of OTEL_EXPORTER_OTLP_INSECURE to the user * Move definitions above, close to the class that uses them Co-authored-by: Felix Fontein Co-authored-by: flowerysong --- .github/BOTMETA.yml | 3 + plugins/callback/opentelemetry.py | 401 ++++++++++++++++++ .../plugins/callback/test_opentelemetry.py | 93 ++++ tests/unit/requirements.txt | 5 + 4 files changed, 502 insertions(+) create mode 100644 plugins/callback/opentelemetry.py create mode 100644 tests/unit/plugins/callback/test_opentelemetry.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 96c191db8b..78cd46871f 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -62,6 +62,9 @@ files: $callbacks/nrdp.py: maintainers: rverchere $callbacks/null.py: {} + $callbacks/opentelemetry.py: + maintainers: v1v + keywords: opentelemetry observability $callbacks/say.py: notify: chris-short maintainers: $team_macos diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py new file mode 100644 index 0000000000..f256b7263d --- /dev/null +++ b/plugins/callback/opentelemetry.py @@ -0,0 +1,401 @@ +# (C) 2021, Victor Martinez +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = ''' + author: Victor Martinez (@v1v) + name: opentelemetry + type: notification + short_description: Create distributed traces with OpenTelemetry + version_added: 3.7.0 + description: + - This callback creates distributed traces for each Ansible task with OpenTelemetry. + - You can configure the OpenTelemetry exporter and SDK with environment variables. + - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html). 
+ - See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables). + options: + hide_task_arguments: + default: false + type: bool + description: + - Hide the arguments for a task. + env: + - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS + otel_service_name: + default: ansible + type: str + description: + - The service name resource attribute. + env: + - name: OTEL_SERVICE_NAME + requirements: + - opentelemetry-api (python lib) + - opentelemetry-exporter-otlp (python lib) + - opentelemetry-sdk (python lib) +''' + + +EXAMPLES = ''' +examples: | + Enable the plugin in ansible.cfg: + [defaults] + callbacks_enabled = community.general.opentelemetry + + Set the environment variable: + export OTEL_EXPORTER_OTLP_ENDPOINT= + export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token" + export OTEL_SERVICE_NAME=your_service_name +''' + +import getpass +import os +import socket +import sys +import time +import uuid + +from os.path import basename + +from ansible.errors import AnsibleError +from ansible.module_utils.six import raise_from +from ansible.plugins.callback import CallbackBase + +try: + from opentelemetry import trace + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter + from opentelemetry.sdk.resources import SERVICE_NAME, Resource + from opentelemetry.trace.status import Status, StatusCode + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.trace.export import ( + ConsoleSpanExporter, + SimpleSpanProcessor, + BatchSpanProcessor + ) + from opentelemetry.util._time import _time_ns +except ImportError as imp_exc: + OTEL_LIBRARY_IMPORT_ERROR = imp_exc +else: + OTEL_LIBRARY_IMPORT_ERROR = None + +try: + from collections import OrderedDict +except ImportError: + try: + from ordereddict import OrderedDict + except ImportError as imp_exc: + ORDER_LIBRARY_IMPORT_ERROR = imp_exc + else: + ORDER_LIBRARY_IMPORT_ERROR = None +else: + 
ORDER_LIBRARY_IMPORT_ERROR = None + + +class TaskData: + """ + Data about an individual task. + """ + + def __init__(self, uuid, name, path, play, action, args): + self.uuid = uuid + self.name = name + self.path = path + self.play = play + self.host_data = OrderedDict() + if sys.version_info >= (3, 7): + self.start = time.time_ns() + else: + self.start = _time_ns() + self.action = action + self.args = args + + def add_host(self, host): + if host.uuid in self.host_data: + if host.status == 'included': + # concatenate task include output from multiple items + host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result) + else: + return + + self.host_data[host.uuid] = host + + +class HostData: + """ + Data about an individual host. + """ + + def __init__(self, uuid, name, status, result): + self.uuid = uuid + self.name = name + self.status = status + self.result = result + if sys.version_info >= (3, 7): + self.finish = time.time_ns() + else: + self.finish = _time_ns() + + +class OpenTelemetrySource(object): + def __init__(self, display): + self.ansible_playbook = "" + self.ansible_version = None + self.session = str(uuid.uuid4()) + self.host = socket.gethostname() + try: + self.ip_address = socket.gethostbyname(socket.gethostname()) + except Exception as e: + self.ip_address = None + self.user = getpass.getuser() + + self._display = display + + def start_task(self, tasks_data, hide_task_arguments, play_name, task): + """ record the start of a task for one or more hosts """ + + uuid = task._uuid + + if uuid in tasks_data: + return + + name = task.get_name().strip() + path = task.get_path() + action = task.action + args = None + + if not task.no_log and not hide_task_arguments: + args = ', '.join(('%s=%s' % a for a in task.args.items())) + + tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args) + + def finish_task(self, tasks_data, status, result): + """ record the results of a task for a single host """ + + task_uuid = result._task._uuid + 
+ if hasattr(result, '_host') and result._host is not None: + host_uuid = result._host._uuid + host_name = result._host.name + else: + host_uuid = 'include' + host_name = 'include' + + task = tasks_data[task_uuid] + + if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'): + self.ansible_version = result._task_fields['args'].get('_ansible_version') + + task.add_host(HostData(host_uuid, host_name, status, result)) + + def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status): + """ generate distributed traces from the collected TaskData and HostData """ + + tasks = [] + parent_start_time = None + for task_uuid, task in tasks_data.items(): + if parent_start_time is None: + parent_start_time = task.start + tasks.append(task) + + trace.set_tracer_provider( + TracerProvider( + resource=Resource.create({SERVICE_NAME: otel_service_name}) + ) + ) + + processor = BatchSpanProcessor(OTLPSpanExporter()) + + trace.get_tracer_provider().add_span_processor(processor) + + tracer = trace.get_tracer(__name__) + + with tracer.start_as_current_span(ansible_playbook, start_time=parent_start_time) as parent: + parent.set_status(status) + # Populate trace metadata attributes + if self.ansible_version is not None: + parent.set_attribute("ansible.version", self.ansible_version) + parent.set_attribute("ansible.session", self.session) + parent.set_attribute("ansible.host.name", self.host) + if self.ip_address is not None: + parent.set_attribute("ansible.host.ip", self.ip_address) + parent.set_attribute("ansible.host.user", self.user) + for task in tasks: + for host_uuid, host_data in task.host_data.items(): + with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span: + self.update_span_data(task, host_data, span) + + def update_span_data(self, task_data, host_data, span): + """ update the span with the given TaskData and HostData """ + + name = '[%s] %s: %s' % (host_data.name, 
task_data.play, task_data.name) + + message = 'success' + status = Status(status_code=StatusCode.OK) + if host_data.status == 'included': + rc = 0 + else: + res = host_data.result._result + rc = res.get('rc', 0) + if host_data.status == 'failed': + if 'exception' in res: + message = res['exception'].strip().split('\n')[-1] + elif 'msg' in res: + message = res['msg'] + else: + message = 'failed' + status = Status(status_code=StatusCode.ERROR) + elif host_data.status == 'skipped': + if 'skip_reason' in res: + message = res['skip_reason'] + else: + message = 'skipped' + status = Status(status_code=StatusCode.UNSET) + + span.set_status(status) + self.set_span_attribute(span, "ansible.task.args", task_data.args) + self.set_span_attribute(span, "ansible.task.module", task_data.action) + self.set_span_attribute(span, "ansible.task.message", message) + self.set_span_attribute(span, "ansible.task.name", name) + self.set_span_attribute(span, "ansible.task.result", rc) + self.set_span_attribute(span, "ansible.task.host.name", host_data.name) + self.set_span_attribute(span, "ansible.task.host.status", host_data.status) + span.end(end_time=host_data.finish) + + def set_span_attribute(self, span, attributeName, attributeValue): + """ update the span attribute with the given attribute and value if not None """ + + if span is None and self._display is not None: + self._display.warning('span object is None. Please double check if that is expected.') + else: + if attributeValue is not None: + span.set_attribute(attributeName, attributeValue) + + +class CallbackModule(CallbackBase): + """ + This callback creates distributed traces. 
+ """ + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'community.general.opentelemetry' + CALLBACK_NEEDS_ENABLED = True + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display=display) + self.hide_task_arguments = None + self.otel_service_name = None + self.ansible_playbook = None + self.play_name = None + self.tasks_data = None + self.errors = 0 + self.disabled = False + + if OTEL_LIBRARY_IMPORT_ERROR: + raise_from( + AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'), + OTEL_LIBRARY_IMPORT_ERROR) + + if ORDER_LIBRARY_IMPORT_ERROR: + raise_from( + AnsibleError('The `ordereddict` must be installed to use this plugin'), + ORDER_LIBRARY_IMPORT_ERROR) + else: + self.tasks_data = OrderedDict() + + self.opentelemetry = OpenTelemetrySource(display=self._display) + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, + var_options=var_options, + direct=direct) + + self.hide_task_arguments = self.get_option('hide_task_arguments') + + self.otel_service_name = self.get_option('otel_service_name') + + if not self.otel_service_name: + self.otel_service_name = 'ansible' + + def v2_playbook_on_start(self, playbook): + self.ansible_playbook = basename(playbook._file_name) + + def v2_playbook_on_play_start(self, play): + self.play_name = play.get_name() + + def v2_runner_on_no_hosts(self, task): + self.opentelemetry.start_task( + self.tasks_data, + self.hide_task_arguments, + self.play_name, + task + ) + + def v2_playbook_on_task_start(self, task, is_conditional): + self.opentelemetry.start_task( + self.tasks_data, + self.hide_task_arguments, + self.play_name, + task + ) + + def v2_playbook_on_cleanup_task_start(self, task): + self.opentelemetry.start_task( + self.tasks_data, + self.hide_task_arguments, + self.play_name, + task + ) + + def 
v2_playbook_on_handler_task_start(self, task): + self.opentelemetry.start_task( + self.tasks_data, + self.hide_task_arguments, + self.play_name, + task + ) + + def v2_runner_on_failed(self, result, ignore_errors=False): + self.errors += 1 + self.opentelemetry.finish_task( + self.tasks_data, + 'failed', + result + ) + + def v2_runner_on_ok(self, result): + self.opentelemetry.finish_task( + self.tasks_data, + 'ok', + result + ) + + def v2_runner_on_skipped(self, result): + self.opentelemetry.finish_task( + self.tasks_data, + 'skipped', + result + ) + + def v2_playbook_on_include(self, included_file): + self.opentelemetry.finish_task( + self.tasks_data, + 'included', + included_file + ) + + def v2_playbook_on_stats(self, stats): + if self.errors == 0: + status = Status(status_code=StatusCode.OK) + else: + status = Status(status_code=StatusCode.ERROR) + self.opentelemetry.generate_distributed_traces( + self.otel_service_name, + self.ansible_playbook, + self.tasks_data, + status + ) + + def v2_runner_on_async_failed(self, result, **kwargs): + self.errors += 1 diff --git a/tests/unit/plugins/callback/test_opentelemetry.py b/tests/unit/plugins/callback/test_opentelemetry.py new file mode 100644 index 0000000000..7fcfc5cddb --- /dev/null +++ b/tests/unit/plugins/callback/test_opentelemetry.py @@ -0,0 +1,93 @@ +# (C) 2021, Victor Martinez +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.playbook.task import Task +from ansible.executor.task_result import TaskResult +from ansible_collections.community.general.tests.unit.compat import unittest +from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock +from ansible_collections.community.general.plugins.callback.opentelemetry import OpenTelemetrySource, TaskData, CallbackModule +from collections import OrderedDict +import sys + 
+OPENTELEMETRY_MINIMUM_PYTHON_VERSION = (3, 7) + + +class TestOpentelemetry(unittest.TestCase): + @patch('ansible_collections.community.general.plugins.callback.opentelemetry.socket') + def setUp(self, mock_socket): + # TODO: this python version validation won't be needed as long as the _time_ns call is mocked. + if sys.version_info < OPENTELEMETRY_MINIMUM_PYTHON_VERSION: + self.skipTest("Python %s+ is needed for OpenTelemetry" % + ",".join(map(str, OPENTELEMETRY_MINIMUM_PYTHON_VERSION))) + + mock_socket.gethostname.return_value = 'my-host' + mock_socket.gethostbyname.return_value = '1.2.3.4' + self.opentelemetry = OpenTelemetrySource(display=None) + self.task_fields = {'args': {}} + self.mock_host = Mock('MockHost') + self.mock_host.name = 'myhost' + self.mock_host._uuid = 'myhost_uuid' + self.mock_task = Task() + self.mock_task.action = 'myaction' + self.mock_task.no_log = False + self.mock_task._role = 'myrole' + self.mock_task._uuid = 'myuuid' + self.mock_task.args = {} + self.mock_task.get_name = MagicMock(return_value='mytask') + self.mock_task.get_path = MagicMock(return_value='/mypath') + self.my_task = TaskData('myuuid', 'mytask', '/mypath', 'myplay', 'myaction', '') + self.my_task_result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields) + + def test_start_task(self): + tasks_data = OrderedDict() + + self.opentelemetry.start_task( + tasks_data, + False, + 'myplay', + self.mock_task + ) + + task_data = tasks_data['myuuid'] + self.assertEqual(task_data.uuid, 'myuuid') + self.assertEqual(task_data.name, 'mytask') + self.assertEqual(task_data.path, '/mypath') + self.assertEqual(task_data.play, 'myplay') + self.assertEqual(task_data.action, 'myaction') + self.assertEqual(task_data.args, '') + + def test_finish_task_with_a_host_match(self): + tasks_data = OrderedDict() + tasks_data['myuuid'] = self.my_task + + self.opentelemetry.finish_task( + tasks_data, + 'ok', + self.my_task_result + ) + + task_data = 
tasks_data['myuuid'] + host_data = task_data.host_data['myhost_uuid'] + self.assertEqual(host_data.uuid, 'myhost_uuid') + self.assertEqual(host_data.name, 'myhost') + self.assertEqual(host_data.status, 'ok') + + def test_finish_task_without_a_host_match(self): + result = TaskResult(host=None, task=self.mock_task, return_data={}, task_fields=self.task_fields) + tasks_data = OrderedDict() + tasks_data['myuuid'] = self.my_task + + self.opentelemetry.finish_task( + tasks_data, + 'ok', + result + ) + + task_data = tasks_data['myuuid'] + host_data = task_data.host_data['include'] + self.assertEqual(host_data.uuid, 'include') + self.assertEqual(host_data.name, 'include') + self.assertEqual(host_data.status, 'ok') diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt index c8294bd71a..3cf288fef9 100644 --- a/tests/unit/requirements.txt +++ b/tests/unit/requirements.txt @@ -26,3 +26,8 @@ datadog-api-client >= 1.0.0b3 ; python_version >= '3.6' # requirement for dnsimple module dnsimple >= 2 ; python_version >= '3.6' dataclasses ; python_version == '3.6' + +# requirement for the opentelemetry callback plugin +opentelemetry-api ; python_version >= '3.6' +opentelemetry-exporter-otlp ; python_version >= '3.6' +opentelemetry-sdk ; python_version >= '3.6' From b20fc7a7c32d30a4a7f094ea8e037385ba1d389d Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Wed, 15 Sep 2021 07:21:15 +0200 Subject: [PATCH 0566/3093] Install nios test requirements. 
(#3375) --- tests/integration/targets/prepare_nios_tests/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/targets/prepare_nios_tests/tasks/main.yml b/tests/integration/targets/prepare_nios_tests/tasks/main.yml index e69de29bb2..f8f55f38af 100644 --- a/tests/integration/targets/prepare_nios_tests/tasks/main.yml +++ b/tests/integration/targets/prepare_nios_tests/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- name: Install + pip: + name: infoblox-client From 06345839c6e333f9021d255af5e2707ecbba2c12 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 16 Sep 2021 19:22:44 +0100 Subject: [PATCH 0567/3093] opentelemetry callback: context propagation and error exception (#3378) * opentelemetry callback: context propagation and error exception * Apply suggestions from code review Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- plugins/callback/opentelemetry.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index f256b7263d..b523603828 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -30,6 +30,13 @@ DOCUMENTATION = ''' - The service name resource attribute. env: - name: OTEL_SERVICE_NAME + traceparent: + default: None + type: str + description: + - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). 
+ env: + - name: TRACEPARENT requirements: - opentelemetry-api (python lib) - opentelemetry-exporter-otlp (python lib) @@ -64,9 +71,11 @@ from ansible.plugins.callback import CallbackBase try: from opentelemetry import trace + from opentelemetry.trace import SpanKind from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.trace.status import Status, StatusCode + from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( ConsoleSpanExporter, @@ -151,6 +160,11 @@ class OpenTelemetrySource(object): self._display = display + def traceparent_context(self, traceparent): + carrier = dict() + carrier['traceparent'] = traceparent + return TraceContextTextMapPropagator().extract(carrier=carrier) + def start_task(self, tasks_data, hide_task_arguments, play_name, task): """ record the start of a task for one or more hosts """ @@ -188,7 +202,7 @@ class OpenTelemetrySource(object): task.add_host(HostData(host_uuid, host_name, status, result)) - def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status): + def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent): """ generate distributed traces from the collected TaskData and HostData """ tasks = [] @@ -210,7 +224,8 @@ class OpenTelemetrySource(object): tracer = trace.get_tracer(__name__) - with tracer.start_as_current_span(ansible_playbook, start_time=parent_start_time) as parent: + with tracer.start_as_current_span(ansible_playbook, context=self.traceparent_context(traceparent), + start_time=parent_start_time, kind=SpanKind.SERVER) as parent: parent.set_status(status) # Populate trace metadata attributes if self.ansible_version is not None: @@ -244,7 +259,9 @@ class OpenTelemetrySource(object): message = 
res['msg'] else: message = 'failed' - status = Status(status_code=StatusCode.ERROR) + status = Status(status_code=StatusCode.ERROR, description=message) + # Record an exception with the task message + span.record_exception(BaseException(message)) elif host_data.status == 'skipped': if 'skip_reason' in res: message = res['skip_reason'] @@ -291,6 +308,7 @@ class CallbackModule(CallbackBase): self.tasks_data = None self.errors = 0 self.disabled = False + self.traceparent = False if OTEL_LIBRARY_IMPORT_ERROR: raise_from( @@ -318,6 +336,9 @@ class CallbackModule(CallbackBase): if not self.otel_service_name: self.otel_service_name = 'ansible' + # See https://github.com/open-telemetry/opentelemetry-specification/issues/740 + self.traceparent = self.get_option('traceparent') + def v2_playbook_on_start(self, playbook): self.ansible_playbook = basename(playbook._file_name) @@ -394,7 +415,8 @@ class CallbackModule(CallbackBase): self.otel_service_name, self.ansible_playbook, self.tasks_data, - status + status, + self.traceparent ) def v2_runner_on_async_failed(self, result, **kwargs): From 331f5bdf24bbd1d9e150c274843e83430167ac7f Mon Sep 17 00:00:00 2001 From: Patrick Pfurtscheller <57419021+PfurtschellerP@users.noreply.github.com> Date: Thu, 16 Sep 2021 22:20:49 +0200 Subject: [PATCH 0568/3093] redfish_utils: adding "Id" to the add user function (#3343) * Adding "Id" to the add user function Some implementations of Redfish (e.g. the one in Cisco's CIMC) seem to require the id of the new user for account creation. I'm not that firm with Python but lines 982 and 983 should fix it. 
* changed indention * created changelog fragment * Update changelogs/fragments/3343-redfish_utils-addUser-userId.yml Co-authored-by: Felix Fontein * Update change type * supplemented the description of the ID parameter * Update plugins/modules/remote_management/redfish/redfish_command.py Co-authored-by: Felix Fontein Co-authored-by: Felix Fontein --- changelogs/fragments/3343-redfish_utils-addUser-userId.yml | 2 ++ plugins/module_utils/redfish_utils.py | 2 ++ plugins/modules/remote_management/redfish/redfish_command.py | 3 ++- 3 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/3343-redfish_utils-addUser-userId.yml diff --git a/changelogs/fragments/3343-redfish_utils-addUser-userId.yml b/changelogs/fragments/3343-redfish_utils-addUser-userId.yml new file mode 100644 index 0000000000..7b8aa0b700 --- /dev/null +++ b/changelogs/fragments/3343-redfish_utils-addUser-userId.yml @@ -0,0 +1,2 @@ +bugfixes: + - redfish_utils module utils - if given, add account ID of user that should be created to HTTP request (https://github.com/ansible-collections/community.general/pull/3343/). 
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py index b4d0dba015..55686b2f50 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -979,6 +979,8 @@ class RedfishUtils(object): payload['Password'] = user.get('account_password') if user.get('account_roleid'): payload['RoleId'] = user.get('account_roleid') + if user.get('account_id'): + payload['Id'] = user.get('account_id') response = self.post_request(self.root_uri + self.accounts_uri, payload) if not response['ret']: diff --git a/plugins/modules/remote_management/redfish/redfish_command.py b/plugins/modules/remote_management/redfish/redfish_command.py index e79308f2d7..8702e468ca 100644 --- a/plugins/modules/remote_management/redfish/redfish_command.py +++ b/plugins/modules/remote_management/redfish/redfish_command.py @@ -56,7 +56,8 @@ options: required: false aliases: [ account_id ] description: - - ID of account to delete/modify + - ID of account to delete/modify. + - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the POST request. 
type: str new_username: required: false From 7a2efb4775af4296bf4dfe640cbc0ae52c87d5dd Mon Sep 17 00:00:00 2001 From: Max Bidlingmaier Date: Thu, 16 Sep 2021 22:26:31 +0200 Subject: [PATCH 0569/3093] Get behavior of gitlab_project_members to the one of gitlab_group_members (#3319) * Initial change to get behaviour of gitlab_project_members like the new gitlab_group_members * added changelog * linter: removed trainling whitespaces * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * Update plugins/modules/source_control/gitlab/gitlab_project_members.py Co-authored-by: Felix Fontein * requested changes * linter fixes * undoing formatting changes to existing code Co-authored-by: Max-Florian Bidlingmaier Co-authored-by: Felix Fontein Co-authored-by: Max Bidlingmaier --- ...319-gitlab_project_members_enhancement.yml | 3 + .../gitlab/gitlab_project_members.py | 332 ++++++++++++++---- .../gitlab_project_members/defaults/main.yml | 8 + .../gitlab_project_members/tasks/main.yml | 45 ++- 4 files changed, 312 insertions(+), 76 deletions(-) create mode 100644 changelogs/fragments/3319-gitlab_project_members_enhancement.yml diff --git a/changelogs/fragments/3319-gitlab_project_members_enhancement.yml b/changelogs/fragments/3319-gitlab_project_members_enhancement.yml new file mode 100644 index 0000000000..7795cd1f02 --- /dev/null +++ b/changelogs/fragments/3319-gitlab_project_members_enhancement.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_project_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3319). 
+ - gitlab_project_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3319). diff --git a/plugins/modules/source_control/gitlab/gitlab_project_members.py b/plugins/modules/source_control/gitlab/gitlab_project_members.py index 0ae8f4b25c..51f60d459f 100644 --- a/plugins/modules/source_control/gitlab/gitlab_project_members.py +++ b/plugins/modules/source_control/gitlab/gitlab_project_members.py @@ -53,15 +53,37 @@ options: type: str gitlab_user: description: - - The username of the member to add to/remove from the GitLab project. - required: true - type: str + - A username or a list of usernames to add to/remove from the GitLab project. + - Mutually exclusive with I(gitlab_users_access). + type: list + elements: str access_level: description: - The access level for the user. - Required if I(state=present), user state is set to present. type: str choices: ['guest', 'reporter', 'developer', 'maintainer'] + gitlab_users_access: + description: + - Provide a list of user to access level mappings. + - Every dictionary in this list specifies a user (by username) and the access level the user should have. + - Mutually exclusive with I(gitlab_user) and I(access_level). + - Use together with I(purge_users) to remove all users not specified here from the project. + type: list + elements: dict + suboptions: + name: + description: A username or a list of usernames to add to/remove from the GitLab project. + type: str + required: true + access_level: + description: + - The access level for the user. + - Required if I(state=present), user state is set to present. + type: str + choices: ['guest', 'reporter', 'developer', 'maintainer'] + required: true + version_added: 3.7.0 state: description: - State of the member in the project. 
@@ -70,6 +92,15 @@ options: choices: ['present', 'absent'] default: 'present' type: str + purge_users: + description: + - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. + If omitted do not purge orphaned members. + - Is only used when I(state=present). + type: list + elements: str + choices: ['guest', 'reporter', 'developer', 'maintainer'] + version_added: 3.7.0 notes: - Supports C(check_mode). ''' @@ -93,6 +124,51 @@ EXAMPLES = r''' project: projectname gitlab_user: username state: absent + +- name: Add a list of Users to A GitLab project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + gitlab_project: projectname + gitlab_user: + - user1 + - user2 + access_level: developer + state: present + +- name: Add a list of Users with Dedicated Access Levels to A GitLab project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + project: projectname + gitlab_users_access: + - name: user1 + access_level: developer + - name: user2 + access_level: maintainer + state: present + +- name: Add a user, remove all others which might be on this access level + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + project: projectname + gitlab_user: username + access_level: developer + pruge_users: developer + state: present + +- name: Remove a list of Users with Dedicated Access Levels to A GitLab project + community.general.gitlab_project_members: + api_url: 'https://gitlab.example.com' + api_token: 'Your-Private-Token' + project: projectname + gitlab_users_access: + - name: user1 + access_level: developer + - name: user2 + access_level: maintainer + state: absent ''' RETURN = r''' # ''' @@ -132,6 +208,17 @@ class GitLabProjectMembers(object): project = self._gitlab.projects.get(gitlab_project_id) return 
project.members.list(all=True) + # get single member in a project by user name + def get_member_in_a_project(self, gitlab_project_id, gitlab_user_id): + member = None + project = self._gitlab.projects.get(gitlab_project_id) + try: + member = project.members.get(gitlab_user_id) + if member: + return member + except gitlab.exceptions.GitlabGetError as e: + return None + # check if the user is a member of the project def is_user_a_member(self, members, gitlab_user_id): for member in members: @@ -141,27 +228,14 @@ class GitLabProjectMembers(object): # add user to a project def add_member_to_project(self, gitlab_user_id, gitlab_project_id, access_level): - try: - project = self._gitlab.projects.get(gitlab_project_id) - add_member = project.members.create( - {'user_id': gitlab_user_id, 'access_level': access_level}) - - if add_member: - return add_member.username - - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json( - msg="Failed to add member to the project, project ID %s: %s" % (gitlab_project_id, e)) + project = self._gitlab.projects.get(gitlab_project_id) + add_member = project.members.create( + {'user_id': gitlab_user_id, 'access_level': access_level}) # remove user from a project def remove_user_from_project(self, gitlab_user_id, gitlab_project_id): - try: - project = self._gitlab.projects.get(gitlab_project_id) - project.members.delete(gitlab_user_id) - - except (gitlab.exceptions.GitlabDeleteError) as e: - self._module.fail_json( - msg="Failed to remove member from GitLab project, ID %s: %s" % (gitlab_project_id, e)) + project = self._gitlab.projects.get(gitlab_project_id) + project.members.delete(gitlab_user_id) # get user's access level def get_user_access_level(self, members, gitlab_user_id): @@ -173,12 +247,8 @@ class GitLabProjectMembers(object): def update_user_access_level(self, members, gitlab_user_id, access_level): for member in members: if member.id == gitlab_user_id: - try: - member.access_level = access_level - 
member.save() - except (gitlab.exceptions.GitlabCreateError) as e: - self._module.fail_json( - msg="Failed to update the access level for the member, %s: %s" % (gitlab_user_id, e)) + member.access_level = access_level + member.save() def main(): @@ -186,9 +256,20 @@ def main(): argument_spec.update(dict( api_token=dict(type='str', required=True, no_log=True), project=dict(type='str', required=True), - gitlab_user=dict(type='str', required=True), + gitlab_user=dict(type='list', elements='str'), state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', required=False, choices=['guest', 'reporter', 'developer', 'maintainer']) + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']), + purge_users=dict(type='list', elements='str', choices=[ + 'guest', 'reporter', 'developer', 'maintainer']), + gitlab_users_access=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str', required=True), + access_level=dict(type='str', choices=[ + 'guest', 'reporter', 'developer', 'maintainer'], required=True), + ) + ), )) module = AnsibleModule( @@ -196,15 +277,19 @@ def main(): mutually_exclusive=[ ['api_username', 'api_token'], ['api_password', 'api_token'], + ['gitlab_user', 'gitlab_users_access'], + ['access_level', 'gitlab_users_access'], ], required_together=[ ['api_username', 'api_password'], + ['gitlab_user', 'access_level'], ], required_one_of=[ ['api_username', 'api_token'], + ['gitlab_user', 'gitlab_users_access'], ], required_if=[ - ['state', 'present', ['access_level']], + ['state', 'present', ['access_level', 'gitlab_users_access'], True], ], supports_check_mode=True, ) @@ -212,71 +297,168 @@ def main(): if not HAS_PY_GITLAB: module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR) + access_level_int = { + 'guest': gitlab.GUEST_ACCESS, + 'reporter': gitlab.REPORTER_ACCESS, + 'developer': 
gitlab.DEVELOPER_ACCESS, + 'maintainer': gitlab.MAINTAINER_ACCESS, + } + gitlab_project = module.params['project'] - gitlab_user = module.params['gitlab_user'] state = module.params['state'] access_level = module.params['access_level'] + purge_users = module.params['purge_users'] - # convert access level string input to int - if access_level: - access_level_int = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS - } - - access_level = access_level_int[access_level] + if purge_users: + purge_users = [access_level_int[level] for level in purge_users] # connect to gitlab server gl = gitlabAuthentication(module) project = GitLabProjectMembers(module, gl) - gitlab_user_id = project.get_user_id(gitlab_user) gitlab_project_id = project.get_project(gitlab_project) # project doesn't exist if not gitlab_project_id: module.fail_json(msg="project '%s' not found." % gitlab_project) - # user doesn't exist - if not gitlab_user_id: - if state == 'absent': - module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the project" % gitlab_user) - else: - module.fail_json(msg="user '%s' not found." 
% gitlab_user) + members = [] + if module.params['gitlab_user'] is not None: + gitlab_users_access = [] + gitlab_users = module.params['gitlab_user'] + for gl_user in gitlab_users: + gitlab_users_access.append( + {'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None}) + elif module.params['gitlab_users_access'] is not None: + gitlab_users_access = module.params['gitlab_users_access'] + for user_level in gitlab_users_access: + user_level['access_level'] = access_level_int[user_level['access_level']] - members = project.get_members_in_a_project(gitlab_project_id) - is_user_a_member = project.is_user_a_member(members, gitlab_user_id) - - # check if the user is a member in the project - if not is_user_a_member: - if state == 'present': - # add user to the project - if not module.check_mode: - project.add_member_to_project(gitlab_user_id, gitlab_project_id, access_level) - module.exit_json(changed=True, result="Successfully added user '%s' to the project." % gitlab_user) - # state as absent - else: - module.exit_json(changed=False, result="User, '%s', is not a member in the project. No change to report" % gitlab_user) - # in case that a user is a member + if len(gitlab_users_access) == 1 and not purge_users: + # only single user given + members = [project.get_member_in_a_project( + gitlab_project_id, project.get_user_id(gitlab_users_access[0]['name']))] + if members[0] is None: + members = [] + elif len(gitlab_users_access) > 1 or purge_users: + # list of users given + members = project.get_members_in_a_project(gitlab_project_id) else: - if state == 'present': - # compare the access level - user_access_level = project.get_user_access_level(members, gitlab_user_id) - if user_access_level == access_level: - module.exit_json(changed=False, result="User, '%s', is already a member in the project. 
No change to report" % gitlab_user) + module.exit_json(changed='OK', result="Nothing to do, please give at least one user or set purge_users true.", + result_data=[]) + + changed = False + error = False + changed_users = [] + changed_data = [] + + for gitlab_user in gitlab_users_access: + gitlab_user_id = project.get_user_id(gitlab_user['name']) + + # user doesn't exist + if not gitlab_user_id: + if state == 'absent': + changed_users.append("user '%s' not found, and thus also not part of the project" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "user '%s' not found, and thus also not part of the project" % gitlab_user['name']}) else: - # update the access level for the user - if not module.check_mode: - project.update_user_access_level(members, gitlab_user_id, access_level) - module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user) + error = True + changed_users.append("user '%s' not found." % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "user '%s' not found." 
% gitlab_user['name']}) + continue + + is_user_a_member = project.is_user_a_member(members, gitlab_user_id) + + # check if the user is a member in the project + if not is_user_a_member: + if state == 'present': + # add user to the project + try: + if not module.check_mode: + project.add_member_to_project(gitlab_user_id, gitlab_project_id, gitlab_user['access_level']) + changed = True + changed_users.append("Successfully added user '%s' to project" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully added user '%s' to project" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabCreateError) as e: + error = True + changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)}) + # state as absent + else: + changed_users.append("User, '%s', is not a member in the project. No change to report" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "User, '%s', is not a member in the project. No change to report" % gitlab_user['name']}) + # in case that a user is a member else: - # remove the user from the project - if not module.check_mode: - project.remove_user_from_project(gitlab_user_id, gitlab_project_id) - module.exit_json(changed=True, result="Successfully removed user, '%s', from the project" % gitlab_user) + if state == 'present': + # compare the access level + user_access_level = project.get_user_access_level(members, gitlab_user_id) + if user_access_level == gitlab_user['access_level']: + changed_users.append("User, '%s', is already a member in the project. 
No change to report" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK', + 'msg': "User, '%s', is already a member in the project. No change to report" % gitlab_user['name']}) + else: + # update the access level for the user + try: + if not module.check_mode: + project.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level']) + changed = True + changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabUpdateError) as e: + error = True + changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)}) + else: + # remove the user from the project + try: + if not module.check_mode: + project.remove_user_from_project(gitlab_user_id, gitlab_project_id) + changed = True + changed_users.append("Successfully removed user, '%s', from the project" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED', + 'msg': "Successfully removed user, '%s', from the project" % gitlab_user['name']}) + except (gitlab.exceptions.GitlabDeleteError) as e: + error = True + changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)}) + + # if state = present and purge_users set delete users which are in members having give access level but not in gitlab_users + if state == 'present' and purge_users: + 
uppercase_names_in_gitlab_users_access = [] + for name in gitlab_users_access: + uppercase_names_in_gitlab_users_access.append(name['name'].upper()) + + for member in members: + if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access: + try: + if not module.check_mode: + project.remove_user_from_project(member.id, gitlab_project_id) + changed = True + changed_users.append("Successfully removed user '%s', from project. Was not in given list" % member.username) + changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED', + 'msg': "Successfully removed user '%s', from project. Was not in given list" % member.username}) + except (gitlab.exceptions.GitlabDeleteError) as e: + error = True + changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name']) + changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED', + 'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)}) + + if len(gitlab_users_access) == 1 and error: + # if single user given and an error occurred return error for list errors will be per user + module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data) + elif error: + module.fail_json( + msg='FAILED: At least one given user/permission could not be set', result_data=changed_data) + + module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data) if __name__ == '__main__': diff --git a/tests/integration/targets/gitlab_project_members/defaults/main.yml b/tests/integration/targets/gitlab_project_members/defaults/main.yml index a31fc0f2d6..1b3ac19a47 100644 --- a/tests/integration/targets/gitlab_project_members/defaults/main.yml +++ b/tests/integration/targets/gitlab_project_members/defaults/main.yml @@ -3,3 +3,11 @@ gitlab_api_access_token: "token" gitlab_project: some_project username: some_user gitlab_access_level: 
developer +userlist: + - username1 + - username2 +dedicated_access_users: + - name: username1 + access_level: "developer" + - name: username2 + access_level: "maintainer" diff --git a/tests/integration/targets/gitlab_project_members/tasks/main.yml b/tests/integration/targets/gitlab_project_members/tasks/main.yml index c3330bae41..ade06d7ca2 100644 --- a/tests/integration/targets/gitlab_project_members/tasks/main.yml +++ b/tests/integration/targets/gitlab_project_members/tasks/main.yml @@ -19,7 +19,7 @@ api_token: "{{ gitlab_api_access_token }}" project: "{{ gitlab_project }}" gitlab_user: "{{ username }}" - state: absent + state: absent - name: Add a User to A GitLab Project community.general.gitlab_project_members: @@ -78,3 +78,46 @@ assert: that: - remove_gitlab_project_members_state_again is not changed + +- name: Add a list of Users to A GitLab Project + community.general.gitlab_project_members: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_user: "{{ userlist }}" + access_level: "{{ gitlab_access_level }}" + state: present + +- name: Remove a list of Users to A GitLab Project + community.general.gitlab_project_members:: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_user: "{{ userlist }}" + state: absent + +- name: Add a list of Users with Dedicated Access Levels to A GitLab Project + community.general.gitlab_project_members:: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_users_access: "{{ dedicated_access_users }}" + state: present + +- name: Remove a list of Users with Dedicated Access Levels to A GitLab Project + community.general.gitlab_project_members:: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_users_access: "{{ dedicated_access_users }}" + 
state: absent + +- name: Add a user, remove all others which might be on this access level + community.general.gitlab_project_members:: + api_url: "{{ gitlab_server_url }}" + api_token: "{{ gitlab_api_access_token }}" + project: "{{ gitlab_project }}" + gitlab_user: "{{ username }}" + access_level: "{{ gitlab_access_level }}" + pruge_users: "{{ gitlab_access_level }}" + state: present From 8ab96d95332d6e961a7e927538c6bf84baf31c58 Mon Sep 17 00:00:00 2001 From: Cliff Hults Date: Sat, 18 Sep 2021 09:19:41 -0400 Subject: [PATCH 0570/3093] Icinga2 inventory plugin (#3202) * Added Icinga2 inventory plugin * Added Icinga2 inventory plugin * Linting * Added tests * Linting * Linting * Added tests * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Added tests for icinga2 inventory * Resolved reviews and added host filters * Linting * Fixing yaml for example and updating tests * Updating test data * Fixing pep8 indentations * Missed copywriting * Missed copywriting * Updated documentation grammar * Removing Cacheable class and cleanup * Update plugins/inventory/icinga2.py * Update plugins/inventory/icinga2.py * Bump version number Co-authored-by: Felix Fontein * Update plugins/inventory/icinga2.py Co-authored-by: Alexei Znamensky 
<103110+russoz@users.noreply.github.com> Co-authored-by: Felix Fontein --- .github/BOTMETA.yml | 2 + plugins/inventory/icinga2.py | 222 +++++++++++++++++++ tests/unit/plugins/inventory/test_icinga2.py | 97 ++++++++ 3 files changed, 321 insertions(+) create mode 100644 plugins/inventory/icinga2.py create mode 100644 tests/unit/plugins/inventory/test_icinga2.py diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 78cd46871f..09cd8b8f3c 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -155,6 +155,8 @@ files: maintainers: sieben $inventories/proxmox.py: maintainers: $team_virt ilijamt + $inventories/icinga2.py: + maintainers: bongoeadgc6 $inventories/scaleway.py: maintainers: $team_scaleway labels: cloud scaleway diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py new file mode 100644 index 0000000000..8a50ecd178 --- /dev/null +++ b/plugins/inventory/icinga2.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Cliff Hults +# Copyright (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = ''' + name: icinga2 + short_description: Icinga2 inventory source + version_added: 3.7.0 + author: + - Cliff Hults (@BongoEADGC6) + description: + - Get inventory hosts from the Icinga2 API. + - "Uses a configuration file as an inventory source, it must end in + C(.icinga2.yml) or C(.icinga2.yaml)." + options: + plugin: + description: Name of the plugin. + required: true + type: string + choices: ['community.general.icinga2'] + url: + description: Root URL of Icinga2 API. + type: string + required: true + user: + description: Username to query the API. + type: string + required: true + password: + description: Password to query the API. + type: string + required: true + host_filter: + description: An Icinga2 API valid host filter. 
+ type: string + required: false + validate_certs: + description: Enables or disables SSL certificate verification. + type: boolean + default: true +''' + +EXAMPLES = r''' +# my.icinga2.yml +plugin: community.general.icinga2 +url: http://localhost:5665 +user: ansible +password: secure +host_filter: \"linux-servers\" in host.groups +validate_certs: false +''' + +import json + +from ansible.errors import AnsibleParserError +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible.module_utils.urls import open_url + + +class InventoryModule(BaseInventoryPlugin, Constructable): + ''' Host inventory parser for ansible using Icinga2 as source. ''' + + NAME = 'community.general.icinga2' + + def __init__(self): + + super(InventoryModule, self).__init__() + + # from config + self.icinga2_url = None + self.icinga2_user = None + self.icinga2_password = None + self.ssl_verify = None + self.host_filter = None + + self.cache_key = None + self.use_cache = None + + def verify_file(self, path): + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith(('icinga2.yaml', 'icinga2.yml')): + valid = True + else: + self.display.vvv('Skipping due to inventory source not ending in "icinga2.yaml" nor "icinga2.yml"') + return valid + + def _api_connect(self): + self.headers = { + 'User-Agent': "ansible-icinga2-inv", + 'Accept': "application/json", + } + api_status_url = self.icinga2_url + "/status" + request_args = { + 'headers': self.headers, + 'url_username': self.icinga2_user, + 'url_password': self.icinga2_password, + 'validate_certs': self.ssl_verify + } + open_url(api_status_url, **request_args) + + def _post_request(self, request_url, data=None): + self.display.vvv("Requested URL: %s" % request_url) + request_args = { + 'headers': self.headers, + 'url_username': self.icinga2_user, + 'url_password': self.icinga2_password, + 'validate_certs': self.ssl_verify + } + if data is not None: + request_args['data'] = json.dumps(data) + 
self.display.vvv("Request Args: %s" % request_args) + response = open_url(request_url, **request_args) + response_body = response.read() + json_data = json.loads(response_body.decode('utf-8')) + if 200 <= response.status <= 299: + return json_data + if response.status == 404 and json_data['status'] == "No objects found.": + raise AnsibleParserError( + "API returned no data -- Response: %s - %s" + % (response.status, json_data['status'])) + if response.status == 401: + raise AnsibleParserError( + "API was unable to complete query -- Response: %s - %s" + % (response.status, json_data['status'])) + if response.status == 500: + raise AnsibleParserError( + "API Response - %s - %s" + % (json_data['status'], json_data['errors'])) + raise AnsibleParserError( + "Unexpected data returned - %s - %s" + % (json_data['status'], json_data['errors'])) + + def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None): + query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url) + self.headers['X-HTTP-Method-Override'] = 'GET' + data_dict = dict() + if hosts: + data_dict['hosts'] = hosts + if attrs is not None: + data_dict['attrs'] = attrs + if joins is not None: + data_dict['joins'] = joins + if host_filter is not None: + data_dict['filter'] = host_filter.replace("\\\"", "\"") + self.display.vvv(host_filter) + host_dict = self._post_request(query_hosts_url, data_dict) + return host_dict['results'] + + def get_inventory_from_icinga(self): + """Query for all hosts """ + self.display.vvv("Querying Icinga2 for inventory") + query_args = { + "attrs": ["address", "state_type", "state", "groups"], + } + if self.host_filter is not None: + query_args['host_filter'] = self.host_filter + # Icinga2 API Call + results_json = self._query_hosts(**query_args) + # Manipulate returned API data to Ansible inventory spec + ansible_inv = self._convert_inv(results_json) + return ansible_inv + + def _populate(self): + groups = self._to_json(self.get_inventory_from_icinga()) + return 
groups + + def _to_json(self, in_dict): + """Convert dictionary to JSON""" + return json.dumps(in_dict, sort_keys=True, indent=2) + + def _convert_inv(self, json_data): + """Convert Icinga2 API data to JSON format for Ansible""" + groups_dict = {"_meta": {"hostvars": {}}} + for entry in json_data: + host_name = entry['name'] + host_attrs = entry['attrs'] + if host_attrs['state'] == 0: + host_attrs['state'] = 'on' + else: + host_attrs['state'] = 'off' + host_groups = host_attrs['groups'] + host_addr = host_attrs['address'] + self.inventory.add_host(host_addr) + for group in host_groups: + if group not in self.inventory.groups.keys(): + self.inventory.add_group(group) + self.inventory.add_child(group, host_addr) + self.inventory.set_variable(host_addr, 'address', host_addr) + self.inventory.set_variable(host_addr, 'hostname', host_name) + self.inventory.set_variable(host_addr, 'state', + host_attrs['state']) + self.inventory.set_variable(host_addr, 'state_type', + host_attrs['state_type']) + return groups_dict + + def parse(self, inventory, loader, path, cache=True): + + super(InventoryModule, self).parse(inventory, loader, path) + + # read config from file, this sets 'options' + self._read_config_data(path) + + # Store the options from the YAML file + self.icinga2_url = self.get_option('url').rstrip('/') + '/v1' + self.icinga2_user = self.get_option('user') + self.icinga2_password = self.get_option('password') + self.ssl_verify = self.get_option('validate_certs') + self.host_filter = self.get_option('host_filter') + # Not currently enabled + # self.cache_key = self.get_cache_key(path) + # self.use_cache = cache and self.get_option('cache') + + # Test connection to API + self._api_connect() + + # Call our internal helper to populate the dynamic inventory + self._populate() diff --git a/tests/unit/plugins/inventory/test_icinga2.py b/tests/unit/plugins/inventory/test_icinga2.py new file mode 100644 index 0000000000..266045f203 --- /dev/null +++ 
b/tests/unit/plugins/inventory/test_icinga2.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, Cliff Hults +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# +# The API responses used in these tests were recorded from PVE version 6.2. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import pytest + +from ansible.inventory.data import InventoryData +from ansible_collections.community.general.plugins.inventory.icinga2 import InventoryModule + + +@pytest.fixture(scope="module") +def inventory(): + r = InventoryModule() + r.inventory = InventoryData() + return r + + +def test_verify_file_bad_config(inventory): + assert inventory.verify_file('foobar.icinga2.yml') is False + + +def check_api(): + return True + + +# NOTE: when updating/adding replies to this function, +# be sure to only add only the _contents_ of the 'data' dict in the API reply +def query_hosts(hosts=None, attrs=None, joins=None, host_filter=None): + # _get_hosts - list of dicts + json_host_data = [ + { + 'attrs': { + 'address': 'test-host1.home.local', + 'groups': ['home_servers', 'servers_dell'], + 'state': 0.0, + 'state_type': 1.0 + }, + 'joins': {}, + 'meta': {}, + 'name': 'test-host1', + 'type': 'Host' + }, + { + 'attrs': { + 'address': 'test-host2.home.local', + 'groups': ['home_servers', 'servers_hp'], + 'state': 1.0, + 'state_type': 1.0 + }, + 'joins': {}, + 'meta': {}, + 'name': 'test-host2', + 'type': 'Host' + } + ] + return json_host_data + + +def test_populate(inventory, mocker): + # module settings + inventory.icinga2_user = 'ansible' + inventory.icinga2_password = 'password' + inventory.icinga2_url = 'https://localhost:5665' + '/v1' + + # bypass authentication and API fetch calls + inventory._check_api = mocker.MagicMock(side_effect=check_api) + inventory._query_hosts = mocker.MagicMock(side_effect=query_hosts) + inventory._populate() + + # get different hosts + host1_info = 
inventory.inventory.get_host('test-host1.home.local') + print(host1_info) + host2_info = inventory.inventory.get_host('test-host2.home.local') + print(host2_info) + + # check if host in the home_servers group + assert 'home_servers' in inventory.inventory.groups + group1_data = inventory.inventory.groups['home_servers'] + group1_test_data = [host1_info, host2_info] + print(group1_data.hosts) + print(group1_test_data) + assert group1_data.hosts == group1_test_data + # Test servers_hp group + group2_data = inventory.inventory.groups['servers_hp'] + group2_test_data = [host2_info] + print(group2_data.hosts) + print(group2_test_data) + assert group2_data.hosts == group2_test_data + + # check if host state rules apply properyl + assert host1_info.get_vars()['state'] == 'on' + assert host2_info.get_vars()['state'] == 'off' From 7aae8d5386e0ddb944f171b1847e3e16e981d635 Mon Sep 17 00:00:00 2001 From: Alexei Znamensky <103110+russoz@users.noreply.github.com> Date: Sun, 19 Sep 2021 23:44:37 +1200 Subject: [PATCH 0571/3093] Interfaces_file - improvements (#3328) * pythonific!! 
no camel cases, bitte * simplified iface attributes parsing * some improvements, passing tests * simplified set_interface_option() * further simplifications * remove unreachable stmt * pythonified a file open * added changelog fragment * adjustment per PR * PR: fixed the auto- case * PR: added testcase and chglog frag for the misleading change report * extra line removed * integration is not destructive --- .../3328-interfaces_file-improvements.yaml | 4 + plugins/modules/system/interfaces_file.py | 161 ++++++++---------- .../targets/interfaces_file/aliases | 1 + .../interfaces_file/files/interfaces_ff | 7 + .../targets/interfaces_file/tasks/main.yml | 33 ++++ .../interfaces_file/test_interfaces_file.py | 22 +-- 6 files changed, 123 insertions(+), 105 deletions(-) create mode 100644 changelogs/fragments/3328-interfaces_file-improvements.yaml create mode 100644 tests/integration/targets/interfaces_file/aliases create mode 100644 tests/integration/targets/interfaces_file/files/interfaces_ff create mode 100644 tests/integration/targets/interfaces_file/tasks/main.yml diff --git a/changelogs/fragments/3328-interfaces_file-improvements.yaml b/changelogs/fragments/3328-interfaces_file-improvements.yaml new file mode 100644 index 0000000000..10734af603 --- /dev/null +++ b/changelogs/fragments/3328-interfaces_file-improvements.yaml @@ -0,0 +1,4 @@ +bugfixes: + - interfaces_file - no longer reporting change when none happened (https://github.com/ansible-collections/community.general/pull/3328). +minor_changes: + - interfaces_file - minor refactor (https://github.com/ansible-collections/community.general/pull/3328). 
diff --git a/plugins/modules/system/interfaces_file.py b/plugins/modules/system/interfaces_file.py index c22c0ce29e..7666ba1cbc 100644 --- a/plugins/modules/system/interfaces_file.py +++ b/plugins/modules/system/interfaces_file.py @@ -148,57 +148,48 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_bytes -def lineDict(line): +def line_dict(line): return {'line': line, 'line_type': 'unknown'} -def optionDict(line, iface, option, value, address_family): +def make_option_dict(line, iface, option, value, address_family): return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family} -def getValueFromLine(s): - spaceRe = re.compile(r'\s+') - for m in spaceRe.finditer(s): - pass - valueEnd = m.start() - option = s.split()[0] - optionStart = s.find(option) - optionLen = len(option) - valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart - return s[valueStart:valueEnd] +def get_option_value(line): + patt = re.compile(r'^\s+(?P