E501 fixes (#22879)

Matt Martz 2017-03-22 20:50:28 -05:00 committed by GitHub
commit 3164e8b561
215 changed files with 1328 additions and 761 deletions
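
Every hunk below serves pycodestyle's E501 check ("line too long"): over-long lines are wrapped with no behavioral change, mostly via implicit concatenation of adjacent string literals and implicit line continuation inside parentheses. A minimal sketch of those two idioms in isolation (the names here are illustrative, not from the commit):

    # Idiom 1: adjacent string literals are concatenated at compile time,
    # so one long message can span several source lines.
    message = ('Specifying --tags multiple times on the command line '
               'currently uses the last specified value.')

    # Idiom 2: an expression wrapped in parentheses continues across lines
    # without backslashes, so long conditions can be stacked vertically.
    def too_long(line, limit=160):
        return (line is not None and
                len(line) > limit and
                not line.lstrip().startswith('#'))

    assert 'command line currently' in message
    assert too_long('x' * 200)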


@@ -284,7 +284,8 @@ class CLI(with_metaclass(ABCMeta, object)):
     @staticmethod
     def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
-                    async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False, runas_prompt_opts=False):
+                    async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False,
+                    runas_prompt_opts=False):
         ''' create an options parser for most ansible scripts '''

         # TODO: implement epilog parsing
@@ -448,7 +449,9 @@ class CLI(with_metaclass(ABCMeta, object)):
         if hasattr(self.options, 'tags') and self.options.tags:
             if not C.MERGE_MULTIPLE_CLI_TAGS:
                 if len(self.options.tags) > 1:
-                    display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.', version=2.5, removed=False)
+                    display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. '
+                                       'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
+                                       version=2.5, removed=False)
                     self.options.tags = [self.options.tags[-1]]

             tags = set()
@@ -460,7 +463,9 @@ class CLI(with_metaclass(ABCMeta, object)):
         if hasattr(self.options, 'skip_tags') and self.options.skip_tags:
             if not C.MERGE_MULTIPLE_CLI_TAGS:
                 if len(self.options.skip_tags) > 1:
-                    display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.', version=2.5, removed=False)
+                    display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. '
+                                       'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
+                                       version=2.5, removed=False)
                     self.options.skip_tags = [self.options.skip_tags[-1]]

             skip_tags = set()
@@ -634,7 +639,8 @@ class CLI(with_metaclass(ABCMeta, object)):
                 # STDERR not captured to make it easier for users to prompt for input in their scripts
                 p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
             except OSError as e:
-                raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e))
+                raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, "
+                                   "remove the executable bit from the file." % (' '.join(this_path), e))
             stdout, stderr = p.communicate()
             if p.returncode != 0:
                 raise AnsibleError("Vault password script %s returned non-zero (%s): %s" % (this_path, p.returncode, p.stderr))


@@ -71,7 +71,8 @@ class GalaxyCLI(CLI):
         # common
         self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
-        self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
+        self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
+                               help='Ignore SSL certificate validation errors.')

         # specific to actions
         if self.action == "delete":
@@ -79,20 +80,25 @@ class GalaxyCLI(CLI):
         elif self.action == "import":
             self.parser.set_usage("usage: %prog import [options] github_user github_repo")
             self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
-            self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
+            self.parser.add_option('--branch', dest='reference',
+                                   help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
             self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
-            self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.')
+            self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
+                                   help='Check the status of the most recent import request for given github_user/github_repo.')
         elif self.action == "info":
             self.parser.set_usage("usage: %prog info [options] role_name[,version]")
         elif self.action == "init":
             self.parser.set_usage("usage: %prog init [options] role_name")
-            self.parser.add_option('-p', '--init-path', dest='init_path', default="./", help='The path in which the skeleton role will be created. The default is the current working directory.')
+            self.parser.add_option('-p', '--init-path', dest='init_path', default="./",
+                                   help='The path in which the skeleton role will be created. The default is the current working directory.')
             self.parser.add_option('--container-enabled', dest='container_enabled', action='store_true', default=False,
                                    help='Initialize the skeleton role with default contents for a Container Enabled role.')
-            self.parser.add_option('--role-skeleton', dest='role_skeleton', default=None, help='The path to a role skeleton that the new role should be based upon.')
+            self.parser.add_option('--role-skeleton', dest='role_skeleton', default=None,
+                                   help='The path to a role skeleton that the new role should be based upon.')
         elif self.action == "install":
             self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
-            self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help='Ignore errors and continue with the next specified role.')
+            self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
+                                   help='Ignore errors and continue with the next specified role.')
             self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
             self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
         elif self.action == "remove":
@@ -103,13 +109,15 @@ class GalaxyCLI(CLI):
             self.parser.set_usage("usage: %prog login [options]")
             self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
         elif self.action == "search":
-            self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]")
+            self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
+                                  "[--author username]")
             self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
             self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
             self.parser.add_option('--author', dest='author', help='GitHub username')
         elif self.action == "setup":
             self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
-            self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.')
+            self.parser.add_option('--remove', dest='remove_id', default=None,
+                                   help='Remove the integration matching the provided ID value. Use --list to see ID values.')
             self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')

         # options that apply to more than one action
@@ -119,8 +127,10 @@ class GalaxyCLI(CLI):
         if self.action not in ("delete","import","init","login","setup"):
             # NOTE: while the option type=str, the default is a list, and the
             # callback will set the value to a list.
-            self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.expand_paths, type=str, default=C.DEFAULT_ROLES_PATH,
-                                   help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured)')
+            self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.expand_paths, type=str,
+                                   default=C.DEFAULT_ROLES_PATH,
+                                   help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg '
+                                        'file (/etc/ansible/roles if not configured)')

         if self.action in ("init","install"):
             self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')


@@ -149,7 +149,8 @@ class PlaybookCLI(CLI):
             self._flush_cache(inventory, variable_manager)

         # create the playbook executor, which manages running the plays via a task queue manager
-        pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options, passwords=passwords)
+        pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options,
+                                passwords=passwords)

         results = pbex.run()


@@ -196,7 +196,8 @@ MERGE_MULTIPLE_CLI_TAGS = get_config(p, DEFAULTS, 'merge_multiple_cli_tags', 'AN
 DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, value_type='boolean')
 DEFAULT_VERBOSITY = get_config(p, DEFAULTS, 'verbosity', 'ANSIBLE_VERBOSITY', 0, value_type='integer')
 DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', '/etc/ansible/hosts', value_type='path')
-DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', value_type='pathlist', expand_relative_paths=True)
+DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', value_type='pathlist',
+                                expand_relative_paths=True)
 DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '~/.ansible/tmp')
 DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '~/.ansible/tmp', value_type='tmppath')
 DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
@@ -230,7 +231,8 @@ DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GA
 DEFAULT_GATHER_TIMEOUT = get_config(p, DEFAULTS, 'gather_timeout', 'ANSIBLE_GATHER_TIMEOUT', 10, value_type='integer')
 DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', value_type='path')
 DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, value_type='boolean')
-DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], value_type='list')
+DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE',
+                                      ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], value_type='list')
 DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, value_type='integer')
 DEFAULT_INTERNAL_POLL_INTERVAL = get_config(p, DEFAULTS, 'internal_poll_interval', None, 0.001, value_type='float')
 ERROR_ON_MISSING_HANDLER = get_config(p, DEFAULTS, 'error_on_missing_handler', 'ANSIBLE_ERROR_ON_MISSING_HANDLER', True, value_type='boolean')
@@ -264,11 +266,28 @@ DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_
 DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, value_type='boolean')

 # Become
-BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'doas': 'Permission denied', 'dzdo': '', 'ksu': 'Password incorrect'} #FIXME: deal with i18n
-BECOME_MISSING_STRINGS = {'sudo': 'sorry, a password is required to run sudo', 'su': '', 'pbrun': '', 'pfexec': '', 'doas': 'Authorization required', 'dzdo': '', 'ksu': 'No password given'} #FIXME: deal with i18n
+BECOME_ERROR_STRINGS = {
+    'sudo': 'Sorry, try again.',
+    'su': 'Authentication failure',
+    'pbrun': '',
+    'pfexec': '',
+    'doas': 'Permission denied',
+    'dzdo': '',
+    'ksu': 'Password incorrect'
+}  # FIXME: deal with i18n
+BECOME_MISSING_STRINGS = {
+    'sudo': 'sorry, a password is required to run sudo',
+    'su': '',
+    'pbrun': '',
+    'pfexec': '',
+    'doas': 'Authorization required',
+    'dzdo': '',
+    'ksu': 'No password given'
+}  # FIXME: deal with i18n
 BECOME_METHODS = ['sudo','su','pbrun','pfexec','doas','dzdo','ksu','runas']
 BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, value_type='boolean')
-DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
+DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD',
+                                   'sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo').lower()
 DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, value_type='boolean')
 DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
 DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
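
The two dictionaries above show the commit's third wrapping pattern: reflowing a literal one element per line. That is a layout-only change, as this small sketch (with shortened, illustrative values) verifies:

    # Reflowing a dict literal one key per line changes only the source
    # layout; the resulting object compares equal to the one-line form.
    flat = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure'}
    wrapped = {
        'sudo': 'Sorry, try again.',
        'su': 'Authentication failure',
    }
    assert flat == wrapped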
@@ -283,23 +302,35 @@ DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pa
 # the module takes both, bad things could happen.
 # In the future we should probably generalize this even further
 # (mapping of param: squash field)
-DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper", value_type='list')
+DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS',
+                                    "apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper", value_type='list')

 # paths
-DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', value_type='pathlist')
-DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', value_type='pathlist')
-DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', value_type='pathlist')
-DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', value_type='pathlist')
-DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', value_type='pathlist')
+DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS',
+                                        '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', value_type='pathlist')
+DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS',
+                                       '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', value_type='pathlist')
+DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS',
+                                          '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', value_type='pathlist')
+DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS',
+                                            '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', value_type='pathlist')
+DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS',
+                                        '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', value_type='pathlist')
 DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, value_type='pathlist')
 DEFAULT_MODULE_UTILS_PATH = get_config(p, DEFAULTS, 'module_utils', 'ANSIBLE_MODULE_UTILS', None, value_type='pathlist')
-DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS', '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', value_type='pathlist')
-DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', value_type='pathlist')
-DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', value_type='pathlist')
-DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', value_type='pathlist')
-DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS', '~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', value_type='pathlist')
-NETWORK_GROUP_MODULES = get_config(p, DEFAULTS, 'network_group_modules','NETWORK_GROUP_MODULES', ['eos', 'nxos', 'ios', 'iosxr', 'junos', 'vyos'], value_type='list')
+DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS',
+                                           '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', value_type='pathlist')
+DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS',
+                                      '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', value_type='pathlist')
+DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS',
+                                        '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', value_type='pathlist')
+DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS',
+                                      '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', value_type='pathlist')
+DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS',
+                                          '~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', value_type='pathlist')
+NETWORK_GROUP_MODULES = get_config(p, DEFAULTS, 'network_group_modules','NETWORK_GROUP_MODULES', ['eos', 'nxos', 'ios', 'iosxr', 'junos', 'vyos'],
+                                   value_type='list')

 DEFAULT_STRATEGY = get_config(p, DEFAULTS, 'strategy', 'ANSIBLE_STRATEGY', 'linear')
 DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
@@ -367,9 +398,11 @@ GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBL
 # this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
 GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', value_type='list')
 GALAXY_ROLE_SKELETON = get_config(p, 'galaxy', 'role_skeleton', 'ANSIBLE_GALAXY_ROLE_SKELETON', None, value_type='path')
-GALAXY_ROLE_SKELETON_IGNORE = get_config(p, 'galaxy', 'role_skeleton_ignore', 'ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE', ['^.git$', '^.*/.git_keep$'], value_type='list')
+GALAXY_ROLE_SKELETON_IGNORE = get_config(p, 'galaxy', 'role_skeleton_ignore', 'ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE', ['^.git$', '^.*/.git_keep$'],
+                                         value_type='list')

-STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], value_type='list' )
+STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS',
+                                 ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], value_type='list' )

 # colors
 COLOR_HIGHLIGHT = get_config(p, 'colors', 'highlight', 'ANSIBLE_COLOR_HIGHLIGHT', 'white')


@@ -123,7 +123,11 @@ class AnsibleError(Exception):
             elif ":{{" in stripped_line and "}}" in stripped_line:
                 error_message += YAML_COMMON_DICT_ERROR
             # check for common unquoted colon mistakes
-            elif len(target_line) and len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1:
+            elif (len(target_line) and
+                    len(target_line) > 1 and
+                    len(target_line) > col_number and
+                    target_line[col_number] == ":" and
+                    target_line.count(':') > 1):
                 error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
             # otherwise, check for some common quoting mistakes
             else:
@@ -138,7 +142,11 @@ class AnsibleError(Exception):
                     elif middle.startswith('"') and not middle.endswith('"'):
                         match = True

-                    if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and target_line.count("'") > 2 or target_line.count('"') > 2:
+                    if (len(middle) > 0 and
+                            middle[0] in [ '"', "'" ] and
+                            middle[-1] in [ '"', "'" ] and
+                            target_line.count("'") > 2 or
+                            target_line.count('"') > 2):
                         unbalanced = True

                     if match:


@@ -80,20 +80,21 @@ class HostState:
                 ret.append(states[i])
             return "|".join(ret)

-        return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
-            self.cur_block,
-            self.cur_regular_task,
-            self.cur_rescue_task,
-            self.cur_always_task,
-            _run_state_to_string(self.run_state),
-            _failed_state_to_string(self.fail_state),
-            self.pending_setup,
-            self.tasks_child_state,
-            self.rescue_child_state,
-            self.always_child_state,
-            self.did_rescue,
-            self.did_start_at_task,
-        )
+        return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), "
+                "rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
+                    self.cur_block,
+                    self.cur_regular_task,
+                    self.cur_rescue_task,
+                    self.cur_always_task,
+                    _run_state_to_string(self.run_state),
+                    _failed_state_to_string(self.fail_state),
+                    self.pending_setup,
+                    self.tasks_child_state,
+                    self.rescue_child_state,
+                    self.always_child_state,
+                    self.did_rescue,
+                    self.did_start_at_task,
+                ))

     def __eq__(self, other):
         if not isinstance(other, HostState):


@@ -196,10 +196,12 @@ class TaskExecutor:
             if self._task.loop in self._shared_loader_obj.lookup_loader:
                 if self._task.loop == 'first_found':
                     # first_found loops are special. If the item is undefined then we want to fall through to the next value rather than failing.
-                    loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=False, convert_bare=False)
+                    loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=False,
+                                                             convert_bare=False)
                     loop_terms = [t for t in loop_terms if not templar._contains_vars(t)]
                 else:
-                    loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=False)
+                    loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True,
+                                                             convert_bare=False)

                 # get lookup
                 mylookup = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar)
@@ -468,7 +470,9 @@ class TaskExecutor:
             self._task.args = variable_params

         # get the connection and the handler for this execution
-        if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
+        if (not self._connection or
+                not getattr(self._connection, 'connected', False) or
+                self._play_context.remote_addr != self._connection._play_context.remote_addr):
             self._connection = self._get_connection(variables=variables, templar=templar)
             hostvars = variables.get('hostvars', None)
             if hostvars:
@@ -666,11 +670,14 @@ class TaskExecutor:
                 # have issues which result in a half-written/unparseable result
                 # file on disk, which manifests to the user as a timeout happening
                 # before it's time to timeout.
-                if int(async_result.get('finished', 0)) == 1 or ('failed' in async_result and async_result.get('_ansible_parsed', False)) or 'skipped' in async_result:
+                if (int(async_result.get('finished', 0)) == 1 or
+                        ('failed' in async_result and async_result.get('_ansible_parsed', False)) or
+                        'skipped' in async_result):
                     break
             except Exception as e:
                 # Connections can raise exceptions during polling (eg, network bounce, reboot); these should be non-fatal.
-                # On an exception, call the connection's reset method if it has one (eg, drop/recreate WinRM connection; some reused connections are in a broken state)
+                # On an exception, call the connection's reset method if it has one
+                # (eg, drop/recreate WinRM connection; some reused connections are in a broken state)
                 display.vvvv("Exception during async poll, retrying... (%s)" % to_text(e))
                 display.debug("Async poll exception was:\n%s" % to_text(traceback.format_exc()))
                 try:


@@ -247,7 +247,9 @@ class GalaxyRole(object):
                     self.version = 'master'
             elif self.version != 'master':
                 if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]:
-                    raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions))
+                    raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
+                                                                                                                                     self.name,
+                                                                                                                                     role_versions))

             tmp_file = self.fetch(role_data)
@@ -306,7 +308,8 @@ class GalaxyRole(object):
                 else:
                     # using --force, remove the old path
                     if not self.remove():
-                        raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really want to put the role here." % self.path)
+                        raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really "
+                                           "want to put the role here." % self.path)
             else:
                 os.makedirs(self.path)


@@ -62,7 +62,8 @@ def get_file_parser(hostsfile, groups, loader):
             myerr.append('Attempted to execute "%s" as inventory script: %s' % (hostsfile, to_native(e)))
     elif shebang_present:
-        myerr.append("The inventory file \'%s\' looks like it should be an executable inventory script, but is not marked executable. Perhaps you want to correct this with `chmod +x %s`?" % (hostsfile, hostsfile))
+        myerr.append("The inventory file \'%s\' looks like it should be an executable inventory script, but is not marked executable. "
+                     "Perhaps you want to correct this with `chmod +x %s`?" % (hostsfile, hostsfile))

     # YAML/JSON
     if not processed and not shebang_present and os.path.splitext(hostsfile)[-1] in C.YAML_FILENAME_EXTENSIONS:


@@ -63,7 +63,8 @@ class InventoryScript:
         try:
             self.data = to_text(stdout, errors="strict")
         except Exception as e:
-            raise AnsibleError("inventory data from {0} contained characters that cannot be interpreted as UTF-8: {1}".format(to_native(self.filename), to_native(e)))
+            raise AnsibleError("inventory data from {0} contained characters that cannot be interpreted as UTF-8: {1}".format(to_native(self.filename),
+                                                                                                                              to_native(e)))

         # see comment about _meta below
         self.host_vars_from_top = None
@@ -82,7 +83,8 @@ class InventoryScript:
         if not isinstance(self.raw, Mapping):
             sys.stderr.write(to_native(err) + "\n")
-            raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(to_native(self.filename)))
+            raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted "
+                               "as a json dict".format(to_native(self.filename)))

         group = None
         for (group_name, data) in self.raw.items():


@@ -620,7 +620,8 @@ def _load_params():
     except KeyError:
         # This helper does not have access to fail_json so we have to print
        # json output on our own.
-        print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}')
+        print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
+              '"failed": true}')
         sys.exit(1)

 def env_fallback(*args, **kwargs):
@@ -691,7 +692,9 @@ class AnsibleModule(object):
         self._deprecations = []
         self.aliases = {}
-        self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity', '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility', '_ansible_socket']
+        self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity',
+                              '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility',
+                              '_ansible_socket']

         if add_file_common_args:
             for k, v in FILE_COMMON_ARGUMENTS.items():
@@ -2147,7 +2150,8 @@ class AnsibleModule(object):
                 # would end in something like:
                 #     file = _os.path.join(dir, pre + name + suf)
                 # TypeError: can't concat bytes to str
-                self.fail_json(msg='Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. Please use Python2.x or Python3.5 or greater.', exception=traceback.format_exc())
+                self.fail_json(msg='Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. '
+                                   'Please use Python2.x or Python3.5 or greater.', exception=traceback.format_exc())

             b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
@@ -2232,7 +2236,8 @@ class AnsibleModule(object):
         return data

-    def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
+    def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
+                    use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
         '''
         Execute a command, returns rc, stdout, and stderr.


@@ -95,7 +95,8 @@ def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None
     try:
         return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
     except ValueError:
-        module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call')
+        module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type '
+                             'parameter in the boto3_conn function call')

 def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
     profile = params.pop('profile_name', None)
@@ -257,7 +258,8 @@ def connect_to_aws(aws_module, region, **params):
     conn = aws_module.connect_to_region(region, **params)
     if not conn:
         if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
-            raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__))
+            raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
+                                  "boto or extend with endpoints_path" % (region, aws_module.__name__))
         else:
             raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
     if params.get('profile_name'):


@@ -677,8 +677,8 @@ class Distribution(object):
                   SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
                   OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
                   XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
-                  SLED = 'Suse', openSUSE = 'Suse', openSUSE_Tumbleweed = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', SUSE_LINUX = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
-                  Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux', SMGL = 'SMGL',
+                  SLED = 'Suse', openSUSE = 'Suse', openSUSE_Tumbleweed = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', SUSE_LINUX = 'Suse', Gentoo = 'Gentoo',
+                  Funtoo = 'Gentoo', Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux', SMGL = 'SMGL',
                   Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
                   SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
                   FreeBSD = 'FreeBSD', HPUX = 'HP-UX', openSUSE_Leap = 'Suse', Neon = 'Debian'
@@ -1658,7 +1658,15 @@ class SunOSHardware(Hardware):
         for line in fstab.splitlines():
             fields = line.split('\t')
             size_total, size_available = self._get_mount_size_facts(fields[1])
-            self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'time': fields[4], 'size_total': size_total, 'size_available': size_available})
+            self.facts['mounts'].append({
+                'mount': fields[1],
+                'device': fields[0],
+                'fstype' : fields[2],
+                'options': fields[3],
+                'time': fields[4],
+                'size_total': size_total,
+                'size_available': size_available
+            })

     def get_dmi_facts(self):
         uname_path = self.module.get_bin_path("prtdiag")
@@ -1785,7 +1793,14 @@ class OpenBSDHardware(Hardware):
             if fields[1] == 'none' or fields[3] == 'xx':
                 continue
             size_total, size_available = self._get_mount_size_facts(fields[1])
-            self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
+            self.facts['mounts'].append({
+                'mount': fields[1],
+                'device': fields[0],
+                'fstype' : fields[2],
+                'options': fields[3],
+                'size_total': size_total,
+                'size_available': size_available
+            })

     def get_memory_facts(self):
@@ -1926,7 +1941,14 @@ class FreeBSDHardware(Hardware):
                 continue
             fields = re.sub(r'\s+',' ',line).split()
             size_total, size_available = self._get_mount_size_facts(fields[1])
-            self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
+            self.facts['mounts'].append({
+                'mount': fields[1],
+                'device': fields[0],
+                'fstype': fields[2],
+                'options': fields[3],
+                'size_total': size_total,
+                'size_available': size_available
+            })

     def get_device_facts(self):
         sysdir = '/dev'
@@ -2057,7 +2079,14 @@ class NetBSDHardware(Hardware):
                 continue
             fields = re.sub(r'\s+',' ',line).split()
             size_total, size_available = self._get_mount_size_facts(fields[1])
-            self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
+            self.facts['mounts'].append({
+                'mount': fields[1],
+                'device': fields[0],
+                'fstype' : fields[2],
+                'options': fields[3],
+                'size_total': size_total,
+                'size_available': size_available
+            })

     def get_dmi_facts(self):
         # We don't use dmidecode(1) here because:
@@ -2316,7 +2345,8 @@ class HPUX(Hardware):
                 #For systems where memory details aren't sent to syslog or the log has rotated, use parsed
                 #adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
                 if os.access("/dev/kmem", os.R_OK):
-                    rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
+                    rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'",
+                                                           use_unsafe_shell=True)
                     if not err:
                         data = out
                         self.facts['memtotal_mb'] = int(data) / 256


@@ -35,7 +35,8 @@ try:
 except ImportError:
     mysqldb_found = False

-def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None, connect_timeout=30):
+def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None,
+                  connect_timeout=30):
     config = {}

     if ssl_ca is not None or ssl_key is not None or ssl_cert is not None:

@@ -71,7 +71,9 @@ options:
   resource_tags:
     description:
      - 'A dictionary array of resource tags of the form C({ tag1: value1, tag2: value2 }).
-       - Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.'
+       - Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore,
+         if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7,
+         specifying a resource tag was optional.'
     required: true
     version_added: "1.6"
   internet_gateway:
@@ -82,7 +84,15 @@ options:
     choices: [ "yes", "no" ]
   route_tables:
     description:
-      - 'A dictionary array of route tables to add of the form: C({ subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }). Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids, interface-ids, and vpc-peering-connection-ids in addition igw. resource_tags is optional and uses dictionary form: C({ "Name": "public", ... }). This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
+      - >
+        A dictionary array of route tables to add of the form:
+        C({ subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }). Where the subnets list is
+        those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword
+        for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids,
+        interface-ids, and vpc-peering-connection-ids in addition igw. resource_tags is optional and uses dictionary form: C({ "Name": "public", ... }).
+        This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated
+        subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes
+        will be modified.
     required: false
     default: null
   wait:
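
The documentation hunks here use a different device: YAML's folded block scalar (written "- >"), which lets a long description wrap across source lines yet parse back into a single string. A quick sketch of the folding behavior, assuming PyYAML is available:

    import yaml

    # A folded scalar ('>') joins its wrapped lines with spaces when parsed,
    # so the wrapped description is still one logical string.
    doc = yaml.safe_load('''
    description:
      - >
        A dictionary array of route tables to add. Where the subnets list is
        those subnets the route table should be associated with.
    ''')
    assert doc['description'][0].startswith('A dictionary array')
    assert '\n' not in doc['description'][0].strip()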


@@ -283,7 +283,8 @@ def main():
             if not g in statement_label:
                 module.fail_json(msg='{} is an unknown grant type.'.format(g))

-        ret = do_grant(kms, module.params['key_arn'], module.params['role_arn'], module.params['grant_types'], mode=mode, dry_run=module.check_mode, clean_invalid_entries=module.params['clean_invalid_entries'])
+        ret = do_grant(kms, module.params['key_arn'], module.params['role_arn'], module.params['grant_types'], mode=mode, dry_run=module.check_mode,
+                       clean_invalid_entries=module.params['clean_invalid_entries'])
         result.update(ret)

     except Exception as err:

@@ -33,7 +33,8 @@ short_description: Create or delete an AWS CloudFormation stack
 description:
   - Launches or updates an AWS CloudFormation stack and waits for it complete.
 notes:
-  - As of version 2.3, migrated to boto3 to enable new features. To match existing behavior, YAML parsing is done in the module, not given to AWS as YAML. This will change (in fact, it may change before 2.3 is out).
+  - As of version 2.3, migrated to boto3 to enable new features. To match existing behavior, YAML parsing is done in the module, not given to AWS as YAML.
+    This will change (in fact, it may change before 2.3 is out).
 version_added: "1.1"
 options:
   stack_name:
@@ -59,8 +60,10 @@ options:
   template:
     description:
       - The local path of the cloudformation template.
-      - This must be the full path to the file, relative to the working directory. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json".
-      - If 'state' is 'present' and the stack does not exist yet, either 'template' or 'template_url' must be specified (but not both). If 'state' is present, the stack does exist, and neither 'template' nor 'template_url' are specified, the previous template will be reused.
+      - This must be the full path to the file, relative to the working directory. If using roles this may look
+        like "roles/cloudformation/files/cloudformation-example.json".
+      - If 'state' is 'present' and the stack does not exist yet, either 'template' or 'template_url' must be specified (but not both). If 'state' is
+        present, the stack does exist, and neither 'template' nor 'template_url' are specified, the previous template will be reused.
     required: false
     default: null
   notification_arns:
@@ -71,7 +74,8 @@ options:
     version_added: "2.0"
   stack_policy:
     description:
-      - the path of the cloudformation stack policy. A policy cannot be removed once placed, but it can be modified. (for instance, [allow all updates](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
+      - the path of the cloudformation stack policy. A policy cannot be removed once placed, but it can be modified.
+        (for instance, [allow all updates](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
     required: false
     default: null
     version_added: "1.9"
@@ -83,20 +87,24 @@ options:
     version_added: "1.4"
   template_url:
     description:
-      - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region as the stack.
-      - If 'state' is 'present' and the stack does not exist yet, either 'template' or 'template_url' must be specified (but not both). If 'state' is present, the stack does exist, and neither 'template' nor 'template_url' are specified, the previous template will be reused.
+      - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region
+        as the stack.
+      - If 'state' is 'present' and the stack does not exist yet, either 'template' or 'template_url' must be specified (but not both). If 'state' is
+        present, the stack does exist, and neither 'template' nor 'template_url' are specified, the previous template will be reused.
     required: false
     version_added: "2.0"
   template_format:
     description:
-      - (deprecated) For local templates, allows specification of json or yaml format. Templates are now passed raw to CloudFormation regardless of format. This parameter is ignored since Ansible 2.3.
+      - (deprecated) For local templates, allows specification of json or yaml format. Templates are now passed raw to CloudFormation regardless of format.
+        This parameter is ignored since Ansible 2.3.
     default: json
     choices: [ json, yaml ]
     required: false
     version_added: "2.0"
   role_arn:
     description:
-      - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
+      - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
+        docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
     required: false
     default: null
     version_added: "2.3"
@@ -212,7 +220,7 @@ stack_outputs:
   description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
   returned: always
   sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
-'''
+''' # NOQA
import json import json
import time import time
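Where a line cannot be wrapped without corrupting data, as with the stack_outputs sample just above, the commit instead tags the docstring's closing quotes with # NOQA. As far as I can tell, pep8/pycodestyle attributes a multiline string to the physical line on which the token ends, so a single marker there exempts every line of the block from E501; a sketch of the pattern:

RETURN_SAMPLE = '''
sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
'''  # NOQA
# The marker rides on the closing quotes; the long sample line inside the
# string is then skipped by the pep8 length check.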

View file

@@ -112,11 +112,13 @@ stack_description:
returned: always returned: always
type: dict type: dict
stack_outputs: stack_outputs:
description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each output 'OutputValue' parameter description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each
output 'OutputValue' parameter
returned: always returned: always
type: dict type: dict
stack_parameters: stack_parameters:
description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of each parameter 'ParameterValue' parameter description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of
each parameter 'ParameterValue' parameter
returned: always returned: always
type: dict type: dict
stack_events: stack_events:
@@ -136,7 +138,8 @@ stack_resource_list:
returned: only if all_facts or stack_resources is true returned: only if all_facts or stack_resources is true
type: list of resources type: list of resources
stack_resources: stack_resources:
description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each resource 'PhysicalResourceId' parameter description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each
resource 'PhysicalResourceId' parameter
returned: only if all_facts or stack_resources is true returned: only if all_facts or stack_resources is true
type: dict type: dict
''' '''

View file

@@ -44,7 +44,8 @@ options:
s3_bucket_prefix: s3_bucket_prefix:
description: description:
- bucket to place CloudTrail in. - bucket to place CloudTrail in.
- this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html) - this bucket should exist and have the proper policy.
See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
- required when state=enabled. - required when state=enabled.
required: false required: false
s3_key_prefix: s3_key_prefix:
@@ -215,12 +216,14 @@ def main():
results['view'].get('S3KeyPrefix', '') != s3_key_prefix or \ results['view'].get('S3KeyPrefix', '') != s3_key_prefix or \
results['view']['IncludeGlobalServiceEvents'] != include_global_events: results['view']['IncludeGlobalServiceEvents'] != include_global_events:
if not module.check_mode: if not module.check_mode:
results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events) results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix,
include_global_service_events=include_global_events)
results['changed'] = True results['changed'] = True
else: else:
if not module.check_mode: if not module.check_mode:
# doesn't exist. create it. # doesn't exist. create it.
results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events) results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix,
include_global_service_events=include_global_events)
results['changed'] = True results['changed'] = True
# given cloudtrail should exist now. Enable the logging. # given cloudtrail should exist now. Enable the logging.

View file

@@ -117,7 +117,7 @@ targets:
returned: success returned: success
type: list type: list
sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]" sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
''' ''' # NOQA
class CloudWatchEventRule(object): class CloudWatchEventRule(object):

View file

@@ -332,7 +332,11 @@ def get_changed_global_indexes(table, global_indexes):
removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info) removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info)
added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info) added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info)
# todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed # todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed
# index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes and (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or index.throughput['write'] != str(table_index_objects[name].throughput['write']))) # for name, index in set_index_objects.items():
# if (name not in added_indexes and
# (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or
# index.throughput['write'] != str(table_index_objects[name].throughput['write']))):
# index_throughput_changes[name] = index.throughput
# todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed # todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed
index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes) index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes)
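The rewritten comment above unrolls an over-long dict comprehension into an explicit loop; the two forms compute the same dict. A toy demonstration with a stand-in Index class (hypothetical, mimicking only the throughput attribute of boto's GSI objects):

class Index(object):
    # minimal stand-in for a boto global secondary index object
    def __init__(self, read, write):
        self.throughput = {'read': read, 'write': write}

set_index_objects = {'idx-a': Index('5', '5'), 'idx-b': Index('20', '5')}
table_index_objects = {'idx-a': Index(5, 5), 'idx-b': Index(10, 5)}
added_indexes = {}

index_throughput_changes = {}
for name, index in set_index_objects.items():
    if (name not in added_indexes and
            (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or
             index.throughput['write'] != str(table_index_objects[name].throughput['write']))):
        index_throughput_changes[name] = index.throughput

# Only idx-b changed its read throughput, so only it is picked up.
assert index_throughput_changes == {'idx-b': {'read': '20', 'write': '5'}}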

View file

@@ -84,7 +84,8 @@ options:
default: null default: null
no_reboot: no_reboot:
description: description:
- Flag indicating that the bundling process should not attempt to shut down the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance. - Flag indicating that the bundling process should not attempt to shut down the instance before bundling. If this flag is True, the
responsibility of maintaining file system integrity is left to the owner of the instance.
required: false required: false
default: no default: no
choices: [ "yes", "no" ] choices: [ "yes", "no" ]
@@ -97,7 +98,9 @@ options:
version_added: "2.0" version_added: "2.0"
description: description:
- List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters) - List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters)
- "Valid properties include: device_name, volume_type, size (in GB), delete_on_termination (boolean), no_device (boolean), snapshot_id, iops (for io1 volume_type)" - >
Valid properties include: device_name, volume_type, size (in GB), delete_on_termination (boolean), no_device (boolean),
snapshot_id, iops (for io1 volume_type)
required: false required: false
default: null default: null
delete_snapshot: delete_snapshot:
@@ -474,7 +477,8 @@ def create_image(module, ec2):
module.fail_json(msg="AMI creation failed, please see the AWS console for more details") module.fail_json(msg="AMI creation failed, please see the AWS console for more details")
except boto.exception.EC2ResponseError as e: except boto.exception.EC2ResponseError as e:
if ('InvalidAMIID.NotFound' not in e.error_code and 'InvalidAMIID.Unavailable' not in e.error_code) and wait and i == wait_timeout - 1: if ('InvalidAMIID.NotFound' not in e.error_code and 'InvalidAMIID.Unavailable' not in e.error_code) and wait and i == wait_timeout - 1:
module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help. %s: %s" % (e.error_code, e.error_message)) module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer "
"wait_timeout may help. %s: %s" % (e.error_code, e.error_message))
finally: finally:
time.sleep(1) time.sleep(1)
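The fail_json rewrap above relies on Python fusing adjacent string literals at compile time, so a long message splits inside the call's parentheses with no '+' or backslash. A quick self-check with toy error values:

error_code, error_message = 'InvalidAMIID.NotFound', 'image not visible yet'  # toy values
msg = ("Error while trying to find the new image. Using wait=yes and/or a longer "
       "wait_timeout may help. %s: %s" % (error_code, error_message))
# The literals join before the % operator applies; the space at the end of the
# first fragment is all that separates 'longer' from 'wait_timeout'.
assert "longer wait_timeout may help. InvalidAMIID.NotFound" in msg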
@@ -569,7 +573,8 @@ def update_image(module, ec2, image_id):
try: try:
set_permissions = img.get_launch_permissions() set_permissions = img.get_launch_permissions()
if set_permissions != launch_permissions: if set_permissions != launch_permissions:
if ('user_ids' in launch_permissions and launch_permissions['user_ids']) or ('group_names' in launch_permissions and launch_permissions['group_names']): if (('user_ids' in launch_permissions and launch_permissions['user_ids']) or
('group_names' in launch_permissions and launch_permissions['group_names'])):
res = img.set_launch_permissions(**launch_permissions) res = img.set_launch_permissions(**launch_permissions)
elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']): elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']):
res = img.remove_launch_permissions(**set_permissions) res = img.remove_launch_permissions(**set_permissions)
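The launch-permission hunk shows the companion pattern for long conditions: one outer pair of parentheses makes the break after 'or' legal without backslash continuation, and the indentation lines the two clauses up. A reduced, runnable sketch (launch_permissions is toy data):

launch_permissions = {'user_ids': ['123456789012'], 'group_names': []}

# The outer parentheses allow the wrap; behavior is identical to the one-liner.
if (('user_ids' in launch_permissions and launch_permissions['user_ids']) or
        ('group_names' in launch_permissions and launch_permissions['group_names'])):
    print('launch permissions to apply')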

View file

@@ -32,7 +32,8 @@ description:
- Results can be sorted and sliced - Results can be sorted and sliced
author: "Tom Bamford (@tombamford)" author: "Tom Bamford (@tombamford)"
notes: notes:
- This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com. - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on
cloud-images.ubuntu.com.
- See the example below for a suggestion of how to search by distro/release. - See the example below for a suggestion of how to search by distro/release.
options: options:
region: region:
@@ -45,7 +46,9 @@ options:
- Search AMIs owned by the specified owner - Search AMIs owned by the specified owner
- Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace' - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
- If not specified, all EC2 AMIs in the specified region will be searched. - If not specified, all EC2 AMIs in the specified region will be searched.
- You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\. - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one
character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the
literal string *amazon?\.
required: false required: false
default: null default: null
ami_id: ami_id:
@@ -94,8 +97,24 @@ options:
description: description:
- Optional attribute with which to sort the results. - Optional attribute with which to sort the results.
- If specifying 'tag', the 'tag_name' parameter is required. - If specifying 'tag', the 'tag_name' parameter is required.
- Starting at version 2.1, additional sort choices of architecture, block_device_mapping, creationDate, hypervisor, is_public, location, owner_id, platform, root_device_name, root_device_type, state, and virtualization_type are supported. - Starting at version 2.1, additional sort choices of architecture, block_device_mapping, creationDate, hypervisor, is_public, location, owner_id,
choices: ['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location', 'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type'] platform, root_device_name, root_device_type, state, and virtualization_type are supported.
choices:
- 'name'
- 'description'
- 'tag'
- 'architecture'
- 'block_device_mapping'
- 'creationDate'
- 'hypervisor'
- 'is_public'
- 'location'
- 'owner_id'
- 'platform'
- 'root_device_name'
- 'root_device_type'
- 'state'
- 'virtualization_type'
default: null default: null
required: false required: false
sort_tag: sort_tag:
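The sort choices above trade one over-long inline list for YAML block style. The two notations load to the same Python list, which is why the rewrap is purely cosmetic; a check assuming PyYAML, with the list shortened:

import yaml  # PyYAML, assumed available

flow = "choices: ['name', 'description', 'tag', 'architecture']"
block = """
choices:
  - 'name'
  - 'description'
  - 'tag'
  - 'architecture'
"""
# Inline (flow) and block sequences are interchangeable in YAML.
assert yaml.safe_load(flow) == yaml.safe_load(block)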
@@ -316,7 +335,8 @@ def main():
platform = dict(required=False), platform = dict(required=False),
product_code = dict(required=False), product_code = dict(required=False),
sort = dict(required=False, default=None, sort = dict(required=False, default=None,
choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location', 'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']), choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location',
'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']),
sort_tag = dict(required=False), sort_tag = dict(required=False),
sort_order = dict(required=False, default='ascending', sort_order = dict(required=False, default='ascending',
choices=['ascending', 'descending']), choices=['ascending', 'descending']),

View file

@@ -82,7 +82,8 @@ options:
default: 1 default: 1
replace_instances: replace_instances:
description: description:
- List of instance_ids belonging to the named ASG that you would like to terminate and replace with instances matching the current launch configuration. - List of instance_ids belonging to the named ASG that you would like to terminate and replace with instances matching the current launch
configuration.
required: false required: false
version_added: "1.8" version_added: "1.8"
default: None default: None
@@ -129,14 +130,16 @@ options:
version_added: "1.8" version_added: "1.8"
wait_for_instances: wait_for_instances:
description: description:
- Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy". - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all
instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9" version_added: "1.9"
default: yes default: yes
required: False required: False
termination_policies: termination_policies:
description: description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
- For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the
current termination policies are maintained.
required: false required: false
default: Default default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
@@ -150,7 +153,11 @@ options:
notification_types: notification_types:
description: description:
- A list of auto scaling events to trigger notifications on. - A list of auto scaling events to trigger notifications on.
default: ['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'] default:
- 'autoscaling:EC2_INSTANCE_LAUNCH'
- 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR'
- 'autoscaling:EC2_INSTANCE_TERMINATE'
- 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
required: false required: false
version_added: "2.2" version_added: "2.2"
suspend_processes: suspend_processes:

View file

@@ -35,7 +35,9 @@ options:
required: false required: false
tags: tags:
description: description:
- "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling group(s) you are searching for." - >
A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling
group(s) you are searching for.
required: false required: false
extends_documentation_fragment: extends_documentation_fragment:
- aws - aws
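The tags entry above switches to a folded block scalar ('- >') rather than a plain scalar, presumably because the brace-and-colon sample would otherwise be misread as flow syntax. Folding joins the wrapped lines with single spaces and, with default chomping, leaves one trailing newline; a sketch assuming PyYAML:

import yaml  # PyYAML, assumed available

folded = """
description:
  - >
    A dictionary/hash of tags in the format { tag1_name: 'tag1_value' } to match
    against the auto scaling group(s) you are searching for.
"""
loaded = yaml.safe_load(folded)['description'][0]
# '>' keeps braces and colons literal and folds the break into a space.
assert loaded.strip() == ("A dictionary/hash of tags in the format { tag1_name: 'tag1_value' } "
                          "to match against the auto scaling group(s) you are searching for.")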
@@ -232,7 +234,10 @@ def find_asgs(conn, module, name=None, tags=None):
List List
[ [
{ {
"auto_scaling_group_arn": "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:autoScalingGroupName/public-webapp-production", "auto_scaling_group_arn": (
"arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
"autoScalingGroupName/public-webapp-production"
),
"auto_scaling_group_name": "public-webapp-production", "auto_scaling_group_name": "public-webapp-production",
"availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"], "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
"created_time": "2016-02-02T23:28:42.481000+00:00", "created_time": "2016-02-02T23:28:42.481000+00:00",

View file

@@ -28,7 +28,9 @@ version_added: "2.2"
author: Michael Baydoun (@MichaelBaydoun) author: Michael Baydoun (@MichaelBaydoun)
requirements: [ botocore, boto3 ] requirements: [ botocore, boto3 ]
notes: notes:
- You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources. - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the
first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent
requests do not create new customer gateway resources.
- Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details. customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
options: options:

View file

@@ -69,7 +69,8 @@ options:
version_added: "1.5" version_added: "1.5"
wait_timeout: wait_timeout:
description: description:
- Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no. - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs.
If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
required: false required: false
default: 0 default: 0
version_added: "1.6" version_added: "1.6"

View file

@@ -29,7 +29,8 @@ author: "Rob White (@wimnat)"
options: options:
filters: filters:
description: description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters. - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
required: false required: false
default: null default: null

View file

@@ -58,11 +58,14 @@ options:
required: false required: false
security_groups: security_groups:
description: description:
- A list of security groups to apply to the instances. For VPC instances, specify security group IDs. For EC2-Classic, specify either security group names or IDs. - A list of security groups to apply to the instances. For VPC instances, specify security group IDs. For EC2-Classic, specify either security
group names or IDs.
required: false required: false
volumes: volumes:
description: description:
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id.
Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume.
For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
required: false required: false
user_data: user_data:
description: description:
@@ -87,7 +90,8 @@ options:
default: false default: false
assign_public_ip: assign_public_ip:
description: description:
- Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in an Amazon VPC. - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP
address to each instance launched in an Amazon VPC.
required: false required: false
version_added: "1.8" version_added: "1.8"
ramdisk_id: ramdisk_id:

View file

@@ -73,7 +73,34 @@ options:
description: description:
- The threshold's unit of measurement - The threshold's unit of measurement
required: false required: false
choices: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None'] choices:
- 'Seconds'
- 'Microseconds'
- 'Milliseconds'
- 'Bytes'
- 'Kilobytes'
- 'Megabytes'
- 'Gigabytes'
- 'Terabytes'
- 'Bits'
- 'Kilobits'
- 'Megabits'
- 'Gigabits'
- 'Terabits'
- 'Percent'
- 'Count'
- 'Bytes/Second'
- 'Kilobytes/Second'
- 'Megabytes/Second'
- 'Gigabytes/Second'
- 'Terabytes/Second'
- 'Bits/Second'
- 'Kilobits/Second'
- 'Megabits/Second'
- 'Gigabits/Second'
- 'Terabits/Second'
- 'Count/Second'
- 'None'
description: description:
description: description:
- A longer description of the alarm - A longer description of the alarm
@@ -254,7 +281,10 @@ def main():
comparison=dict(type='str', choices=['<=', '<', '>', '>=']), comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'), threshold=dict(type='float'),
period=dict(type='int'), period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']), unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes',
'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second',
'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second',
'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'), evaluation_periods=dict(type='int'),
description=dict(type='str'), description=dict(type='str'),
dimensions=dict(type='dict', default={}), dimensions=dict(type='dict', default={}),

View file

@@ -28,7 +28,8 @@ version_added: "2.0"
options: options:
filters: filters:
description: description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters. - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters.
required: false required: false
default: null default: null
author: author:

View file

@@ -110,7 +110,8 @@ def create_scaling_policy(connection, module):
try: try:
connection.create_scaling_policy(sp) connection.create_scaling_policy(sp)
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0] policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e: except BotoServerError as e:
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))
else: else:
@@ -137,7 +138,8 @@ def create_scaling_policy(connection, module):
if changed: if changed:
connection.create_scaling_policy(policy) connection.create_scaling_policy(policy)
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0] policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e: except BotoServerError as e:
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))

View file

@@ -113,7 +113,9 @@ state:
type: string type: string
sample: completed sample: completed
state_message: state_message:
description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper
AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the
error occurred.
type: string type: string
sample: sample:
start_time: start_time:

View file

@@ -24,7 +24,8 @@ DOCUMENTATION = '''
module: ec2_tag module: ec2_tag
short_description: create and remove tag(s) to ec2 resources. short_description: create and remove tag(s) to ec2 resources.
description: description:
- Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto. - Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX).
It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
version_added: "1.3" version_added: "1.3"
options: options:
resource: resource:

View file

@@ -29,7 +29,8 @@ author: "Rob White (@wimnat)"
options: options:
filters: filters:
description: description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters. - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
required: false required: false
default: null default: null
extends_documentation_fragment: extends_documentation_fragment:

View file

@@ -30,7 +30,8 @@ author: "Nick Aslanidis (@naslanidis)"
options: options:
filters: filters:
description: description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters. - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
required: false required: false
default: null default: null
DhcpOptionsIds: DhcpOptionsIds:

View file

@@ -61,7 +61,8 @@ options:
required: false required: false
tags: tags:
description: description:
- The tags you want attached to the VPC. This is independent of the name value, note that if you pass a 'Name' key it would override the Name of the VPC if it's different. - The tags you want attached to the VPC. This is independent of the name value, note that if you pass a 'Name' key it would override the Name of
the VPC if it's different.
default: None default: None
required: false required: false
aliases: [ 'resource_tags' ] aliases: [ 'resource_tags' ]
@@ -73,7 +74,8 @@ options:
choices: [ 'present', 'absent' ] choices: [ 'present', 'absent' ]
multi_ok: multi_ok:
description: description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created. - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want
duplicate VPCs created.
default: false default: false
required: false required: false

View file

@@ -29,7 +29,8 @@ author: "Rob White (@wimnat)"
options: options:
filters: filters:
description: description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters. - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
required: false required: false
default: null default: null

View file

@@ -29,7 +29,8 @@ author: "Rob White (@wimnat)"
options: options:
filters: filters:
description: description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters. - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
required: false required: false
default: null default: null
extends_documentation_fragment: extends_documentation_fragment:

View file

@@ -29,7 +29,8 @@ author: "Rob White (@wimnat)"
options: options:
filters: filters:
description: description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters. - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
required: false required: false
default: null default: null
extends_documentation_fragment: extends_documentation_fragment:

View file

@@ -29,7 +29,8 @@ requirements: [ boto3 ]
options: options:
filters: filters:
description: description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters. - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
required: false required: false
default: None default: None
vpn_gateway_ids: vpn_gateway_ids:

View file

@@ -23,7 +23,8 @@ DOCUMENTATION = '''
module: ec2_win_password module: ec2_win_password
short_description: gets the default administrator password for ec2 windows instances short_description: gets the default administrator password for ec2 windows instances
description: description:
- Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto. - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module
has a dependency on python-boto.
version_added: "2.0" version_added: "2.0"
author: "Rick Mendes (@rickmendes)" author: "Rick Mendes (@rickmendes)"
options: options:
@@ -38,7 +39,8 @@ options:
key_passphrase: key_passphrase:
version_added: "2.0" version_added: "2.0"
description: description:
- The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3. - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to
convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3.
required: false required: false
default: null default: null
wait: wait:

View file

@@ -69,7 +69,8 @@ options:
required: false required: false
role: role:
description: description:
- The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service. - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer
on your behalf. This parameter is only required if you are using a load balancer with your service.
required: false required: false
delay: delay:
description: description:
@@ -164,7 +165,9 @@ service:
returned: always returned: always
type: int type: int
serviceArn: serviceArn:
description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service . description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example,
arn:aws:ecs:region :012345678910 :service/my-service .
returned: always returned: always
type: string type: string
serviceName: serviceName:

View file

@@ -130,7 +130,8 @@ services:
description: list of service events description: list of service events
returned: always returned: always
type: list of complex type: list of complex
''' ''' # NOQA
try: try:
import boto import boto
import botocore import botocore
@@ -167,7 +168,8 @@ class EcsServiceManager:
# return self.client.list_clusters() # return self.client.list_clusters()
# {'failures': [], # {'failures': [],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'}, # 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'},
# 'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default', 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]} # 'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default',
# 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
# {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}], # {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'}, # 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'},
# 'clusters': []} # 'clusters': []}

View file

@@ -59,7 +59,8 @@ options:
version_added: 2.3 version_added: 2.3
task_role_arn: task_role_arn:
description: description:
- The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role. - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted
the permissions that are specified in this role.
required: false required: false
version_added: 2.3 version_added: 2.3
volumes: volumes:
@@ -88,7 +89,10 @@ EXAMPLES = '''
hostPort: 80 hostPort: 80
- name: busybox - name: busybox
command: command:
- /bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1><h2>Congratulations!</h2><p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom; cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done" - >
/bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1><h2>Congratulations!
</h2><p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom;
cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
cpu: 10 cpu: 10
entryPoint: entryPoint:
- sh - sh
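One caution about the folded command above: '>' turns every line break into a space, so wherever the wrap falls mid-token the reconstructed string gains a space the original one-liner did not have (here, between 'Congratulations!' and '</h2>'). That is harmless for prose but worth double-checking in EXAMPLES blocks that embed shell commands; a small demonstration assuming PyYAML:

import yaml  # PyYAML, assumed available

folded = """
command:
  - >
    echo '<h2>Congratulations!
    </h2>' > top
"""
cmd = yaml.safe_load(folded)['command'][0]
# The fold inserts a space at the break, subtly changing the embedded markup.
assert "Congratulations! </h2>" in cmd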
@@ -199,7 +203,12 @@ class EcsTaskManager:
pass pass
# Return the full descriptions of the task definitions, sorted ascending by revision # Return the full descriptions of the task definitions, sorted ascending by revision
return list(sorted([self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']], key=lambda td: td['revision'])) return list(
sorted(
[self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
key=lambda td: td['revision']
)
)
def deregister_task(self, taskArn): def deregister_task(self, taskArn):
response = self.ecs.deregister_task_definition(taskDefinition=taskArn) response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
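The describe-and-sort reflow above spreads one nested call across lines, one argument per line; the result is unchanged. A toy check with stand-in task definitions:

# Stand-ins for described task definitions, deliberately out of order.
task_definitions = [{'revision': 3}, {'revision': 1}, {'revision': 2}]
ordered = list(
    sorted(
        task_definitions,
        key=lambda td: td['revision']
    )
)
assert [td['revision'] for td in ordered] == [1, 2, 3]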
@@ -256,7 +265,8 @@ def main():
if not existing_definitions_in_family and revision != 1: if not existing_definitions_in_family and revision != 1:
module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision) module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision: elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" % (revision, existing_definitions_in_family[-1]['revision'] + 1)) module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
(revision, existing_definitions_in_family[-1]['revision'] + 1))
else: else:
existing = None existing = None

View file

@@ -31,7 +31,8 @@ author: "Jim Dalton (@jsdalton)"
options: options:
state: state:
description: description:
- C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster, resulting in a momentary outage. - C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster,
resulting in a momentary outage.
choices: ['present', 'absent', 'rebooted'] choices: ['present', 'absent', 'rebooted']
required: true required: true
name: name:
@@ -65,7 +66,8 @@ options:
default: None default: None
cache_parameter_group: cache_parameter_group:
description: description:
- The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group for the specified engine will be used. - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group
for the specified engine will be used.
required: false required: false
default: None default: None
version_added: "2.0" version_added: "2.0"

View file

@@ -70,7 +70,8 @@ options:
- The path to the private key of the certificate in PEM encoded format. - The path to the private key of the certificate in PEM encoded format.
dup_ok: dup_ok:
description: description:
- By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as long as the name is unique. - By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as
long as the name is unique.
required: false required: false
default: False default: False
aliases: [] aliases: []

View file

@@ -46,7 +46,8 @@ options:
required: false required: false
policy_json: policy_json:
description: description:
- A properly json formatted policy as string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly) - A properly json formatted policy as string (mutually exclusive with C(policy_document),
see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
required: false required: false
state: state:
description: description:
@@ -56,7 +57,8 @@ options:
choices: [ "present", "absent"] choices: [ "present", "absent"]
skip_duplicates: skip_duplicates:
description: description:
- By default the module looks for any policies that match the document you pass in, if there is a match it will not make a new policy object with the same rules. You can override this by specifying false which would allow for two policy objects with different names but same rules. - By default the module looks for any policies that match the document you pass in, if there is a match it will not make a new policy object with
the same rules. You can override this by specifying false which would allow for two policy objects with different names but same rules.
required: false required: false
default: "/" default: "/"

View file

@@ -43,7 +43,8 @@ options:
required: false required: false
managed_policy: managed_policy:
description: description:
- A list of managed policy ARNs (can't use friendly names due to AWS API limitation) to attach to the role. To embed an inline policy, use M(iam_policy). To remove existing policies, use an empty list item. - A list of managed policy ARNs (can't use friendly names due to AWS API limitation) to attach to the role. To embed an inline policy,
use M(iam_policy). To remove existing policies, use an empty list item.
required: true required: true
state: state:
description: description:

View file

@@ -41,11 +41,13 @@ options:
choices: [ 'present', 'absent' ] choices: [ 'present', 'absent' ]
runtime: runtime:
description: description:
- The runtime environment for the Lambda function you are uploading. Required when creating a function. Use parameters as described in boto3 docs. Current example runtime environments are nodejs, nodejs4.3, java8 or python2.7 - The runtime environment for the Lambda function you are uploading. Required when creating a function. Use parameters as described in boto3 docs.
Current example runtime environments are nodejs, nodejs4.3, java8 or python2.7
required: true required: true
role: role:
description: description:
- The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources. You may use the bare ARN if the role belongs to the same AWS account. - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
resources. You may use the bare ARN if the role belongs to the same AWS account.
default: null default: null
handler: handler:
description: description:
@@ -89,7 +91,8 @@ options:
default: 128 default: 128
vpc_subnet_ids: vpc_subnet_ids:
description: description:
- List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC. - List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run
the function in a VPC.
required: false required: false
default: None default: None
vpc_security_group_ids: vpc_security_group_ids:

View file

@@ -25,7 +25,9 @@ module: rds
version_added: "1.3" version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance short_description: create, delete, or modify an Amazon rds instance
description: description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0) - Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing
instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely
on boto.rds2 (boto >= 2.26.0)
options: options:
command: command:
description: description:
@@ -48,7 +50,7 @@ options:
- mariadb was added in version 2.2 - mariadb was added in version 2.2
required: false required: false
default: null default: null
choices: [ 'mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora'] choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
size: size:
description: description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify. - Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
@@ -56,7 +58,8 @@ options:
default: null default: null
instance_type: instance_type:
description: description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance. - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore.
If not specified then the replica inherits the same instance type as the source instance.
required: false required: false
default: null default: null
username: username:
@@ -81,12 +84,13 @@ options:
default: null default: null
engine_version: engine_version:
description: description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used. - Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false required: false
default: null default: null
parameter_group: parameter_group:
description: description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify. - Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only
when command=create or command=modify.
required: false required: false
default: null default: null
license_model: license_model:
@@ -97,7 +101,8 @@ options:
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ] choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone: multi_zone:
description: description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify. - Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or
command=modify.
choices: [ "yes", "no" ] choices: [ "yes", "no" ]
required: false required: false
default: null default: null
@@ -136,7 +141,9 @@ options:
default: null default: null
maint_window: maint_window:
description: description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify." - >
Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is
assigned. Used only when command=create or command=modify.
required: false required: false
default: null default: null
backup_window: backup_window:
@@ -146,7 +153,9 @@ options:
default: null default: null
backup_retention: backup_retention:
description: description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify." - >
Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or
command=modify.
required: false required: false
default: null default: null
zone: zone:
@@ -162,7 +171,8 @@ options:
default: null default: null
snapshot: snapshot:
description: description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot. - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with
no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false required: false
default: null default: null
aws_secret_key: aws_secret_key:
@@ -178,7 +188,8 @@ options:
aliases: [ 'ec2_access_key', 'access_key' ] aliases: [ 'ec2_access_key', 'access_key' ]
wait: wait:
description: description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated. - When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for
the database to be terminated.
required: false required: false
default: "no" default: "no"
choices: [ "yes", "no" ] choices: [ "yes", "no" ]
@@ -188,7 +199,8 @@ options:
default: 300 default: 300
apply_immediately: apply_immediately:
description: description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window. - Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next
preferred maintenance window.
default: no default: no
choices: [ "yes", "no" ] choices: [ "yes", "no" ]
force_failover: force_failover:
@@ -445,7 +457,9 @@ class RDS2Connection:
def get_db_instance(self, instancename): def get_db_instance(self, instancename):
try: try:
dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'] dbinstances = self.connection.describe_db_instances(
db_instance_identifier=instancename
)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0]) result = RDS2DBInstance(dbinstances[0])
return result return result
except boto.rds2.exceptions.DBInstanceNotFound as e: except boto.rds2.exceptions.DBInstanceNotFound as e:
@@ -455,7 +469,10 @@ class RDS2Connection:
def get_db_snapshot(self, snapshotid): def get_db_snapshot(self, snapshotid):
try: try:
snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots'] snapshots = self.connection.describe_db_snapshots(
db_snapshot_identifier=snapshotid,
snapshot_type='manual'
)['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0]) result = RDS2Snapshot(snapshots[0])
return result return result
except boto.rds2.exceptions.DBSnapshotNotFound as e: except boto.rds2.exceptions.DBSnapshotNotFound as e:
@@ -472,7 +489,11 @@ class RDS2Connection:
def create_db_instance_read_replica(self, instance_name, source_instance, **params): def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try: try:
result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance'] result = self.connection.create_db_instance_read_replica(
instance_name,
source_instance,
**params
)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result) return RDS2DBInstance(result)
except boto.exception.BotoServerError as e: except boto.exception.BotoServerError as e:
raise RDSException(e) raise RDSException(e)
@@ -507,7 +528,11 @@ class RDS2Connection:
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try: try:
result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] result = self.connection.restore_db_instance_from_db_snapshot(
instance_name,
snapshot,
**params
)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result) return RDS2DBInstance(result)
except boto.exception.BotoServerError as e: except boto.exception.BotoServerError as e:
raise RDSException(e) raise RDSException(e)
@ -1046,7 +1071,8 @@ def main():
command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True), command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
instance_name = dict(required=False), instance_name = dict(required=False),
source_instance = dict(required=False), source_instance = dict(required=False),
db_engine = dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora'], required=False), db_engine = dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',
'sqlserver-web', 'postgres', 'aurora'], required=False),
size = dict(required=False), size = dict(required=False),
instance_type = dict(aliases=['type'], required=False), instance_type = dict(aliases=['type'], required=False),
username = dict(required=False), username = dict(required=False),
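The rds hunks above wrap long boto calls whose result is immediately subscripted: the arguments move onto their own lines inside the parentheses, and the chained subscripts stay attached to the closing parenthesis. A minimal runnable sketch of the same shape, with a hypothetical describe_thing helper standing in for the boto connection methods:

    def describe_thing(thing_id):
        # Stand-in for a verbose AWS-style response envelope.
        return {'DescribeThingResponse': {'DescribeThingResult': {'Things': [thing_id]}}}

    # Arguments get their own lines; only the closing-parenthesis line carries
    # the subscript chain, keeping every physical line under the limit.
    things = describe_thing(
        thing_id='t-1'
    )['DescribeThingResponse']['DescribeThingResult']['Things']
    assert things == ['t-1']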

View file

@ -52,7 +52,35 @@ options:
required: false required: false
default: null default: null
aliases: [] aliases: []
choices: [ 'aurora5.6', 'mariadb10.0', 'mariadb10.1', 'mysql5.1', 'mysql5.5', 'mysql5.6', 'mysql5.7', 'oracle-ee-11.2', 'oracle-ee-12.1', 'oracle-se-11.2', 'oracle-se-12.1', 'oracle-se1-11.2', 'oracle-se1-12.1', 'postgres9.3', 'postgres9.4', 'postgres9.5', 'postgres9.6', sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-ex-12.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-se-12.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0', 'sqlserver-web-12.0' ] choices:
- 'aurora5.6'
- 'mariadb10.0'
- 'mariadb10.1'
- 'mysql5.1'
- 'mysql5.5'
- 'mysql5.6'
- 'mysql5.7'
- 'oracle-ee-11.2'
- 'oracle-ee-12.1'
- 'oracle-se-11.2'
- 'oracle-se-12.1'
- 'oracle-se1-11.2'
- 'oracle-se1-12.1'
- 'postgres9.3'
- 'postgres9.4'
- 'postgres9.5'
- 'postgres9.6'
- 'sqlserver-ee-10.5'
- 'sqlserver-ee-11.0'
- 'sqlserver-ex-10.5'
- 'sqlserver-ex-11.0'
- 'sqlserver-ex-12.0'
- 'sqlserver-se-10.5'
- 'sqlserver-se-11.0'
- 'sqlserver-se-12.0'
- 'sqlserver-web-10.5'
- 'sqlserver-web-11.0'
- 'sqlserver-web-12.0'
immediate: immediate:
description: description:
- Whether to apply the changes immediately, or after the next reboot of any associated instances. - Whether to apply the changes immediately, or after the next reboot of any associated instances.
@ -61,7 +89,8 @@ options:
aliases: [] aliases: []
params: params:
description: description:
- Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3), or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group. - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3),
or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
required: false required: false
default: null default: null
aliases: [] aliases: []

View file

@ -144,7 +144,9 @@ def main():
# Sort the subnet groups before we compare them # Sort the subnet groups before we compare them
matching_groups[0].subnet_ids.sort() matching_groups[0].subnet_ids.sort()
group_subnets.sort() group_subnets.sort()
if ( (matching_groups[0].name != group_name) or (matching_groups[0].description != group_description) or (matching_groups[0].subnet_ids != group_subnets) ): if (matching_groups[0].name != group_name or
matching_groups[0].description != group_description or
matching_groups[0].subnet_ids != group_subnets):
changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets) changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
changed = True changed = True
except BotoServerError as e: except BotoServerError as e:
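The condition rewrite above trades one over-long boolean expression for a parenthesized multi-line one; inside parentheses, Python continues the expression across lines with no backslashes. A self-contained sketch of the same style, using placeholder values rather than the module's real subnet-group object:

    name, description, subnet_ids = 'db-group', 'my group', ['subnet-a', 'subnet-b']
    current = {'name': 'db-group', 'description': 'old', 'subnet_ids': ['subnet-a']}

    # One comparison per line; the enclosing parentheses make the continuation legal.
    if (current['name'] != name or
            current['description'] != description or
            current['subnet_ids'] != subnet_ids):
        print('group needs modifying')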

View file

@ -129,7 +129,8 @@ options:
default: null default: null
wait: wait:
description: description:
- When command=create, modify or restore, wait for the database to enter the 'available' state. When command=delete, wait for the database to be terminated. - When command=create, modify or restore, wait for the database to enter the 'available' state. When command=delete, wait for the database to be
terminated.
default: "no" default: "no"
choices: [ "yes", "no" ] choices: [ "yes", "no" ]
wait_timeout: wait_timeout:
@ -413,7 +414,8 @@ def main():
argument_spec.update(dict( argument_spec.update(dict(
command = dict(choices=['create', 'facts', 'delete', 'modify'], required=True), command = dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
identifier = dict(required=True), identifier = dict(required=True),
node_type = dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'], required=False), node_type = dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge',
'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'], required=False),
username = dict(required=False), username = dict(required=False),
password = dict(no_log=True, required=False), password = dict(no_log=True, required=False),
db_name = dict(required=False), db_name = dict(required=False),

View file

@ -77,7 +77,8 @@ options:
default: false default: false
value: value:
description: description:
- The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values for the record must be specified or Route53 will not delete it. - The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values
for the record must be specified or Route53 will not delete it.
required: false required: false
default: null default: null
overwrite: overwrite:
@ -87,12 +88,14 @@ options:
default: null default: null
retry_interval: retry_interval:
description: description:
- In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many domain names, the default of 500 seconds may be too long. - In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many
domain names, the default of 500 seconds may be too long.
required: false required: false
default: 500 default: 500
private_zone: private_zone:
description: description:
- If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones. The default is to use the public zone. - If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones.
The default is to use the public zone.
required: false required: false
default: false default: false
version_added: "1.9" version_added: "1.9"

View file

@ -24,7 +24,8 @@ DOCUMENTATION = '''
module: s3 module: s3
short_description: manage objects in S3. short_description: manage objects in S3.
description: description:
- This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets, retrieving objects as files or strings and generating download links. This module has a dependency on python-boto. - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets,
retrieving objects as files or strings and generating download links. This module has a dependency on python-boto.
version_added: "1.1" version_added: "1.1"
options: options:
aws_access_key: aws_access_key:
@ -89,7 +90,8 @@ options:
version_added: "1.6" version_added: "1.6"
mode: mode:
description: description:
- Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket), and delobj (delete object, Ansible 2.0+). - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+),
getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket), and delobj (delete object, Ansible 2.0+).
required: true required: true
choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'] choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
object: object:
@ -99,7 +101,9 @@ options:
default: null default: null
permission: permission:
description: description:
- This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'. Multiple permissions can be specified as a list. - This option lets the user set the canned permissions on the object/bucket that are created.
The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'. Multiple permissions can be
specified as a list.
required: false required: false
default: private default: private
version_added: "2.0" version_added: "2.0"
@ -118,13 +122,17 @@ options:
version_added: "2.0" version_added: "2.0"
overwrite: overwrite:
description: description:
- Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0 - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0
required: false required: false
default: 'always' default: 'always'
version_added: "1.2" version_added: "1.2"
region: region:
description: description:
- "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect." - >
AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked,
followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the
S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect.
required: false required: false
default: null default: null
version_added: "1.8" version_added: "1.8"
@ -153,7 +161,9 @@ options:
version_added: "1.3" version_added: "1.3"
ignore_nonexistent_bucket: ignore_nonexistent_bucket:
description: description:
- "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying ignore_nonexistent_bucket: True." - >
Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the GetObject permission but no other
permissions. In this case using the option mode: get will fail without specifying ignore_nonexistent_bucket: True.
default: false default: false
aliases: [] aliases: []
version_added: "2.3" version_added: "2.3"

View file

@ -38,7 +38,9 @@ options:
required: true required: true
expiration_date: expiration_date:
description: description:
- "Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified." - >
Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must
be midnight and a GMT timezone must be specified.
required: false required: false
default: null default: null
expiration_days: expiration_days:
@ -77,7 +79,10 @@ options:
choices: [ 'glacier', 'standard_ia'] choices: [ 'glacier', 'standard_ia']
transition_date: transition_date:
description: description:
- "Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified, this parameter is required." - >
Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class.
The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified,
this parameter is required.
required: false required: false
default: null default: null
transition_days: transition_days:
@ -110,7 +115,8 @@ EXAMPLES = '''
status: enabled status: enabled
state: present state: present
# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030. Note that midnight GMT must be specified. # Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030.
# Note that midnight GMT must be specified.
# Be sure to quote your date strings # Be sure to quote your date strings
- s3_lifecycle: - s3_lifecycle:
name: mybucket name: mybucket
@ -295,7 +301,9 @@ def compare_rule(rule_a, rule_b):
if rule2_expiration is None: if rule2_expiration is None:
rule2_expiration = Expiration() rule2_expiration = Expiration()
if (rule1.__dict__ == rule2.__dict__) and (rule1_expiration.__dict__ == rule2_expiration.__dict__) and (rule1_transition.__dict__ == rule2_transition.__dict__): if (rule1.__dict__ == rule2.__dict__ and
rule1_expiration.__dict__ == rule2_expiration.__dict__ and
rule1_transition.__dict__ == rule2_transition.__dict__):
return True return True
else: else:
return False return False

View file

@ -24,7 +24,8 @@ DOCUMENTATION = '''
module: s3_sync module: s3_sync
short_description: Efficiently upload multiple files to S3 short_description: Efficiently upload multiple files to S3
description: description:
- The S3 module is great, but it is very slow for a large volume of files; even a dozen will be noticeable. In addition to speed, it handles globbing, inclusions/exclusions, mime types, expiration mapping, recursion, and smart directory mapping. - The S3 module is great, but it is very slow for a large volume of files; even a dozen will be noticeable. In addition to speed, it handles globbing,
inclusions/exclusions, mime types, expiration mapping, recursion, and smart directory mapping.
version_added: "2.3" version_added: "2.3"
options: options:
mode: mode:
@ -63,7 +64,9 @@ options:
choices: [ '', private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control ] choices: [ '', private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control ]
mime_map: mime_map:
description: description:
- 'Dict entry from extension to MIME type. This will override any default/sniffed MIME type. For example C({".txt": "application/text", ".yml": "application/text"})' - >
Dict entry from extension to MIME type. This will override any default/sniffed MIME type.
For example C({".txt": "application/text", ".yml": "appication/text"})
required: false required: false
include: include:
description: description:
@ -362,7 +365,10 @@ def head_s3(s3, bucket, s3keys):
try: try:
retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path']) retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path'])
except botocore.exceptions.ClientError as err: except botocore.exceptions.ClientError as err:
if hasattr(err, 'response') and 'ResponseMetadata' in err.response and 'HTTPStatusCode' in err.response['ResponseMetadata'] and str(err.response['ResponseMetadata']['HTTPStatusCode']) == '404': if (hasattr(err, 'response') and
'ResponseMetadata' in err.response and
'HTTPStatusCode' in err.response['ResponseMetadata'] and
str(err.response['ResponseMetadata']['HTTPStatusCode']) == '404'):
pass pass
else: else:
raise Exception(err) raise Exception(err)
@ -444,7 +450,8 @@ def main():
bucket = dict(required=True), bucket = dict(required=True),
key_prefix = dict(required=False, default=''), key_prefix = dict(required=False, default=''),
file_root = dict(required=True, type='path'), file_root = dict(required=True, type='path'),
permission = dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']), permission = dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read', 'bucket-owner-read',
'bucket-owner-full-control']),
retries = dict(required=False), retries = dict(required=False),
mime_map = dict(required=False, type='dict'), mime_map = dict(required=False, type='dict'),
exclude = dict(required=False, default=".*"), exclude = dict(required=False, default=".*"),
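The head_s3 hunk above wraps a chain of guards that detect a 404 from S3's head_object call. A minimal sketch of the same check that runs without AWS access, using a fake exception shaped like the .response payload botocore attaches to ClientError (the real module catches botocore.exceptions.ClientError):

    class FakeClientError(Exception):
        # Mimics botocore's ClientError.response structure.
        def __init__(self, status):
            self.response = {'ResponseMetadata': {'HTTPStatusCode': status}}

    err = FakeClientError(404)
    if (hasattr(err, 'response') and
            'ResponseMetadata' in err.response and
            'HTTPStatusCode' in err.response['ResponseMetadata'] and
            str(err.response['ResponseMetadata']['HTTPStatusCode']) == '404'):
        print('key not found; treat as not yet uploaded')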

View file

@ -44,7 +44,10 @@ options:
default: null default: null
region: region:
description: description:
- "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard." - >
AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked,
followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the
S3 Location: US Standard.
required: false required: false
default: null default: null
state: state:
@ -55,7 +58,10 @@ options:
choices: [ 'present', 'absent' ] choices: [ 'present', 'absent' ]
suffix: suffix:
description: description:
- "Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash character." - >
Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to
samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash
character.
required: false required: false
default: index.html default: index.html
@ -115,7 +121,8 @@ routing_rules:
sample: ansible.com sample: ansible.com
condition: condition:
key_prefix_equals: key_prefix_equals:
description: object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html description: object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be
ExamplePage.html
returned: when routing rule present returned: when routing rule present
type: string type: string
sample: docs/ sample: docs/

View file

@ -30,7 +30,8 @@ author: Boris Ekelchik (@bekelchik)
options: options:
role_arn: role_arn:
description: description:
- The Amazon Resource Name (ARN) of the role that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs) - The Amazon Resource Name (ARN) of the role that the caller is
assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs)
required: true required: true
role_session_name: role_session_name:
description: description:
@ -43,7 +44,8 @@ options:
default: null default: null
duration_seconds: duration_seconds:
description: description:
- The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds. - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour).
By default, the value is set to 3600 seconds.
required: false required: false
default: null default: null
external_id: external_id:

View file

@ -30,7 +30,9 @@ author: Victor Costan (@pwnall)
options: options:
duration_seconds: duration_seconds:
description: description:
- The duration, in seconds, of the session token. See http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters for acceptable and default values. - The duration, in seconds, of the session token.
See http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters
for acceptable and default values.
required: false required: false
default: null default: null
mfa_serial_number: mfa_serial_number:

View file

@ -53,12 +53,14 @@ options:
required: true required: true
image: image:
description: description:
- system image for creating the virtual machine (e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB) - system image for creating the virtual machine
(e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB)
required: true required: true
default: null default: null
role_size: role_size:
description: description:
- azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). Note that instances of type G and DS are not available in all regions (locations). Make sure the size and type of instance you selected are available in your chosen location. - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). Note that instances of
type G and DS are not available in all regions (locations). Make sure the size and type of instance you selected are available in your chosen location.
required: false required: false
default: Small default: Small
endpoints: endpoints:
@ -78,7 +80,8 @@ options:
default: null default: null
ssh_cert_path: ssh_cert_path:
description: description:
- path to an X509 certificate containing the public ssh key to install in the virtual machine. See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details. - path to an X509 certificate containing the public ssh key to install in the virtual machine.
See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details.
- if this option is specified, password-based ssh authentication will be disabled. - if this option is specified, password-based ssh authentication will be disabled.
required: false required: false
default: null default: null

View file

@ -233,7 +233,9 @@ EXAMPLES = '''
- "14.04.2-LTS" - "14.04.2-LTS"
- "15.04" - "15.04"
metadata: metadata:
description: "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04." description: >
The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version.
Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04.
variables: variables:
location: "West US" location: "West US"
imagePublisher: "Canonical" imagePublisher: "Canonical"
@ -320,7 +322,9 @@ EXAMPLES = '''
osDisk: osDisk:
name: "osdisk" name: "osdisk"
vhd: vhd:
uri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',variables('OSDiskName'),'.vhd')]" uri: >
[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',
variables('OSDiskName'),'.vhd')]
caching: "ReadWrite" caching: "ReadWrite"
createOption: "FromImage" createOption: "FromImage"
networkProfile: networkProfile:

View file

@ -120,7 +120,7 @@ azure_networkinterfaces:
"tags": {}, "tags": {},
"type": "Microsoft.Network/networkInterfaces" "type": "Microsoft.Network/networkInterfaces"
}] }]
''' ''' # NOQA
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import * from ansible.module_utils.azure_rm_common import *
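The trailing # NOQA added above sits on the closing line of a RETURN docstring whose sample JSON cannot be re-wrapped without corrupting the sample. Stock flake8/pycodestyle suppression is strictly per physical line, so this presumably relies on the project's own line-length checker treating the closing-line marker as covering the whole docstring; that tooling is not shown in this diff. For reference, the per-line form that stock flake8 honors:

    SAMPLE = {'public-ipv4': '147.229.15.12', 'private-ipv4': '10.0.15.12'}  # noqa: E501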

View file

@ -334,7 +334,7 @@ state:
}, },
"type": "Microsoft.Network/networkSecurityGroups" "type": "Microsoft.Network/networkSecurityGroups"
} }
''' ''' # NOQA
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import * from ansible.module_utils.azure_rm_common import *

View file

@ -200,7 +200,7 @@ azure_securitygroups:
"type": "Microsoft.Network/networkSecurityGroups" "type": "Microsoft.Network/networkSecurityGroups"
}] }]
''' ''' # NOQA
from ansible.module_utils.basic import * from ansible.module_utils.basic import *

View file

@ -130,7 +130,7 @@ state:
description: Success or failure of the provisioning event. description: Success or failure of the provisioning event.
type: str type: str
example: "Succeeded" example: "Succeeded"
''' ''' # NOQA
from ansible.module_utils.basic import * from ansible.module_utils.basic import *

View file

@ -52,7 +52,8 @@ options:
description: description:
- Assert the state of the virtual machine. - Assert the state of the virtual machine.
- State 'present' will check that the machine exists with the requested configuration. If the configuration - State 'present' will check that the machine exists with the requested configuration. If the configuration
of the existing machine does not match, the machine will be updated. Use options started, allocated and restarted to change the machine's power state. of the existing machine does not match, the machine will be updated. Use options started, allocated and restarted to change the machine's power
state.
- State 'absent' will remove the virtual machine. - State 'absent' will remove the virtual machine.
default: present default: present
required: false required: false
@ -437,7 +438,7 @@ azure_vm:
}, },
"type": "Microsoft.Compute/virtualMachines" "type": "Microsoft.Compute/virtualMachines"
} }
''' ''' # NOQA
import random import random

View file

@ -31,12 +31,15 @@ description:
- Create, start, stop and delete servers on the cloudscale.ch IaaS service. - Create, start, stop and delete servers on the cloudscale.ch IaaS service.
- All operations are performed using the cloudscale.ch public API v1. - All operations are performed using the cloudscale.ch public API v1.
- "For details consult the full API documentation: U(https://www.cloudscale.ch/en/api/v1)." - "For details consult the full API documentation: U(https://www.cloudscale.ch/en/api/v1)."
- A valid API token is required for all operations. You can create as many tokens as you like using the cloudscale.ch control panel at U(https://control.cloudscale.ch). - A valid API token is required for all operations. You can create as many tokens as you like using the cloudscale.ch control panel at
U(https://control.cloudscale.ch).
notes: notes:
- Instead of the api_token parameter the CLOUDSCALE_API_TOKEN environment variable can be used. - Instead of the api_token parameter the CLOUDSCALE_API_TOKEN environment variable can be used.
- To create a new server at least the C(name), C(ssh_key), C(image) and C(flavor) options are required. - To create a new server at least the C(name), C(ssh_key), C(image) and C(flavor) options are required.
- If more than one server with the name given by the C(name) option exists, execution is aborted. - If more than one server with the name given by the C(name) option exists, execution is aborted.
- Once a server is created all parameters except C(state) are read-only. You can't change the name, flavor or any other property. This is a limitation of the cloudscale.ch API. The module will silently ignore differences between the configured parameters and the running server if a server with the correct name or UUID exists. Only state changes will be applied. - Once a server is created all parameters except C(state) are read-only. You can't change the name, flavor or any other property. This is a limitation
of the cloudscale.ch API. The module will silently ignore differences between the configured parameters and the running server if a server with the
correct name or UUID exists. Only state changes will be applied.
version_added: 2.3 version_added: 2.3
author: "Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>" author: "Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>"
options: options:

View file

@ -307,7 +307,8 @@ state:
type: string type: string
sample: Up sample: Up
suitable_for_migration: suitable_for_migration:
description: Whether this host is suitable (has enough capacity and satisfies all conditions like hosttags, max guests VM limit, etc) to migrate a VM to it or not. description: Whether this host is suitable (has enough capacity and satisfies all conditions like hosttags, max guests VM limit, etc) to migrate a VM
to it or not.
returned: success returned: success
type: string type: string
sample: true sample: true

View file

@ -150,7 +150,8 @@ options:
default: null default: null
root_disk_size: root_disk_size:
description: description:
- Root disk size in GByte, required if deploying an instance with the KVM hypervisor and you want to resize the root disk at startup (need CloudStack >= 4.4, cloud-initramfs-growroot installed and enabled in the template) - Root disk size in GByte, required if deploying an instance with the KVM hypervisor and you want to resize the root disk at startup
(need CloudStack >= 4.4, cloud-initramfs-growroot installed and enabled in the template)
required: false required: false
default: null default: null
security_groups: security_groups:
@ -984,7 +985,8 @@ def main():
memory = dict(default=None, type='int'), memory = dict(default=None, type='int'),
template = dict(default=None), template = dict(default=None),
iso = dict(default=None), iso = dict(default=None),
template_filter = dict(default="executable", aliases=['iso_filter'], choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']), template_filter = dict(default="executable", aliases=['iso_filter'], choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable',
'community']),
networks = dict(type='list', aliases=[ 'network' ], default=None), networks = dict(type='list', aliases=[ 'network' ], default=None),
ip_to_networks = dict(type='list', aliases=['ip_to_network'], default=None), ip_to_networks = dict(type='list', aliases=['ip_to_network'], default=None),
ip_address = dict(default=None), ip_address = dict(default=None),

View file

@ -48,7 +48,8 @@ options:
default: null default: null
is_ready: is_ready:
description: description:
- This flag is used for searching existing ISOs. If set to C(true), it will only list ISOs ready for deployment, e.g. successfully downloaded and installed. It is recommended to set it to C(false). - This flag is used for searching existing ISOs. If set to C(true), it will only list ISOs ready for deployment, e.g.
successfully downloaded and installed. It is recommended to set it to C(false).
required: false required: false
default: false default: false
aliases: [] aliases: []

View file

@ -51,7 +51,8 @@ options:
- String, this is the name of the droplet - must be formatted by hostname rules, or the name of an SSH key. - String, this is the name of the droplet - must be formatted by hostname rules, or the name of an SSH key.
unique_name: unique_name:
description: description:
- Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence. - Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host
per name. Useful for idempotence.
version_added: "1.4" version_added: "1.4"
default: "no" default: "no"
choices: [ "yes", "no" ] choices: [ "yes", "no" ]
@ -269,7 +270,8 @@ class Droplet(JsonfyMixIn):
cls.manager = DoManager(None, api_token, api_version=2) cls.manager = DoManager(None, api_token, api_version=2)
@classmethod @classmethod
def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False, user_data=None, ipv6=False): def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False, user_data=None,
ipv6=False):
private_networking_lower = str(private_networking).lower() private_networking_lower = str(private_networking).lower()
backups_enabled_lower = str(backups_enabled).lower() backups_enabled_lower = str(backups_enabled).lower()
ipv6_lower = str(ipv6).lower() ipv6_lower = str(ipv6).lower()
@ -463,7 +465,8 @@ def main():
), ),
) )
if not HAS_DOPY and not HAS_SIX: if not HAS_DOPY and not HAS_SIX:
module.fail_json(msg='dopy >= 0.3.2 is required for this module. dopy requires six but six is not installed. Make sure both dopy and six are installed.') module.fail_json(msg='dopy >= 0.3.2 is required for this module. dopy requires six but six is not installed. '
'Make sure both dopy and six are installed.')
if not HAS_DOPY: if not HAS_DOPY:
module.fail_json(msg='dopy >= 0.3.2 required for this module') module.fail_json(msg='dopy >= 0.3.2 required for this module')
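Besides call sites, the digital_ocean hunk also wraps a long def signature (Droplet.add) by breaking after a comma and aligning the continuation under the opening parenthesis. A runnable sketch of that signature style, with the parameter list mirroring the one above:

    def add(name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True,
            private_networking=False, backups_enabled=False, user_data=None,
            ipv6=False):
        # Return a few bound arguments so the sketch has something observable.
        return dict(name=name, size_id=size_id, image_id=image_id, region_id=region_id)

    print(add('web-1', 's-1vcpu-1gb', 'img-1', 'nyc3'))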

View file

@ -25,7 +25,10 @@ module: gc_storage
version_added: "1.4" version_added: "1.4"
short_description: This module manages objects/buckets in Google Cloud Storage. short_description: This module manages objects/buckets in Google Cloud Storage.
description: description:
- This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for information about setting the default project. - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some
canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module
requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for
information about setting the default project.
options: options:
bucket: bucket:
@ -54,7 +57,8 @@ options:
aliases: [ 'overwrite' ] aliases: [ 'overwrite' ]
permission: permission:
description: description:
- This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'authenticated-read'. - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private',
'public-read', 'authenticated-read'.
required: false required: false
default: private default: private
headers: headers:
@ -65,12 +69,14 @@ options:
default: '{}' default: '{}'
expiration: expiration:
description: description:
- Time limit (in seconds) for the URL generated and returned by GCA when performing a mode=put or mode=get_url operation. This url is only available when public-read is the acl for the object. - Time limit (in seconds) for the URL generated and returned by GCA when performing a mode=put or mode=get_url operation. This url is only
available when public-read is the acl for the object.
required: false required: false
default: null default: null
mode: mode:
description: description:
- Switches the module behaviour between upload, download, get_url (return download url), get_str (download object as string), create (bucket) and delete (bucket). - Switches the module behaviour between upload, download, get_url (return download url), get_str (download object as string), create (bucket) and
delete (bucket).
required: true required: true
default: null default: null
choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ] choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]

View file

@ -36,7 +36,8 @@ options:
aliases: [] aliases: []
instance_pattern: instance_pattern:
description: description:
- The pattern of GCE instance names to match for adding/removing tags. Full-Python regex is supported. See U(https://docs.python.org/2/library/re.html) for details. - The pattern of GCE instance names to match for adding/removing tags. Full-Python regex is supported.
See U(https://docs.python.org/2/library/re.html) for details.
If instance_name is not specified, this field is required. If instance_name is not specified, this field is required.
required: false required: false
default: null default: null

View file

@ -44,7 +44,9 @@ options:
required: True required: True
subscription: subscription:
description: description:
- Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull. For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediately are available as subfields. See subfields name, push_endpoint and ack_deadline for more information. - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediately are available as subfields.
See subfields name, push_endpoint and ack_deadline for more information.
required: False required: False
name: name:
description: Subfield of subscription. Required if subscription is specified. See examples. description: Subfield of subscription. Required if subscription is specified. See examples.
@ -53,15 +55,25 @@ options:
description: Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples. description: Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
required: False required: False
pull: pull:
description: Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the provided subscription name. max_messages (int; default None; max number of messages to pull), message_ack (bool; default False; acknowledge the message) and return_immediately (bool; default True, don't wait for messages to appear). If the messages are acknowledged, changed is set to True, otherwise, changed is False. description:
- Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the provided subscription name.
max_messages (int; default None; max number of messages to pull), message_ack (bool; default False; acknowledge the message) and return_immediately
(bool; default True, don't wait for messages to appear). If the messages are acknowledged, changed is set to True, otherwise, changed is False.
push_endpoint: push_endpoint:
description: Subfield of subscription. Not required. If specified, message will be sent to an endpoint. See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information. description:
- Subfield of subscription. Not required. If specified, message will be sent to an endpoint.
See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
required: False required: False
publish: publish:
description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format. Only message is required. description:
- List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
Only message is required.
required: False required: False
state: state:
description: State of the topic or queue (absent, present). Applies to the most granular resource. Remove the most granular resource. If subscription is specified we remove it. If only topic is specified, that is what is removed. Note that a topic can be removed without first removing the subscription. description:
- State of the topic or queue (absent, present). Applies to the most granular resource. Remove the most granular resource. If subscription is
specified we remove it. If only topic is specified, that is what is removed. Note that a topic can be removed without first removing the
subscription.
required: False required: False
default: "present" default: "present"
''' '''
@ -144,7 +156,8 @@ gcpubsub:
RETURN = ''' RETURN = '''
publish: publish:
description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format. Only message is required. description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
Only message is required.
returned: Only when specified returned: Only when specified
type: list of dictionary type: list of dictionary
sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]" sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"

View file

@ -292,7 +292,9 @@ def conn(url, user, password):
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int): def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
if vmdisk_alloc == 'thin': if vmdisk_alloc == 'thin':
# define VM params # define VM params
vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype) vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
# define disk params # define disk params
vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow', vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow',
storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
@ -301,10 +303,12 @@ def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork,
nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio') nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
elif vmdisk_alloc == 'preallocated': elif vmdisk_alloc == 'preallocated':
# define VM params # define VM params
vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype) vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
# define disk params # define disk params
vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw', vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System",
storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
# define network parameters # define network parameters
network_net = params.Network(name=vmnetwork) network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio') nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')

View file

@ -762,7 +762,10 @@ def get_vminfo(module, proxmox, node, vmid, **kwargs):
k = vm[k] k = vm[k]
k = re.search('=(.*?),', k).group(1) k = re.search('=(.*?),', k).group(1)
mac[interface] = k mac[interface] = k
if re.match(r'virtio[0-9]', k) is not None or re.match(r'ide[0-9]', k) is not None or re.match(r'scsi[0-9]', k) is not None or re.match(r'sata[0-9]', k) is not None: if (re.match(r'virtio[0-9]', k) is not None or
re.match(r'ide[0-9]', k) is not None or
re.match(r'scsi[0-9]', k) is not None or
re.match(r'sata[0-9]', k) is not None):
device = k device = k
k = vm[k] k = vm[k]
k = re.search('(.*?),', k).group(1) k = re.search('(.*?),', k).group(1)
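The four re.match guards wrapped above test the same device-name shape against four prefixes. An alternation collapses them into a single anchored pattern; this is an equivalent alternative sketch, not what the commit itself does:

    import re

    def is_disk_key(key):
        # virtio0, ide1, scsi2, sata3 ... match; net0 does not.
        return re.match(r'(virtio|ide|scsi|sata)[0-9]', key) is not None

    assert is_disk_key('virtio0') and is_disk_key('sata1')
    assert not is_disk_key('net0')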

View file

@ -54,7 +54,8 @@ options:
default: us-east-1 default: us-east-1
deploy: deploy:
description: description:
- Whether or not to deploy artifacts after building them. When this option is `false` all the functions will be built, but no stack update will be run to send them out. This is mostly useful for generating artifacts to be stored/deployed elsewhere. - Whether or not to deploy artifacts after building them. When this option is `false` all the functions will be built, but no stack update will be
run to send them out. This is mostly useful for generating artifacts to be stored/deployed elsewhere.
required: false required: false
default: true default: true
notes: notes:

View file

@ -80,7 +80,8 @@ options:
version_added: "1.8" version_added: "1.8"
image_exclude: image_exclude:
description: description:
- Text to use to filter image names, for the case, such as HP, where there are multiple image names matching the common identifying portions. image_exclude is a negative match filter - it is text that may not exist in the image name. Defaults to "(deprecated)" - Text to use to filter image names, for the case, such as HP, where there are multiple image names matching the common identifying
portions. image_exclude is a negative match filter - it is text that may not exist in the image name. Defaults to "(deprecated)"
version_added: "1.8" version_added: "1.8"
flavor_id: flavor_id:
description: description:
@ -95,7 +96,8 @@ options:
version_added: "1.8" version_added: "1.8"
flavor_include: flavor_include:
description: description:
- Text to use to filter flavor names, for the case, such as Rackspace, where there are multiple flavors that have the same ram count. flavor_include is a positive match filter - it must exist in the flavor name. - Text to use to filter flavor names, for the case, such as Rackspace, where there are multiple flavors that have the same ram count.
flavor_include is a positive match filter - it must exist in the flavor name.
version_added: "1.8" version_added: "1.8"
key_name: key_name:
description: description:
@ -459,7 +461,7 @@ def _create_server(module, nova):
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public') public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
# now exit with info # now exit with info
module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info) module.exit_json(changed=True, id=server.id, private_ip=''.join(private), public_ip=''.join(public), status=server.status, info=server._info)
if server.status == 'ERROR': if server.status == 'ERROR':
module.fail_json(msg = "Error in creating the server, please check logs") module.fail_json(msg = "Error in creating the server, please check logs")

View file

@ -30,7 +30,8 @@ module: packet_device
short_description: create, destroy, start, stop, and reboot a Packet Host machine. short_description: create, destroy, start, stop, and reboot a Packet Host machine.
description: description:
- create, destroy, update, start, stop, and reboot a Packet Host machine. When the machine is created it can optionally wait for it to have an IP address before returning. This module has a dependency on packet >= 1.0. - create, destroy, update, start, stop, and reboot a Packet Host machine. When the machine is created it can optionally wait for it to have an
IP address before returning. This module has a dependency on packet >= 1.0.
- API is documented at U(https://www.packet.net/help/api/#page:devices,header:devices-devices-post). - API is documented at U(https://www.packet.net/help/api/#page:devices,header:devices-devices-post).
version_added: 2.3 version_added: 2.3
@ -157,7 +158,7 @@ EXAMPLES = '''
user_data: | user_data: |
#cloud-config #cloud-config
ssh_authorized_keys: ssh_authorized_keys:
- ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2 - {{ lookup('file', 'my_packet_sshkey') }}
coreos: coreos:
etcd: etcd:
discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3 discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
@ -207,7 +208,7 @@ devices:
type: array type: array
sample: '[{"hostname": "my-server.com", "id": "server-id", "public-ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12", "public-ipv6": ""2604:1380:2:5200::3"}]' sample: '[{"hostname": "my-server.com", "id": "server-id", "public-ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12", "public-ipv6": ""2604:1380:2:5200::3"}]'
returned: always returned: always
''' ''' # NOQA
import os import os

View file

@ -69,7 +69,7 @@ EXAMPLES = '''
hosts: localhost hosts: localhost
tasks: tasks:
packet_sshkey: packet_sshkey:
key: ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2 key: "{{ lookup('file', 'my_packet_sshkey.pub') }}"
- name: create sshkey from file - name: create sshkey from file
hosts: localhost hosts: localhost
@ -104,7 +104,7 @@ sshkeys:
} }
] ]
returned: always returned: always
''' ''' # NOQA
import os import os
import uuid import uuid

View file

@ -24,7 +24,8 @@ DOCUMENTATION = '''
module: profitbricks module: profitbricks
short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine. short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine.
description: description:
- Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0 - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait
for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0
version_added: "2.0" version_added: "2.0"
options: options:
auto_increment: auto_increment:

View file

@ -24,7 +24,8 @@ DOCUMENTATION = '''
module: profitbricks_datacenter module: profitbricks_datacenter
short_description: Create or destroy a ProfitBricks Virtual Datacenter. short_description: Create or destroy a ProfitBricks Virtual Datacenter.
description: description:
- This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency on profitbricks >= 1.0.0 - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
on profitbricks >= 1.0.0
version_added: "2.0" version_added: "2.0"
options: options:
name: name:

View file

@ -240,13 +240,14 @@ import time
#TODO: get this info from API #TODO: get this info from API
STATES = ['present', 'absent'] STATES = ['present', 'absent']
DATACENTERS = ['ams01','ams03','che01','dal01','dal05','dal06','dal09','dal10','fra02','hkg02','hou02','lon02','mel01','mex01','mil01','mon01','osl01','par01','sjc01','sjc03','sao01','sea01','sng01','syd01','tok02','tor01','wdc01','wdc04'] DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'fra02', 'hkg02', 'hou02', 'lon02', 'mel01', 'mex01', 'mil01', 'mon01',
CPU_SIZES = [1,2,4,8,16,32,56] 'osl01', 'par01', 'sjc01', 'sjc03', 'sao01', 'sea01', 'sng01', 'syd01', 'tok02', 'tor01', 'wdc01', 'wdc04']
MEMORY_SIZES = [1024,2048,4096,6144,8192,12288,16384,32768,49152,65536,131072,247808] CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
INITIALDISK_SIZES = [25,100] MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
LOCALDISK_SIZES = [25,100,150,200,300] INITIALDISK_SIZES = [25, 100]
SANDISK_SIZES = [10,20,25,30,40,50,75,100,125,150,175,200,250,300,350,400,500,750,1000,1500,2000] LOCALDISK_SIZES = [25, 100, 150, 200, 300]
NIC_SPEEDS = [10,100,1000] SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000]
NIC_SPEEDS = [10, 100, 1000]
try: try:
import SoftLayer import SoftLayer
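Alongside the DATACENTERS wrap, this hunk normalizes comma spacing in the constant lists. A tiny sketch of the resulting list style, breaking after a comma with the continuation aligned inside the brackets (values excerpted from the list above):

    CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
    DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09',
                   'dal10', 'fra02', 'hkg02']
    assert len(DATACENTERS) == 10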

View file

@@ -82,7 +82,9 @@ options:
     description:
       - "Set the guest ID (Debian, RHEL, Windows...)"
       - "This field is required when creating a VM"
-      - "Valid values are referenced here: https://www.vmware.com/support/developer/converter-sdk/conv55_apireference/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html"
+      - >
+           Valid values are referenced here:
+           https://www.vmware.com/support/developer/converter-sdk/conv55_apireference/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html
     version_added: "2.3"
   disk:
     description:
@@ -675,7 +677,9 @@ class PyVmomiHelper(object):
                 # VDS switch
                 pg_obj = get_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_devices[key]['name'])
-                if nic.device.backing and ( nic.device.backing.port.portgroupKey != pg_obj.key or nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid ):
+                if (nic.device.backing and
+                        (nic.device.backing.port.portgroupKey != pg_obj.key or
+                         nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid)):
                     nic_change_detected = True
                     dvs_port_connection = vim.dvs.PortConnection()
@@ -792,7 +796,8 @@ class PyVmomiHelper(object):
             if 'joindomain' in self.params['customization']:
                 if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
-                    self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use joindomain feature")
+                    self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
+                                              "joindomain feature")
                 ident.identification.domainAdmin = str(self.params['customization'].get('domainadmin'))
                 ident.identification.joinDomain = str(self.params['customization'].get('joindomain'))
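The fail_json wrap in the last hunk uses implicit string literal concatenation: adjacent literals are joined at compile time, so a long message can break across lines inside the call's parentheses with no '+'. A minimal sketch (message shortened):

    # Adjacent string literals merge into a single constant at compile time;
    # mind the trailing space on the first fragment so words do not run together.
    msg = ("'domainadmin' and 'domainadminpassword' entries are mandatory "
           "in 'customization' section to use joindomain feature")

    assert '\n' not in msg
    print(msg)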


@@ -75,18 +75,21 @@ options:
     default: None
   esxi:
     description:
-      - Dictionary which includes datacenter and hostname on which the VM should be created. For standalone ESXi hosts, ha-datacenter should be used as the datacenter name
+      - Dictionary which includes datacenter and hostname on which the VM should be created. For standalone ESXi hosts, ha-datacenter should be used as the
+        datacenter name
     required: false
     default: null
   state:
     description:
-      - Indicate desired state of the vm. 'reconfigured' only applies changes to 'vm_cdrom', 'memory_mb', and 'num_cpus' in vm_hardware parameter. The 'memory_mb' and 'num_cpus' changes are applied to powered-on vms when hot-plugging is enabled for the guest.
+      - Indicate desired state of the vm. 'reconfigured' only applies changes to 'vm_cdrom', 'memory_mb', and 'num_cpus' in vm_hardware parameter.
+        The 'memory_mb' and 'num_cpus' changes are applied to powered-on vms when hot-plugging is enabled for the guest.
     default: present
     choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured']
   from_template:
     version_added: "1.9"
     description:
-      - Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template.
+      - Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). No guest customization changes to hardware
+        such as CPU, RAM, NICs or Disks can be applied when launching from template.
     default: no
     choices: ['yes', 'no']
   template_src:
@@ -128,7 +131,8 @@ options:
     default: null
   vm_hw_version:
     description:
-      - Desired hardware version identifier (for example, "vmx-08" for vms that needs to be managed with vSphere Client). Note that changing hardware version of existing vm is not supported.
+      - Desired hardware version identifier (for example, "vmx-08" for vms that needs to be managed with vSphere Client). Note that changing hardware
+        version of existing vm is not supported.
     required: false
     default: null
     version_added: "1.7"
@@ -1780,7 +1784,8 @@ def main():
     # CONNECT TO THE SERVER
     viserver = VIServer()
     if validate_certs and not hasattr(ssl, 'SSLContext') and not vcenter_hostname.startswith('http://'):
-        module.fail_json(msg='pysphere does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+        module.fail_json(msg='pysphere does not support verifying certificates with python < 2.7.9. Either update python or set '
+                             'validate_certs=False on the task')
     try:
         viserver.connect(vcenter_hostname, username, password)


@@ -41,7 +41,10 @@ description:
 author: Quentin Stafford-Fraser (@quentinsf)
 version_added: "2.0"
 notes:
-    - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+    - >
+      You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+      The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+      your host, you may want to add C(serial: 1) to the plays.
    - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
 options:
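The '- >' rewrite here and in the following webfaction hunks switches the note from a quoted one-line scalar to a YAML folded block scalar, whose newlines fold into spaces when parsed (and which needs no quoting around the embedded 'serial: 1'). A minimal sketch, assuming PyYAML and abbreviated text:

    import yaml

    DOC = """
    notes:
      - >
        You can run playbooks that use this on a local machine,
        or on a Webfaction host, or elsewhere.
    """

    # '>' folds the newlines into spaces; strip() drops the trailing newline.
    print(yaml.safe_load(DOC)['notes'][0].strip())
    # -> You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere.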


@@ -38,7 +38,10 @@ description:
 author: Quentin Stafford-Fraser (@quentinsf)
 version_added: "2.0"
 notes:
-    - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+    - >
+      You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+      The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+      your host, you may want to add C(serial: 1) to the plays.
    - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
 options:


@@ -36,8 +36,12 @@ description:
 author: Quentin Stafford-Fraser (@quentinsf)
 version_added: "2.0"
 notes:
-    - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted.
-    - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+    - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted.
+      If you don't specify subdomains, the domain will be deleted.
+    - >
+      You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+      The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+      your host, you may want to add C(serial: 1) to the plays.
    - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
 options:


@@ -35,7 +35,10 @@ description:
 author: Quentin Stafford-Fraser (@quentinsf)
 version_added: "2.0"
 notes:
-    - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+    - >
+      You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+      The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+      your host, you may want to add C(serial: 1) to the plays.
    - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
 options:


@@ -36,9 +36,13 @@ description:
 author: Quentin Stafford-Fraser (@quentinsf)
 version_added: "2.0"
 notes:
-    - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name.
+    - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
+      address. You can use a DNS name.
     - If a site of the same name exists in the account but on a different host, the operation will exit.
-    - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+    - >
+      You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+      The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+      your host, you may want to add C(serial: 1) to the plays.
    - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
 options:


@@ -393,7 +393,8 @@ def parse_service(module):
         )
     elif module.params.get('service_name') and not module.params.get('service_port'):
-        module.fail_json( msg="service_name supplied but no service_port, a port is required to configure a service. Did you configure the 'port' argument meaning 'service_port'?")
+        module.fail_json(msg="service_name supplied but no service_port, a port is required to configure a service. Did you configure "
+                             "the 'port' argument meaning 'service_port'?")
 class ConsulService():


@@ -51,7 +51,8 @@ options:
     version_added: "1.5"
 notes:
     - It is usually preferable to write Ansible modules than pushing scripts. Convert your script to an Ansible module for bonus points!
-    - The ssh connection plugin will force psuedo-tty allocation via -tt when scripts are executed. psuedo-ttys do not have a stderr channel and all stderr is sent to stdout. If you depend on separated stdout and stderr result keys, please switch to a copy+command set of tasks instead of using script.
+    - The ssh connection plugin will force psuedo-tty allocation via -tt when scripts are executed. psuedo-ttys do not have a stderr channel and all
+      stderr is sent to stdout. If you depend on separated stdout and stderr result keys, please switch to a copy+command set of tasks instead of using script.
 author:
     - Ansible Core Team
     - Michael DeHaan


@@ -202,7 +202,9 @@ def alter_retention_policy(module, client, retention_policy):
     elif duration == 'INF':
         influxdb_duration_format = '0'
-    if not retention_policy['duration'] == influxdb_duration_format or not retention_policy['replicaN'] == int(replication) or not retention_policy['default'] == default:
+    if (not retention_policy['duration'] == influxdb_duration_format or
+            not retention_policy['replicaN'] == int(replication) or
+            not retention_policy['default'] == default):
         if not module.check_mode:
             try:
                 client.alter_retention_policy(policy_name, database_name, duration, replication, default)
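This hunk shows the standard E501 fix for a long condition: wrap the whole test in one pair of parentheses so it may span lines, and indent the continuations an extra level so they read apart from the body. A minimal runnable sketch with made-up values:

    policy = {'duration': '24h0m0s', 'replicaN': 1, 'default': False}
    duration, replication, default = '24h0m0s', 1, False

    # One outer pair of parentheses keeps the logical line open; the extra
    # indent distinguishes the continuations from the indented body below.
    if (not policy['duration'] == duration or
            not policy['replicaN'] == int(replication) or
            not policy['default'] == default):
        print('alter needed')
    else:
        print('in sync')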


@@ -94,7 +94,10 @@ options:
     roles:
         version_added: "1.3"
         description:
-            - "The database user roles valid values could either be one or more of the following strings: 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'"
+            - >
+              The database user roles valid values could either be one or more of the following strings:
+              'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase',
+              'dbAdminAnyDatabase'
             - "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
             - "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
         required: false


@@ -71,7 +71,8 @@ options:
     required: false
   autocommit:
     description:
-      - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since some content can't be changed within a transaction.
+      - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since some content can't be changed
+        within a transaction.
     required: false
     default: false
     choices: [ "false", "true" ]
@@ -198,7 +199,8 @@ def main():
         errno, errstr = e.args
         module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
     else:
-        module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your @sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
+        module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
+                             "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
     conn.autocommit(True)
     changed = False


@@ -137,7 +137,8 @@ def db_delete(cursor, db):
     cursor.execute(query)
     return True
-def db_dump(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None, single_transaction=None, quick=None):
+def db_dump(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None,
+            single_transaction=None, quick=None):
     cmd = module.get_bin_path('mysqldump', True)
     # If defined, mysqldump demands --defaults-extra-file be the first option
     if config_file:
@@ -312,7 +313,8 @@ def main():
     except Exception:
         e = get_exception()
         if os.path.exists(config_file):
-            module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
+            module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
+                                 "Exception message: %s" % (config_file, e))
         else:
             module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e))


@@ -39,7 +39,8 @@ author: "Balazs Pocze (@banyek)"
 options:
     mode:
         description:
-            - module operating mode. Could be getslave (SHOW SLAVE STATUS), getmaster (SHOW MASTER STATUS), changemaster (CHANGE MASTER TO), startslave (START SLAVE), stopslave (STOP SLAVE), resetslave (RESET SLAVE), resetslaveall (RESET SLAVE ALL)
+            - module operating mode. Could be getslave (SHOW SLAVE STATUS), getmaster (SHOW MASTER STATUS), changemaster (CHANGE MASTER TO), startslave
+              (START SLAVE), stopslave (STOP SLAVE), resetslave (RESET SLAVE), resetslaveall (RESET SLAVE ALL)
         required: False
         choices:
            - getslave
@@ -267,7 +268,8 @@ def main():
     except Exception:
         e = get_exception()
         if os.path.exists(config_file):
-            module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
+            module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
+                                 "Exception message: %s" % (config_file, e))
         else:
             module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e))


@@ -597,7 +597,8 @@ def main():
                                connect_timeout=connect_timeout)
     except Exception:
         e = get_exception()
-        module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
+        module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
+                             "Exception message: %s" % (config_file, e))
     if not sql_log_bin:
         cursor.execute("SET SQL_LOG_BIN=0;")

Some files were not shown because too many files have changed in this diff