Mirror of https://github.com/ansible-collections/community.general.git (synced 2025-07-22 21:00:22 -07:00)

Commit cd2c140f69: forwarded docker_extra_args to latest upstream/origin/devel
421 changed files with 22824 additions and 4800 deletions
@@ -316,6 +316,7 @@ class PluginLoader:
def get(self, name, *args, **kwargs):
''' instantiates a plugin of the given name using arguments '''

class_only = kwargs.pop('class_only', False)
if name in self.aliases:
name = self.aliases[name]
path = self.find_plugin(name)
@@ -325,40 +326,60 @@ class PluginLoader:
if path not in self._module_cache:
self._module_cache[path] = self._load_module_source('.'.join([self.package, name]), path)

if kwargs.get('class_only', False):
obj = getattr(self._module_cache[path], self.class_name)
else:
obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]:
obj = getattr(self._module_cache[path], self.class_name)
if self.base_class:
# The import path is hardcoded and should be the right place,
# so we are not expecting an ImportError.
module = __import__(self.package, fromlist=[self.base_class])
# Check whether this obj has the required base class.
try:
plugin_class = getattr(module, self.base_class)
except AttributeError:
return None
if not issubclass(obj, plugin_class):
return None

if not class_only:
obj = obj(*args, **kwargs)

return obj

def all(self, *args, **kwargs):
''' instantiates all plugins with the same arguments '''

class_only = kwargs.pop('class_only', False)
all_matches = []

for i in self._get_paths():
matches = glob.glob(os.path.join(i, "*.py"))
matches.sort()
for path in matches:
name, _ = os.path.splitext(path)
if '__init__' in name:
continue
all_matches.extend(glob.glob(os.path.join(i, "*.py")))

if path not in self._module_cache:
self._module_cache[path] = self._load_module_source(name, path)
for path in sorted(all_matches, key=lambda match: os.path.basename(match)):
name, _ = os.path.splitext(path)
if '__init__' in name:
continue

if kwargs.get('class_only', False):
obj = getattr(self._module_cache[path], self.class_name)
else:
obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
if path not in self._module_cache:
self._module_cache[path] = self._load_module_source(name, path)

if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]:
continue
obj = getattr(self._module_cache[path], self.class_name)
if self.base_class:
# The import path is hardcoded and should be the right place,
# so we are not expecting an ImportError.
module = __import__(self.package, fromlist=[self.base_class])
# Check whether this obj has the required base class.
try:
plugin_class = getattr(module, self.base_class)
except AttributeError:
continue
if not issubclass(obj, plugin_class):
continue

# set extra info on the module, in case we want it later
setattr(obj, '_original_path', path)
yield obj
if not class_only:
obj = obj(*args, **kwargs)

# set extra info on the module, in case we want it later
setattr(obj, '_original_path', path)
yield obj

action_loader = PluginLoader(
'ActionModule',
@@ -444,7 +465,7 @@ fragment_loader = PluginLoader(
strategy_loader = PluginLoader(
'StrategyModule',
'ansible.plugins.strategy',
None,
C.DEFAULT_STRATEGY_PLUGIN_PATH,
'strategy_plugins',
required_base_class='StrategyBase',
)

@@ -24,6 +24,7 @@ import json
import os
import pipes
import random
import re
import stat
import tempfile
import time
@@ -119,7 +120,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type)
if module_path:
break
else:
else:  # This is a for-else: http://bit.ly/1ElPkyg
# Use Windows version of ping module to check module paths when
# using a connection that supports .ps1 suffixes. We check specifically
# for win_ping here, otherwise the code would look for ping.ps1
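For reference, the for-else construct the comment above points at runs the else branch only when the loop finishes without hitting break; a minimal standalone illustration with made-up file names, not part of the commit:

    # for-else: the else block runs only if the loop was never broken out of.
    candidates = ['ping.py', 'setup.py']   # hypothetical module files
    wanted = 'win_ping.ps1'

    for name in candidates:
        if name == wanted:
            print('found %s' % name)
            break
    else:
        # no candidate matched, so no break happened
        print('%s not found, trying the fallback search' % wanted)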
@@ -151,14 +152,19 @@ class ActionBase(with_metaclass(ABCMeta, object)):
if not isinstance(environments, list):
environments = [ environments ]

# the environments as inherited need to be reversed, to make
# sure we merge in the parent's values first so those in the
# block then task 'win' in precedence
environments.reverse()
for environment in environments:
if environment is None:
continue
if not isinstance(environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (environment, type(environment)))
temp_environment = self._templar.template(environment)
if not isinstance(temp_environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
# very deliberately using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
final_environment.update(environment)
final_environment.update(temp_environment)

final_environment = self._templar.template(final_environment)
return self._connection._shell.env_prefix(**final_environment)
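The reverse-then-update ordering above makes the most specific environment win, because the inherited list arrives task-first and is merged parent-first. A small standalone illustration with made-up values, not part of the commit:

    # Inherited environments arrive most-specific first (task, block, play...),
    # so they are reversed before merging; later update() calls win.
    inherited = [{'PATH': '/task/bin'},                    # task level (most specific)
                 {'PATH': '/block/bin', 'LANG': 'C'}]      # block/play level

    final_environment = {}
    for environment in reversed(inherited):
        final_environment.update(environment)

    print(final_environment['PATH'])  # '/task/bin' -- the task-level value wins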
@@ -186,7 +192,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
return True
return False

def _make_tmp_path(self):
def _make_tmp_path(self, remote_user):
'''
Create and return a temporary path on a remote box.
'''
@@ -194,17 +200,13 @@ class ActionBase(with_metaclass(ABCMeta, object)):
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
use_system_tmp = False

if self._play_context.become and self._play_context.become_user != 'root':
if self._play_context.become and self._play_context.become_user not in ('root', remote_user):
use_system_tmp = True

tmp_mode = None
if self._play_context.remote_user != 'root' or self._play_context.become and self._play_context.become_user != 'root':
tmp_mode = 0o755
tmp_mode = 0o700

cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
display.debug("executing _low_level_execute_command to create the tmp path")
result = self._low_level_execute_command(cmd, sudoable=False)
display.debug("done with creation of tmp path")

# error handling on this seems a little aggressive?
if result['rc'] != 0:
@@ -249,9 +251,11 @@ class ActionBase(with_metaclass(ABCMeta, object)):
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
display.debug("calling _low_level_execute_command to remove the tmp path")
self._low_level_execute_command(cmd, sudoable=False)
display.debug("done removing the tmp path")

def _transfer_file(self, local_path, remote_path):
self._connection.put_file(local_path, remote_path)
return remote_path

def _transfer_data(self, remote_path, data):
'''
@ -267,54 +271,140 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
data = to_bytes(data, errors='strict')
|
||||
afo.write(data)
|
||||
except Exception as e:
|
||||
#raise AnsibleError("failure encoding into utf-8: %s" % str(e))
|
||||
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % str(e))
|
||||
|
||||
afo.flush()
|
||||
afo.close()
|
||||
|
||||
try:
|
||||
self._connection.put_file(afile, remote_path)
|
||||
self._transfer_file(afile, remote_path)
|
||||
finally:
|
||||
os.unlink(afile)
|
||||
|
||||
return remote_path
|
||||
|
||||
def _remote_chmod(self, mode, path, sudoable=False):
|
||||
def _fixup_perms(self, remote_path, remote_user, execute=False, recursive=True):
|
||||
"""
|
||||
If the become_user is unprivileged and different from the
|
||||
remote_user then we need to make the files we've uploaded readable by them.
|
||||
"""
|
||||
if remote_path is None:
|
||||
# Sometimes code calls us naively -- it has a var which could
|
||||
# contain a path to a tmp dir but doesn't know if it needs to
|
||||
# exist or not. If there's no path, then there's no need for us
|
||||
# to do work
|
||||
self._display.debug('_fixup_perms called with remote_path==None. Sure this is correct?')
|
||||
return remote_path
|
||||
|
||||
if self._play_context.become and self._play_context.become_user not in ('root', remote_user):
|
||||
# Unprivileged user that's different than the ssh user. Let's get
|
||||
# to work!
|
||||
if remote_user == 'root':
|
||||
# SSh'ing as root, therefore we can chown
|
||||
self._remote_chown(remote_path, self._play_context.become_user, recursive=recursive)
|
||||
if execute:
|
||||
# root can read things that don't have read bit but can't
|
||||
# execute them.
|
||||
self._remote_chmod('u+x', remote_path, recursive=recursive)
|
||||
else:
|
||||
if execute:
|
||||
mode = 'rx'
|
||||
else:
|
||||
mode = 'rX'
|
||||
# Try to use fs acls to solve this problem
|
||||
res = self._remote_set_user_facl(remote_path, self._play_context.become_user, mode, recursive=recursive, sudoable=False)
|
||||
if res['rc'] != 0:
|
||||
if C.ALLOW_WORLD_READABLE_TMPFILES:
|
||||
# fs acls failed -- do things this insecure way only
|
||||
# if the user opted in in the config file
|
||||
self._display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user which may be insecure. For information on securing this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
|
||||
self._remote_chmod('a+%s' % mode, remote_path, recursive=recursive)
|
||||
else:
|
||||
raise AnsibleError('Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user. For information on working around this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
|
||||
elif execute:
|
||||
# Can't depend on the file being transferred with execute
|
||||
# permissions. Only need user perms because no become was
|
||||
# used here
|
||||
self._remote_chmod('u+x', remote_path, recursive=recursive)
|
||||
|
||||
return remote_path
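A condensed standalone sketch of the decision tree _fixup_perms implements above; the chown/chmod/setfacl callables here are hypothetical stand-ins for the remote-command helpers, not the real API, and the sketch is illustrative only:

    def fixup_perms_sketch(path, remote_user, become_user, execute,
                           allow_world_readable, chown, chmod, setfacl):
        # Only needed when becoming an unprivileged user other than the login user.
        if become_user not in ('root', remote_user):
            if remote_user == 'root':
                chown(path, become_user)           # root can simply hand the files over
                if execute:
                    chmod('u+x', path)             # root can read, but not execute, files without +x
            else:
                mode = 'rx' if execute else 'rX'
                if setfacl(path, become_user, mode) != 0:    # preferred: POSIX ACLs
                    if allow_world_readable:
                        chmod('a+%s' % mode, path)           # opt-in, world-readable fallback
                    else:
                        raise RuntimeError('cannot make temp files readable by the become user')
        elif execute:
            chmod('u+x', path)                     # otherwise only the connecting user needs +x
        return path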
|
||||
|
||||
def _remote_chmod(self, mode, path, recursive=True, sudoable=False):
|
||||
'''
|
||||
Issue a remote chmod command
|
||||
'''
|
||||
|
||||
cmd = self._connection._shell.chmod(mode, path)
|
||||
display.debug("calling _low_level_execute_command to chmod the remote path")
|
||||
cmd = self._connection._shell.chmod(mode, path, recursive=recursive)
|
||||
res = self._low_level_execute_command(cmd, sudoable=sudoable)
|
||||
display.debug("done with chmod call")
|
||||
return res
|
||||
|
||||
def _remote_chown(self, path, user, group=None, recursive=True, sudoable=False):
|
||||
'''
|
||||
Issue a remote chown command
|
||||
'''
|
||||
cmd = self._connection._shell.chown(path, user, group, recursive=recursive)
|
||||
res = self._low_level_execute_command(cmd, sudoable=sudoable)
|
||||
return res
|
||||
|
||||
def _remote_set_user_facl(self, path, user, mode, recursive=True, sudoable=False):
|
||||
'''
|
||||
Issue a remote call to setfacl
|
||||
'''
|
||||
cmd = self._connection._shell.set_user_facl(path, user, mode, recursive=recursive)
|
||||
res = self._low_level_execute_command(cmd, sudoable=sudoable)
|
||||
return res
|
||||
|
||||
def _execute_remote_stat(self, path, all_vars, follow, tmp=None):
|
||||
'''
|
||||
Get information from remote file.
|
||||
'''
|
||||
module_args=dict(
|
||||
path=path,
|
||||
follow=follow,
|
||||
get_md5=False,
|
||||
get_checksum=True,
|
||||
checksum_algo='sha1',
|
||||
)
|
||||
mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars, tmp=tmp, delete_remote_tmp=(tmp is None))
|
||||
|
||||
if 'failed' in mystat and mystat['failed']:
|
||||
raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, mystat['msg']))
|
||||
|
||||
if not mystat['stat']['exists']:
|
||||
# empty might be matched, 1 should never match, also backwards compatible
|
||||
mystat['stat']['checksum'] = '1'
|
||||
|
||||
# happens sometimes when it is a dir and not on bsd
|
||||
if not 'checksum' in mystat['stat']:
|
||||
mystat['stat']['checksum'] = ''
|
||||
|
||||
return mystat['stat']
|
||||
|
||||
def _remote_checksum(self, path, all_vars):
|
||||
'''
|
||||
Takes a remote checksum and returns 1 if no file
|
||||
Produces a remote checksum given a path,
|
||||
Returns a number 0-4 for specific errors instead of checksum, also ensures it is different
|
||||
0 = unknown error
|
||||
1 = file does not exist, this might not be an error
|
||||
2 = permissions issue
|
||||
3 = its a directory, not a file
|
||||
4 = stat module failed, likely due to not finding python
|
||||
'''
|
||||
|
||||
python_interp = all_vars.get('ansible_python_interpreter', 'python')
|
||||
|
||||
cmd = self._connection._shell.checksum(path, python_interp)
|
||||
display.debug("calling _low_level_execute_command to get the remote checksum")
|
||||
data = self._low_level_execute_command(cmd, sudoable=True)
|
||||
display.debug("done getting the remote checksum")
|
||||
x = "0" # unknown error has occured
|
||||
try:
|
||||
data2 = data['stdout'].strip().splitlines()[-1]
|
||||
if data2 == u'':
|
||||
# this may happen if the connection to the remote server
|
||||
# failed, so just return "INVALIDCHECKSUM" to avoid errors
|
||||
return "INVALIDCHECKSUM"
|
||||
remote_stat = self._execute_remote_stat(path, all_vars, follow=False)
|
||||
if remote_stat['exists'] and remote_stat['isdir']:
|
||||
x = "3" # its a directory not a file
|
||||
else:
|
||||
return data2.split()[0]
|
||||
except IndexError:
|
||||
display.warning(u"Calculating checksum failed unusually, please report this to "
|
||||
u"the list so it can be fixed\ncommand: %s\n----\noutput: %s\n----\n" % (to_unicode(cmd), data))
|
||||
# this will signal that it changed and allow things to keep going
|
||||
return "INVALIDCHECKSUM"
|
||||
x = remote_stat['checksum'] # if 1, file is missing
|
||||
except AnsibleError as e:
|
||||
errormsg = to_unicode(e)
|
||||
if errormsg.endswith('Permission denied'):
|
||||
x = "2" # cannot read file
|
||||
elif errormsg.endswith('MODULE FAILURE'):
|
||||
x = "4" # python not found or module uncaught exception
|
||||
finally:
|
||||
return x
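For reference, the sentinel return values documented in the docstring above can be read as follows; this lookup helper is an illustration, not code from the commit:

    # Anything not in this table is an actual SHA-1 checksum string.
    CHECKSUM_SENTINELS = {
        '0': 'unknown error',
        '1': 'file does not exist (may be expected)',
        '2': 'permission denied while reading the file',
        '3': 'path is a directory, not a file',
        '4': 'stat module failed, python probably missing on the target',
    }

    def describe_checksum(value):
        return CHECKSUM_SENTINELS.get(value, 'sha1 checksum: %s' % value)

    print(describe_checksum('3'))         # path is a directory, not a file
    print(describe_checksum('da39a3ee'))  # sha1 checksum: da39a3ee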
|
||||
|
||||
|
||||
def _remote_expand_user(self, path):
|
||||
''' takes a remote path and performs tilde expansion on the remote host '''
|
||||
|
@ -329,9 +419,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
expand_path = '~%s' % self._play_context.become_user
|
||||
|
||||
cmd = self._connection._shell.expand_user(expand_path)
|
||||
display.debug("calling _low_level_execute_command to expand the remote user path")
|
||||
data = self._low_level_execute_command(cmd, sudoable=False)
|
||||
display.debug("done expanding the remote user path")
|
||||
#initial_fragment = utils.last_non_blank_line(data['stdout'])
|
||||
initial_fragment = data['stdout'].strip().splitlines()[-1]
|
||||
|
||||
|
@ -361,6 +449,14 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
|
||||
return data[idx:]
|
||||
|
||||
def _strip_success_message(self, data):
|
||||
'''
|
||||
Removes the BECOME-SUCCESS message from the data.
|
||||
'''
|
||||
if data.strip().startswith('BECOME-SUCCESS-'):
|
||||
data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
|
||||
return data
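A quick standalone check of what the substitution above removes; the marker suffix is random in practice and made up here:

    import re

    raw = '\r\nBECOME-SUCCESS-abc123\r\nhello from the module\n'

    if raw.strip().startswith('BECOME-SUCCESS-'):
        raw = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', raw)

    print(repr(raw))  # 'hello from the module\n'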
|
||||
|
||||
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
|
||||
'''
|
||||
Transfer and run a module along with its arguments.
|
||||
|
@ -376,33 +472,42 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
module_args = self._task.args
|
||||
|
||||
# set check mode in the module arguments, if required
|
||||
if self._play_context.check_mode and not self._task.always_run:
|
||||
if self._play_context.check_mode:
|
||||
if not self._supports_check_mode:
|
||||
raise AnsibleError("check mode is not supported for this operation")
|
||||
module_args['_ansible_check_mode'] = True
|
||||
else:
|
||||
module_args['_ansible_check_mode'] = False
|
||||
|
||||
# Get the connection user for permission checks
|
||||
remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
|
||||
|
||||
# set no log in the module arguments, if required
|
||||
if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG:
|
||||
module_args['_ansible_no_log'] = True
|
||||
module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG
|
||||
|
||||
# set debug in the module arguments, if required
|
||||
if C.DEFAULT_DEBUG:
|
||||
module_args['_ansible_debug'] = True
|
||||
module_args['_ansible_debug'] = C.DEFAULT_DEBUG
|
||||
|
||||
# let module know we are in diff mode
|
||||
module_args['_ansible_diff'] = self._play_context.diff
|
||||
|
||||
# let module know our verbosity
|
||||
module_args['_ansible_verbosity'] = self._display.verbosity
|
||||
|
||||
(module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
|
||||
if not shebang:
|
||||
raise AnsibleError("module is missing interpreter line")
|
||||
raise AnsibleError("module (%s) is missing interpreter line" % module_name)
|
||||
|
||||
# a remote tmp path may be necessary and not already created
|
||||
remote_module_path = None
|
||||
args_file_path = None
|
||||
if not tmp and self._late_needs_tmp_path(tmp, module_style):
|
||||
tmp = self._make_tmp_path()
|
||||
tmp = self._make_tmp_path(remote_user)
|
||||
|
||||
if tmp:
|
||||
remote_module_filename = self._connection._shell.get_remote_filename(module_name)
|
||||
remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
|
||||
if module_style == 'old':
|
||||
if module_style in ['old', 'non_native_want_json']:
|
||||
# we'll also need a temp file to hold our module arguments
|
||||
args_file_path = self._connection._shell.join_path(tmp, 'args')
|
||||
|
||||
|
@ -416,18 +521,20 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
for k,v in iteritems(module_args):
|
||||
args_data += '%s="%s" ' % (k, pipes.quote(text_type(v)))
|
||||
self._transfer_data(args_file_path, args_data)
|
||||
elif module_style == 'non_native_want_json':
|
||||
self._transfer_data(args_file_path, json.dumps(module_args))
|
||||
display.debug("done transferring module to remote")
|
||||
|
||||
environment_string = self._compute_environment_string()
|
||||
|
||||
if tmp and "tmp" in tmp and self._play_context.become and self._play_context.become_user != 'root':
|
||||
# deal with possible umask issues once sudo'ed to other user
|
||||
self._remote_chmod('a+r', remote_module_path)
|
||||
# Fix permissions of the tmp path and tmp files. This should be
|
||||
# called after all files have been transferred.
|
||||
self._fixup_perms(tmp, remote_user, recursive=True)
|
||||
|
||||
cmd = ""
|
||||
in_data = None
|
||||
|
||||
if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES:
|
||||
if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
|
||||
in_data = module_data
|
||||
else:
|
||||
if remote_module_path:
|
||||
|
@ -448,9 +555,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
# specified in the play, not the sudo_user
|
||||
sudoable = False
|
||||
|
||||
display.debug("calling _low_level_execute_command() for command %s" % cmd)
|
||||
res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
|
||||
display.debug("_low_level_execute_command returned ok")
|
||||
|
||||
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
|
||||
if self._play_context.become and self._play_context.become_user != 'root':
|
||||
|
@ -464,12 +569,12 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
except ValueError:
|
||||
# not valid json, lets try to capture error
|
||||
data = dict(failed=True, parsed=False)
|
||||
if 'stderr' in res and res['stderr'].startswith(u'Traceback'):
|
||||
data['exception'] = res['stderr']
|
||||
else:
|
||||
data['msg'] = res.get('stdout', u'')
|
||||
if 'stderr' in res:
|
||||
data['msg'] += res['stderr']
|
||||
data['msg'] = "MODULE FAILURE"
|
||||
data['module_stdout'] = res.get('stdout', u'')
|
||||
if 'stderr' in res:
|
||||
data['module_stderr'] = res['stderr']
|
||||
if res['stderr'].startswith(u'Traceback'):
|
||||
data['exception'] = res['stderr']
|
||||
|
||||
# pre-split stdout into lines, if stdout is in the data and there
|
||||
# isn't already a stdout_lines value there
|
||||
|
@ -479,8 +584,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
|
||||
return data
|
||||
|
||||
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None,
|
||||
executable=None, encoding_errors='replace'):
|
||||
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='replace'):
|
||||
'''
|
||||
This is the function which executes the low level shell command, which
|
||||
may be commands to create/remove directories for temporary files, or to
|
||||
|
@ -495,24 +599,25 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
replacement strategy (python3 could use surrogateescape)
|
||||
'''
|
||||
|
||||
if executable is not None:
|
||||
cmd = executable + ' -c ' + cmd
|
||||
|
||||
display.debug("in _low_level_execute_command() (%s)" % (cmd,))
|
||||
display.debug("_low_level_execute_command(): starting")
|
||||
if not cmd:
|
||||
# this can happen with powershell modules when there is no analog to a Windows command (like chmod)
|
||||
display.debug("no command, exiting _low_level_execute_command()")
|
||||
display.debug("_low_level_execute_command(): no command, exiting")
|
||||
return dict(stdout='', stderr='')
|
||||
|
||||
allow_same_user = C.BECOME_ALLOW_SAME_USER
|
||||
same_user = self._play_context.become_user == self._play_context.remote_user
|
||||
if sudoable and self._play_context.become and (allow_same_user or not same_user):
|
||||
display.debug("using become for this command")
|
||||
display.debug("_low_level_execute_command(): using become for this command")
|
||||
cmd = self._play_context.make_become_cmd(cmd, executable=executable)
|
||||
|
||||
display.debug("executing the command %s through the connection" % cmd)
|
||||
if self._connection.allow_executable:
|
||||
if executable is None:
|
||||
executable = self._play_context.executable
|
||||
cmd = executable + ' -c ' + pipes.quote(cmd)
|
||||
|
||||
display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
|
||||
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
||||
display.debug("command execution done: rc=%s" % (rc))
|
||||
|
||||
# stdout and stderr may be either a file-like or a bytes object.
|
||||
# Convert either one to a text type
|
||||
|
@ -530,11 +635,13 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
else:
|
||||
err = stderr
|
||||
|
||||
display.debug("stdout=%s, stderr=%s" % (stdout, stderr))
|
||||
display.debug("done with _low_level_execute_command() (%s)" % (cmd,))
|
||||
if rc is None:
|
||||
rc = 0
|
||||
|
||||
# be sure to remove the BECOME-SUCCESS message now
|
||||
out = self._strip_success_message(out)
|
||||
|
||||
display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr))
|
||||
return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)
|
||||
|
||||
def _get_first_available_file(self, faf, of=None, searchdir='files'):
|
||||
|
@ -572,7 +679,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
diff['before'] = ''
|
||||
elif peek_result['appears_binary']:
|
||||
diff['dst_binary'] = 1
|
||||
elif peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
|
||||
elif C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
|
||||
diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
|
||||
else:
|
||||
display.debug("Slurping the file %s" % source)
|
||||
|
@ -587,23 +694,31 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
diff['before'] = dest_contents
|
||||
|
||||
if source_file:
|
||||
display.debug("Reading local copy of the file %s" % source)
|
||||
try:
|
||||
src = open(source)
|
||||
src_contents = src.read(8192)
|
||||
st = os.stat(source)
|
||||
except Exception as e:
|
||||
raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e)))
|
||||
if "\x00" in src_contents:
|
||||
diff['src_binary'] = 1
|
||||
elif st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
|
||||
st = os.stat(source)
|
||||
if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
|
||||
diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
|
||||
else:
|
||||
diff['after_header'] = source
|
||||
diff['after'] = src_contents
|
||||
display.debug("Reading local copy of the file %s" % source)
|
||||
try:
|
||||
src = open(source)
|
||||
src_contents = src.read()
|
||||
except Exception as e:
|
||||
raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e)))
|
||||
|
||||
if "\x00" in src_contents:
|
||||
diff['src_binary'] = 1
|
||||
else:
|
||||
diff['after_header'] = source
|
||||
diff['after'] = src_contents
|
||||
else:
|
||||
display.debug("source of file passed in")
|
||||
diff['after_header'] = 'dynamically generated'
|
||||
diff['after'] = source
|
||||
|
||||
if self._play_context.no_log:
|
||||
if 'before' in diff:
|
||||
diff["before"] = ""
|
||||
if 'after' in diff:
|
||||
diff["after"] = " [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]"
|
||||
|
||||
return diff
|
||||
|
|
|
@ -53,9 +53,13 @@ class ActionModule(ActionBase):
|
|||
new_name = self._task.args.get('name', self._task.args.get('hostname', None))
|
||||
display.vv("creating host via 'add_host': hostname=%s" % new_name)
|
||||
|
||||
name, port = parse_address(new_name, allow_ranges=False)
|
||||
if not name:
|
||||
raise AnsibleError("Invalid inventory hostname: %s" % new_name)
|
||||
try:
|
||||
name, port = parse_address(new_name, allow_ranges=False)
|
||||
except:
|
||||
# not a parsable hostname, but might still be usable
|
||||
name = new_name
|
||||
port = None
|
||||
|
||||
if port:
|
||||
self._task.args['ansible_ssh_port'] = port
|
||||
|
||||
|
|
|
@ -89,6 +89,7 @@ class ActionModule(ActionBase):
|
|||
delimiter = self._task.args.get('delimiter', None)
|
||||
remote_src = self._task.args.get('remote_src', 'yes')
|
||||
regexp = self._task.args.get('regexp', None)
|
||||
follow = self._task.args.get('follow', False)
|
||||
ignore_hidden = self._task.args.get('ignore_hidden', False)
|
||||
|
||||
if src is None or dest is None:
|
||||
|
@ -96,10 +97,17 @@ class ActionModule(ActionBase):
|
|||
result['msg'] = "src and dest are required"
|
||||
return result
|
||||
|
||||
if boolean(remote_src):
|
||||
result.update(self._execute_module(tmp=tmp, task_vars=task_vars))
|
||||
return result
|
||||
cleanup_remote_tmp = False
|
||||
remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
|
||||
if not tmp:
|
||||
tmp = self._make_tmp_path(remote_user)
|
||||
cleanup_remote_tmp = True
|
||||
|
||||
if boolean(remote_src):
|
||||
result.update(self._execute_module(tmp=tmp, task_vars=task_vars, delete_remote_tmp=False))
|
||||
if cleanup_remote_tmp:
|
||||
self._remove_tmp_path(tmp)
|
||||
return result
|
||||
elif self._task._role is not None:
|
||||
src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src)
|
||||
else:
|
||||
|
@ -109,51 +117,56 @@ class ActionModule(ActionBase):
|
|||
if regexp is not None:
|
||||
_re = re.compile(regexp)
|
||||
|
||||
if not os.path.isdir(src):
|
||||
result['failed'] = True
|
||||
result['msg'] = "Source (%s) is not a directory" % src
|
||||
return result
|
||||
|
||||
# Does all work assembling the file
|
||||
path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden)
|
||||
|
||||
path_checksum = checksum_s(path)
|
||||
dest = self._remote_expand_user(dest)
|
||||
remote_checksum = self._remote_checksum(dest, all_vars=task_vars)
|
||||
dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow, tmp=tmp)
|
||||
|
||||
diff = {}
|
||||
if path_checksum != remote_checksum:
|
||||
resultant = file(path).read()
|
||||
|
||||
# setup args for running modules
|
||||
new_module_args = self._task.args.copy()
|
||||
|
||||
# clean assemble specific options
|
||||
for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden']:
|
||||
if opt in new_module_args:
|
||||
del new_module_args[opt]
|
||||
|
||||
new_module_args.update(
|
||||
dict(
|
||||
dest=dest,
|
||||
original_basename=os.path.basename(src),
|
||||
)
|
||||
)
|
||||
|
||||
if path_checksum != dest_stat['checksum']:
|
||||
|
||||
if self._play_context.diff:
|
||||
diff = self._get_diff_data(dest, path, task_vars)
|
||||
|
||||
xfered = self._transfer_data('src', resultant)
|
||||
remote_path = self._connection._shell.join_path(tmp, 'src')
|
||||
xfered = self._transfer_file(path, remote_path)
|
||||
|
||||
# fix file permissions when the copy is done as a different user
|
||||
if self._play_context.become and self._play_context.become_user != 'root':
|
||||
self._remote_chmod('a+r', xfered)
|
||||
self._fixup_perms(tmp, remote_user, recursive=True)
|
||||
|
||||
# run the copy module
|
||||
new_module_args.update( dict( src=xfered,))
|
||||
|
||||
new_module_args = self._task.args.copy()
|
||||
new_module_args.update(
|
||||
dict(
|
||||
src=xfered,
|
||||
dest=dest,
|
||||
original_basename=os.path.basename(src),
|
||||
)
|
||||
)
|
||||
|
||||
res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp)
|
||||
res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False)
|
||||
if diff:
|
||||
res['diff'] = diff
|
||||
result.update(res)
|
||||
return result
|
||||
else:
|
||||
new_module_args = self._task.args.copy()
|
||||
new_module_args.update(
|
||||
dict(
|
||||
src=xfered,
|
||||
dest=dest,
|
||||
original_basename=os.path.basename(src),
|
||||
)
|
||||
)
|
||||
result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False))
|
||||
|
||||
result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp))
|
||||
return result
|
||||
if tmp and cleanup_remote_tmp:
|
||||
self._remove_tmp_path(tmp)
|
||||
|
||||
return result
|
||||
|
|
|
@ -38,8 +38,9 @@ class ActionModule(ActionBase):
|
|||
result['msg'] = 'check mode not supported for this module'
|
||||
return result
|
||||
|
||||
remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
|
||||
if not tmp:
|
||||
tmp = self._make_tmp_path()
|
||||
tmp = self._make_tmp_path(remote_user)
|
||||
|
||||
module_name = self._task.action
|
||||
async_module_path = self._connection._shell.join_path(tmp, 'async_wrapper')
|
||||
|
@ -48,21 +49,31 @@ class ActionModule(ActionBase):
|
|||
env_string = self._compute_environment_string()
|
||||
|
||||
module_args = self._task.args.copy()
|
||||
if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG:
|
||||
if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG:
|
||||
module_args['_ansible_no_log'] = True
|
||||
|
||||
# configure, upload, and chmod the target module
|
||||
(module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
|
||||
self._transfer_data(remote_module_path, module_data)
|
||||
self._remote_chmod('a+rx', remote_module_path)
|
||||
|
||||
# configure, upload, and chmod the async_wrapper module
|
||||
(async_module_style, shebang, async_module_data) = self._configure_module(module_name='async_wrapper', module_args=dict(), task_vars=task_vars)
|
||||
self._transfer_data(async_module_path, async_module_data)
|
||||
self._remote_chmod('a+rx', async_module_path)
|
||||
|
||||
argsfile = self._transfer_data(self._connection._shell.join_path(tmp, 'arguments'), json.dumps(module_args))
|
||||
|
||||
self._fixup_perms(tmp, remote_user, execute=True, recursive=True)
|
||||
# Only the following two files need to be executable but we'd have to
|
||||
# make three remote calls if we wanted to just set them executable.
|
||||
# There's not really a problem with marking too many of the temp files
|
||||
# executable so we go ahead and mark them all as executable in the
|
||||
# line above (the line above is needed in any case [although
|
||||
# execute=False is okay if we uncomment the lines below] so that all
|
||||
# the files are readable in case the remote_user and become_user are
|
||||
# different and both unprivileged)
|
||||
#self._fixup_perms(remote_module_path, remote_user, execute=True, recursive=False)
|
||||
#self._fixup_perms(async_module_path, remote_user, execute=True, recursive=False)
|
||||
|
||||
async_limit = self._task.async
|
||||
async_jid = str(random.randint(0, 999999999999))
|
||||
|
||||
|
|
|
@ -46,6 +46,7 @@ class ActionModule(ActionBase):
|
|||
force = boolean(self._task.args.get('force', 'yes'))
|
||||
faf = self._task.first_available_file
|
||||
remote_src = boolean(self._task.args.get('remote_src', False))
|
||||
follow = boolean(self._task.args.get('follow', False))
|
||||
|
||||
if (source is None and content is None and faf is None) or dest is None:
|
||||
result['failed'] = True
|
||||
|
@ -106,7 +107,7 @@ class ActionModule(ActionBase):
|
|||
source_files = []
|
||||
|
||||
# If source is a directory populate our list else source is a file and translate it to a tuple.
|
||||
if os.path.isdir(source):
|
||||
if os.path.isdir(to_bytes(source, errors='strict')):
|
||||
# Get the amount of spaces to remove to get the relative path.
|
||||
if source_trailing_slash:
|
||||
sz = len(source)
|
||||
|
@ -140,9 +141,10 @@ class ActionModule(ActionBase):
|
|||
delete_remote_tmp = (len(source_files) == 1)
|
||||
|
||||
# If this is a recursive action create a tmp path that we can share as the _exec_module create is too late.
|
||||
remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
|
||||
if not delete_remote_tmp:
|
||||
if tmp is None or "-tmp-" not in tmp:
|
||||
tmp = self._make_tmp_path()
|
||||
tmp = self._make_tmp_path(remote_user)
|
||||
|
||||
# expand any user home dir specifier
|
||||
dest = self._remote_expand_user(dest)
|
||||
|
@ -167,11 +169,11 @@ class ActionModule(ActionBase):
|
|||
else:
|
||||
dest_file = self._connection._shell.join_path(dest)
|
||||
|
||||
# Attempt to get the remote checksum
|
||||
remote_checksum = self._remote_checksum(dest_file, all_vars=task_vars)
|
||||
# Attempt to get remote file info
|
||||
dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp)
|
||||
|
||||
if remote_checksum == '3':
|
||||
# The remote_checksum was executed on a directory.
|
||||
if dest_status['exists'] and dest_status['isdir']:
|
||||
# The dest is a directory.
|
||||
if content is not None:
|
||||
# If source was defined as content remove the temporary file and fail out.
|
||||
self._remove_tempfile_if_content_defined(content, content_tempfile)
|
||||
|
@ -179,15 +181,15 @@ class ActionModule(ActionBase):
|
|||
result['msg'] = "can not use content with a dir as dest"
|
||||
return result
|
||||
else:
|
||||
# Append the relative source location to the destination and retry remote_checksum
|
||||
# Append the relative source location to the destination and get remote stats again
|
||||
dest_file = self._connection._shell.join_path(dest, source_rel)
|
||||
remote_checksum = self._remote_checksum(dest_file, all_vars=task_vars)
|
||||
dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp)
|
||||
|
||||
if remote_checksum != '1' and not force:
|
||||
if dest_status['exists'] and not force:
|
||||
# remote_file does not exist so continue to next iteration.
|
||||
continue
|
||||
|
||||
if local_checksum != remote_checksum:
|
||||
if local_checksum != dest_status['checksum']:
|
||||
# The checksums don't match and we will change or error out.
|
||||
changed = True
|
||||
|
||||
|
@ -195,7 +197,7 @@ class ActionModule(ActionBase):
|
|||
# If this is recursive we already have a tmp path.
|
||||
if delete_remote_tmp:
|
||||
if tmp is None or "-tmp-" not in tmp:
|
||||
tmp = self._make_tmp_path()
|
||||
tmp = self._make_tmp_path(remote_user)
|
||||
|
||||
if self._play_context.diff and not raw:
|
||||
diffs.append(self._get_diff_data(dest_file, source_full, task_vars))
|
||||
|
@ -210,16 +212,15 @@ class ActionModule(ActionBase):
|
|||
tmp_src = self._connection._shell.join_path(tmp, 'source')
|
||||
|
||||
if not raw:
|
||||
self._connection.put_file(source_full, tmp_src)
|
||||
self._transfer_file(source_full, tmp_src)
|
||||
else:
|
||||
self._connection.put_file(source_full, dest_file)
|
||||
self._transfer_file(source_full, dest_file)
|
||||
|
||||
# We have copied the file remotely and no longer require our content_tempfile
|
||||
self._remove_tempfile_if_content_defined(content, content_tempfile)
|
||||
|
||||
# fix file permissions when the copy is done as a different user
|
||||
if self._play_context.become and self._play_context.become_user != 'root':
|
||||
self._remote_chmod('a+r', tmp_src)
|
||||
self._fixup_perms(tmp, remote_user, recursive=True)
|
||||
|
||||
if raw:
|
||||
# Continue to next iteration if raw is defined.
|
||||
|
|
|
@ -20,38 +20,55 @@ __metaclass__ = type
|
|||
from ansible.plugins.action import ActionBase
|
||||
from ansible.utils.boolean import boolean
|
||||
from ansible.utils.unicode import to_unicode
|
||||
from ansible.errors import AnsibleUndefinedVariable
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
''' Print statements during execution '''
|
||||
|
||||
TRANSFERS_FILES = False
|
||||
VALID_ARGS = set(['msg', 'var', 'verbosity'])
|
||||
|
||||
def run(self, tmp=None, task_vars=None):
|
||||
if task_vars is None:
|
||||
task_vars = dict()
|
||||
|
||||
for arg in self._task.args:
|
||||
if arg not in self.VALID_ARGS:
|
||||
return {"failed": True, "msg": "'%s' is not a valid option in debug" % arg}
|
||||
|
||||
if 'msg' in self._task.args and 'var' in self._task.args:
|
||||
return {"failed": True, "msg": "'msg' and 'var' are incompatible options"}
|
||||
|
||||
result = super(ActionModule, self).run(tmp, task_vars)
|
||||
|
||||
if 'msg' in self._task.args:
|
||||
if 'fail' in self._task.args and boolean(self._task.args['fail']):
|
||||
result['failed'] = True
|
||||
result['msg'] = self._task.args['msg']
|
||||
else:
|
||||
result['msg'] = self._task.args['msg']
|
||||
# FIXME: move the LOOKUP_REGEX somewhere else
|
||||
elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']):
|
||||
results = self._templar.template(self._task.args['var'], convert_bare=True)
|
||||
if type(self._task.args['var']) in (list, dict):
|
||||
# If var is a list or dict, use the type as key to display
|
||||
result[to_unicode(type(self._task.args['var']))] = results
|
||||
else:
|
||||
if results == self._task.args['var']:
|
||||
results = "VARIABLE IS NOT DEFINED!"
|
||||
result[self._task.args['var']] = results
|
||||
else:
|
||||
result['msg'] = 'here we are'
|
||||
verbosity = 0
|
||||
# get task verbosity
|
||||
if 'verbosity' in self._task.args:
|
||||
verbosity = int(self._task.args['verbosity'])
|
||||
|
||||
# force flag to make debug output module always verbose
|
||||
result['_ansible_verbose_always'] = True
|
||||
if verbosity <= self._display.verbosity:
|
||||
if 'msg' in self._task.args:
|
||||
result['msg'] = self._task.args['msg']
|
||||
|
||||
elif 'var' in self._task.args:
|
||||
try:
|
||||
results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True, bare_deprecated=False)
|
||||
if results == self._task.args['var']:
|
||||
raise AnsibleUndefinedVariable
|
||||
except AnsibleUndefinedVariable:
|
||||
results = "VARIABLE IS NOT DEFINED!"
|
||||
|
||||
if type(self._task.args['var']) in (list, dict):
|
||||
# If var is a list or dict, use the type as key to display
|
||||
result[to_unicode(type(self._task.args['var']))] = results
|
||||
else:
|
||||
result[self._task.args['var']] = results
|
||||
else:
|
||||
result['msg'] = 'Hello world!'
|
||||
|
||||
# force flag to make debug output module always verbose
|
||||
result['_ansible_verbose_always'] = True
|
||||
else:
|
||||
result['skipped'] = True
|
||||
|
||||
return result
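The verbosity gate introduced above can be shown in isolation; this helper is a sketch under assumed names, not the plugin's API:

    # A debug task only reports when its own 'verbosity' argument does not
    # exceed the verbosity ansible was invoked with (-v, -vv, ...).
    def debug_should_report(task_args, display_verbosity):
        return int(task_args.get('verbosity', 0)) <= display_verbosity

    print(debug_should_report({'msg': 'hi', 'verbosity': 2}, 0))  # False: needs -vv
    print(debug_should_report({'msg': 'hi', 'verbosity': 2}, 2))  # True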
|
||||
|
|
lib/ansible/plugins/action/eos_template.py (new file, 26 lines)
@ -0,0 +1,26 @@
|
|||
#
|
||||
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.plugins.action.net_template import ActionModule as NetActionModule
|
||||
|
||||
class ActionModule(NetActionModule, ActionBase):
|
||||
pass
|
|
@ -25,6 +25,7 @@ from ansible.plugins.action import ActionBase
|
|||
from ansible.utils.boolean import boolean
|
||||
from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
|
||||
from ansible.utils.path import makedirs_safe
|
||||
from ansible.utils.unicode import to_bytes
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
|
@ -70,7 +71,7 @@ class ActionModule(ActionBase):
|
|||
if remote_checksum in ('1', '2', None):
|
||||
slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
|
||||
if slurpres.get('failed'):
|
||||
if remote_checksum == '1' and not fail_on_missing:
|
||||
if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
|
||||
result['msg'] = "the remote file does not exist, not transferring, ignored"
|
||||
result['file'] = source
|
||||
result['changed'] = False
|
||||
|
@ -158,7 +159,7 @@ class ActionModule(ActionBase):
|
|||
self._connection.fetch_file(source, dest)
|
||||
else:
|
||||
try:
|
||||
f = open(dest, 'w')
|
||||
f = open(to_bytes(dest, errors='strict'), 'w')
|
||||
f.write(remote_data)
|
||||
f.close()
|
||||
except (IOError, OSError) as e:
|
||||
|
@ -171,7 +172,9 @@ class ActionModule(ActionBase):
|
|||
new_md5 = None
|
||||
|
||||
if validate_checksum and new_checksum != remote_checksum:
|
||||
result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
|
||||
result.update(dict(failed=True, md5sum=new_md5,
|
||||
msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
|
||||
checksum=new_checksum, remote_checksum=remote_checksum))
|
||||
else:
|
||||
result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
|
||||
else:
|
||||
|
|
|
@ -40,6 +40,6 @@ class ActionModule(ActionBase):
|
|||
group_name = self._task.args.get('key')
|
||||
group_name = group_name.replace(' ','-')
|
||||
|
||||
result['changed'] = True
|
||||
result['changed'] = False
|
||||
result['add_group'] = group_name
|
||||
return result
|
||||
|
|
lib/ansible/plugins/action/ios_template.py (new file, 28 lines)
@ -0,0 +1,28 @@
|
|||
#
|
||||
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.plugins.action.net_template import ActionModule as NetActionModule
|
||||
|
||||
class ActionModule(NetActionModule, ActionBase):
|
||||
pass
|
||||
|
||||
|
lib/ansible/plugins/action/iosxr_template.py (new file, 28 lines)
@ -0,0 +1,28 @@
|
|||
#
|
||||
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.plugins.action.net_template import ActionModule as NetActionModule
|
||||
|
||||
class ActionModule(NetActionModule, ActionBase):
|
||||
pass
|
||||
|
||||
|
lib/ansible/plugins/action/junos_template.py (new file, 28 lines)
@ -0,0 +1,28 @@
|
|||
#
|
||||
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.plugins.action.net_template import ActionModule as NetActionModule
|
||||
|
||||
class ActionModule(NetActionModule, ActionBase):
|
||||
pass
|
||||
|
||||
|
lib/ansible/plugins/action/net_template.py (new file, 98 lines)
@ -0,0 +1,98 @@
|
|||
#
|
||||
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import glob
|
||||
import urlparse
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.utils.boolean import boolean
|
||||
from ansible.utils.unicode import to_unicode
|
||||
|
||||
BOOLEANS = ('true', 'false', 'yes', 'no')
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
|
||||
TRANSFERS_FILES = False
|
||||
|
||||
def run(self, tmp=None, task_vars=None):
|
||||
result = super(ActionModule, self).run(tmp, task_vars)
|
||||
result['changed'] = False
|
||||
|
||||
try:
|
||||
self._handle_template()
|
||||
except ValueError as exc:
|
||||
return dict(failed=True, msg=exc.message)
|
||||
|
||||
result.update(self._execute_module(module_name=self._task.action,
|
||||
module_args=self._task.args, task_vars=task_vars))
|
||||
|
||||
if self._task.args.get('backup') and result.get('_backup'):
|
||||
# User requested backup and no error occurred in module.
|
||||
# NOTE: If there is a parameter error, _backup key may not be in results.
|
||||
self._write_backup(task_vars['inventory_hostname'], result['_backup'])
|
||||
|
||||
if '_backup' in result:
|
||||
del result['_backup']
|
||||
|
||||
return result
|
||||
|
||||
def _get_working_path(self):
|
||||
cwd = self._loader.get_basedir()
|
||||
if self._task._role is not None:
|
||||
cwd = self._task._role._role_path
|
||||
return cwd
|
||||
|
||||
def _write_backup(self, host, contents):
|
||||
backup_path = self._get_working_path() + '/backup'
|
||||
if not os.path.exists(backup_path):
|
||||
os.mkdir(backup_path)
|
||||
for fn in glob.glob('%s/%s*' % (backup_path, host)):
|
||||
os.remove(fn)
|
||||
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
|
||||
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
|
||||
open(filename, 'w').write(contents)
|
||||
|
||||
def _handle_template(self):
|
||||
src = self._task.args.get('src')
|
||||
working_path = self._get_working_path()
|
||||
|
||||
if os.path.isabs(src) or urlparse.urlsplit('src').scheme:
|
||||
source = src
|
||||
else:
|
||||
source = self._loader.path_dwim_relative(working_path, 'templates', src)
|
||||
if not source:
|
||||
source = self._loader.path_dwim_relative(working_path, src)
|
||||
|
||||
if not os.path.exists(source):
|
||||
return
|
||||
|
||||
try:
|
||||
with open(source, 'r') as f:
|
||||
template_data = to_unicode(f.read())
|
||||
except IOError:
|
||||
return dict(failed=True, msg='unable to load src file')
|
||||
|
||||
self._task.args['src'] = self._templar.template(template_data)
|
||||
|
||||
|
|
@ -18,6 +18,7 @@ from __future__ import (absolute_import, division, print_function)
|
|||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.utils.vars import merge_hash
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
|
@ -27,12 +28,16 @@ class ActionModule(ActionBase):
|
|||
task_vars = dict()
|
||||
|
||||
results = super(ActionModule, self).run(tmp, task_vars)
|
||||
results.update(self._execute_module(tmp=tmp, task_vars=task_vars))
|
||||
|
||||
# remove as modules might hide due to nolog
|
||||
del results['invocation']['module_args']
|
||||
results = merge_hash(results, self._execute_module(tmp=tmp, task_vars=task_vars))
|
||||
# Remove special fields from the result, which can only be set
|
||||
# internally by the executor engine. We do this only here in
|
||||
# the 'normal' action, as other action plugins may set this.
|
||||
for field in ('ansible_notify',):
|
||||
#
|
||||
# We don't want modules to determine that running the module fires
|
||||
# notify handlers. That's for the playbook to decide.
|
||||
for field in ('_ansible_notify',):
|
||||
if field in results:
|
||||
results.pop(field)
|
||||
|
||||
|
|
lib/ansible/plugins/action/nxos_template.py (new file, 27 lines)
@ -0,0 +1,27 @@
|
|||
#
|
||||
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.plugins.action.net_template import ActionModule as NetActionModule
|
||||
|
||||
class ActionModule(NetActionModule, ActionBase):
|
||||
pass
|
||||
|
lib/ansible/plugins/action/ops_template.py (new file, 50 lines)
@ -0,0 +1,50 @@
|
|||
#
|
||||
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.plugins.action.net_template import ActionModule as NetActionModule
|
||||
|
||||
class ActionModule(NetActionModule, ActionBase):
|
||||
|
||||
def run(self, tmp=None, task_vars=None):
|
||||
if self._connection.transport == 'local':
|
||||
return super(ActionModule, self).run(tmp, task_vars)
|
||||
|
||||
result = dict(changed=False)
|
||||
|
||||
if isinstance(self._task.args['src'], basestring):
|
||||
self._handle_template()
|
||||
|
||||
result.update(self._execute_module(module_name=self._task.action,
|
||||
module_args=self._task.args, task_vars=task_vars))
|
||||
|
||||
if self._task.args.get('backup') and result.get('_backup'):
|
||||
contents = json.dumps(result['_backup'], indent=4)
|
||||
self._write_backup(task_vars['inventory_hostname'], contents)
|
||||
|
||||
if '_backup' in result:
|
||||
del result['_backup']
|
||||
|
||||
return result
|
||||
|
||||
|
|
@@ -34,6 +34,7 @@ class ActionModule(ActionBase):
        src = self._task.args.get('src', None)
        remote_src = boolean(self._task.args.get('remote_src', 'no'))
        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user

        if src is None:
            result['failed'] = True

@@ -52,14 +53,12 @@ class ActionModule(ActionBase):
        # create the remote tmp dir if needed, and put the source file there
        if tmp is None or "-tmp-" not in tmp:
            tmp = self._make_tmp_path()
            tmp = self._make_tmp_path(remote_user)

        tmp_src = self._connection._shell.join_path(tmp, os.path.basename(src))
        self._connection.put_file(src, tmp_src)
        self._transfer_file(src, tmp_src)

        if self._play_context.become and self._play_context.become_user != 'root':
            if not self._play_context.check_mode:
                self._remote_chmod('a+r', tmp_src)
        self._fixup_perms(tmp, remote_user, recursive=True)

        new_module_args = self._task.args.copy()
        new_module_args.update(
@@ -105,6 +105,8 @@ class ActionModule(ActionBase):
        result['start'] = str(datetime.datetime.now())
        result['user_input'] = ''

        fd = None
        old_settings = None
        try:
            if seconds is not None:
                # setup the alarm handler

@@ -118,23 +120,29 @@ class ActionModule(ActionBase):

            # save the attributes on the existing (duped) stdin so
            # that we can restore them later after we set raw mode
            fd = self._connection._new_stdin.fileno()
            if isatty(fd):
                old_settings = termios.tcgetattr(fd)
                tty.setraw(fd)

                # flush the buffer to make sure no previous key presses
                # are read in below
                termios.tcflush(self._connection._new_stdin, termios.TCIFLUSH)
            fd = None
            try:
                fd = self._connection._new_stdin.fileno()
            except ValueError:
                # someone is using a closed file descriptor as stdin
                pass
            if fd is not None:
                if isatty(fd):
                    old_settings = termios.tcgetattr(fd)
                    tty.setraw(fd)

                # flush the buffer to make sure no previous key presses
                # are read in below
                termios.tcflush(self._connection._new_stdin, termios.TCIFLUSH)
            while True:
                try:
                    key_pressed = self._connection._new_stdin.read(1)
                    if key_pressed == '\x03':
                        raise KeyboardInterrupt
                    if fd is not None:
                        key_pressed = self._connection._new_stdin.read(1)
                        if key_pressed == '\x03':
                            raise KeyboardInterrupt

                    if not seconds:
                        if not isatty(fd):
                        if fd is None or not isatty(fd):
                            display.warning("Not waiting from prompt as stdin is not interactive")
                            break
                        # read key presses and act accordingly

@@ -152,6 +160,7 @@ class ActionModule(ActionBase):
                    else:
                        raise AnsibleError('user requested abort!')


        except AnsibleTimeoutExceeded:
            # this is the exception we expect when the alarm signal
            # fires, so we simply ignore it to move into the cleanup

@@ -159,7 +168,7 @@ class ActionModule(ActionBase):
        finally:
            # cleanup and save some information
            # restore the old settings for the duped stdin fd
            if isatty(fd):
            if not(None in (fd, old_settings)) and isatty(fd):
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)

            duration = time.time() - start
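The reworked pause hunks above guard every terminal operation: fileno() can raise ValueError when stdin has been closed, and raw mode is only entered, and later restored, when the descriptor really is a tty. A small self-contained sketch of that guard pattern on a POSIX terminal (illustrative only; it uses sys.stdin rather than the connection's duplicated stdin):

    import sys
    import termios
    import tty
    from os import isatty

    fd = None
    old_settings = None
    try:
        try:
            fd = sys.stdin.fileno()
        except ValueError:
            pass  # stdin was closed or replaced by a non-file object
        if fd is not None and isatty(fd):
            old_settings = termios.tcgetattr(fd)   # remember cooked-mode settings
            tty.setraw(fd)                         # switch to single-keypress reads
            termios.tcflush(fd, termios.TCIFLUSH)  # discard any buffered keypresses
            key_pressed = sys.stdin.read(1)        # blocks until one key arrives
    finally:
        # restore the terminal only if it was actually put into raw mode
        if fd is not None and old_settings is not None and isatty(fd):
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)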
@@ -19,8 +19,6 @@ __metaclass__ = type

from ansible.plugins.action import ActionBase

import re


class ActionModule(ActionBase):
    TRANSFERS_FILES = False

@@ -39,10 +37,4 @@ class ActionModule(ActionBase):
        executable = self._task.args.get('executable')
        result.update(self._low_level_execute_command(self._task.args.get('_raw_params'), executable=executable))

        # for some modules (script, raw), the sudo success key
        # may leak into the stdout due to the way the sudo/su
        # command is constructed, so we filter that out here
        if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'):
            result['stdout'] = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout'])

        return result
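The stdout filter removed from raw.py above stripped the become success marker that sudo/su wrappers can leak into command output. For reference, the same regex applied to a made-up sample string, standard library only:

    import re

    # Simulated stdout where the become wrapper's success marker leaked through.
    stdout = 'BECOME-SUCCESS-abcdef123456\nactual command output\n'
    if stdout.strip().startswith('BECOME-SUCCESS-'):
        stdout = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', stdout)
    print(stdout)  # -> actual command output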
@@ -38,8 +38,9 @@ class ActionModule(ActionBase):
            result['msg'] = 'check mode not supported for this module'
            return result

        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
        if not tmp:
            tmp = self._make_tmp_path()
            tmp = self._make_tmp_path(remote_user)

        creates = self._task.args.get('creates')
        if creates:

@@ -76,16 +77,11 @@ class ActionModule(ActionBase):

        # transfer the file to a remote tmp location
        tmp_src = self._connection._shell.join_path(tmp, os.path.basename(source))
        self._connection.put_file(source, tmp_src)
        self._transfer_file(source, tmp_src)

        sudoable = True
        # set file permissions, more permissive when the copy is done as a different user
        if self._play_context.become and self._play_context.become_user != 'root':
            chmod_mode = 'a+rx'
            sudoable = False
        else:
            chmod_mode = '+rx'
        self._remote_chmod(chmod_mode, tmp_src, sudoable=sudoable)
        self._fixup_perms(tmp, remote_user, execute=True, recursive=True)

        # add preparation steps to one ssh roundtrip executing the script
        env_string = self._compute_environment_string()
@@ -20,6 +20,7 @@ __metaclass__ = type

import os.path

from ansible.playbook.play_context import MAGIC_VARIABLE_MAPPING
from ansible.plugins.action import ActionBase
from ansible.plugins import connection_loader
from ansible.utils.boolean import boolean

@@ -68,9 +69,22 @@ class ActionModule(ActionBase):
            path = self._get_absolute_path(path=path)
        return path

    def _process_remote(self, host, path, user):
    def _process_remote(self, host, path, user, port_matches_localhost_port):
        """
        :arg host: hostname for the path
        :arg path: file path
        :arg user: username for the transfer
        :arg port_matches_localhost_port: boolean whether the remote port
            matches the port used by localhost's sshd. This is used in
            conjunction with seeing whether the host is localhost to know
            if we need to have the module substitute the pathname or if it
            is a different host (for instance, an ssh tunnelled port or an
            alternative ssh port to a vagrant host.)
        """
        transport = self._play_context.connection
        if host not in C.LOCALHOST or transport != "local":
            if port_matches_localhost_port and host in C.LOCALHOST:
                self._task.args['_substitute_controller'] = True
            return self._format_rsync_rsh_target(host, path, user)

        if ':' not in path and not path.startswith('/'):

@@ -103,14 +117,40 @@ class ActionModule(ActionBase):

    def run(self, tmp=None, task_vars=None):
        ''' generates params and passes them on to the rsync module '''
        # When modifying this function be aware of the tricky convolutions
        # your thoughts have to go through:
        #
        # In normal ansible, we connect from controller to inventory_hostname
        # (playbook's hosts: field) or controller to delegate_to host and run
        # a module on one of those hosts.
        #
        # So things that are directly related to the core of ansible are in
        # terms of that sort of connection that always originate on the
        # controller.
        #
        # In synchronize we use ansible to connect to either the controller or
        # to the delegate_to host and then run rsync which makes its own
        # connection from controller to inventory_hostname or delegate_to to
        # inventory_hostname.
        #
        # That means synchronize needs to have some knowledge of the
        # controller to inventory_host/delegate host that ansible typically
        # establishes and use those to construct a command line for rsync to
        # connect from the inventory_host to the controller/delegate. The
        # challenge for coders is remembering which leg of the trip is
        # associated with the conditions that you're checking at any one time.
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        original_transport = task_vars.get('ansible_connection') or self._play_context.connection
        # self._play_context.connection accounts for delegate_to so
        # remote_transport is the transport ansible thought it would need
        # between the controller and the delegate_to host or the controller
        # and the remote_host if delegate_to isn't set.

        remote_transport = False
        if original_transport != 'local':
        if self._play_context.connection != 'local':
            remote_transport = True

        try:
@@ -118,6 +158,13 @@ class ActionModule(ActionBase):
        except (AttributeError, KeyError):
            delegate_to = None

        # ssh paramiko and local are fully supported transports. Anything
        # else only works with delegate_to
        if delegate_to is None and self._play_context.connection not in ('ssh', 'paramiko', 'smart', 'local'):
            result['failed'] = True
            result['msg'] = "synchronize uses rsync to function. rsync needs to connect to the remote host via ssh or a direct filesystem copy. This remote host is being accessed via %s instead so it cannot work." % self._play_context.connection
            return result

        use_ssh_args = self._task.args.pop('use_ssh_args', None)

        # Parameter name needed by the ansible module

@@ -136,11 +183,29 @@ class ActionModule(ActionBase):
        except KeyError:
            dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname)

        dest_is_local = dest_host in C.LOCALHOST
        localhost_ports = set()
        for host in C.LOCALHOST:
            localhost_vars = task_vars['hostvars'].get(host, {})
            for port_var in MAGIC_VARIABLE_MAPPING['port']:
                port = localhost_vars.get(port_var, None)
                if port:
                    break
            else:
                port = C.DEFAULT_REMOTE_PORT
            localhost_ports.add(port)

        # dest_is_local tells us if the host rsync runs on is the same as the
        # host rsync puts the files on. This is about *rsync's connection*,
        # not about the ansible connection to run the module.
        dest_is_local = False
        if not delegate_to and remote_transport is False:
            dest_is_local = True
        elif delegate_to and delegate_to == dest_host:
            dest_is_local = True

        # CHECK FOR NON-DEFAULT SSH PORT
        inv_port = task_vars.get('ansible_ssh_port', None) or C.DEFAULT_REMOTE_PORT
        if self._task.args.get('dest_port', None) is None:
            inv_port = task_vars.get('ansible_ssh_port', None) or C.DEFAULT_REMOTE_PORT
            if inv_port is not None:
                self._task.args['dest_port'] = inv_port
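The two hunks above gather every port configured for the localhost aliases and pass "does the inventory port match one of them" into _process_remote, so the plugin can tell a genuinely remote destination apart from localhost reached over an unusual port (an ssh tunnel, a vagrant forward). A rough standalone sketch of that comparison and of the user@host:path formatting it feeds; the helper names and the bare-path shortcut are illustrative, not the plugin's actual API:

    DEFAULT_REMOTE_PORT = 22
    LOCALHOST = ('localhost', '127.0.0.1', '::1')

    def collect_localhost_ports(hostvars):
        """Every port explicitly configured for a localhost alias, else the default."""
        ports = set()
        for host in LOCALHOST:
            port = hostvars.get(host, {}).get('ansible_ssh_port') or DEFAULT_REMOTE_PORT
            ports.add(port)
        return ports

    def format_remote(host, path, user, port_matches_localhost_port):
        """Return an rsync-style target, or the bare path when 'remote' is really this machine."""
        if host in LOCALHOST and port_matches_localhost_port:
            return path
        return '%s@%s:%s' % (user, host, path)

    hostvars = {'localhost': {'ansible_ssh_port': 2222}}
    ports = collect_localhost_ports(hostvars)                                 # {22, 2222}
    print(format_remote('localhost', '/tmp/tree', 'deploy', 2222 in ports))   # /tmp/tree
    print(format_remote('web01', '/tmp/tree', 'deploy', False))               # deploy@web01:/tmp/tree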
@ -161,23 +226,28 @@ class ActionModule(ActionBase):
|
|||
# Delegate to localhost as the source of the rsync unless we've been
|
||||
# told (via delegate_to) that a different host is the source of the
|
||||
# rsync
|
||||
transport_overridden = False
|
||||
if not use_delegate and remote_transport:
|
||||
# Create a connection to localhost to run rsync on
|
||||
new_stdin = self._connection._new_stdin
|
||||
|
||||
# Unlike port, there can be only one shell
|
||||
localhost_shell = None
|
||||
for host in C.LOCALHOST:
|
||||
localhost_vars = task_vars['hostvars'].get(host, {})
|
||||
for shell_var in MAGIC_VARIABLE_MAPPING['shell']:
|
||||
localhost_shell = localhost_vars.get(shell_var, None)
|
||||
if localhost_shell:
|
||||
break
|
||||
if localhost_shell:
|
||||
break
|
||||
else:
|
||||
localhost_shell = os.path.basename(C.DEFAULT_EXECUTABLE)
|
||||
self._play_context.shell = localhost_shell
|
||||
|
||||
new_connection = connection_loader.get('local', self._play_context, new_stdin)
|
||||
self._connection = new_connection
|
||||
transport_overridden = True
|
||||
self._override_module_replaced_vars(task_vars)
|
||||
|
||||
# COMPARE DELEGATE, HOST AND TRANSPORT
|
||||
between_multiple_hosts = False
|
||||
if dest_host != src_host and remote_transport:
|
||||
# We're not copying two filesystem trees on the same host so we
|
||||
# need to correctly format the paths for rsync (like
|
||||
# user@host:path/to/tree
|
||||
between_multiple_hosts = True
|
||||
|
||||
# SWITCH SRC AND DEST HOST PER MODE
|
||||
if self._task.args.get('mode', 'push') == 'pull':
|
||||
(dest_host, src_host) = (src_host, dest_host)
|
||||
|
@ -185,7 +255,7 @@ class ActionModule(ActionBase):
|
|||
# MUNGE SRC AND DEST PER REMOTE_HOST INFO
|
||||
src = self._task.args.get('src', None)
|
||||
dest = self._task.args.get('dest', None)
|
||||
if between_multiple_hosts:
|
||||
if not dest_is_local:
|
||||
# Private key handling
|
||||
if use_delegate:
|
||||
private_key = task_vars.get('ansible_ssh_private_key_file') or self._play_context.private_key_file
|
||||
|
@ -211,12 +281,12 @@ class ActionModule(ActionBase):
|
|||
# use the mode to define src and dest's url
|
||||
if self._task.args.get('mode', 'push') == 'pull':
|
||||
# src is a remote path: <user>@<host>, dest is a local path
|
||||
src = self._process_remote(src_host, src, user)
|
||||
src = self._process_remote(src_host, src, user, inv_port in localhost_ports)
|
||||
dest = self._process_origin(dest_host, dest, user)
|
||||
else:
|
||||
# src is a local path, dest is a remote path: <user>@<host>
|
||||
src = self._process_origin(src_host, src, user)
|
||||
dest = self._process_remote(dest_host, dest, user)
|
||||
dest = self._process_remote(dest_host, dest, user, inv_port in localhost_ports)
|
||||
else:
|
||||
# Still need to munge paths (to account for roles) even if we aren't
|
||||
# copying files between hosts
|
||||
|
@ -231,9 +301,18 @@ class ActionModule(ActionBase):
|
|||
# Allow custom rsync path argument
|
||||
rsync_path = self._task.args.get('rsync_path', None)
|
||||
|
||||
# If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument
|
||||
if not rsync_path and transport_overridden and self._play_context.become and self._play_context.become_method == 'sudo' and not dest_is_local:
|
||||
rsync_path = 'sudo rsync'
|
||||
if not dest_is_local:
|
||||
if self._play_context.become and not rsync_path:
|
||||
# If no rsync_path is set, become was originally set, and dest is
|
||||
# remote then add privilege escalation here.
|
||||
if self._play_context.become_method == 'sudo':
|
||||
rsync_path = 'sudo rsync'
|
||||
# TODO: have to add in the rest of the become methods here
|
||||
|
||||
# We cannot use privilege escalation on the machine running the
|
||||
# module. Instead we run it on the machine rsync is connecting
|
||||
# to.
|
||||
self._play_context.become = False
|
||||
|
||||
# make sure rsync path is quoted.
|
||||
if rsync_path:
|
||||
|
@ -245,9 +324,9 @@ class ActionModule(ActionBase):
|
|||
# run the module and store the result
|
||||
result.update(self._execute_module('synchronize', task_vars=task_vars))
|
||||
|
||||
if 'SyntaxError' in result['msg']:
|
||||
if 'SyntaxError' in result.get('exception', result.get('msg', '')):
|
||||
# Emit a warning about using python3 because synchronize is
|
||||
# somewhat unique in running on localhost
|
||||
result['traceback'] = result['msg']
|
||||
result['exception'] = result['msg']
|
||||
result['msg'] = 'SyntaxError parsing module. Perhaps invoking "python" on your local (or delegate_to) machine invokes python3. You can set ansible_python_interpreter for localhost (or the delegate_to machine) to the location of python2 to fix this'
|
||||
return result
|
||||
|
|
@@ -33,24 +33,19 @@ class ActionModule(ActionBase):

    TRANSFERS_FILES = True

    def get_checksum(self, dest, all_vars, try_directory=False, source=None):
        remote_checksum = self._remote_checksum(dest, all_vars=all_vars)
    def get_checksum(self, dest, all_vars, try_directory=False, source=None, tmp=None):
        try:
            dest_stat = self._execute_remote_stat(dest, all_vars=all_vars, follow=False, tmp=tmp)

        if remote_checksum in ('0', '2', '3', '4'):
            # Note: 1 means the file is not present which is fine; template
            # will create it. 3 means directory was specified instead of file
            if try_directory and remote_checksum == '3' and source:
            if dest_stat['exists'] and dest_stat['isdir'] and try_directory and source:
                base = os.path.basename(source)
                dest = os.path.join(dest, base)
                remote_checksum = self.get_checksum(dest, all_vars=all_vars, try_directory=False)
                if remote_checksum not in ('0', '2', '3', '4'):
                    return remote_checksum
                dest_stat = self._execute_remote_stat(dest, all_vars=all_vars, follow=False, tmp=tmp)

            result = dict(failed=True, msg="failed to checksum remote file."
                    " Checksum error code: %s" % remote_checksum)
            return result
        except Exception as e:
            return dict(failed=True, msg=to_bytes(e))

        return remote_checksum
        return dest_stat['checksum']

    def run(self, tmp=None, task_vars=None):
        ''' handler for template operations '''
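With this rewrite get_checksum returns either a checksum string or, on any failure, a dict carrying failed/msg, so callers must type-check before comparing; a hunk further down this file does exactly that with isinstance(remote_checksum, dict). A hedged sketch of that calling convention with a stubbed-out checksum function (the stub and its values are illustrative, not the action plugin itself):

    def get_checksum_stub(dest_exists, blow_up=False):
        """Stand-in for get_checksum: checksum string on success, error dict on failure."""
        if blow_up:
            return dict(failed=True, msg='failed to stat remote file')
        # '1' is the marker the template action treats as 'file does not exist'
        return 'd41d8cd98f00b204e9800998ecf8427e' if dest_exists else '1'

    local_checksum = 'd41d8cd98f00b204e9800998ecf8427e'  # placeholder value
    remote_checksum = get_checksum_stub(dest_exists=True)
    if isinstance(remote_checksum, dict):
        result = remote_checksum  # propagate the failure dict verbatim
    else:
        needs_copy = (remote_checksum == '1') or (local_checksum != remote_checksum)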
@ -63,15 +58,17 @@ class ActionModule(ActionBase):
|
|||
dest = self._task.args.get('dest', None)
|
||||
faf = self._task.first_available_file
|
||||
force = boolean(self._task.args.get('force', True))
|
||||
state = self._task.args.get('state', None)
|
||||
|
||||
if (source is None and faf is not None) or dest is None:
|
||||
if state is not None:
|
||||
result['failed'] = True
|
||||
result['msg'] = "'state' cannot be specified on a template"
|
||||
return result
|
||||
elif (source is None and faf is not None) or dest is None:
|
||||
result['failed'] = True
|
||||
result['msg'] = "src and dest are required"
|
||||
return result
|
||||
|
||||
if tmp is None:
|
||||
tmp = self._make_tmp_path()
|
||||
|
||||
if faf:
|
||||
source = self._get_first_available_file(faf, task_vars.get('_original_file', None, 'templates'))
|
||||
if source is None:
|
||||
|
@ -140,8 +137,14 @@ class ActionModule(ActionBase):
|
|||
result['msg'] = type(e).__name__ + ": " + str(e)
|
||||
return result
|
||||
|
||||
cleanup_remote_tmp = False
|
||||
remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
|
||||
if not tmp:
|
||||
tmp = self._make_tmp_path(remote_user)
|
||||
cleanup_remote_tmp = True
|
||||
|
||||
local_checksum = checksum_s(resultant)
|
||||
remote_checksum = self.get_checksum(dest, task_vars, not directory_prepended, source=source)
|
||||
remote_checksum = self.get_checksum(dest, task_vars, not directory_prepended, source=source, tmp=tmp)
|
||||
if isinstance(remote_checksum, dict):
|
||||
# Error from remote_checksum is a dict. Valid return is a str
|
||||
result.update(remote_checksum)
|
||||
|
@ -150,7 +153,7 @@ class ActionModule(ActionBase):
|
|||
diff = {}
|
||||
new_module_args = self._task.args.copy()
|
||||
|
||||
if force and local_checksum != remote_checksum:
|
||||
if (remote_checksum == '1') or (force and local_checksum != remote_checksum):
|
||||
|
||||
result['changed'] = True
|
||||
# if showing diffs, we need to get the remote value
|
||||
|
@ -161,8 +164,7 @@ class ActionModule(ActionBase):
|
|||
xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant)
|
||||
|
||||
# fix file permissions when the copy is done as a different user
|
||||
if self._play_context.become and self._play_context.become_user != 'root':
|
||||
self._remote_chmod('a+r', xfered)
|
||||
self._fixup_perms(tmp, remote_user, recursive=True)
|
||||
|
||||
# run the copy module
|
||||
new_module_args.update(
|
||||
|
@ -173,13 +175,11 @@ class ActionModule(ActionBase):
|
|||
follow=True,
|
||||
),
|
||||
)
|
||||
result.update(self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars))
|
||||
result.update(self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False))
|
||||
|
||||
if result.get('changed', False) and self._play_context.diff:
|
||||
result['diff'] = diff
|
||||
|
||||
return result
|
||||
|
||||
else:
|
||||
# when running the file module based on the template data, we do
|
||||
# not want the source filename (the name of the template) to be used,
|
||||
|
@ -194,6 +194,9 @@ class ActionModule(ActionBase):
|
|||
follow=True,
|
||||
),
|
||||
)
|
||||
result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False))
|
||||
|
||||
result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars))
|
||||
return result
|
||||
if tmp and cleanup_remote_tmp:
|
||||
self._remove_tmp_path(tmp)
|
||||
|
||||
return result
|
||||
|
|
|
@ -45,8 +45,9 @@ class ActionModule(ActionBase):
|
|||
result['msg'] = "src (or content) and dest are required"
|
||||
return result
|
||||
|
||||
remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
|
||||
if not tmp:
|
||||
tmp = self._make_tmp_path()
|
||||
tmp = self._make_tmp_path(remote_user)
|
||||
|
||||
if creates:
|
||||
# do not run the command if the line contains creates=filename
|
||||
|
@ -69,28 +70,26 @@ class ActionModule(ActionBase):
|
|||
source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', source)
|
||||
|
||||
remote_checksum = self._remote_checksum(dest, all_vars=task_vars)
|
||||
if remote_checksum != '3':
|
||||
result['failed'] = True
|
||||
result['msg'] = "dest '%s' must be an existing dir" % dest
|
||||
return result
|
||||
elif remote_checksum == '4':
|
||||
if remote_checksum == '4':
|
||||
result['failed'] = True
|
||||
result['msg'] = "python isn't present on the system. Unable to compute checksum"
|
||||
return result
|
||||
elif remote_checksum != '3':
|
||||
result['failed'] = True
|
||||
result['msg'] = "dest '%s' must be an existing dir" % dest
|
||||
return result
|
||||
|
||||
if copy:
|
||||
# transfer the file to a remote tmp location
|
||||
tmp_src = tmp + 'source'
|
||||
self._connection.put_file(source, tmp_src)
|
||||
tmp_src = self._connection._shell.join_path(tmp, 'source')
|
||||
self._transfer_file(source, tmp_src)
|
||||
|
||||
# handle diff mode client side
|
||||
# handle check mode client side
|
||||
# fix file permissions when the copy is done as a different user
|
||||
if copy:
|
||||
if self._play_context.become and self._play_context.become_user != 'root':
|
||||
if not self._play_context.check_mode:
|
||||
self._remote_chmod('a+r', tmp_src)
|
||||
|
||||
if copy:
|
||||
# fix file permissions when the copy is done as a different user
|
||||
self._fixup_perms(tmp, remote_user, recursive=True)
|
||||
# Build temporary module_args.
|
||||
new_module_args = self._task.args.copy()
|
||||
new_module_args.update(
|
||||
|
|
4  lib/ansible/plugins/cache/memcached.py  vendored

@@ -21,7 +21,7 @@ import collections
import os
import sys
import time
import threading
from multiprocessing import Lock
from itertools import chain

from ansible import constants as C

@@ -53,7 +53,7 @@ class ProxyClientPool(object):
        self._num_connections = 0
        self._available_connections = collections.deque(maxlen=self.max_connections)
        self._locked_connections = set()
        self._lock = threading.Lock()
        self._lock = Lock()

    def _check_safe(self):
        if self.pid != os.getpid():
@ -28,6 +28,7 @@ from ansible.compat.six import string_types
|
|||
|
||||
from ansible import constants as C
|
||||
from ansible.vars import strip_internal_keys
|
||||
from ansible.utils.color import stringc
|
||||
from ansible.utils.unicode import to_unicode
|
||||
|
||||
try:
|
||||
|
@ -38,6 +39,11 @@ except ImportError:
|
|||
|
||||
__all__ = ["CallbackBase"]
|
||||
|
||||
try:
|
||||
from __main__ import cli
|
||||
except ImportError:
|
||||
# using API w/o cli
|
||||
cli = False
|
||||
|
||||
class CallbackBase:
|
||||
|
||||
|
@ -53,12 +59,32 @@ class CallbackBase:
|
|||
else:
|
||||
self._display = global_display
|
||||
|
||||
if cli:
|
||||
self._options = cli.options
|
||||
else:
|
||||
self._options = None
|
||||
|
||||
if self._display.verbosity >= 4:
|
||||
name = getattr(self, 'CALLBACK_NAME', 'unnamed')
|
||||
ctype = getattr(self, 'CALLBACK_TYPE', 'old')
|
||||
version = getattr(self, 'CALLBACK_VERSION', '1.0')
|
||||
self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version))
|
||||
|
||||
''' helper for callbacks, so they don't all have to include deepcopy '''
|
||||
_copy_result = deepcopy
|
||||
|
||||
def _copy_result_exclude(self, result, exclude):
|
||||
values = []
|
||||
for e in exclude:
|
||||
values.append(getattr(result, e))
|
||||
setattr(result, e, None)
|
||||
|
||||
result_copy = deepcopy(result)
|
||||
for i,e in enumerate(exclude):
|
||||
setattr(result, e, values[i])
|
||||
|
||||
return result_copy
|
||||
|
||||
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
|
||||
if result.get('_ansible_no_log', False):
|
||||
return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result"))
|
||||
|
@ -91,7 +117,6 @@ class CallbackBase:
|
|||
try:
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter('ignore')
|
||||
ret = []
|
||||
if 'dst_binary' in diff:
|
||||
ret.append("diff skipped: destination file appears to be binary\n")
|
||||
if 'src_binary' in diff:
|
||||
|
@ -101,6 +126,10 @@ class CallbackBase:
|
|||
if 'src_larger' in diff:
|
||||
ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
|
||||
if 'before' in diff and 'after' in diff:
|
||||
# format complex structures into 'files'
|
||||
for x in ['before', 'after']:
|
||||
if isinstance(diff[x], dict):
|
||||
diff[x] = json.dumps(diff[x], sort_keys=True, indent=4)
|
||||
if 'before_header' in diff:
|
||||
before_header = "before: %s" % diff['before_header']
|
||||
else:
|
||||
|
@ -109,12 +138,30 @@ class CallbackBase:
|
|||
after_header = "after: %s" % diff['after_header']
|
||||
else:
|
||||
after_header = 'after'
|
||||
differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
|
||||
ret.extend(list(differ))
|
||||
ret.append('\n')
|
||||
return u"".join(ret)
|
||||
differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True),
|
||||
to_unicode(diff['after']).splitlines(True),
|
||||
fromfile=before_header,
|
||||
tofile=after_header,
|
||||
fromfiledate='',
|
||||
tofiledate='',
|
||||
n=C.DIFF_CONTEXT)
|
||||
has_diff = False
|
||||
for line in differ:
|
||||
has_diff = True
|
||||
if line.startswith('+'):
|
||||
line = stringc(line, C.COLOR_DIFF_ADD)
|
||||
elif line.startswith('-'):
|
||||
line = stringc(line, C.COLOR_DIFF_REMOVE)
|
||||
elif line.startswith('@@'):
|
||||
line = stringc(line, C.COLOR_DIFF_LINES)
|
||||
ret.append(line)
|
||||
if has_diff:
|
||||
ret.append('\n')
|
||||
if 'prepared' in diff:
|
||||
ret.append(to_unicode(diff['prepared']))
|
||||
except UnicodeDecodeError:
|
||||
ret.append(">> the files are different, but the diff library cannot compare unicode strings\n\n")
|
||||
return u''.join(ret)
|
||||
|
||||
def _get_item(self, result):
|
||||
if result.get('_ansible_no_log', False):
|
||||
|
@ -125,16 +172,14 @@ class CallbackBase:
|
|||
return item
|
||||
|
||||
def _process_items(self, result):
|
||||
for res in result._result['results']:
|
||||
newres = deepcopy(result)
|
||||
res['item'] = self._get_item(res)
|
||||
newres._result = res
|
||||
if 'failed' in res and res['failed']:
|
||||
self.v2_playbook_item_on_failed(newres)
|
||||
elif 'skipped' in res and res['skipped']:
|
||||
self.v2_playbook_item_on_skipped(newres)
|
||||
else:
|
||||
self.v2_playbook_item_on_ok(newres)
|
||||
# just remove them as now they get handled by individual callbacks
|
||||
del result._result['results']
|
||||
|
||||
def _clean_results(self, result, task_name):
|
||||
if 'changed' in result and task_name in ['debug']:
|
||||
del result['changed']
|
||||
if 'invocation' in result and task_name in ['debug']:
|
||||
del result['invocation']
|
||||
|
||||
def set_play_context(self, play_context):
|
||||
pass
|
||||
|
@ -289,27 +334,22 @@ class CallbackBase:
|
|||
self.playbook_on_stats(stats)
|
||||
|
||||
def v2_on_file_diff(self, result):
|
||||
host = result._host.get_name()
|
||||
if 'diff' in result._result:
|
||||
host = result._host.get_name()
|
||||
self.on_file_diff(host, result._result['diff'])
|
||||
|
||||
def v2_playbook_on_item_ok(self, result):
|
||||
pass # no v1
|
||||
|
||||
def v2_playbook_on_item_failed(self, result):
|
||||
pass # no v1
|
||||
|
||||
def v2_playbook_on_item_skipped(self, result):
|
||||
pass # no v1
|
||||
|
||||
def v2_playbook_on_include(self, included_file):
|
||||
pass  # no v1 correspondence
|
||||
|
||||
def v2_playbook_item_on_ok(self, result):
|
||||
def v2_runner_item_on_ok(self, result):
|
||||
pass
|
||||
|
||||
def v2_playbook_item_on_failed(self, result):
|
||||
def v2_runner_item_on_failed(self, result):
|
||||
pass
|
||||
|
||||
def v2_playbook_item_on_skipped(self, result):
|
||||
def v2_runner_item_on_skipped(self, result):
|
||||
pass
|
||||
|
||||
def v2_runner_retry(self, result):
|
||||
pass
|
||||
|
||||
|
|
75
lib/ansible/plugins/callback/actionable.py
Normal file
75
lib/ansible/plugins/callback/actionable.py
Normal file
|
@ -0,0 +1,75 @@
|
|||
# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
|
||||
|
||||
class CallbackModule(CallbackModule_default):
|
||||
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'stdout'
|
||||
CALLBACK_NAME = 'actionable'
|
||||
|
||||
def __init__(self):
|
||||
self.super_ref = super(CallbackModule, self)
|
||||
self.super_ref.__init__()
|
||||
self.last_task = None
|
||||
self.shown_title = False
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self.last_task = task
|
||||
self.shown_title = False
|
||||
|
||||
def display_task_banner(self):
|
||||
if not self.shown_title:
|
||||
self.super_ref.v2_playbook_on_task_start(self.last_task, None)
|
||||
self.shown_title = True
|
||||
|
||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||
self.display_task_banner()
|
||||
self.super_ref.v2_runner_on_failed(result, ignore_errors)
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
if result._result.get('changed', False):
|
||||
self.display_task_banner()
|
||||
self.super_ref.v2_runner_on_ok(result)
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
self.display_task_banner()
|
||||
self.super_ref.v2_runner_on_unreachable(result)
|
||||
|
||||
def v2_runner_on_skipped(self, result):
|
||||
pass
|
||||
|
||||
def v2_playbook_on_include(self, included_file):
|
||||
pass
|
||||
|
||||
def v2_runner_item_on_ok(self, result):
|
||||
if result._result.get('changed', False):
|
||||
self.display_task_banner()
|
||||
self.super_ref.v2_runner_item_on_ok(result)
|
||||
|
||||
def v2_runner_item_on_skipped(self, result):
|
||||
pass
|
||||
|
||||
def v2_runner_item_on_failed(self, result):
|
||||
self.display_task_banner()
|
||||
self.super_ref.v2_runner_item_on_failed(result)
|
||||
|
|
@ -31,8 +31,18 @@ class CallbackModule(CallbackBase):
|
|||
CALLBACK_NAME = 'context_demo'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.task = None
|
||||
self.play = None
|
||||
|
||||
def v2_on_any(self, *args, **kwargs):
|
||||
i = 0
|
||||
if self.play:
|
||||
play_str = 'play: %s' % self.play.name
|
||||
if self.task:
|
||||
task_str = 'task: %s' % self.task
|
||||
self._display.display("--- %s %s ---" % (self.play_str, self.task_str))
|
||||
|
||||
self._display.display(" --- ARGS ")
|
||||
for a in args:
|
||||
self._display.display(' %s: %s' % (i, a))
|
||||
|
@ -41,3 +51,9 @@ class CallbackModule(CallbackBase):
|
|||
self._display.display(" --- KWARGS ")
|
||||
for k in kwargs:
|
||||
self._display.display(' %s: %s' % (k, kwargs[k]))
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
self.play = play
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self.task = task
|
||||
|
|
|
@ -44,24 +44,26 @@ class CallbackModule(CallbackBase):
|
|||
else:
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
|
||||
|
||||
self._display.display(msg, color='red')
|
||||
self._display.display(msg, color=C.COLOR_ERROR)
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
|
||||
if result._task.loop and 'results' in result._result:
|
||||
self._process_items(result)
|
||||
|
||||
else:
|
||||
if delegated_vars:
|
||||
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red')
|
||||
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
|
||||
else:
|
||||
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
|
||||
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
|
||||
|
||||
if result._task.ignore_errors:
|
||||
self._display.display("...ignoring", color='cyan')
|
||||
self._display.display("...ignoring", color=C.COLOR_SKIP)
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
|
||||
self._clean_results(result._result, result._task.action)
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if result._task.action == 'include':
|
||||
return
|
||||
|
@ -70,13 +72,13 @@ class CallbackModule(CallbackBase):
|
|||
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "changed: [%s]" % result._host.get_name()
|
||||
color = 'yellow'
|
||||
color = C.COLOR_CHANGED
|
||||
else:
|
||||
if delegated_vars:
|
||||
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "ok: [%s]" % result._host.get_name()
|
||||
color = 'green'
|
||||
color = C.COLOR_OK
|
||||
|
||||
if result._task.loop and 'results' in result._result:
|
||||
self._process_items(result)
|
||||
|
@ -96,27 +98,39 @@ class CallbackModule(CallbackBase):
|
|||
msg = "skipping: [%s]" % result._host.get_name()
|
||||
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
|
||||
msg += " => %s" % self._dump_results(result._result)
|
||||
self._display.display(msg, color='cyan')
|
||||
self._display.display(msg, color=C.COLOR_SKIP)
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if delegated_vars:
|
||||
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red')
|
||||
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
|
||||
else:
|
||||
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
|
||||
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
|
||||
|
||||
def v2_playbook_on_no_hosts_matched(self):
|
||||
self._display.display("skipping: no hosts matched", color='cyan')
|
||||
self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
|
||||
|
||||
def v2_playbook_on_no_hosts_remaining(self):
|
||||
self._display.banner("NO MORE HOSTS LEFT")
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self._display.banner("TASK [%s]" % task.get_name().strip())
|
||||
if self._display.verbosity > 2:
|
||||
args = ''
|
||||
# args can be specified as no_log in several places: in the task or in
|
||||
# the argument spec. We can check whether the task is no_log but the
|
||||
# argument spec can't be because that is only run on the target
|
||||
# machine and we haven't run it there yet at this time.
|
||||
#
|
||||
# So we give people a config option to affect display of the args so
|
||||
# that they can secure this if they feel that their stdout is insecure
|
||||
# (shoulder surfing, logging stdout straight to a file, etc).
|
||||
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
|
||||
args = ', '.join(('%s=%s' % a for a in task.args.items()))
|
||||
args = ' %s' % args
|
||||
self._display.banner("TASK [%s%s]" % (task.get_name().strip(), args))
|
||||
if self._display.verbosity >= 2:
|
||||
path = task.get_path()
|
||||
if path:
|
||||
self._display.display("task path: %s" % path, color='dark gray')
|
||||
self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
|
||||
|
||||
def v2_playbook_on_cleanup_task_start(self, task):
|
||||
self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
|
||||
|
@ -134,34 +148,40 @@ class CallbackModule(CallbackBase):
|
|||
self._display.banner(msg)
|
||||
|
||||
def v2_on_file_diff(self, result):
|
||||
if 'diff' in result._result and result._result['diff']:
|
||||
self._display.display(self._get_diff(result._result['diff']))
|
||||
|
||||
def v2_playbook_item_on_ok(self, result):
|
||||
if result._task.loop and 'results' in result._result:
|
||||
for res in result._result['results']:
|
||||
if 'diff' in res and res['diff'] and res.get('changed', False):
|
||||
diff = self._get_diff(res['diff'])
|
||||
if diff:
|
||||
self._display.display(diff)
|
||||
elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
|
||||
diff = self._get_diff(result._result['diff'])
|
||||
if diff:
|
||||
self._display.display(diff)
|
||||
|
||||
def v2_runner_item_on_ok(self, result):
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if result._task.action == 'include':
|
||||
return
|
||||
elif result._result.get('changed', False):
|
||||
if delegated_vars:
|
||||
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "changed: [%s]" % result._host.get_name()
|
||||
color = 'yellow'
|
||||
msg = 'changed'
|
||||
color = C.COLOR_CHANGED
|
||||
else:
|
||||
if delegated_vars:
|
||||
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "ok: [%s]" % result._host.get_name()
|
||||
color = 'green'
|
||||
msg = 'ok'
|
||||
color = C.COLOR_OK
|
||||
|
||||
msg += " => (item=%s)" % (result._result['item'],)
|
||||
if delegated_vars:
|
||||
msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg += ": [%s]" % result._host.get_name()
|
||||
|
||||
msg += " => (item=%s)" % (self._get_item(result._result),)
|
||||
|
||||
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
|
||||
msg += " => %s" % self._dump_results(result._result)
|
||||
self._display.display(msg, color=color)
|
||||
|
||||
def v2_playbook_item_on_failed(self, result):
|
||||
def v2_runner_item_on_failed(self, result):
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if 'exception' in result._result:
|
||||
if self._display.verbosity < 3:
|
||||
|
@ -171,28 +191,30 @@ class CallbackModule(CallbackBase):
|
|||
else:
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
|
||||
|
||||
self._display.display(msg, color='red')
|
||||
self._display.display(msg, color=C.COLOR_ERROR)
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
|
||||
msg = "failed: "
|
||||
if delegated_vars:
|
||||
self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color='red')
|
||||
msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red')
|
||||
msg += "[%s]" % (result._host.get_name())
|
||||
|
||||
self._display.display(msg + " (item=%s) => %s" % (self._get_item(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR)
|
||||
self._handle_warnings(result._result)
|
||||
|
||||
def v2_playbook_item_on_skipped(self, result):
|
||||
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), result._result['item'])
|
||||
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
|
||||
msg += " => %s" % self._dump_results(result._result)
|
||||
self._display.display(msg, color='cyan')
|
||||
def v2_runner_item_on_skipped(self, result):
|
||||
if C.DISPLAY_SKIPPED_HOSTS:
|
||||
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result))
|
||||
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
|
||||
msg += " => %s" % self._dump_results(result._result)
|
||||
self._display.display(msg, color=C.COLOR_SKIP)
|
||||
|
||||
def v2_playbook_on_include(self, included_file):
|
||||
msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
|
||||
color = 'cyan'
|
||||
self._display.display(msg, color='cyan')
|
||||
self._display.display(msg, color=C.COLOR_SKIP)
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
self._display.banner("PLAY RECAP")
|
||||
|
@ -203,10 +225,10 @@ class CallbackModule(CallbackBase):
|
|||
|
||||
self._display.display(u"%s : %s %s %s %s" % (
|
||||
hostcolor(h, t),
|
||||
colorize(u'ok', t['ok'], 'green'),
|
||||
colorize(u'changed', t['changed'], 'yellow'),
|
||||
colorize(u'unreachable', t['unreachable'], 'red'),
|
||||
colorize(u'failed', t['failures'], 'red')),
|
||||
colorize(u'ok', t['ok'], C.COLOR_OK),
|
||||
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
|
||||
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
|
||||
colorize(u'failed', t['failures'], C.COLOR_ERROR)),
|
||||
screen_only=True
|
||||
)
|
||||
|
||||
|
@ -221,3 +243,22 @@ class CallbackModule(CallbackBase):
|
|||
|
||||
self._display.display("", screen_only=True)
|
||||
|
||||
def v2_playbook_on_start(self, playbook):
|
||||
if self._display.verbosity > 1:
|
||||
from os.path import basename
|
||||
self._display.banner("PLAYBOOK: %s" % basename(playbook._file_name))
|
||||
|
||||
if self._display.verbosity > 3:
|
||||
if self._options is not None:
|
||||
for option in dir(self._options):
|
||||
if option.startswith('_') or option in ['read_file', 'ensure_value', 'read_module']:
|
||||
continue
|
||||
val = getattr(self._options,option)
|
||||
if val:
|
||||
self._display.vvvv('%s: %s' % (option,val))
|
||||
|
||||
def v2_runner_retry(self, result):
|
||||
msg = "FAILED - RETRYING: %s (%d retries left)." % (result._task, result._result['retries'] - result._result['attempts'])
|
||||
if (self._display.verbosity > 2 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
|
||||
msg += "Result was: %s" % self._dump_results(result._result)
|
||||
self._display.display(msg, color=C.COLOR_DEBUG)
|
||||
|
|
|
@ -73,6 +73,7 @@ class CallbackModule(CallbackBase):
|
|||
|
||||
self.printed_playbook = False
|
||||
self.playbook_name = None
|
||||
self.play = None
|
||||
|
||||
def send_msg(self, msg, msg_format='text', color='yellow', notify=False):
|
||||
"""Method for sending a message to HipChat"""
|
||||
|
@ -93,9 +94,11 @@ class CallbackModule(CallbackBase):
|
|||
self.display.warning('Could not submit message to hipchat')
|
||||
|
||||
|
||||
def playbook_on_play_start(self, name):
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
"""Display Playbook and play start messages"""
|
||||
|
||||
self.play = play
|
||||
name = play.name
|
||||
# This block sends information about a playbook when it starts
|
||||
# The playbook object is not immediately available at
|
||||
# playbook_on_start so we grab it via the play
|
||||
|
|
83
lib/ansible/plugins/callback/json.py
Normal file
83
lib/ansible/plugins/callback/json.py
Normal file
|
@ -0,0 +1,83 @@
|
|||
# (c) 2016, Matt Martz <matt@sivel.net>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'stdout'
|
||||
CALLBACK_NAME = 'json'
|
||||
|
||||
def __init__(self, display=None):
|
||||
super(CallbackModule, self).__init__(display)
|
||||
self.results = []
|
||||
|
||||
def _new_play(self, play):
|
||||
return {
|
||||
'play': {
|
||||
'name': play.name,
|
||||
'id': str(play._uuid)
|
||||
},
|
||||
'tasks': []
|
||||
}
|
||||
|
||||
def _new_task(self, task):
|
||||
return {
|
||||
'task': {
|
||||
'name': task.name,
|
||||
'id': str(task._uuid)
|
||||
},
|
||||
'hosts': {}
|
||||
}
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
self.results.append(self._new_play(play))
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self.results[-1]['tasks'].append(self._new_task(task))
|
||||
|
||||
def v2_runner_on_ok(self, result, **kwargs):
|
||||
host = result._host
|
||||
self.results[-1]['tasks'][-1]['hosts'][host.name] = result._result
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
"""Display info about playbook statistics"""
|
||||
|
||||
hosts = sorted(stats.processed.keys())
|
||||
|
||||
summary = {}
|
||||
for h in hosts:
|
||||
s = stats.summarize(h)
|
||||
summary[h] = s
|
||||
|
||||
output = {
|
||||
'plays': self.results,
|
||||
'stats': summary
|
||||
}
|
||||
|
||||
print(json.dumps(output, indent=4, sort_keys=True))
|
||||
|
||||
v2_runner_on_failed = v2_runner_on_ok
|
||||
v2_runner_on_unreachable = v2_runner_on_ok
|
||||
v2_runner_on_skipped = v2_runner_on_ok
|
|
@ -60,8 +60,9 @@ import socket
|
|||
import random
|
||||
import time
|
||||
import codecs
|
||||
import ConfigParser
|
||||
import uuid
|
||||
from ansible.compat.six.moves import configparser
|
||||
|
||||
try:
|
||||
import certifi
|
||||
HAS_CERTIFI = True
|
||||
|
@ -212,7 +213,7 @@ class CallbackModule(CallbackBase):
|
|||
'Disabling the Logentries callback plugin.')
|
||||
|
||||
config_path = os.path.abspath(os.path.dirname(__file__))
|
||||
config = ConfigParser.ConfigParser()
|
||||
config = configparser.ConfigParser()
|
||||
try:
|
||||
config.readfp(open(os.path.join(config_path, 'logentries.ini')))
|
||||
if config.has_option('logentries', 'api'):
|
||||
|
|
|
@ -53,28 +53,32 @@ class CallbackModule(CallbackBase):
|
|||
else:
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
|
||||
|
||||
self._display.display(msg, color='red')
|
||||
self._display.display(msg, color=C.COLOR_ERROR)
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
|
||||
if result._task.action in C.MODULE_NO_JSON:
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color='red')
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color=C.COLOR_ERROR)
|
||||
else:
|
||||
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='red')
|
||||
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_ERROR)
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
self._clean_results(result._result, result._task.action)
|
||||
if result._task.action in C.MODULE_NO_JSON:
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color='green')
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color=C.COLOR_OK)
|
||||
else:
|
||||
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='green')
|
||||
if 'changed' in result._result and result._result['changed']:
|
||||
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_CHANGED)
|
||||
else:
|
||||
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_OK)
|
||||
self._handle_warnings(result._result)
|
||||
|
||||
def v2_runner_on_skipped(self, result):
|
||||
self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
|
||||
self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='yellow')
|
||||
self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_UNREACHABLE)
|
||||
|
||||
def v2_on_file_diff(self, result):
|
||||
if 'diff' in result._result and result._result['diff']:
|
||||
|
|
|
@ -52,24 +52,24 @@ class CallbackModule(CallbackBase):
|
|||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n','')
|
||||
|
||||
if result._task.action in C.MODULE_NO_JSON:
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color='red')
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color=C.COLOR_ERROR)
|
||||
else:
|
||||
self._display.display(msg, color='red')
|
||||
self._display.display(msg, color=C.COLOR_ERROR)
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
|
||||
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='red')
|
||||
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_ERROR)
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
if result._task.action in C.MODULE_NO_JSON:
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color='green')
|
||||
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color=C.COLOR_OK)
|
||||
else:
|
||||
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='green')
|
||||
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_OK)
|
||||
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')
|
||||
self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color=C.COLOR_UNREACHABLE)
|
||||
|
||||
def v2_runner_on_skipped(self, result):
|
||||
self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
|
||||
self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
# (C) 2016, Joel, http://github.com/jjshoe
|
||||
# (C) 2015, Tom Paine, <github@aioue.net>
|
||||
# (C) 2014, Jharrod LaFon, @JharrodLaFon
|
||||
# (C) 2012-2013, Michael DeHaan, <michael.dehaan@gmail.com>
|
||||
|
@ -22,6 +23,8 @@
|
|||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import collections
|
||||
import os
|
||||
import time
|
||||
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
@ -49,7 +52,7 @@ def filled(msg, fchar="*"):
|
|||
|
||||
def timestamp(self):
|
||||
if self.current is not None:
|
||||
self.stats[self.current] = time.time() - self.stats[self.current]
|
||||
self.stats[self.current]['time'] = time.time() - self.stats[self.current]['time']
|
||||
|
||||
|
||||
def tasktime():
|
||||
|
@ -72,12 +75,22 @@ class CallbackModule(CallbackBase):
|
|||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self):
|
||||
self.stats = {}
|
||||
self.stats = collections.OrderedDict()
|
||||
self.current = None
|
||||
self.sort_order = os.getenv('PROFILE_TASKS_SORT_ORDER', True)
|
||||
self.task_output_limit = os.getenv('PROFILE_TASKS_TASK_OUTPUT_LIMIT', 20)
|
||||
|
||||
if self.sort_order == 'ascending':
|
||||
self.sort_order = False
|
||||
|
||||
if self.task_output_limit == 'all':
|
||||
self.task_output_limit = None
|
||||
else:
|
||||
self.task_output_limit = int(self.task_output_limit)
|
||||
|
||||
super(CallbackModule, self).__init__()
|
||||
|
||||
def _record_task(self, name):
|
||||
def _record_task(self, task):
|
||||
"""
|
||||
Logs the start of each task
|
||||
"""
|
||||
|
@ -85,14 +98,16 @@ class CallbackModule(CallbackBase):
|
|||
timestamp(self)
|
||||
|
||||
# Record the start time of the current task
|
||||
self.current = name
|
||||
self.stats[self.current] = time.time()
|
||||
self.current = task._uuid
|
||||
self.stats[self.current] = {'time': time.time(), 'name': task.get_name()}
|
||||
if self._display.verbosity >= 2:
|
||||
self.stats[self.current]['path'] = task.get_path()
|
||||
|
||||
def playbook_on_task_start(self, name, is_conditional):
|
||||
self._record_task(name)
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self._record_task(task)
|
||||
|
||||
def v2_playbook_on_handler_task_start(self, task):
|
||||
self._record_task('HANDLER: ' + task.name)
|
||||
self._record_task(task)
|
||||
|
||||
def playbook_on_setup(self):
|
||||
self._display.display(tasktime())
|
||||
|
@ -103,21 +118,23 @@ class CallbackModule(CallbackBase):
|
|||
|
||||
timestamp(self)
|
||||
|
||||
# Sort the tasks by their running time
|
||||
results = sorted(
|
||||
self.stats.items(),
|
||||
key=lambda value: value[1],
|
||||
reverse=True,
|
||||
)
|
||||
results = self.stats.items()
|
||||
|
||||
# Just keep the top 20
|
||||
results = results[:20]
|
||||
# Sort the tasks by the specified sort
|
||||
if self.sort_order != 'none':
|
||||
results = sorted(
|
||||
self.stats.iteritems(),
|
||||
key=lambda x:x[1]['time'],
|
||||
reverse=self.sort_order,
|
||||
)
|
||||
|
||||
# Display the number of tasks specified or the default of 20
|
||||
results = results[:self.task_output_limit]
|
||||
|
||||
# Print the timings
|
||||
for name, elapsed in results:
|
||||
self._display.display(
|
||||
"{0:-<70}{1:->9}".format(
|
||||
'{0} '.format(name),
|
||||
' {0:.02f}s'.format(elapsed),
|
||||
)
|
||||
)
|
||||
for uuid, result in results:
|
||||
msg = ''
|
||||
msg="{0:-<70}{1:->9}".format('{0} '.format(result['name']),' {0:.02f}s'.format(result['time']))
|
||||
if 'path' in result:
|
||||
msg += "\n{0:-<79}".format( '{0} '.format(result['path']))
|
||||
self._display.display(msg)
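For reference (not part of the commit), a small sketch with hypothetical values showing what one recap line produced by the format string above looks like:

# Hypothetical stats entry, shaped like the ones _record_task() stores.
result = {'name': 'old_and_slow : install tons of packages', 'time': 20.03}

# Same format string as above: the task name is padded with dashes out to
# column 70, then the elapsed seconds are right-aligned in a 9-wide field.
msg = "{0:-<70}{1:->9}".format('{0} '.format(result['name']), ' {0:.02f}s'.format(result['time']))
print(msg)  # old_and_slow : install tons of packages ---...--- 20.03s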

75
lib/ansible/plugins/callback/profile_tasks.rst
Normal file
|
@ -0,0 +1,75 @@
profile\_tasks.py
=================

Ansible plugin for timing individual tasks and overall execution time.

Mashup of 2 excellent original works:

- https://github.com/jlafon/ansible-profile
- https://github.com/junaid18183/ansible_home/blob/master/ansible_plugins/callback_plugins/timestamp.py.old

Usage
-----

Add ``profile_tasks`` to the ``callback_whitelist`` in ``ansible.cfg``.

Run playbooks as normal.

Certain options are configurable using environment variables. You can specify ``ascending`` or ``none`` for
the environment variable ``PROFILE_TASKS_SORT_ORDER`` to adjust sorting output. If you want to see more than
20 tasks in the output you can set ``PROFILE_TASKS_TASK_OUTPUT_LIMIT`` to any number, or the special value
``all`` to get a list of all tasks.
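
Both variables are read straight from the environment by the plugin's ``__init__``. A minimal sketch (not part of the original file) of how the values are interpreted:

.. code:: python

  # Mirrors the callback's __init__: both variables arrive as strings.
  import os

  sort_order = os.getenv('PROFILE_TASKS_SORT_ORDER', True)
  task_output_limit = os.getenv('PROFILE_TASKS_TASK_OUTPUT_LIMIT', 20)

  if sort_order == 'ascending':
      sort_order = False        # passed to sorted(..., reverse=sort_order)

  if task_output_limit == 'all':
      task_output_limit = None  # results[:None] keeps every task
  else:
      task_output_limit = int(task_output_limit)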

Features
--------

Tasks
~~~~~

Ongoing timing of each task as it happens.

| Format:
| ``<task start timestamp> (<length of previous task>) <current elapsed playbook execution time>``

Task output example:

.. code:: shell

  TASK: [ensure messaging security group exists] ********************************
  Thursday 11 June 2016 22:50:53 +0100 (0:00:00.721) 0:00:05.322 *********
  ok: [localhost]

  TASK: [ensure db security group exists] ***************************************
  Thursday 11 June 2016 22:50:54 +0100 (0:00:00.558) 0:00:05.880 *********
  changed: [localhost]

Play Recap
~~~~~~~~~~

Recap includes ending timestamp, total playbook execution time and a
sorted list of the top longest running tasks.

No more wondering how old the results in a terminal window are.

.. code:: shell

  ansible <args here>
  <normal output here>
  PLAY RECAP ********************************************************************
  Thursday 11 June 2016 22:51:00 +0100 (0:00:01.011) 0:00:43.247 *********
  ===============================================================================
  old_and_slow : install tons of packages -------------------------------- 20.03s
  /home/bob/ansible/roles/old_and_slow/tasks/main.yml:4 -------------------------
  db : second task to run ------------------------------------------------- 2.03s
  /home/bob/ansible/roles/db/tasks/main.yml:4 -----------------------------------
  setup ------------------------------------------------------------------- 0.42s
  None --------------------------------------------------------------------------
  www : first task to run ------------------------------------------------- 0.03s
  /home/bob/ansible/roles/www/tasks/main.yml:1 ----------------------------------
  fast_task : first task to run ------------------------------------------- 0.01s
  /home/bob/ansible/roles/fast_task.yml:1 ---------------------------------------

Compatibility
-------------

Ansible 2.0+
|
|
@ -19,10 +19,9 @@
|
|||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
class CallbackModule(CallbackModule_default):
|
||||
|
||||
'''
|
||||
This is the default callback interface, which simply prints messages
|
||||
|
@ -33,147 +32,8 @@ class CallbackModule(CallbackBase):
|
|||
CALLBACK_TYPE = 'stdout'
|
||||
CALLBACK_NAME = 'skippy'
|
||||
|
||||
def v2_runner_on_failed(self, result, ignore_errors=False):
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if 'exception' in result._result:
|
||||
if self._display.verbosity < 3:
|
||||
# extract just the actual error message from the exception text
|
||||
error = result._result['exception'].strip().split('\n')[-1]
|
||||
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
|
||||
else:
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
|
||||
|
||||
self._display.display(msg, color='red')
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
|
||||
if result._task.loop and 'results' in result._result:
|
||||
self._process_items(result)
|
||||
else:
|
||||
if delegated_vars:
|
||||
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red')
|
||||
else:
|
||||
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
|
||||
|
||||
if result._task.ignore_errors:
|
||||
self._display.display("...ignoring", color='cyan')
|
||||
|
||||
def v2_runner_on_ok(self, result):
|
||||
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if result._task.action == 'include':
|
||||
return
|
||||
elif result._result.get('changed', False):
|
||||
if delegated_vars:
|
||||
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "changed: [%s]" % result._host.get_name()
|
||||
color = 'yellow'
|
||||
else:
|
||||
if delegated_vars:
|
||||
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "ok: [%s]" % result._host.get_name()
|
||||
color = 'green'
|
||||
|
||||
if result._task.loop and 'results' in result._result:
|
||||
self._process_items(result)
|
||||
else:
|
||||
|
||||
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
|
||||
msg += " => %s" % (self._dump_results(result._result),)
|
||||
self._display.display(msg, color=color)
|
||||
|
||||
self._handle_warnings(result._result)
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if delegated_vars:
|
||||
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red')
|
||||
else:
|
||||
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
|
||||
|
||||
def v2_playbook_on_no_hosts_matched(self):
|
||||
self._display.display("skipping: no hosts matched", color='cyan')
|
||||
|
||||
def v2_playbook_on_no_hosts_remaining(self):
|
||||
self._display.banner("NO MORE HOSTS LEFT")
|
||||
|
||||
def v2_playbook_on_task_start(self, task, is_conditional):
|
||||
self._display.banner("TASK [%s]" % task.get_name().strip())
|
||||
if self._display.verbosity > 2:
|
||||
path = task.get_path()
|
||||
if path:
|
||||
self._display.display("task path: %s" % path, color='dark gray')
|
||||
|
||||
def v2_playbook_on_cleanup_task_start(self, task):
|
||||
self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
|
||||
|
||||
def v2_playbook_on_handler_task_start(self, task):
|
||||
self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
name = play.get_name().strip()
|
||||
if not name:
|
||||
msg = "PLAY"
|
||||
else:
|
||||
msg = "PLAY [%s]" % name
|
||||
|
||||
self._display.banner(msg)
|
||||
|
||||
def v2_on_file_diff(self, result):
|
||||
if 'diff' in result._result and result._result['diff']:
|
||||
self._display.display(self._get_diff(result._result['diff']))
|
||||
|
||||
def v2_playbook_item_on_ok(self, result):
|
||||
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if result._task.action == 'include':
|
||||
return
|
||||
elif result._result.get('changed', False):
|
||||
if delegated_vars:
|
||||
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "changed: [%s]" % result._host.get_name()
|
||||
color = 'yellow'
|
||||
else:
|
||||
if delegated_vars:
|
||||
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
|
||||
else:
|
||||
msg = "ok: [%s]" % result._host.get_name()
|
||||
color = 'green'
|
||||
|
||||
msg += " => (item=%s)" % (result._result['item'],)
|
||||
|
||||
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
|
||||
msg += " => %s" % self._dump_results(result._result)
|
||||
self._display.display(msg, color=color)
|
||||
|
||||
def v2_playbook_item_on_failed(self, result):
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if 'exception' in result._result:
|
||||
if self._display.verbosity < 3:
|
||||
# extract just the actual error message from the exception text
|
||||
error = result._result['exception'].strip().split('\n')[-1]
|
||||
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
|
||||
else:
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
|
||||
|
||||
self._display.display(msg, color='red')
|
||||
|
||||
# finally, remove the exception from the result so it's not shown every time
|
||||
del result._result['exception']
|
||||
|
||||
if delegated_vars:
|
||||
self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color='red')
|
||||
else:
|
||||
self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red')
|
||||
|
||||
self._handle_warnings(result._result)
|
||||
|
||||
def v2_playbook_on_include(self, included_file):
|
||||
msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
|
||||
color = 'cyan'
|
||||
self._display.display(msg, color='cyan')
|
||||
def v2_runner_on_skipped(self, result):
|
||||
pass
|
||||
|
||||
def v2_runner_item_on_skipped(self, result):
|
||||
pass
|
||||
|
|
233
lib/ansible/plugins/callback/slack.py
Normal file
|
@ -0,0 +1,233 @@
|
|||
# (C) 2014-2015, Matt Martz <matt@sivel.net>
|
||||
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import json
|
||||
import os
|
||||
import uuid
|
||||
|
||||
try:
|
||||
from __main__ import cli
|
||||
except ImportError:
|
||||
cli = None
|
||||
|
||||
from ansible.constants import mk_boolean
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
|
||||
try:
|
||||
import prettytable
|
||||
HAS_PRETTYTABLE = True
|
||||
except ImportError:
|
||||
HAS_PRETTYTABLE = False
|
||||
|
||||
|
||||
class CallbackModule(CallbackBase):
|
||||
"""This is an ansible callback plugin that sends status
|
||||
updates to a Slack channel during playbook execution.
|
||||
|
||||
This plugin makes use of the following environment variables:
|
||||
SLACK_WEBHOOK_URL (required): Slack Webhook URL
|
||||
SLACK_CHANNEL (optional): Slack room to post in. Default: #ansible
|
||||
SLACK_USERNAME (optional): Username to post as. Default: ansible
|
||||
SLACK_INVOCATION (optional): Show command line invocation
|
||||
details. Default: False
|
||||
|
||||
Requires:
|
||||
prettytable
|
||||
|
||||
"""
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'notification'
|
||||
CALLBACK_NAME = 'slack'
|
||||
CALLBACK_NEEDS_WHITELIST = True
|
||||
|
||||
def __init__(self, display=None):
|
||||
|
||||
self.disabled = False
|
||||
|
||||
if cli:
|
||||
self._options = cli.options
|
||||
else:
|
||||
self._options = None
|
||||
|
||||
|
||||
super(CallbackModule, self).__init__(display=display)
|
||||
|
||||
if not HAS_PRETTYTABLE:
|
||||
self.disabled = True
|
||||
self._display.warning('The `prettytable` python module is not '
|
||||
'installed. Disabling the Slack callback '
|
||||
'plugin.')
|
||||
|
||||
self.webhook_url = os.getenv('SLACK_WEBHOOK_URL')
|
||||
self.channel = os.getenv('SLACK_CHANNEL', '#ansible')
|
||||
self.username = os.getenv('SLACK_USERNAME', 'ansible')
|
||||
self.show_invocation = mk_boolean(
|
||||
os.getenv('SLACK_INVOCATION', self._display.verbosity > 1)
|
||||
)
|
||||
|
||||
if self.webhook_url is None:
|
||||
self.disabled = True
|
||||
self._display.warning('Slack Webhook URL was not provided. The '
|
||||
'Slack Webhook URL can be provided using '
|
||||
'the `SLACK_WEBHOOK_URL` environment '
|
||||
'variable.')
|
||||
|
||||
self.playbook_name = None
|
||||
|
||||
# This is a 6 character identifier provided with each message
|
||||
# This makes it easier to correlate messages when there are more
|
||||
# than one playbook running simultaneously
|
||||
self.guid = uuid.uuid4().hex[:6]
|
||||
|
||||
def send_msg(self, attachments):
|
||||
payload = {
|
||||
'channel': self.channel,
|
||||
'username': self.username,
|
||||
'attachments': attachments,
|
||||
'parse': 'none',
|
||||
'icon_url': ('http://cdn2.hubspot.net/hub/330046/'
|
||||
'file-449187601-png/ansible_badge.png'),
|
||||
}
|
||||
|
||||
data = json.dumps(payload)
|
||||
self._display.debug(data)
|
||||
self._display.debug(self.webhook_url)
|
||||
try:
|
||||
response = open_url(self.webhook_url, data=data)
|
||||
return response.read()
|
||||
except Exception as e:
|
||||
self._display.warning('Could not submit message to Slack: %s' %
|
||||
str(e))
|
||||
|
||||
def v2_playbook_on_start(self, playbook):
|
||||
self.playbook_name = os.path.basename(playbook._file_name)
|
||||
|
||||
title = [
|
||||
'*Playbook initiated* (_%s_)' % self.guid
|
||||
]
|
||||
invocation_items = []
|
||||
if self._options and self.show_invocation:
|
||||
tags = self._options.tags
|
||||
skip_tags = self._options.skip_tags
|
||||
extra_vars = self._options.extra_vars
|
||||
subset = self._options.subset
|
||||
inventory = os.path.basename(
|
||||
os.path.realpath(self._options.inventory)
|
||||
)
|
||||
|
||||
invocation_items.append('Inventory: %s' % inventory)
|
||||
if tags and tags != 'all':
|
||||
invocation_items.append('Tags: %s' % tags)
|
||||
if skip_tags:
|
||||
invocation_items.append('Skip Tags: %s' % skip_tags)
|
||||
if subset:
|
||||
invocation_items.append('Limit: %s' % subset)
|
||||
if extra_vars:
|
||||
invocation_items.append('Extra Vars: %s' %
|
||||
' '.join(extra_vars))
|
||||
|
||||
title.append('by *%s*' % self._options.remote_user)
|
||||
|
||||
title.append('\n\n*%s*' % self.playbook_name)
|
||||
msg_items = [' '.join(title)]
|
||||
if invocation_items:
|
||||
msg_items.append('```\n%s\n```' % '\n'.join(invocation_items))
|
||||
|
||||
msg = '\n'.join(msg_items)
|
||||
|
||||
attachments = [{
|
||||
'fallback': msg,
|
||||
'fields': [
|
||||
{
|
||||
'value': msg
|
||||
}
|
||||
],
|
||||
'color': 'warning',
|
||||
'mrkdwn_in': ['text', 'fallback', 'fields'],
|
||||
}]
|
||||
|
||||
self.send_msg(attachments=attachments)
|
||||
|
||||
def v2_playbook_on_play_start(self, play):
|
||||
"""Display Play start messages"""
|
||||
|
||||
name = play.name or 'Play name not specified (%s)' % play._uuid
|
||||
msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name)
|
||||
attachments = [
|
||||
{
|
||||
'fallback': msg,
|
||||
'text': msg,
|
||||
'color': 'warning',
|
||||
'mrkdwn_in': ['text', 'fallback', 'fields'],
|
||||
}
|
||||
]
|
||||
self.send_msg(attachments=attachments)
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
"""Display info about playbook statistics"""
|
||||
|
||||
hosts = sorted(stats.processed.keys())
|
||||
|
||||
t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
|
||||
'Failures'])
|
||||
|
||||
failures = False
|
||||
unreachable = False
|
||||
|
||||
for h in hosts:
|
||||
s = stats.summarize(h)
|
||||
|
||||
if s['failures'] > 0:
|
||||
failures = True
|
||||
if s['unreachable'] > 0:
|
||||
unreachable = True
|
||||
|
||||
t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
|
||||
'failures']])
|
||||
|
||||
attachments = []
|
||||
msg_items = [
|
||||
'*Playbook Complete* (_%s_)' % self.guid
|
||||
]
|
||||
if failures or unreachable:
|
||||
color = 'danger'
|
||||
msg_items.append('\n*Failed!*')
|
||||
else:
|
||||
color = 'good'
|
||||
msg_items.append('\n*Success!*')
|
||||
|
||||
msg_items.append('```\n%s\n```' % t)
|
||||
|
||||
msg = '\n'.join(msg_items)
|
||||
|
||||
attachments.append({
|
||||
'fallback': msg,
|
||||
'fields': [
|
||||
{
|
||||
'value': msg
|
||||
}
|
||||
],
|
||||
'color': color,
|
||||
'mrkdwn_in': ['text', 'fallback', 'fields']
|
||||
})
|
||||
|
||||
self.send_msg(attachments=attachments)
|
|
@ -41,7 +41,8 @@ class CallbackModule(CallbackBase):
|
|||
|
||||
self.tree = TREE_DIR
|
||||
if not self.tree:
|
||||
self._display.warnings("Disabling tree callback, invalid directory provided to tree option: %s" % self.tree)
|
||||
self.tree = os.path.expanduser("~/.ansible/tree")
|
||||
self._display.warning("The tree callback is defaulting to ~/.ansible/tree, as an invalid directory was provided: %s" % self.tree)
|
||||
|
||||
def write_tree_file(self, hostname, buf):
|
||||
''' write something into treedir/hostname '''
|
||||
|
@ -53,7 +54,7 @@ class CallbackModule(CallbackBase):
|
|||
with open(path, 'wb+') as fd:
|
||||
fd.write(buf)
|
||||
except (OSError, IOError) as e:
|
||||
self._display.warnings("Unable to write to %s's file: %s" % (hostname, str(e)))
|
||||
self._display.warning("Unable to write to %s's file: %s" % (hostname, str(e)))
|
||||
|
||||
def result_to_tree(self, result):
|
||||
if self.tree:
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
|
||||
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
|
@ -23,6 +23,7 @@ __metaclass__ = type
|
|||
import fcntl
|
||||
import gettext
|
||||
import os
|
||||
import shlex
|
||||
from abc import ABCMeta, abstractmethod, abstractproperty
|
||||
|
||||
from functools import wraps
|
||||
|
@ -31,6 +32,7 @@ from ansible.compat.six import with_metaclass
|
|||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins import shell_loader
|
||||
from ansible.utils.unicode import to_bytes, to_unicode
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -40,6 +42,8 @@ except ImportError:
|
|||
|
||||
__all__ = ['ConnectionBase', 'ensure_connect']
|
||||
|
||||
BUFSIZE = 65536
|
||||
|
||||
|
||||
def ensure_connect(func):
|
||||
@wraps(func)
|
||||
|
@ -60,6 +64,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
|
|||
# as discovered by the specified file extension. An empty string as the
|
||||
# language means any language.
|
||||
module_implementation_preferences = ('',)
|
||||
allow_executable = True
|
||||
|
||||
def __init__(self, play_context, new_stdin, *args, **kwargs):
|
||||
# All these hasattrs allow subclasses to override these parameters
|
||||
|
@ -83,7 +88,12 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
|
|||
elif hasattr(self, '_shell_type'):
|
||||
shell_type = getattr(self, '_shell_type')
|
||||
else:
|
||||
shell_type = os.path.basename(C.DEFAULT_EXECUTABLE)
|
||||
shell_type = 'sh'
|
||||
shell_filename = os.path.basename(self._play_context.executable)
|
||||
for shell in shell_loader.all():
|
||||
if shell_filename in shell.COMPATIBLE_SHELLS:
|
||||
shell_type = shell.SHELL_FAMILY
|
||||
break
|
||||
|
||||
self._shell = shell_loader.get(shell_type)
|
||||
if not self._shell:
|
||||
|
@ -91,6 +101,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
|
|||
|
||||
@property
|
||||
def connected(self):
|
||||
'''Read-only property holding whether the connection to the remote host is active or closed.'''
|
||||
return self._connected
|
||||
|
||||
def _become_method_supported(self):
|
||||
|
@ -112,6 +123,24 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
|
|||
'''
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def _split_ssh_args(argstring):
|
||||
"""
|
||||
Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
|
||||
list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
|
||||
the argument list. The list will not contain any empty elements.
|
||||
"""
|
||||
try:
|
||||
# Python 2.6.x shlex doesn't handle unicode type so we have to
|
||||
# convert args to byte string for that case. More efficient to
|
||||
# try without conversion first but python2.6 doesn't throw an
|
||||
# exception, it merely mangles the output:
|
||||
# >>> shlex.split(u't e')
|
||||
# ['t\x00\x00\x00', '\x00\x00\x00e\x00\x00\x00']
|
||||
return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()]
|
||||
except AttributeError:
|
||||
return [to_unicode(x.strip()) for x in shlex.split(argstring) if x.strip()]
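A quick usage sketch of the helper above, matching the example in its docstring:

from ansible.plugins.connection import ConnectionBase

args = ConnectionBase._split_ssh_args('-o Foo=1 -o Bar="foo bar"')
# args == ['-o', 'Foo=1', '-o', 'Bar=foo bar']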
|
||||
|
||||
@abstractproperty
|
||||
def transport(self):
|
||||
"""String used to identify this Connection class from other classes"""
|
||||
|
@ -144,8 +173,8 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
|
|||
When a command is executed, it goes through multiple commands to get
|
||||
there. It looks approximately like this::
|
||||
|
||||
HardCodedShell ConnectionCommand UsersLoginShell DEFAULT_EXECUTABLE BecomeCommand DEFAULT_EXECUTABLE Command
|
||||
:HardCodedShell: Is optional. It is run locally to invoke the
|
||||
[LocalShell] ConnectionCommand [UsersLoginShell (*)] ANSIBLE_SHELL_EXECUTABLE [(BecomeCommand ANSIBLE_SHELL_EXECUTABLE)] Command
|
||||
:LocalShell: Is optional. It is run locally to invoke the
|
||||
``Connection Command``. In most instances, the
|
||||
``ConnectionCommand`` can be invoked directly instead. The ssh
|
||||
connection plugin which can have values that need expanding
|
||||
|
@ -158,30 +187,36 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
|
|||
``ansible_ssh_host`` and so forth are fed to this piece of the
|
||||
command to connect to the correct host (Examples ``ssh``,
|
||||
``chroot``)
|
||||
:UsersLoginShell: This is the shell that the ``ansible_ssh_user`` has
|
||||
configured as their login shell. In traditional UNIX parlance,
|
||||
this is the last field of a user's ``/etc/passwd`` entry We do not
|
||||
specifically try to run the ``UsersLoginShell`` when we connect.
|
||||
Instead it is implicit in the actions that the
|
||||
``ConnectionCommand`` takes when it connects to a remote machine.
|
||||
``ansible_shell_type`` may be set to inform ansible of differences
|
||||
in how the ``UsersLoginShell`` handles things like quoting if a
|
||||
shell has different semantics than the Bourne shell.
|
||||
:DEFAULT_EXECUTABLE: This is the shell accessible via
|
||||
``ansible.constants.DEFAULT_EXECUTABLE``. We explicitly invoke
|
||||
this shell so that we have predictable quoting rules at this
|
||||
point. The ``DEFAULT_EXECUTABLE`` is only settable by the user
|
||||
because some sudo setups may only allow invoking a specific Bourne
|
||||
shell. (For instance, ``/bin/bash`` may be allowed but
|
||||
``/bin/sh``, our default, may not). We invoke this twice, once
|
||||
after the ``ConnectionCommand`` and once after the
|
||||
:UsersLoginShell: This shell may or may not be created depending on
|
||||
the ConnectionCommand used by the connection plugin. This is the
|
||||
shell that the ``ansible_ssh_user`` has configured as their login
|
||||
shell. In traditional UNIX parlance, this is the last field of
|
||||
a user's ``/etc/passwd`` entry. We do not specifically try to run
|
||||
the ``UsersLoginShell`` when we connect. Instead it is implicit
|
||||
in the actions that the ``ConnectionCommand`` takes when it
|
||||
connects to a remote machine. ``ansible_shell_type`` may be set
|
||||
to inform ansible of differences in how the ``UsersLoginShell``
|
||||
handles things like quoting if a shell has different semantics
|
||||
than the Bourne shell.
|
||||
:ANSIBLE_SHELL_EXECUTABLE: This is the shell set via the inventory var
|
||||
``ansible_shell_executable`` or via
|
||||
``constants.DEFAULT_EXECUTABLE`` if the inventory var is not set.
|
||||
We explicitly invoke this shell so that we have predictable
|
||||
quoting rules at this point. ``ANSIBLE_SHELL_EXECUTABLE`` is only
|
||||
settable by the user because some sudo setups may only allow
|
||||
invoking a specific shell. (For instance, ``/bin/bash`` may be
|
||||
allowed but ``/bin/sh``, our default, may not). We invoke this
|
||||
twice, once after the ``ConnectionCommand`` and once after the
|
||||
``BecomeCommand``. After the ConnectionCommand, this is run by
|
||||
the ``UsersLoginShell``. After the ``BecomeCommand`` we specify
|
||||
that the ``DEFAULT_EXECUTABLE`` is being invoked directly.
|
||||
:BecomeComand: Is the command that performs privilege escalation.
|
||||
Setting this up is performed by the action plugin prior to running
|
||||
``exec_command``. So we just get passed :param:`cmd` which has the
|
||||
BecomeCommand already added. (Examples: sudo, su)
|
||||
that the ``ANSIBLE_SHELL_EXECUTABLE`` is being invoked directly.
|
||||
:BecomeCommand ANSIBLE_SHELL_EXECUTABLE: Is the command that performs
|
||||
privilege escalation. Setting this up is performed by the action
|
||||
plugin prior to running ``exec_command``. So we just get passed
|
||||
:param:`cmd` which has the BecomeCommand already added.
|
||||
(Examples: sudo, su) If we have a BecomeCommand then we will
|
||||
invoke an ANSIBLE_SHELL_EXECUTABLE shell inside of it so that we
|
||||
have a consistent view of quoting.
|
||||
:Command: Is the command we're actually trying to run remotely.
|
||||
(Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file)
|
||||
"""
|
||||
|
@ -205,7 +240,10 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
|
|||
pass
|
||||
|
||||
def check_become_success(self, output):
|
||||
return self._play_context.success_key == output.rstrip()
|
||||
for line in output.splitlines(True):
|
||||
if self._play_context.success_key == line.rstrip():
|
||||
return True
|
||||
return False
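The per-line comparison above matters because the become marker can share a read buffer with other output; a small sketch with hypothetical values:

output = 'spurious motd banner\nBECOME-SUCCESS-abcdef123456\n'
success_key = 'BECOME-SUCCESS-abcdef123456'

success_key == output.rstrip()                                         # False: whole-buffer compare
any(success_key == line.rstrip() for line in output.splitlines(True))  # True: per-line compare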
|
||||
|
||||
def check_password_prompt(self, output):
|
||||
if self._play_context.prompt is None:
|
||||
|
|
|
@ -28,8 +28,9 @@ import traceback
|
|||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||
from ansible.module_utils.basic import is_executable
|
||||
from ansible.utils.unicode import to_bytes
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -37,8 +38,6 @@ except ImportError:
|
|||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
BUFSIZE = 65536
|
||||
|
||||
|
||||
class Connection(ConnectionBase):
|
||||
''' Local chroot based connections '''
|
||||
|
@ -90,6 +89,7 @@ class Connection(ConnectionBase):
|
|||
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
|
||||
|
||||
display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
|
||||
local_cmd = [to_bytes(i, errors='strict') for i in local_cmd]
|
||||
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
|
@ -125,7 +125,7 @@ class Connection(ConnectionBase):
|
|||
|
||||
out_path = pipes.quote(self._prefix_login_path(out_path))
|
||||
try:
|
||||
with open(in_path, 'rb') as in_file:
|
||||
with open(to_bytes(in_path, errors='strict'), 'rb') as in_file:
|
||||
try:
|
||||
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
|
||||
except OSError:
|
||||
|
@ -151,7 +151,7 @@ class Connection(ConnectionBase):
|
|||
except OSError:
|
||||
raise AnsibleError("chroot connection requires dd command in the chroot")
|
||||
|
||||
with open(out_path, 'wb+') as out_file:
|
||||
with open(to_bytes(out_path, errors='strict'), 'wb+') as out_file:
|
||||
try:
|
||||
chunk = p.stdout.read(BUFSIZE)
|
||||
while chunk:
|
||||
|
|
|
@ -35,7 +35,8 @@ from distutils.version import LooseVersion
|
|||
|
||||
import ansible.constants as C
|
||||
from ansible.errors import AnsibleError, AnsibleFileNotFound
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||
from ansible.utils.unicode import to_bytes
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -43,8 +44,6 @@ except ImportError:
|
|||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
BUFSIZE = 65536
|
||||
|
||||
|
||||
class Connection(ConnectionBase):
|
||||
''' Local docker based connections '''
|
||||
|
@ -75,18 +74,31 @@ class Connection(ConnectionBase):
|
|||
if not self.docker_cmd[0]:
|
||||
raise AnsibleError("docker command not found in PATH")
|
||||
|
||||
if play_context.connection_args:
|
||||
self.docker_cmd = self.docker_cmd + play_context.connection_args.split(' ')
|
||||
|
||||
self.can_copy_bothways = False
|
||||
|
||||
docker_version = self._get_docker_version()
|
||||
if LooseVersion(docker_version) < LooseVersion('1.3'):
|
||||
raise AnsibleError('docker connection type requires docker 1.3 or higher')
|
||||
# Docker cp in 1.8.0 sets the owner and group to root rather than the
|
||||
# user that the docker container is set to use by default.
|
||||
#if LooseVersion(docker_version) >= LooseVersion('1.8.0'):
|
||||
# self.can_copy_bothways = True
|
||||
|
||||
# The remote user we will request from docker (if supported)
|
||||
self.remote_user = None
|
||||
# The actual user which will execute commands in docker (if known)
|
||||
self.actual_user = None
|
||||
|
||||
if self._play_context.remote_user is not None:
|
||||
if LooseVersion(docker_version) >= LooseVersion('1.7'):
|
||||
# Support for specifying the exec user was added in docker 1.7
|
||||
self.remote_user = self._play_context.remote_user
|
||||
self.actual_user = self.remote_user
|
||||
else:
|
||||
self.actual_user = self._get_docker_remote_user()
|
||||
|
||||
if self.actual_user != self._play_context.remote_user:
|
||||
display.warning('docker {0} does not support remote_user, using container default: {1}'
|
||||
.format(docker_version, self.actual_user or '?'))
|
||||
elif self._display.verbosity > 2:
|
||||
# Since we're not setting the actual_user, look it up so we have it for logging later
|
||||
# Only do this if display verbosity is high enough that we'll need the value
|
||||
# This saves overhead from calling into docker when we don't need to
|
||||
self.actual_user = self._get_docker_remote_user()
|
||||
|
||||
@staticmethod
|
||||
def _sanitize_version(version):
|
||||
|
@ -108,12 +120,48 @@ class Connection(ConnectionBase):
|
|||
|
||||
return self._sanitize_version(cmd_output)
|
||||
|
||||
def _get_docker_remote_user(self):
|
||||
""" Get the default user configured in the docker container """
|
||||
p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
out, err = p.communicate()
|
||||
|
||||
if p.returncode != 0:
|
||||
display.warning('unable to retrieve default user from docker container: %s' % out + err)
|
||||
return None
|
||||
|
||||
# The default exec user is root, unless it was changed in the Dockerfile with USER
|
||||
return out.strip() or 'root'
|
||||
|
||||
def _build_exec_cmd(self, cmd):
|
||||
""" Build the local docker exec command to run cmd on remote_host
|
||||
|
||||
If remote_user is available and is supported by the docker
|
||||
version we are using, it will be provided to docker exec.
|
||||
"""
|
||||
|
||||
local_cmd = [self.docker_cmd]
|
||||
|
||||
if self._play_context.docker_extra_args:
|
||||
local_cmd += self._play_context.docker_extra_args.split(' ')
|
||||
|
||||
local_cmd += ['exec']
|
||||
|
||||
if self.remote_user is not None:
|
||||
local_cmd += ['-u', self.remote_user]
|
||||
|
||||
# -i is needed to keep stdin open which allows pipelining to work
|
||||
local_cmd += ['-i', self._play_context.remote_addr] + cmd
|
||||
|
||||
return local_cmd
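A sketch of the command list assembled above, using hypothetical values:

docker_cmd = 'docker'
docker_extra_args = '--host tcp://127.0.0.1:2375'   # play_context.docker_extra_args
remote_user = 'deploy'                              # honoured only on docker >= 1.7
remote_addr = 'web1'                                # the target container
cmd = ['/bin/sh', '-c', 'echo hello']

local_cmd = [docker_cmd] + docker_extra_args.split(' ') + ['exec', '-u', remote_user, '-i', remote_addr] + cmd
# ['docker', '--host', 'tcp://127.0.0.1:2375', 'exec', '-u', 'deploy', '-i', 'web1', '/bin/sh', '-c', 'echo hello']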
|
||||
|
||||
def _connect(self, port=None):
|
||||
""" Connect to the container. Nothing to do """
|
||||
super(Connection, self)._connect()
|
||||
if not self._connected:
|
||||
display.vvv("ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
|
||||
self._play_context.remote_user, host=self._play_context.remote_addr)
|
||||
display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
|
||||
self.actual_user or '?', host=self._play_context.remote_addr)
|
||||
)
|
||||
self._connected = True
|
||||
|
||||
|
@ -121,11 +169,10 @@ class Connection(ConnectionBase):
|
|||
""" Run a command on the docker host """
|
||||
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
||||
|
||||
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
|
||||
# -i is needed to keep stdin open which allows pipelining to work
|
||||
local_cmd = self.docker_cmd + ["exec", '-i', self._play_context.remote_addr, executable, '-c', cmd]
|
||||
local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
|
||||
|
||||
display.vvv("EXEC %s" % (local_cmd), host=self._play_context.remote_addr)
|
||||
display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr)
|
||||
local_cmd = [to_bytes(i, errors='strict') for i in local_cmd]
|
||||
p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
|
@ -152,34 +199,27 @@ class Connection(ConnectionBase):
|
|||
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
|
||||
|
||||
out_path = self._prefix_login_path(out_path)
|
||||
if not os.path.exists(in_path):
|
||||
if not os.path.exists(to_bytes(in_path, errors='strict')):
|
||||
raise AnsibleFileNotFound(
|
||||
"file or module does not exist: %s" % in_path)
|
||||
|
||||
if self.can_copy_bothways:
|
||||
# only docker >= 1.8.1 can do this natively
|
||||
args = self.docker_cmd + ["cp", in_path, "%s:%s" % (self._play_context.remote_addr, out_path) ]
|
||||
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
out_path = pipes.quote(out_path)
|
||||
# Older docker doesn't have native support for copying files into
|
||||
# running containers, so we use docker exec to implement this
|
||||
# Although docker version 1.8 and later provide support, the
|
||||
# owner and group of the files are always set to root
|
||||
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s" % (out_path, BUFSIZE)])
|
||||
args = [to_bytes(i, errors='strict') for i in args]
|
||||
with open(to_bytes(in_path, errors='strict'), 'rb') as in_file:
|
||||
try:
|
||||
p = subprocess.Popen(args, stdin=in_file,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
except OSError:
|
||||
raise AnsibleError("docker connection requires dd command in the container to put files")
|
||||
stdout, stderr = p.communicate()
|
||||
|
||||
if p.returncode != 0:
|
||||
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
|
||||
else:
|
||||
out_path = pipes.quote(out_path)
|
||||
# Older docker doesn't have native support for copying files into
|
||||
# running containers, so we use docker exec to implement this
|
||||
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
|
||||
args = self.docker_cmd + ["exec", "-i", self._play_context.remote_addr, executable, "-c",
|
||||
"dd of={0} bs={1}".format(out_path, BUFSIZE)]
|
||||
with open(in_path, 'rb') as in_file:
|
||||
try:
|
||||
p = subprocess.Popen(args, stdin=in_file,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
except OSError:
|
||||
raise AnsibleError("docker connection with docker < 1.8.1 requires dd command in the chroot")
|
||||
stdout, stderr = p.communicate()
|
||||
|
||||
if p.returncode != 0:
|
||||
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
|
||||
|
||||
def fetch_file(self, in_path, out_path):
|
||||
""" Fetch a file from container to local. """
|
||||
|
@ -191,7 +231,8 @@ class Connection(ConnectionBase):
|
|||
# file path
|
||||
out_dir = os.path.dirname(out_path)
|
||||
|
||||
args = self.docker_cmd + ["cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
|
||||
args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
|
||||
args = [to_bytes(i, errors='strict') for i in args]
|
||||
|
||||
p = subprocess.Popen(args, stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
@ -200,7 +241,7 @@ class Connection(ConnectionBase):
|
|||
# Rename if needed
|
||||
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
|
||||
if actual_out_path != out_path:
|
||||
os.rename(actual_out_path, out_path)
|
||||
os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
|
||||
|
||||
def close(self):
|
||||
""" Terminate the connection. Nothing to do for Docker"""
|
||||
|
|
|
@ -29,7 +29,8 @@ import traceback
|
|||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||
from ansible.utils.unicode import to_bytes
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -37,8 +38,6 @@ except ImportError:
|
|||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
BUFSIZE = 65536
|
||||
|
||||
|
||||
class Connection(ConnectionBase):
|
||||
''' Local BSD Jail based connections '''
|
||||
|
@ -46,7 +45,7 @@ class Connection(ConnectionBase):
|
|||
transport = 'jail'
|
||||
# Pipelining may work. Someone needs to test by setting this to True and
|
||||
# having pipelining=True in their ansible.cfg
|
||||
has_pipelining = False
|
||||
has_pipelining = True
|
||||
# Some become_methods may work in v2 (sudo works for other chroot-based
|
||||
# plugins while su seems to be failing). If some work, check chroot.py to
|
||||
# see how to disable just some methods.
|
||||
|
@ -70,7 +69,7 @@ class Connection(ConnectionBase):
|
|||
def _search_executable(executable):
|
||||
cmd = distutils.spawn.find_executable(executable)
|
||||
if not cmd:
|
||||
raise AnsibleError("%s command not found in PATH") % executable
|
||||
raise AnsibleError("%s command not found in PATH" % executable)
|
||||
return cmd
|
||||
|
||||
def list_jails(self):
|
||||
|
@ -83,7 +82,7 @@ class Connection(ConnectionBase):
|
|||
return stdout.split()
|
||||
|
||||
def get_jail_path(self):
|
||||
p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'],
|
||||
p = subprocess.Popen([self.jls_cmd, '-j', to_bytes(self.jail), '-q', 'path'],
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
|
@ -109,7 +108,8 @@ class Connection(ConnectionBase):
|
|||
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
|
||||
local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]
|
||||
|
||||
display.vvv("EXEC %s" % (local_cmd), host=self.jail)
|
||||
display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
|
||||
local_cmd = [to_bytes(i, errors='strict') for i in local_cmd]
|
||||
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
|
@ -119,13 +119,6 @@ class Connection(ConnectionBase):
|
|||
''' run a command on the jail '''
|
||||
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
||||
|
||||
# TODO: Check whether we can send the command to stdin via
|
||||
# p.communicate(in_data)
|
||||
# If we can, then we can change this plugin to has_pipelining=True and
|
||||
# remove the error if in_data is given.
|
||||
if in_data:
|
||||
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
|
||||
|
||||
p = self._buffered_exec_command(cmd)
|
||||
|
||||
stdout, stderr = p.communicate(in_data)
|
||||
|
@ -152,7 +145,7 @@ class Connection(ConnectionBase):
|
|||
|
||||
out_path = pipes.quote(self._prefix_login_path(out_path))
|
||||
try:
|
||||
with open(in_path, 'rb') as in_file:
|
||||
with open(to_bytes(in_path, errors='strict'), 'rb') as in_file:
|
||||
try:
|
||||
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
|
||||
except OSError:
|
||||
|
@ -178,7 +171,7 @@ class Connection(ConnectionBase):
|
|||
except OSError:
|
||||
raise AnsibleError("jail connection requires dd command in the jail")
|
||||
|
||||
with open(out_path, 'wb+') as out_file:
|
||||
with open(to_bytes(out_path, errors='strict'), 'wb+') as out_file:
|
||||
try:
|
||||
chunk = p.stdout.read(BUFSIZE)
|
||||
while chunk:
|
||||
|
|
|
@ -29,7 +29,8 @@ import traceback
|
|||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||
from ansible.utils.unicode import to_bytes
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -37,8 +38,6 @@ except ImportError:
|
|||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
BUFSIZE = 65536
|
||||
|
||||
|
||||
class Connection(ConnectionBase):
|
||||
''' Local lxc based connections '''
|
||||
|
@ -65,7 +64,7 @@ class Connection(ConnectionBase):
|
|||
return cmd
|
||||
|
||||
def _check_domain(self, domain):
|
||||
p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', domain],
|
||||
p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', to_bytes(domain)],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
p.communicate()
|
||||
if p.returncode:
|
||||
|
@ -87,9 +86,15 @@ class Connection(ConnectionBase):
|
|||
return the process's exit code immediately.
|
||||
'''
|
||||
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
|
||||
local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd]
|
||||
local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace']
|
||||
|
||||
display.vvv("EXEC %s" % (local_cmd), host=self.lxc)
|
||||
if C.DEFAULT_LIBVIRT_LXC_NOSECLABEL:
|
||||
local_cmd += ['--noseclabel']
|
||||
|
||||
local_cmd += [self.lxc, '--', executable, '-c', cmd]
|
||||
|
||||
display.vvv("EXEC %s" % (local_cmd,), host=self.lxc)
|
||||
local_cmd = [to_bytes(i, errors='strict') for i in local_cmd]
|
||||
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
|
@ -125,7 +130,7 @@ class Connection(ConnectionBase):
|
|||
|
||||
out_path = pipes.quote(self._prefix_login_path(out_path))
|
||||
try:
|
||||
with open(in_path, 'rb') as in_file:
|
||||
with open(to_bytes(in_path, errors='strict'), 'rb') as in_file:
|
||||
try:
|
||||
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
|
||||
except OSError:
|
||||
|
@ -151,7 +156,7 @@ class Connection(ConnectionBase):
|
|||
except OSError:
|
||||
raise AnsibleError("chroot connection requires dd command in the chroot")
|
||||
|
||||
with open(out_path, 'wb+') as out_file:
|
||||
with open(to_bytes(out_path, errors='strict'), 'wb+') as out_file:
|
||||
try:
|
||||
chunk = p.stdout.read(BUFSIZE)
|
||||
while chunk:
|
||||
|
|
|
@ -19,16 +19,19 @@ from __future__ import (absolute_import, division, print_function)
|
|||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import select
|
||||
import shutil
|
||||
import subprocess
|
||||
import select
|
||||
import fcntl
|
||||
import getpass
|
||||
|
||||
from ansible.compat.six import text_type, binary_type
|
||||
|
||||
import ansible.constants as C
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleFileNotFound
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.utils.unicode import to_bytes, to_str
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -40,10 +43,8 @@ except ImportError:
|
|||
class Connection(ConnectionBase):
|
||||
''' Local based connections '''
|
||||
|
||||
@property
|
||||
def transport(self):
|
||||
''' used to identify this connection object '''
|
||||
return 'local'
|
||||
transport = 'local'
|
||||
has_pipelining = True
|
||||
|
||||
def _connect(self):
|
||||
''' connect to the local host; nothing to do here '''
|
||||
|
@ -54,7 +55,7 @@ class Connection(ConnectionBase):
|
|||
self._play_context.remote_user = getpass.getuser()
|
||||
|
||||
if not self._connected:
|
||||
display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user, host=self._play_context.remote_addr))
|
||||
display.vvv(u"ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user, host=self._play_context.remote_addr))
|
||||
self._connected = True
|
||||
return self
|
||||
|
||||
|
@ -65,16 +66,20 @@ class Connection(ConnectionBase):
|
|||
|
||||
display.debug("in local.exec_command()")
|
||||
|
||||
if in_data:
|
||||
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
|
||||
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
|
||||
|
||||
display.vvv("{0} EXEC {1}".format(self._play_context.remote_addr, cmd))
|
||||
display.vvv(u"{0} EXEC {1}".format(self._play_context.remote_addr, cmd))
|
||||
# FIXME: cwd= needs to be set to the basedir of the playbook
|
||||
display.debug("opening command with Popen()")
|
||||
|
||||
if isinstance(cmd, (text_type, binary_type)):
|
||||
cmd = to_bytes(cmd)
|
||||
else:
|
||||
cmd = map(to_bytes, cmd)
|
||||
|
||||
p = subprocess.Popen(
|
||||
cmd,
|
||||
shell=isinstance(cmd, basestring),
|
||||
shell=isinstance(cmd, (text_type, binary_type)),
|
||||
executable=executable, #cwd=...
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
|
@ -106,7 +111,7 @@ class Connection(ConnectionBase):
|
|||
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
|
||||
|
||||
display.debug("getting output with communicate()")
|
||||
stdout, stderr = p.communicate()
|
||||
stdout, stderr = p.communicate(in_data)
|
||||
display.debug("done communicating")
|
||||
|
||||
display.debug("done with local.exec_command()")
|
||||
|
@ -117,22 +122,22 @@ class Connection(ConnectionBase):
|
|||
|
||||
super(Connection, self).put_file(in_path, out_path)
|
||||
|
||||
display.vvv("{0} PUT {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
|
||||
if not os.path.exists(in_path):
|
||||
raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
|
||||
display.vvv(u"{0} PUT {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
|
||||
if not os.path.exists(to_bytes(in_path, errors='strict')):
|
||||
raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_str(in_path)))
|
||||
try:
|
||||
shutil.copyfile(in_path, out_path)
|
||||
shutil.copyfile(to_bytes(in_path, errors='strict'), to_bytes(out_path, errors='strict'))
|
||||
except shutil.Error:
|
||||
raise AnsibleError("failed to copy: {0} and {1} are the same".format(in_path, out_path))
|
||||
raise AnsibleError("failed to copy: {0} and {1} are the same".format(to_str(in_path), to_str(out_path)))
|
||||
except IOError as e:
|
||||
raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, e))
|
||||
raise AnsibleError("failed to transfer file to {0}: {1}".format(to_str(out_path), to_str(e)))
|
||||
|
||||
def fetch_file(self, in_path, out_path):
|
||||
''' fetch a file from local to local -- for compatibility '''
|
||||
|
||||
super(Connection, self).fetch_file(in_path, out_path)
|
||||
|
||||
display.vvv("{0} FETCH {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
|
||||
display.vvv(u"{0} FETCH {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
|
||||
self.put_file(in_path, out_path)
|
||||
|
||||
def close(self):
|
||||
|
|
|
@ -32,6 +32,7 @@ import tempfile
|
|||
import traceback
|
||||
import fcntl
|
||||
import sys
|
||||
import re
|
||||
|
||||
from termios import tcflush, TCIFLUSH
|
||||
from binascii import hexlify
|
||||
|
@ -42,6 +43,7 @@ from ansible import constants as C
|
|||
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.utils.path import makedirs_safe
|
||||
from ansible.utils.unicode import to_bytes
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -55,6 +57,9 @@ The %s key fingerprint is %s.
|
|||
Are you sure you want to continue connecting (yes/no)?
|
||||
"""
|
||||
|
||||
# SSH Options Regex
|
||||
SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')
|
||||
|
||||
# prevent paramiko warning noise -- see http://stackoverflow.com/questions/3920502/
|
||||
HAVE_PARAMIKO=False
|
||||
with warnings.catch_warnings():
|
||||
|
@ -121,10 +126,7 @@ SFTP_CONNECTION_CACHE = {}
|
|||
class Connection(ConnectionBase):
|
||||
''' SSH based connections with Paramiko '''
|
||||
|
||||
@property
|
||||
def transport(self):
|
||||
''' used to identify this connection object from other classes '''
|
||||
return 'paramiko'
|
||||
transport = 'paramiko'
|
||||
|
||||
def _cache_key(self):
|
||||
return "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
|
||||
|
@ -137,6 +139,51 @@ class Connection(ConnectionBase):
|
|||
self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
|
||||
return self
|
||||
|
||||
def _parse_proxy_command(self, port=22):
|
||||
proxy_command = None
|
||||
# Parse ansible_ssh_common_args, specifically looking for ProxyCommand
|
||||
ssh_args = [
|
||||
getattr(self._play_context, 'ssh_extra_args', ''),
|
||||
getattr(self._play_context, 'ssh_common_args', ''),
|
||||
getattr(self._play_context, 'ssh_args', ''),
|
||||
]
|
||||
if ssh_args is not None:
|
||||
args = self._split_ssh_args(' '.join(ssh_args))
|
||||
for i, arg in enumerate(args):
|
||||
if arg.lower() == 'proxycommand':
|
||||
# _split_ssh_args split ProxyCommand from the command itself
|
||||
proxy_command = args[i + 1]
|
||||
else:
|
||||
# ProxyCommand and the command itself are a single string
|
||||
match = SETTINGS_REGEX.match(arg)
|
||||
if match:
|
||||
if match.group(1).lower() == 'proxycommand':
|
||||
proxy_command = match.group(2)
|
||||
|
||||
if proxy_command:
|
||||
break
|
||||
|
||||
proxy_command = proxy_command or C.PARAMIKO_PROXY_COMMAND
|
||||
|
||||
sock_kwarg = {}
|
||||
if proxy_command:
|
||||
replacers = {
|
||||
'%h': self._play_context.remote_addr,
|
||||
'%p': port,
|
||||
'%r': self._play_context.remote_user
|
||||
}
|
||||
for find, replace in replacers.items():
|
||||
proxy_command = proxy_command.replace(find, str(replace))
|
||||
try:
|
||||
sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)}
|
||||
display.vvv("CONFIGURE PROXY COMMAND FOR CONNECTION: %s" % proxy_command, host=self._play_context.remote_addr)
|
||||
except AttributeError:
|
||||
display.warning('Paramiko ProxyCommand support unavailable. '
|
||||
'Please upgrade to Paramiko 1.9.0 or newer. '
|
||||
'Not using configured ProxyCommand')
|
||||
|
||||
return sock_kwarg
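A short sketch of the placeholder substitution performed above, with hypothetical host, port and user:

proxy_command = 'ssh -W %h:%p jump.example.com'
replacers = {'%h': 'target.example.com', '%p': 22, '%r': 'deploy'}
for find, replace in replacers.items():
    proxy_command = proxy_command.replace(find, str(replace))
# proxy_command == 'ssh -W target.example.com:22 jump.example.com'
# paramiko.ProxyCommand(proxy_command) then becomes the sock kwarg for SSHClient.connect().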
|
||||
|
||||
def _connect_uncached(self):
|
||||
''' activates the connection object '''
|
||||
|
||||
|
@ -151,13 +198,17 @@ class Connection(ConnectionBase):
|
|||
self.keyfile = os.path.expanduser("~/.ssh/known_hosts")
|
||||
|
||||
if C.HOST_KEY_CHECKING:
|
||||
try:
|
||||
#TODO: check if we need to look at several possible locations, possible for loop
|
||||
ssh.load_system_host_keys("/etc/ssh/ssh_known_hosts")
|
||||
except IOError:
|
||||
pass # file was not found, but not required to function
|
||||
for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts", "/etc/openssh/ssh_known_hosts"):
|
||||
try:
|
||||
#TODO: check if we need to look at several possible locations, possible for loop
|
||||
ssh.load_system_host_keys(ssh_known_hosts)
|
||||
break
|
||||
except IOError:
|
||||
pass # file was not found, but not required to function
|
||||
ssh.load_system_host_keys()
|
||||
|
||||
sock_kwarg = self._parse_proxy_command(port)
|
||||
|
||||
ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self))
|
||||
|
||||
allow_agent = True
|
||||
|
@ -179,6 +230,7 @@ class Connection(ConnectionBase):
|
|||
password=self._play_context.password,
|
||||
timeout=self._play_context.timeout,
|
||||
port=port,
|
||||
**sock_kwarg
|
||||
)
|
||||
except Exception as e:
|
||||
msg = str(e)
|
||||
|
@ -220,6 +272,8 @@ class Connection(ConnectionBase):
|
|||
|
||||
display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr)
|
||||
|
||||
cmd = to_bytes(cmd, errors='strict')
|
||||
|
||||
no_prompt_out = ''
|
||||
no_prompt_err = ''
|
||||
become_output = ''
|
||||
|
@ -268,7 +322,7 @@ class Connection(ConnectionBase):
|
|||
|
||||
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
|
||||
|
||||
if not os.path.exists(in_path):
|
||||
if not os.path.exists(to_bytes(in_path, errors='strict')):
|
||||
raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
|
||||
|
||||
try:
|
||||
|
@ -277,7 +331,7 @@ class Connection(ConnectionBase):
|
|||
raise AnsibleError("failed to open a SFTP connection (%s)" % e)
|
||||
|
||||
try:
|
||||
self.sftp.put(in_path, out_path)
|
||||
self.sftp.put(to_bytes(in_path, errors='strict'), to_bytes(out_path, errors='strict'))
|
||||
except IOError:
|
||||
raise AnsibleError("failed to transfer file to %s" % out_path)
|
||||
|
||||
|
@ -303,7 +357,7 @@ class Connection(ConnectionBase):
|
|||
raise AnsibleError("failed to open a SFTP connection (%s)", e)
|
||||
|
||||
try:
|
||||
self.sftp.get(in_path, out_path)
|
||||
self.sftp.get(to_bytes(in_path, errors='strict'), to_bytes(out_path, errors='strict'))
|
||||
except IOError:
|
||||
raise AnsibleError("failed to transfer file from %s" % in_path)
|
||||
|
||||
|
|
|
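As a side note on the _parse_proxy_command() change above: the ProxyCommand string uses the OpenSSH-style tokens %h, %p and %r, which the plugin expands before handing the result to paramiko. The following standalone sketch (hypothetical helper name, not part of the commit) illustrates that substitution step:

# Hypothetical helper mirroring the replacers loop above; 'host', 'port'
# and 'user' stand in for the PlayContext attributes used by the plugin.
def expand_proxy_command(template, host, port=22, user=''):
    """Expand the OpenSSH-style %h/%p/%r tokens in a ProxyCommand string."""
    replacers = {'%h': host, '%p': port, '%r': user}
    for find, replace in replacers.items():
        template = template.replace(find, str(replace))
    return template

# expand_proxy_command('ssh -W %h:%p -l %r jumphost', 'db01', 22, 'deploy')
# -> 'ssh -W db01:22 -l deploy jumphost'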
@ -24,7 +24,6 @@ import os
|
|||
import pipes
|
||||
import pty
|
||||
import select
|
||||
import shlex
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
|
@ -32,7 +31,8 @@ from ansible import constants as C
|
|||
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.utils.path import unfrackpath, makedirs_safe
|
||||
from ansible.utils.unicode import to_bytes, to_unicode
|
||||
from ansible.utils.unicode import to_bytes, to_unicode, to_str
|
||||
from ansible.compat.six import text_type, binary_type
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -100,15 +100,6 @@ class Connection(ConnectionBase):
|
|||
|
||||
return controlpersist, controlpath
|
||||
|
||||
@staticmethod
|
||||
def _split_args(argstring):
|
||||
"""
|
||||
Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
|
||||
list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
|
||||
the argument list. The list will not contain any empty elements.
|
||||
"""
|
||||
return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()]
|
||||
|
||||
def _add_args(self, explanation, args):
|
||||
"""
|
||||
Adds the given args to self._command and displays a caller-supplied
|
||||
|
@ -157,7 +148,7 @@ class Connection(ConnectionBase):
|
|||
# Next, we add [ssh_connection]ssh_args from ansible.cfg.
|
||||
|
||||
if self._play_context.ssh_args:
|
||||
args = self._split_args(self._play_context.ssh_args)
|
||||
args = self._split_ssh_args(self._play_context.ssh_args)
|
||||
self._add_args("ansible.cfg set ssh_args", args)
|
||||
|
||||
# Now we add various arguments controlled by configuration file settings
|
||||
|
@ -196,7 +187,7 @@ class Connection(ConnectionBase):
|
|||
if user:
|
||||
self._add_args(
|
||||
"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set",
|
||||
("-o", "User={0}".format(self._play_context.remote_user))
|
||||
("-o", "User={0}".format(to_bytes(self._play_context.remote_user)))
|
||||
)
|
||||
|
||||
self._add_args(
|
||||
|
@ -210,7 +201,7 @@ class Connection(ConnectionBase):
|
|||
for opt in ['ssh_common_args', binary + '_extra_args']:
|
||||
attr = getattr(self._play_context, opt, None)
|
||||
if attr is not None:
|
||||
args = self._split_args(attr)
|
||||
args = self._split_ssh_args(attr)
|
||||
self._add_args("PlayContext set %s" % opt, args)
|
||||
|
||||
# Check if ControlPersist is enabled and add a ControlPath if one hasn't
|
||||
|
@ -230,7 +221,7 @@ class Connection(ConnectionBase):
|
|||
raise AnsibleError("Cannot write to ControlPath %s" % cpdir)
|
||||
|
||||
args = ("-o", "ControlPath={0}".format(
|
||||
C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=cpdir))
|
||||
to_bytes(C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=cpdir)))
|
||||
)
|
||||
self._add_args("found only ControlPersist; added ControlPath", args)
|
||||
|
||||
|
@ -319,8 +310,8 @@ class Connection(ConnectionBase):
|
|||
Starts the command and communicates with it until it ends.
|
||||
'''
|
||||
|
||||
display_cmd = map(pipes.quote, cmd[:-1]) + [cmd[-1]]
|
||||
display.vvv('SSH: EXEC {0}'.format(' '.join(display_cmd)), host=self.host)
|
||||
display_cmd = map(to_unicode, map(pipes.quote, cmd))
|
||||
display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host)
|
||||
|
||||
# Start the given command. If we don't need to pipeline data, we can try
|
||||
# to use a pseudo-tty (ssh will have been invoked with -tt). If we are
|
||||
|
@ -328,6 +319,12 @@ class Connection(ConnectionBase):
|
|||
# old pipes.
|
||||
|
||||
p = None
|
||||
|
||||
if isinstance(cmd, (text_type, binary_type)):
|
||||
cmd = to_bytes(cmd)
|
||||
else:
|
||||
cmd = list(map(to_bytes, cmd))
|
||||
|
||||
if not in_data:
|
||||
try:
|
||||
# Make sure stdin is a proper pty to avoid tcgetattr errors
|
||||
|
@ -347,7 +344,7 @@ class Connection(ConnectionBase):
|
|||
|
||||
if self._play_context.password:
|
||||
os.close(self.sshpass_pipe[0])
|
||||
os.write(self.sshpass_pipe[1], "{0}\n".format(self._play_context.password))
|
||||
os.write(self.sshpass_pipe[1], "{0}\n".format(to_bytes(self._play_context.password)))
|
||||
os.close(self.sshpass_pipe[1])
|
||||
|
||||
## SSH state machine
|
||||
|
@ -365,7 +362,7 @@ class Connection(ConnectionBase):
|
|||
# only when using ssh. Otherwise we can send initial data straightaway.
|
||||
|
||||
state = states.index('ready_to_send')
|
||||
if 'ssh' in cmd:
|
||||
if b'ssh' in cmd:
|
||||
if self._play_context.prompt:
|
||||
# We're requesting escalation with a password, so we have to
|
||||
# wait for a password prompt.
|
||||
|
@ -463,7 +460,7 @@ class Connection(ConnectionBase):
|
|||
if states[state] == 'awaiting_prompt':
|
||||
if self._flags['become_prompt']:
|
||||
display.debug('Sending become_pass in response to prompt')
|
||||
stdin.write(self._play_context.become_pass + '\n')
|
||||
stdin.write('{0}\n'.format(to_bytes(self._play_context.become_pass )))
|
||||
self._flags['become_prompt'] = False
|
||||
state += 1
|
||||
elif self._flags['become_success']:
|
||||
|
@ -538,7 +535,7 @@ class Connection(ConnectionBase):
|
|||
stdin.close()
|
||||
|
||||
if C.HOST_KEY_CHECKING:
|
||||
if cmd[0] == "sshpass" and p.returncode == 6:
|
||||
if cmd[0] == b"sshpass" and p.returncode == 6:
|
||||
raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
|
||||
|
||||
controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr
|
||||
|
@ -555,7 +552,7 @@ class Connection(ConnectionBase):
|
|||
|
||||
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
||||
|
||||
display.vvv("ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
|
||||
display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
|
||||
|
||||
# we can only use tty when we are not pipelining the modules. piping
|
||||
# data into /usr/bin/python inside a tty automatically invokes the
|
||||
|
@ -588,19 +585,19 @@ class Connection(ConnectionBase):

remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
cmd_summary = "%s..." % args[0]
for attempt in xrange(remaining_tries):
for attempt in range(remaining_tries):
try:
return_tuple = self._exec_command(*args, **kwargs)
# 0 = success
# 1-254 = remote command return code
# 255 = failure from the ssh command itself
if return_tuple[0] != 255 or attempt == (remaining_tries - 1):
if return_tuple[0] != 255:
break
else:
raise AnsibleConnectionFailure("Failed to connect to the host via ssh.")
except (AnsibleConnectionFailure, Exception) as e:
if attempt == remaining_tries - 1:
raise e
raise
else:
pause = 2 ** attempt - 1
if pause > 30:
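The retry loop above retries _exec_command only when ssh itself fails (return code 255) and sleeps between attempts with an exponential back-off of 2**attempt - 1 seconds, apparently capped once it exceeds 30 seconds. A minimal generic sketch of that pattern, under those assumptions and with hypothetical names:

import time

def run_with_retries(func, retries=3, max_pause=30):
    # Generic sketch only: retry a callable, sleeping 2**attempt - 1 seconds
    # between failed attempts and clamping the pause at max_pause.
    for attempt in range(retries + 1):
        try:
            return func()
        except Exception:
            if attempt == retries:
                raise
        pause = min(2 ** attempt - 1, max_pause)
        time.sleep(pause)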
@ -623,44 +620,46 @@ class Connection(ConnectionBase):
|
|||
|
||||
super(Connection, self).put_file(in_path, out_path)
|
||||
|
||||
display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host)
|
||||
if not os.path.exists(in_path):
|
||||
raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
|
||||
display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
|
||||
if not os.path.exists(to_bytes(in_path, errors='strict')):
|
||||
raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_str(in_path)))
|
||||
|
||||
# scp and sftp require square brackets for IPv6 addresses, but
|
||||
# accept them for hostnames and IPv4 addresses too.
|
||||
host = '[%s]' % self.host
|
||||
|
||||
if C.DEFAULT_SCP_IF_SSH:
|
||||
cmd = self._build_command('scp', in_path, '{0}:{1}'.format(host, pipes.quote(out_path)))
|
||||
cmd = self._build_command('scp', in_path, u'{0}:{1}'.format(host, pipes.quote(out_path)))
|
||||
in_data = None
|
||||
else:
|
||||
cmd = self._build_command('sftp', host)
|
||||
in_data = "put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
|
||||
cmd = self._build_command('sftp', to_bytes(host))
|
||||
in_data = u"put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
|
||||
|
||||
in_data = to_bytes(in_data, nonstring='passthru')
|
||||
(returncode, stdout, stderr) = self._run(cmd, in_data)
|
||||
|
||||
if returncode != 0:
|
||||
raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(out_path, stdout, stderr))
|
||||
raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(to_str(out_path), to_str(stdout), to_str(stderr)))
|
||||
|
||||
def fetch_file(self, in_path, out_path):
|
||||
''' fetch a file from remote to local '''
|
||||
|
||||
super(Connection, self).fetch_file(in_path, out_path)
|
||||
|
||||
display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
|
||||
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
|
||||
|
||||
# scp and sftp require square brackets for IPv6 addresses, but
|
||||
# accept them for hostnames and IPv4 addresses too.
|
||||
host = '[%s]' % self.host
|
||||
|
||||
if C.DEFAULT_SCP_IF_SSH:
|
||||
cmd = self._build_command('scp', '{0}:{1}'.format(host, pipes.quote(in_path)), out_path)
|
||||
cmd = self._build_command('scp', u'{0}:{1}'.format(host, pipes.quote(in_path)), out_path)
|
||||
in_data = None
|
||||
else:
|
||||
cmd = self._build_command('sftp', host)
|
||||
in_data = "get {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
|
||||
in_data = u"get {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
|
||||
|
||||
in_data = to_bytes(in_data, nonstring='passthru')
|
||||
(returncode, stdout, stderr) = self._run(cmd, in_data)
|
||||
|
||||
if returncode != 0:
|
||||
|
@ -674,6 +673,8 @@ class Connection(ConnectionBase):
|
|||
# temporarily disabled as we are forced to currently close connections after every task because of winrm
|
||||
# if self._connected and self._persistent:
|
||||
# cmd = self._build_command('ssh', '-O', 'stop', self.host)
|
||||
#
|
||||
# cmd = map(to_bytes, cmd)
|
||||
# p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
# stdout, stderr = p.communicate()
|
||||
|
||||
|
|
|
@ -61,8 +61,10 @@ except ImportError:
|
|||
class Connection(ConnectionBase):
|
||||
'''WinRM connections over HTTP/HTTPS.'''
|
||||
|
||||
transport = 'winrm'
|
||||
module_implementation_preferences = ('.ps1', '')
|
||||
become_methods = []
|
||||
allow_executable = False
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
|
||||
|
@ -76,11 +78,6 @@ class Connection(ConnectionBase):
|
|||
|
||||
super(Connection, self).__init__(*args, **kwargs)
|
||||
|
||||
@property
|
||||
def transport(self):
|
||||
''' used to identify this connection object from other classes '''
|
||||
return 'winrm'
|
||||
|
||||
def set_host_overrides(self, host):
|
||||
'''
|
||||
Override WinRM-specific options from host variables.
|
||||
|
@ -137,20 +134,20 @@ class Connection(ConnectionBase):
|
|||
protocol.send_message('')
|
||||
return protocol
|
||||
except Exception as e:
|
||||
err_msg = (str(e) or repr(e)).strip()
|
||||
if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I):
|
||||
err_msg = to_unicode(e).strip()
|
||||
if re.search(to_unicode(r'Operation\s+?timed\s+?out'), err_msg, re.I):
|
||||
raise AnsibleError('the connection attempt timed out')
|
||||
m = re.search(r'Code\s+?(\d{3})', err_msg)
|
||||
m = re.search(to_unicode(r'Code\s+?(\d{3})'), err_msg)
|
||||
if m:
|
||||
code = int(m.groups()[0])
|
||||
if code == 401:
|
||||
err_msg = 'the username/password specified for this server was incorrect'
|
||||
elif code == 411:
|
||||
return protocol
|
||||
errors.append('%s: %s' % (transport, err_msg))
|
||||
display.vvvvv('WINRM CONNECTION ERROR: %s\n%s' % (err_msg, traceback.format_exc()), host=self._winrm_host)
|
||||
errors.append(u'%s: %s' % (transport, err_msg))
|
||||
display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_unicode(traceback.format_exc())), host=self._winrm_host)
|
||||
if errors:
|
||||
raise AnsibleError(', '.join(errors))
|
||||
raise AnsibleError(', '.join(map(to_str, errors)))
|
||||
else:
|
||||
raise AnsibleError('No transport found for WinRM connection')
|
||||
|
||||
|
@ -271,7 +268,7 @@ class Connection(ConnectionBase):
|
|||
if not os.path.exists(in_path):
|
||||
raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path)
|
||||
|
||||
script_template = '''
|
||||
script_template = u'''
|
||||
begin {{
|
||||
$path = "{0}"
|
||||
|
||||
|
@ -318,7 +315,7 @@ class Connection(ConnectionBase):
|
|||
local_sha1 = secure_hash(in_path)
|
||||
|
||||
if not remote_sha1 == local_sha1:
|
||||
raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(remote_sha1, local_sha1))
|
||||
raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_str(remote_sha1), to_str(local_sha1)))
|
||||
|
||||
|
||||
def fetch_file(self, in_path, out_path):
|
||||
|
|
|
@ -30,7 +30,8 @@ import traceback
|
|||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.connection import ConnectionBase
|
||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||
from ansible.utils import to_bytes
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -38,8 +39,6 @@ except ImportError:
|
|||
from ansible.utils.display import Display
|
||||
display = Display()
|
||||
|
||||
BUFSIZE = 65536
|
||||
|
||||
|
||||
class Connection(ConnectionBase):
|
||||
''' Local zone based connections '''
|
||||
|
@ -56,8 +55,8 @@ class Connection(ConnectionBase):
|
|||
if os.geteuid() != 0:
|
||||
raise AnsibleError("zone connection requires running as root")
|
||||
|
||||
self.zoneadm_cmd = self._search_executable('zoneadm')
|
||||
self.zlogin_cmd = self._search_executable('zlogin')
|
||||
self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm'))
|
||||
self.zlogin_cmd = to_bytes(self._search_executable('zlogin'))
|
||||
|
||||
if self.zone not in self.list_zones():
|
||||
raise AnsibleError("incorrect zone name %s" % self.zone)
|
||||
|
@ -86,7 +85,7 @@ class Connection(ConnectionBase):
|
|||
def get_zone_path(self):
|
||||
#solaris10vm# zoneadm -z cswbuild list -p
|
||||
#-:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared
|
||||
process = subprocess.Popen([self.zoneadm_cmd, '-z', self.zone, 'list', '-p'],
|
||||
process = subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'],
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
|
@ -113,6 +112,7 @@ class Connection(ConnectionBase):
|
|||
# this through /bin/sh -c here. Instead it goes through the shell
|
||||
# that zlogin selects.
|
||||
local_cmd = [self.zlogin_cmd, self.zone, cmd]
|
||||
local_cmd = map(to_bytes, local_cmd)
|
||||
|
||||
display.vvv("EXEC %s" % (local_cmd), host=self.zone)
|
||||
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
|
||||
|
|
|
@ -41,7 +41,7 @@ import uuid
import yaml
from jinja2.filters import environmentfilter
from distutils.version import LooseVersion, StrictVersion
from ansible.compat.six import iteritems
from ansible.compat.six import iteritems, string_types

from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper

@ -100,15 +100,17 @@ def to_nice_json(a, *args, **kw):
else:
if major >= 2:
return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw)
try:
return json.dumps(a, indent=4, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)
except:
# Fallback to the to_json filter
return to_json(a, *args, **kw)
return json.dumps(a, indent=4, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)

def bool(a):
''' return a bool for the arg '''
if a is None or type(a) == bool:
return a
if type(a) in types.StringTypes:
if isinstance(a, string_types):
a = a.lower()
if a in ['yes', 'on', '1', 'true', 1]:
return True
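The bool filter change above swaps the Python 2-only types.StringTypes check for six's string_types. A rough, hypothetical Python 3 equivalent for illustration (the excerpt does not show the filter's fall-through branch, so the final return here is an assumption):

def to_bool(value):
    # Sketch: mirror the isinstance(value, string_types) check above.
    if value is None or isinstance(value, bool):
        return value
    if isinstance(value, str):
        return value.lower() in ('yes', 'on', '1', 'true')
    return value == 1   # assumption; the excerpt cuts off before this branch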
@ -223,7 +225,11 @@ def get_encrypted_password(password, hashtype='sha512', salt=None):
if hashtype in cryptmethod:
if salt is None:
r = SystemRandom()
salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(16)])
if hashtype in ['md5']:
saltsize = 8
else:
saltsize = 16
salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(saltsize)])

if not HAS_PASSLIB:
if sys.platform.startswith('darwin'):
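The get_encrypted_password change above sizes the salt by scheme: 8 characters for md5-crypt, 16 for the other crypt schemes, drawn from SystemRandom. A small standalone sketch of the same selection (hypothetical function name):

import string
from random import SystemRandom

def make_crypt_salt(hashtype):
    # md5-crypt accepts at most 8 salt characters; the other schemes here use 16.
    r = SystemRandom()
    saltsize = 8 if hashtype == 'md5' else 16
    return ''.join(r.choice(string.ascii_letters + string.digits) for _ in range(saltsize))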
@ -50,5 +50,5 @@ class LookupModule(LookupBase):
if len(my_list) == 0:
raise AnsibleError("with_cartesian requires at least one element in each list")

return [self._flatten(x) for x in product(*my_list, fillvalue=None)]
return [self._flatten(x) for x in product(*my_list)]
@ -171,7 +171,7 @@ class LookupModule(LookupBase):
else:
total_search.append(term)
else:
total_search = terms
total_search = self._flatten(terms)

roledir = variables.get('roledir')
for fn in total_search:
@ -17,7 +17,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import StringIO
from io import StringIO
import os
import ConfigParser
import re

@ -28,8 +28,8 @@ from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):

def read_properties(self, filename, key, dflt, is_regexp):
config = StringIO.StringIO()
config.write('[java_properties]\n' + open(filename).read())
config = StringIO()
config.write(u'[java_properties]\n' + open(filename).read())
config.seek(0, os.SEEK_SET)
self.cp.readfp(config)
return self.get_value(key, 'java_properties', dflt, is_regexp)
@ -26,10 +26,15 @@ class LookupModule(LookupBase):

def get_hosts(self, variables, pattern):
hosts = []
if pattern in variables['groups']:
hosts = variables['groups'][pattern]
elif pattern in variables['groups']['all']:
hosts = [pattern]
if pattern[0] in ('!','&'):
obj = pattern[1:]
else:
obj = pattern

if obj in variables['groups']:
hosts = variables['groups'][obj]
elif obj in variables['groups']['all']:
hosts = [obj]
return hosts

def run(self, terms, variables=None, **kwargs):
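The get_hosts() rewrite above strips a leading '!' or '&' (exclusion/intersection markers in Ansible host patterns) before looking the name up in the group table. A standalone sketch of that lookup, with hypothetical names:

def resolve_pattern(pattern, groups):
    # Drop a leading '!' or '&' so '!web' still resolves the 'web' group.
    obj = pattern[1:] if pattern and pattern[0] in ('!', '&') else pattern
    if obj in groups:
        return list(groups[obj])
    if obj in groups.get('all', []):
        return [obj]
    return []

# resolve_pattern('!web', {'web': ['w1', 'w2'], 'all': ['w1', 'w2', 'db1']})
# -> ['w1', 'w2']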
@ -22,6 +22,7 @@ import os
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.unicode import to_unicode

try:
from __main__ import display

@ -46,7 +47,7 @@ class LookupModule(LookupBase):
display.vvvv("File lookup using %s as file" % lookupfile)
if lookupfile and os.path.exists(lookupfile):
with open(lookupfile, 'r') as f:
template_data = f.read()
template_data = to_unicode(f.read())

searchpath = [self._loader._basedir, os.path.dirname(lookupfile)]
if 'role_path' in variables:
@ -1,6 +1,6 @@
|
|||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
# (c) 2016 RedHat
|
||||
#
|
||||
# This file is part of Ansible
|
||||
# This file is part of Ansible.
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
|
@ -14,8 +14,127 @@
|
|||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Make coding more python3-ish
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import re
|
||||
import pipes
|
||||
import ansible.constants as C
|
||||
import time
|
||||
import random
|
||||
|
||||
from ansible.compat.six import text_type
|
||||
|
||||
_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
|
||||
|
||||
class ShellBase(object):
|
||||
|
||||
def __init__(self):
|
||||
self.env = dict(
|
||||
LANG = C.DEFAULT_MODULE_LANG,
|
||||
LC_ALL = C.DEFAULT_MODULE_LANG,
|
||||
LC_MESSAGES = C.DEFAULT_MODULE_LANG,
|
||||
)
|
||||
|
||||
def env_prefix(self, **kwargs):
|
||||
env = self.env.copy()
|
||||
env.update(kwargs)
|
||||
return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in env.items()])
|
||||
|
||||
def join_path(self, *args):
|
||||
return os.path.join(*args)
|
||||
|
||||
# some shells (eg, powershell) are snooty about filenames/extensions, this lets the shell plugin have a say
|
||||
def get_remote_filename(self, base_name):
|
||||
return base_name.strip()
|
||||
|
||||
def path_has_trailing_slash(self, path):
|
||||
return path.endswith('/')
|
||||
|
||||
def chmod(self, mode, path, recursive=True):
|
||||
path = pipes.quote(path)
|
||||
cmd = ['chmod', mode, path]
|
||||
if recursive:
|
||||
cmd.append('-R')
|
||||
return ' '.join(cmd)
|
||||
|
||||
def chown(self, path, user, group=None, recursive=True):
|
||||
path = pipes.quote(path)
|
||||
user = pipes.quote(user)
|
||||
|
||||
if group is None:
|
||||
cmd = ['chown', user, path]
|
||||
else:
|
||||
group = pipes.quote(group)
|
||||
cmd = ['chown', '%s:%s' % (user, group), path]
|
||||
|
||||
if recursive:
|
||||
cmd.append('-R')
|
||||
|
||||
return ' '.join(cmd)
|
||||
|
||||
def set_user_facl(self, path, user, mode, recursive=True):
|
||||
"""Only sets acls for users as that's really all we need"""
|
||||
path = pipes.quote(path)
|
||||
mode = pipes.quote(mode)
|
||||
user = pipes.quote(user)
|
||||
|
||||
cmd = ['setfacl']
|
||||
if recursive:
|
||||
cmd.append('-R')
|
||||
cmd.extend(('-m', 'u:%s:%s %s' % (user, mode, path)))
|
||||
|
||||
return ' '.join(cmd)
|
||||
|
||||
def remove(self, path, recurse=False):
|
||||
path = pipes.quote(path)
|
||||
cmd = 'rm -f '
|
||||
if recurse:
|
||||
cmd += '-r '
|
||||
return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL)
|
||||
|
||||
def mkdtemp(self, basefile=None, system=False, mode=None):
if not basefile:
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile)
if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')):
basetmp = self.join_path('/tmp', basefile)
cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
cmd += ' %s echo %s echo %s %s' % (self._SHELL_AND, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)

# change the umask in a subshell to achieve the desired mode
# also for directories created with `mkdir -p`
if mode:
tmp_umask = 0o777 & ~mode
cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT)

return cmd

def expand_user(self, user_home_path):
''' Return a command to expand tildes in a path

It can be either "~" or "~username". We use the POSIX definition of
a username:
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
'''

# Check that the user_path to expand is safe
if user_home_path != '~':
if not _USER_HOME_PATH_RE.match(user_home_path):
# pipes.quote will make the shell return the string verbatim
user_home_path = pipes.quote(user_home_path)
return 'echo %s' % user_home_path

def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
# don't quote the cmd if it's an empty string, because this will break pipelining mode
if cmd.strip() != '':
cmd = pipes.quote(cmd)
cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
if arg_path is not None:
cmd_parts.append(arg_path)
new_cmd = " ".join(cmd_parts)
if rm_tmp:
new_cmd = '%s; rm -rf "%s" %s' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL)
return new_cmd
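For the mkdtemp() logic above, the temporary umask is simply the complement of the requested mode within the 0o777 permission bits, so the directory created by `mkdir -p` comes out with at most that mode. A two-line illustration:

mode = 0o700
tmp_umask = 0o777 & ~mode   # 0o077: group/other bits are masked off
print(oct(tmp_umask))       # -> '0o77'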
@ -17,13 +17,24 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.plugins.shell.sh import ShellModule as ShModule
from ansible.plugins.shell import ShellBase

class ShellModule(ShModule):
class ShellModule(ShellBase):

# Common shell filenames that this plugin handles
COMPATIBLE_SHELLS = frozenset(('csh', 'tcsh'))
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'csh'

# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\\\n'
_SHELL_REDIRECT_ALLNULL = '>& /dev/null'
_SHELL_AND = '&&'
_SHELL_OR = '||'
_SHELL_SUB_LEFT = '"`'
_SHELL_SUB_RIGHT = '`"'
_SHELL_GROUP_LEFT = '('
_SHELL_GROUP_RIGHT = ')'

def env_prefix(self, **kwargs):
return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
@ -17,10 +17,19 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import pipes
from ansible.plugins.shell.sh import ShellModule as ShModule
from ansible.compat.six import text_type

class ShellModule(ShModule):

# Common shell filenames that this plugin handles
COMPATIBLE_SHELLS = frozenset(('fish',))
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'fish'

_SHELL_EMBEDDED_PY_EOL = '\n'
_SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
_SHELL_AND = '; and'
_SHELL_OR = '; or'
_SHELL_SUB_LEFT = '('

@ -29,4 +38,57 @@ class ShellModule(ShModule):
_SHELL_GROUP_RIGHT = ''

def env_prefix(self, **kwargs):
return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
env = self.env.copy()
env.update(kwargs)
return ' '.join(['set -lx %s %s;' % (k, pipes.quote(text_type(v))) for k,v in env.items()])

def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
# don't quote the cmd if it's an empty string, because this will break pipelining mode
if cmd.strip() != '':
cmd = pipes.quote(cmd)
cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
if arg_path is not None:
cmd_parts.append(arg_path)
new_cmd = " ".join(cmd_parts)
if rm_tmp:
new_cmd = 'begin ; %s; rm -rf "%s" %s ; end' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL)
return new_cmd
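The fish env_prefix() above emits one `set -lx NAME value;` clause per variable instead of the sh-style NAME=value pairs, since fish has no inline VAR=value command prefix. A rough standalone sketch, with shlex.quote standing in for the pipes.quote used in the plugin:

import shlex

def fish_env_prefix(env):
    # One 'set -lx' clause per variable, each value shell-quoted.
    return ' '.join('set -lx %s %s;' % (k, shlex.quote(str(v))) for k, v in env.items())

# fish_env_prefix({'LANG': 'C'}) -> 'set -lx LANG C;'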
|
||||
|
||||
def checksum(self, path, python_interp):
|
||||
# The following test is fish-compliant.
|
||||
#
|
||||
# In the following test, each condition is a check and logical
|
||||
# comparison (or or and) that sets the rc value. Every check is run so
|
||||
# the last check in the series to fail will be the rc that is
|
||||
# returned.
|
||||
#
|
||||
# If a check fails we error before invoking the hash functions because
|
||||
# hash functions may successfully take the hash of a directory on BSDs
|
||||
# (UFS filesystem?) which is not what the rest of the ansible code
|
||||
# expects
|
||||
#
|
||||
# If all of the available hashing methods fail we fail with an rc of
|
||||
# 0. This logic is added to the end of the cmd at the bottom of this
|
||||
# function.
|
||||
|
||||
# Return codes:
|
||||
# checksum: success!
|
||||
# 0: Unknown error
|
||||
# 1: Remote file does not exist
|
||||
# 2: No read permissions on the file
|
||||
# 3: File is a directory
|
||||
# 4: No python interpreter
|
||||
|
||||
# Quoting gets complex here. We're writing a python string that's
|
||||
# used by a variety of shells on the remote host to invoke a python
|
||||
# "one-liner".
|
||||
shell_escaped_path = pipes.quote(path)
|
||||
test = "set rc flag; [ -r %(p)s ] %(shell_or)s set rc 2; [ -f %(p)s ] %(shell_or)s set rc 1; [ -d %(p)s ] %(shell_and)s set rc 3; %(i)s -V 2>/dev/null %(shell_or)s set rc 4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"$rc \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR)
|
||||
csums = [
|
||||
u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3)
|
||||
u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4
|
||||
]
|
||||
|
||||
cmd = (" %s " % self._SHELL_OR).join(csums)
|
||||
cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path)
|
||||
return cmd
|
||||
|
|
|
@ -20,9 +20,7 @@ __metaclass__ = type
import base64
import os
import re
import random
import shlex
import time

from ansible.utils.unicode import to_bytes, to_unicode

@ -36,6 +34,13 @@ if _powershell_version:

class ShellModule(object):

# Common shell filenames that this plugin handles
# Powershell is handled differently. It's selected when winrm is the
# connection
COMPATIBLE_SHELLS = frozenset()
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'powershell'

def env_prefix(self, **kwargs):
return ''
@ -17,92 +17,30 @@
|
|||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
import re
|
||||
import pipes
|
||||
import ansible.constants as C
|
||||
import time
|
||||
import random
|
||||
|
||||
from ansible.compat.six import text_type
|
||||
from ansible.plugins.shell import ShellBase
|
||||
|
||||
_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
|
||||
|
||||
class ShellModule(object):
|
||||
class ShellModule(ShellBase):
|
||||
|
||||
# Common shell filenames that this plugin handles.
|
||||
# Note: sh is the default shell plugin so this plugin may also be selected
|
||||
# if the filename is not listed in any Shell plugin.
|
||||
COMPATIBLE_SHELLS = frozenset(('sh', 'zsh', 'bash', 'dash', 'ksh'))
|
||||
# Family of shells this has. Must match the filename without extension
|
||||
SHELL_FAMILY = 'sh'
|
||||
|
||||
# How to end lines in a python script one-liner
|
||||
_SHELL_EMBEDDED_PY_EOL = '\n'
|
||||
_SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
|
||||
_SHELL_AND = '&&'
|
||||
_SHELL_OR = '||'
|
||||
_SHELL_SUB_LEFT = '"$('
|
||||
_SHELL_SUB_RIGHT = ')"'
|
||||
_SHELL_SUB_LEFT = '"`'
|
||||
_SHELL_SUB_RIGHT = '`"'
|
||||
_SHELL_GROUP_LEFT = '('
|
||||
_SHELL_GROUP_RIGHT = ')'
|
||||
|
||||
def env_prefix(self, **kwargs):
|
||||
'''Build command prefix with environment variables.'''
|
||||
env = dict(
|
||||
LANG = C.DEFAULT_MODULE_LANG,
|
||||
LC_ALL = C.DEFAULT_MODULE_LANG,
|
||||
LC_MESSAGES = C.DEFAULT_MODULE_LANG,
|
||||
)
|
||||
env.update(kwargs)
|
||||
return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in env.items()])
|
||||
|
||||
def join_path(self, *args):
|
||||
return os.path.join(*args)
|
||||
|
||||
# some shells (eg, powershell) are snooty about filenames/extensions, this lets the shell plugin have a say
|
||||
def get_remote_filename(self, base_name):
|
||||
return base_name.strip()
|
||||
|
||||
def path_has_trailing_slash(self, path):
|
||||
return path.endswith('/')
|
||||
|
||||
def chmod(self, mode, path):
|
||||
path = pipes.quote(path)
|
||||
return 'chmod %s %s' % (mode, path)
|
||||
|
||||
def remove(self, path, recurse=False):
|
||||
path = pipes.quote(path)
|
||||
cmd = 'rm -f '
|
||||
if recurse:
|
||||
cmd += '-r '
|
||||
return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL)
|
||||
|
||||
def mkdtemp(self, basefile=None, system=False, mode=None):
|
||||
if not basefile:
|
||||
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
|
||||
basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile)
|
||||
if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')):
|
||||
basetmp = self.join_path('/tmp', basefile)
|
||||
cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
|
||||
cmd += ' %s echo %s echo %s %s' % (self._SHELL_AND, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
|
||||
|
||||
# change the umask in a subshell to achieve the desired mode
|
||||
# also for directories created with `mkdir -p`
|
||||
if mode:
|
||||
tmp_umask = 0o777 & ~mode
|
||||
cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT)
|
||||
|
||||
return cmd
|
||||
|
||||
def expand_user(self, user_home_path):
|
||||
''' Return a command to expand tildes in a path
|
||||
|
||||
It can be either "~" or "~username". We use the POSIX definition of
|
||||
a username:
|
||||
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
|
||||
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
|
||||
'''
|
||||
|
||||
# Check that the user_path to expand is safe
|
||||
if user_home_path != '~':
|
||||
if not _USER_HOME_PATH_RE.match(user_home_path):
|
||||
# pipes.quote will make the shell return the string verbatim
|
||||
user_home_path = pipes.quote(user_home_path)
|
||||
return 'echo %s' % user_home_path
|
||||
|
||||
def checksum(self, path, python_interp):
|
||||
# The following test needs to be SH-compliant. BASH-isms will
|
||||
|
@ -136,23 +74,11 @@ class ShellModule(object):
|
|||
shell_escaped_path = pipes.quote(path)
|
||||
test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR)
|
||||
csums = [
|
||||
"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3)
|
||||
"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4
|
||||
u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3)
|
||||
u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4
|
||||
]
|
||||
|
||||
cmd = (" %s " % self._SHELL_OR).join(csums)
|
||||
cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path)
|
||||
return cmd
|
||||
|
||||
def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
|
||||
# don't quote the cmd if it's an empty string, because this will
|
||||
# break pipelining mode
|
||||
if cmd.strip() != '':
|
||||
cmd = pipes.quote(cmd)
|
||||
cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
|
||||
if arg_path is not None:
|
||||
cmd_parts.append(arg_path)
|
||||
new_cmd = " ".join(cmd_parts)
|
||||
if rm_tmp:
|
||||
new_cmd = '%s; rm -rf "%s" %s' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL)
|
||||
return new_cmd
|
||||
|
|
|
@ -30,6 +30,8 @@ from jinja2.exceptions import UndefinedError
|
|||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable
|
||||
from ansible.executor.play_iterator import PlayIterator
|
||||
from ansible.executor.process.worker import WorkerProcess
|
||||
from ansible.executor.task_result import TaskResult
|
||||
from ansible.inventory.host import Host
|
||||
from ansible.inventory.group import Group
|
||||
|
@ -37,7 +39,9 @@ from ansible.playbook.helpers import load_list_of_blocks
|
|||
from ansible.playbook.included_file import IncludedFile
|
||||
from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader
|
||||
from ansible.template import Templar
|
||||
from ansible.utils.unicode import to_unicode
|
||||
from ansible.vars.unsafe_proxy import wrap_var
|
||||
from ansible.vars import combine_vars
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -137,38 +141,32 @@ class StrategyBase:
|
|||
|
||||
display.debug("entering _queue_task() for %s/%s" % (host, task))
|
||||
|
||||
task_vars['hostvars'] = self._tqm.hostvars
|
||||
# and then queue the new task
|
||||
display.debug("%s - putting task (%s) in queue" % (host, task))
|
||||
try:
|
||||
display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
|
||||
|
||||
(worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
|
||||
self._cur_worker += 1
|
||||
if self._cur_worker >= len(self._workers):
|
||||
self._cur_worker = 0
|
||||
|
||||
# create a dummy object with plugin loaders set as an easier
|
||||
# way to share them with the forked processes
|
||||
shared_loader_obj = SharedPluginLoaderObj()
|
||||
|
||||
# compress (and convert) the data if so configured, which can
|
||||
# help a lot when the variable dictionary is huge. We pop the
|
||||
# hostvars out of the task variables right now, due to the fact
|
||||
# that they're not JSON serializable
|
||||
compressed_vars = False
|
||||
if C.DEFAULT_VAR_COMPRESSION_LEVEL > 0:
|
||||
zip_vars = zlib.compress(json.dumps(task_vars), C.DEFAULT_VAR_COMPRESSION_LEVEL)
|
||||
compressed_vars = True
|
||||
# we're done with the original dict now, so delete it to
|
||||
# try and reclaim some memory space, which is helpful if the
|
||||
# data contained in the dict is very large
|
||||
del task_vars
|
||||
else:
|
||||
zip_vars = task_vars # noqa (pyflakes false positive because task_vars is deleted in the conditional above)
|
||||
|
||||
# and queue the task
|
||||
main_q.put((host, task, self._loader.get_basedir(), zip_vars, compressed_vars, play_context, shared_loader_obj))
|
||||
queued = False
|
||||
while True:
|
||||
(worker_prc, rslt_q) = self._workers[self._cur_worker]
|
||||
if worker_prc is None or not worker_prc.is_alive():
|
||||
worker_prc = WorkerProcess(rslt_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj)
|
||||
self._workers[self._cur_worker][0] = worker_prc
|
||||
worker_prc.start()
|
||||
queued = True
|
||||
self._cur_worker += 1
|
||||
if self._cur_worker >= len(self._workers):
|
||||
self._cur_worker = 0
|
||||
time.sleep(0.0001)
|
||||
if queued:
|
||||
break
|
||||
|
||||
del task_vars
|
||||
self._pending_results += 1
|
||||
except (EOFError, IOError, AssertionError) as e:
|
||||
# most likely an abort
|
||||
|
@ -176,7 +174,7 @@ class StrategyBase:
|
|||
return
|
||||
display.debug("exiting _queue_task() for %s/%s" % (host, task))
|
||||
|
||||
def _process_pending_results(self, iterator):
|
||||
def _process_pending_results(self, iterator, one_pass=False):
|
||||
'''
|
||||
Reads results off the final queue and takes appropriate action
|
||||
based on the result (executing callbacks, updating state, etc.).
|
||||
|
@ -189,10 +187,20 @@ class StrategyBase:
|
|||
result = self._final_q.get()
|
||||
display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))
|
||||
|
||||
# helper method, used to find the original host from the one
|
||||
# returned in the result/message, which has been serialized and
|
||||
# thus had some information stripped from it to speed up the
|
||||
# serialization process
|
||||
def get_original_host(host):
|
||||
if host.name in self._inventory._hosts_cache:
|
||||
return self._inventory._hosts_cache[host.name]
|
||||
else:
|
||||
return self._inventory.get_host(host.name)
|
||||
|
||||
# all host status messages contain 2 entries: (msg, task_result)
|
||||
if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
|
||||
task_result = result[1]
|
||||
host = task_result._host
|
||||
host = get_original_host(task_result._host)
|
||||
task = task_result._task
|
||||
if result[0] == 'host_task_failed' or task_result.is_failed():
|
||||
if not task.ignore_errors:
|
||||
|
@ -202,8 +210,12 @@ class StrategyBase:
|
|||
[iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts]
|
||||
else:
|
||||
iterator.mark_host_failed(host)
|
||||
self._tqm._failed_hosts[host.name] = True
|
||||
self._tqm._stats.increment('failures', host.name)
|
||||
|
||||
# only add the host to the failed list officially if it has
|
||||
# been failed by the iterator
|
||||
if iterator.is_failed(host):
|
||||
self._tqm._failed_hosts[host.name] = True
|
||||
self._tqm._stats.increment('failures', host.name)
|
||||
else:
|
||||
self._tqm._stats.increment('ok', host.name)
|
||||
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
|
||||
|
@ -221,7 +233,7 @@ class StrategyBase:
|
|||
self._tqm._stats.increment('changed', host.name)
|
||||
self._tqm.send_callback('v2_runner_on_ok', task_result)
|
||||
|
||||
if self._diff and 'diff' in task_result._result:
|
||||
if self._diff:
|
||||
self._tqm.send_callback('v2_on_file_diff', task_result)
|
||||
|
||||
self._pending_results -= 1
|
||||
|
@ -244,31 +256,30 @@ class StrategyBase:
|
|||
new_host_info = result_item.get('add_host', dict())
|
||||
|
||||
self._add_host(new_host_info, iterator)
|
||||
self._tqm._hostvars_manager.hostvars().set_inventory(self._inventory)
|
||||
|
||||
elif result[0] == 'add_group':
|
||||
host = result[1]
|
||||
host = get_original_host(result[1])
|
||||
result_item = result[2]
|
||||
self._add_group(host, result_item)
|
||||
self._tqm._hostvars_manager.hostvars().set_inventory(self._inventory)
|
||||
|
||||
elif result[0] == 'notify_handler':
|
||||
task_result = result[1]
|
||||
handler_name = result[2]
|
||||
|
||||
original_task = iterator.get_original_task(task_result._host, task_result._task)
|
||||
original_host = get_original_host(task_result._host)
|
||||
original_task = iterator.get_original_task(original_host, task_result._task)
|
||||
if handler_name not in self._notified_handlers:
|
||||
self._notified_handlers[handler_name] = []
|
||||
|
||||
if task_result._host not in self._notified_handlers[handler_name]:
|
||||
self._notified_handlers[handler_name].append(task_result._host)
|
||||
if original_host not in self._notified_handlers[handler_name]:
|
||||
self._notified_handlers[handler_name].append(original_host)
|
||||
display.vv("NOTIFIED HANDLER %s" % (handler_name,))
|
||||
|
||||
elif result[0] == 'register_host_var':
|
||||
# essentially the same as 'set_host_var' below, however we
|
||||
# never follow the delegate_to value for registered vars and
|
||||
# the variable goes in the fact_cache
|
||||
host = result[1]
|
||||
host = get_original_host(result[1])
|
||||
task = result[2]
|
||||
var_value = wrap_var(result[3])
|
||||
var_name = task.register
|
||||
|
@ -280,16 +291,15 @@ class StrategyBase:
|
|||
|
||||
for target_host in host_list:
|
||||
self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})
|
||||
self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(target_host, {var_name: var_value})
|
||||
|
||||
elif result[0] in ('set_host_var', 'set_host_facts'):
|
||||
host = result[1]
|
||||
host = get_original_host(result[1])
|
||||
task = result[2]
|
||||
item = result[3]
|
||||
|
||||
# find the host we're actually refering too here, which may
|
||||
# be a host that is not really in inventory at all
|
||||
if task.delegate_to is not None:
|
||||
if task.delegate_to is not None and task.delegate_facts:
|
||||
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
|
||||
self.add_tqm_variables(task_vars, play=iterator._play)
|
||||
if item is not None:
|
||||
|
@ -302,32 +312,37 @@ class StrategyBase:
|
|||
else:
|
||||
actual_host = host
|
||||
|
||||
if task.run_once:
|
||||
host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
|
||||
else:
|
||||
host_list = [actual_host]
|
||||
|
||||
if result[0] == 'set_host_var':
|
||||
var_name = result[4]
|
||||
var_value = result[5]
|
||||
|
||||
if task.run_once:
|
||||
host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
|
||||
else:
|
||||
host_list = [actual_host]
|
||||
|
||||
for target_host in host_list:
|
||||
self._variable_manager.set_host_variable(target_host, var_name, var_value)
|
||||
self._tqm._hostvars_manager.hostvars().set_host_variable(target_host, var_name, var_value)
|
||||
elif result[0] == 'set_host_facts':
|
||||
facts = result[4]
|
||||
if task.action == 'set_fact':
|
||||
self._variable_manager.set_nonpersistent_facts(actual_host, facts)
|
||||
self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(actual_host, facts)
|
||||
else:
|
||||
self._variable_manager.set_host_facts(actual_host, facts)
|
||||
self._tqm._hostvars_manager.hostvars().set_host_facts(actual_host, facts)
|
||||
|
||||
for target_host in host_list:
|
||||
if task.action == 'set_fact':
|
||||
self._variable_manager.set_nonpersistent_facts(target_host, facts)
|
||||
else:
|
||||
self._variable_manager.set_host_facts(target_host, facts)
|
||||
elif result[0].startswith('v2_runner_item') or result[0] == 'v2_runner_retry':
|
||||
self._tqm.send_callback(result[0], result[1])
|
||||
elif result[0] == 'v2_on_file_diff':
|
||||
if self._diff:
|
||||
self._tqm.send_callback('v2_on_file_diff', result[1])
|
||||
else:
|
||||
raise AnsibleError("unknown result message received: %s" % result[0])
|
||||
|
||||
except Queue.Empty:
|
||||
time.sleep(0.0001)
|
||||
|
||||
if one_pass:
|
||||
break
|
||||
|
||||
return ret_results
|
||||
|
||||
def _wait_on_pending_results(self, iterator):
|
||||
|
@ -365,9 +380,8 @@ class StrategyBase:
|
|||
allgroup.add_host(new_host)
|
||||
|
||||
# Set/update the vars for this host
|
||||
new_vars = host_info.get('host_vars', dict())
|
||||
new_host.vars = self._inventory.get_host_vars(new_host)
|
||||
new_host.vars.update(new_vars)
|
||||
new_host.vars = combine_vars(new_host.vars, self._inventory.get_host_vars(new_host))
|
||||
new_host.vars = combine_vars(new_host.vars, host_info.get('host_vars', dict()))
|
||||
|
||||
new_groups = host_info.get('groups', [])
|
||||
for group_name in new_groups:
|
||||
|
@ -442,7 +456,7 @@ class StrategyBase:
|
|||
block_list = load_list_of_blocks(
|
||||
data,
|
||||
play=included_file._task._block._play,
|
||||
parent_block=included_file._task._block,
|
||||
parent_block=None,
|
||||
task_include=included_file._task,
|
||||
role=included_file._task._role,
|
||||
use_handlers=is_handler,
|
||||
|
@ -458,7 +472,7 @@ class StrategyBase:
|
|||
# mark all of the hosts including this file as failed, send callbacks,
|
||||
# and increment the stats for this host
|
||||
for host in included_file._hosts:
|
||||
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=str(e)))
|
||||
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_unicode(e)))
|
||||
iterator.mark_host_failed(host)
|
||||
self._tqm._failed_hosts[host.name] = True
|
||||
self._tqm._stats.increment('failures', host.name)
|
||||
|
@ -468,11 +482,7 @@ class StrategyBase:
|
|||
# set the vars for this task from those specified as params to the include
|
||||
for b in block_list:
|
||||
# first make a copy of the including task, so that each has a unique copy to modify
|
||||
# FIXME: not sure if this is the best way to fix this, as we might be losing
|
||||
# information in the copy. Previously we assigned the include params to
|
||||
# the block variables directly, which caused other problems, so we may
|
||||
# need to figure out a third option if this also presents problems.
|
||||
b._task_include = b._task_include.copy(exclude_block=True)
|
||||
b._task_include = b._task_include.copy()
|
||||
# then we create a temporary set of vars to ensure the variable reference is unique
|
||||
temp_vars = b._task_include.vars.copy()
|
||||
temp_vars.update(included_file._args.copy())
|
||||
|
@ -481,10 +491,10 @@ class StrategyBase:
|
|||
# error so that users know not to specify them both ways
|
||||
tags = temp_vars.pop('tags', [])
|
||||
if isinstance(tags, string_types):
|
||||
tags = [ tags ]
|
||||
tags = tags.split(',')
|
||||
if len(tags) > 0:
|
||||
if len(b._task_include.tags) > 0:
|
||||
raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task)",
|
||||
raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
|
||||
obj=included_file._task._ds)
|
||||
display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
|
||||
b._task_include.tags = tags
|
||||
|
@ -537,7 +547,10 @@ class StrategyBase:
|
|||
# self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
|
||||
# result = False
|
||||
# break
|
||||
saved_name = handler.name
|
||||
handler.name = handler_name
|
||||
self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
|
||||
handler.name = saved_name
|
||||
|
||||
if notified_hosts is None:
|
||||
notified_hosts = self._notified_handlers[handler_name]
|
||||
|
@ -569,6 +582,7 @@ class StrategyBase:
|
|||
host_results,
|
||||
self._tqm,
|
||||
iterator=iterator,
|
||||
inventory=self._inventory,
|
||||
loader=self._loader,
|
||||
variable_manager=self._variable_manager
|
||||
)
|
||||
|
@ -587,6 +601,7 @@ class StrategyBase:
|
|||
for task in block.block:
|
||||
result = self._do_handler_run(
|
||||
handler=task,
|
||||
handler_name=None,
|
||||
iterator=iterator,
|
||||
play_context=play_context,
|
||||
notified_hosts=included_file._hosts[:],
|
||||
|
@ -608,10 +623,10 @@ class StrategyBase:
def _take_step(self, task, host=None):

ret=False
msg=u'Perform task: %s ' % task
if host:
msg = u'Perform task: %s on %s (y/n/c): ' % (task, host)
else:
msg = u'Perform task: %s (y/n/c): ' % task
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)

if resp.lower() in ['y','yes']:
@ -643,5 +658,10 @@ class StrategyBase:
|
|||
self._inventory.refresh_inventory()
|
||||
#elif meta_action == 'reset_connection':
|
||||
# connection_info.connection.close()
|
||||
elif meta_action == 'clear_host_errors':
|
||||
self._tqm._failed_hosts = dict()
|
||||
self._tqm._unreachable_hosts = dict()
|
||||
for host in iterator._host_states:
|
||||
iterator._host_states[host].fail_state = iterator.FAILED_NONE
|
||||
else:
|
||||
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
|
||||
|
|
|
@ -23,7 +23,9 @@ import time
|
|||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.playbook.included_file import IncludedFile
|
||||
from ansible.plugins import action_loader
|
||||
from ansible.plugins.strategy import StrategyBase
|
||||
from ansible.template import Templar
|
||||
|
||||
try:
|
||||
from __main__ import display
|
||||
|
@ -56,7 +58,7 @@ class StrategyModule(StrategyBase):
|
|||
work_to_do = True
|
||||
while work_to_do and not self._tqm._terminated:
|
||||
|
||||
hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
|
||||
hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts and not iterator.is_failed(host)]
|
||||
if len(hosts_left) == 0:
|
||||
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
|
||||
result = False
|
||||
|
@ -78,7 +80,7 @@ class StrategyModule(StrategyBase):
|
|||
(state, task) = iterator.get_next_task_for_host(host, peek=True)
|
||||
display.debug("free host state: %s" % state)
|
||||
display.debug("free host task: %s" % task)
|
||||
if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task:
|
||||
if host_name not in self._tqm._unreachable_hosts and task:
|
||||
|
||||
# set the flag so the outer loop knows we've still found
|
||||
# some work which needs to be done
|
||||
|
@ -92,10 +94,28 @@ class StrategyModule(StrategyBase):
self._blocked_hosts[host_name] = True
(state, task) = iterator.get_next_task_for_host(host)

try:
action = action_loader.get(task.action, class_only=True)
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
action = None

display.debug("getting variables")
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
display.debug("done getting variables")

run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
if run_once:
if action and getattr(action, 'BYPASS_HOST_LOOP', False):
raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy " \
"and would instead execute for every host in the inventory list." % task.action, obj=task._ds)
else:
display.warning("Using run_once with the free strategy is not currently supported. This task will still be " \
"executed for every host in the inventory list.")

# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
if task._role and task._role.has_run(host):
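
In the hunk above, run_once is now derived from the templated task attribute or the action plugin's BYPASS_HOST_LOOP class flag. A simplified sketch of that decision, where templar and action stand in for the real Templar instance and action-plugin class:

# Hedged sketch: not the upstream function, only the boolean logic it applies.
def resolve_run_once(task_run_once, templar, action):
    templated = templar.template(task_run_once)   # e.g. "{{ single_pass }}" -> True/False
    bypass = bool(action and getattr(action, 'BYPASS_HOST_LOOP', False))
    return bool(templated) or bypass
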
@ -106,24 +126,17 @@ class StrategyModule(StrategyBase):
continue

if task.action == 'meta':
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
if meta_action == 'noop':
continue
elif meta_action == 'flush_handlers':
# FIXME: in the 'free' mode, flushing handlers should result in
# only those handlers notified for the host doing the flush
self.run_handlers(iterator, play_context)
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)

self._execute_meta(task, play_context, iterator)
self._blocked_hosts[host_name] = False
else:
# handle step if needed, skip meta actions as they are used internally
if not self._step or self._take_step(task, host_name):
if task.any_errors_fatal:
display.warning("Using any_errors_fatal with the free strategy is not supported, as tasks are executed independently on each host")
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
self._queue_task(host, task, task_vars, play_context)
else:
display.debug("%s is blocked, skipping for now" % host_name)

# move on to the next host and make sure we
# haven't gone past the end of our hosts list
@ -135,12 +148,18 @@ class StrategyModule(StrategyBase):
if last_host == starting_host:
break

results = self._process_pending_results(iterator)
results = self._wait_on_pending_results(iterator)
host_results.extend(results)

try:
included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator,
loader=self._loader, variable_manager=self._variable_manager)
included_files = IncludedFile.process_include_results(
host_results,
self._tqm,
iterator=iterator,
inventory=self._inventory,
loader=self._loader,
variable_manager=self._variable_manager
)
except AnsibleError as e:
return False
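
Both strategies now thread the inventory through IncludedFile.process_include_results. A hedged sketch of the expanded call shape, with 'strategy' standing in for the StrategyModule instance (self) and the keyword names mirroring the hunk above:

from ansible.errors import AnsibleError
from ansible.playbook.included_file import IncludedFile

# Illustrative wrapper only; the helper name and the None return are assumptions.
def collect_included_files(strategy, host_results, iterator):
    try:
        return IncludedFile.process_include_results(
            host_results,
            strategy._tqm,
            iterator=iterator,
            inventory=strategy._inventory,   # newly threaded through in this change
            loader=strategy._loader,
            variable_manager=strategy._variable_manager
        )
    except AnsibleError:
        return None
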
@ -170,13 +189,7 @@ class StrategyModule(StrategyBase):
display.debug("done adding collected blocks to iterator")

# pause briefly so we don't spin lock
time.sleep(0.05)

try:
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
except Exception as e:
pass
time.sleep(0.001)

# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered

@ -29,6 +29,7 @@ from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategy import StrategyBase
from ansible.template import Templar
from ansible.utils.unicode import to_unicode

try:
from __main__ import display
@ -62,19 +63,26 @@ class StrategyModule(StrategyBase):
num_rescue = 0
num_always = 0

lowest_cur_block = len(iterator._blocks)

display.debug("counting tasks in each state of execution")
for (k, v) in iteritems(host_tasks):
if v is None:
continue
host_tasks_to_run = [(host, state_task)
for host, state_task in iteritems(host_tasks)
if state_task and state_task[1]]

if host_tasks_to_run:
lowest_cur_block = min(
(s.cur_block for h, (s, t) in host_tasks_to_run
if s.run_state != PlayIterator.ITERATING_COMPLETE))
else:
# empty host_tasks_to_run will just run till the end of the function
# without ever touching lowest_cur_block
lowest_cur_block = None

for (k, v) in host_tasks_to_run:
(s, t) = v
if t is None:
continue

if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
lowest_cur_block = s.cur_block
if s.cur_block > lowest_cur_block:
# Not the current block, ignore it
continue

if s.run_state == PlayIterator.ITERATING_SETUP:
num_setups += 1
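
The rewrite above first collects the hosts that still have a task and then takes the minimum current block in a single min() pass instead of updating it inside the loop. A simplified sketch, with host_tasks mapping host name to a (state, task) pair and a sentinel standing in for the PlayIterator ITERATING_COMPLETE constant:

# Hedged sketch; PlayIterator state handling is reduced to a plain sentinel here.
ITERATING_COMPLETE = object()

def lowest_current_block(host_tasks):
    host_tasks_to_run = [(host, state_task)
                         for host, state_task in host_tasks.items()
                         if state_task and state_task[1]]
    if not host_tasks_to_run:
        # nothing left to schedule; callers can treat None as "no current block"
        return None
    return min(s.cur_block for _host, (s, _task) in host_tasks_to_run
               if s.run_state is not ITERATING_COMPLETE)
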
@ -98,7 +106,7 @@ class StrategyModule(StrategyBase):
rvals = []
display.debug("starting to advance hosts")
for host in hosts:
host_state_task = host_tasks[host.name]
host_state_task = host_tasks.get(host.name)
if host_state_task is None:
continue
(s, t) = host_state_task

@ -155,7 +163,7 @@ class StrategyModule(StrategyBase):

try:
display.debug("getting the remaining hosts for this loop")
hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts and not iterator.is_failed(host)]
display.debug("done getting the remaining hosts for this loop")

# queue up this task for each host in the inventory

@ -169,6 +177,10 @@ class StrategyModule(StrategyBase):
skip_rest = False
choose_step = True

# flag set if task is set to any_errors_fatal
any_errors_fatal = False

results = []
for (host, task) in host_tasks:
if not task:
continue
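
The hosts_left comprehension (here and in the free strategy earlier) now also drops hosts the iterator has already failed, not just unreachable ones. A hedged sketch of that filter, with inventory, play, tqm and iterator standing in for the real objects:

# Illustrative helper; the comprehension mirrors the updated lines above.
def remaining_hosts(inventory, play, tqm, iterator):
    return [host for host in inventory.get_hosts(play.hosts)
            if host.name not in tqm._unreachable_hosts
            and not iterator.is_failed(host)]
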
@ -185,12 +197,10 @@ class StrategyModule(StrategyBase):

try:
action = action_loader.get(task.action, class_only=True)
if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
run_once = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
action = None

# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)

@ -218,6 +228,11 @@ class StrategyModule(StrategyBase):
templar = Templar(loader=self._loader, variables=task_vars)
display.debug("done getting variables")

run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)

if task.any_errors_fatal or run_once:
any_errors_fatal = True

if not callback_sent:
display.debug("sending task start callback, copying the task so we can template it temporarily")
saved_name = task.name

@ -243,12 +258,14 @@ class StrategyModule(StrategyBase):
if run_once:
break

results += self._process_pending_results(iterator, one_pass=True)

# go to next host/task group
if skip_rest:
continue

display.debug("done queuing things up, now waiting for results queue to drain")
results = self._wait_on_pending_results(iterator)
results += self._wait_on_pending_results(iterator)
host_results.extend(results)

if not work_to_do and len(iterator.get_failed_hosts()) > 0:
@ -258,11 +275,18 @@ class StrategyModule(StrategyBase):
break

try:
included_files = IncludedFile.process_include_results(host_results, self._tqm,
iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
included_files = IncludedFile.process_include_results(
host_results,
self._tqm,
iterator=iterator,
inventory=self._inventory,
loader=self._loader,
variable_manager=self._variable_manager
)
except AnsibleError as e:
return False

include_failure = False
if len(included_files) > 0:
display.debug("we have included files to process")
noop_task = Task()

@ -307,7 +331,8 @@ class StrategyModule(StrategyBase):
for host in included_file._hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
display.error(e, wrap_text=False)
display.error(to_unicode(e), wrap_text=False)
include_failure = True
continue

# finally go through all of the hosts and append the
@ -321,6 +346,23 @@ class StrategyModule(StrategyBase):
display.debug("done processing included files")

display.debug("results queue empty")

display.debug("checking for any_errors_fatal")
failed_hosts = []
for res in results:
if res.is_failed() or res.is_unreachable():
failed_hosts.append(res._host.name)

# if any_errors_fatal and we had an error, mark all hosts as failed
if any_errors_fatal and len(failed_hosts) > 0:
for host in hosts_left:
# don't double-mark hosts, or the iterator will potentially
# fail them out of the rescue/always states
if host.name not in failed_hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
display.debug("done checking for any_errors_fatal")

except (IOError, EOFError) as e:
display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
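
The new block above records which hosts failed or became unreachable in this batch and, when any_errors_fatal is in effect, fails the remaining hosts as well. A hedged sketch of that fan-out; results items are assumed to behave like TaskResult objects (is_failed(), is_unreachable(), _host), and tqm/iterator stand in for the real TaskQueueManager and PlayIterator:

# Illustrative only; the function name is an assumption, the logic mirrors the hunk.
def apply_any_errors_fatal(results, hosts_left, tqm, iterator, any_errors_fatal):
    failed_hosts = [res._host.name for res in results
                    if res.is_failed() or res.is_unreachable()]
    if any_errors_fatal and failed_hosts:
        for host in hosts_left:
            # hosts that already failed are left alone so the iterator can
            # still move them into rescue/always blocks
            if host.name not in failed_hosts:
                tqm._failed_hosts[host.name] = True
                iterator.mark_host_failed(host)
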
@ -62,26 +62,27 @@ def skipped(*a, **kw):
skipped = item.get('skipped', False)
return skipped

def regex(value='', pattern='', ignorecase=False, match_type='search'):
def regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'):
''' Expose `re` as a boolean filter using the `search` method by default.
This is likely only useful for `search` and `match` which already
have their own filters.
'''
flags = 0
if ignorecase:
flags = re.I
else:
flags = 0
flags |= re.I
if multiline:
flags |= re.M
_re = re.compile(pattern, flags=flags)
_bool = __builtins__.get('bool')
return _bool(getattr(_re, match_type, 'search')(value))

def match(value, pattern='', ignorecase=False):
def match(value, pattern='', ignorecase=False, multiline=False):
''' Perform a `re.match` returning a boolean '''
return regex(value, pattern, ignorecase, 'match')
return regex(value, pattern, ignorecase, multiline, 'match')

def search(value, pattern='', ignorecase=False):
def search(value, pattern='', ignorecase=False, multiline=False):
''' Perform a `re.search` returning a boolean '''
return regex(value, pattern, ignorecase, 'search')
return regex(value, pattern, ignorecase, multiline, 'search')

class TestModule(object):
''' Ansible core jinja2 tests '''

@ -89,14 +90,18 @@ class TestModule(object):
def tests(self):
return {
# failure testing
'failed' : failed,
'success' : success,
'failed' : failed,
'failure' : failed,
'success' : success,
'succeeded' : success,

# changed testing
'changed' : changed,
'change' : changed,

# skip testing
'skipped' : skipped,
'skip' : skipped,

# regex
'match': match,
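
With the change above, the regex-based tests compose `re` flags instead of overwriting them, so ignorecase and the new multiline option can be combined. A hedged, simplified sketch of the flag handling, plus an illustrative template-style usage; the `__builtins__` lookup and the default-method fallback from the real code are omitted here:

import re

# Simplified stand-in for the updated filter; not the upstream function itself.
def regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'):
    flags = 0
    if ignorecase:
        flags |= re.I      # flags are OR-ed together rather than assigned
    if multiline:
        flags |= re.M
    compiled = re.compile(pattern, flags=flags)
    return bool(getattr(compiled, match_type)(value))

# e.g. in a playbook condition (illustrative):
#   when: motd_contents | search('^ERROR', multiline=True)
assert regex('line1\nERROR here', r'^ERROR', multiline=True, match_type='search')
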