Mirror of https://github.com/ansible-collections/community.general.git (synced 2025-10-03 23:14:02 -07:00)
Merge pull request #10618 from jder/force-handlers
Fix --force-handlers, and allow it in plays and ansible.cfg
Commit 67512aeeb6: 11 changed files with 123 additions and 12 deletions
@@ -173,6 +173,8 @@ DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings',
 DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
 COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
 DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
+DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
+
 RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
 RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
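The hunk above registers the new DEFAULT_FORCE_HANDLERS constant through get_config, so the default can come from ansible.cfg (force_handlers under the defaults section) or the ANSIBLE_FORCE_HANDLERS environment variable, falling back to False. A minimal sketch of that lookup order, assuming a ConfigParser-style object p; the helper below is illustrative and is not Ansible's actual get_config implementation:

    import os

    def lookup_bool(p, section, key, env_var, default):
        # Illustrative stand-in for get_config(..., boolean=True):
        # the environment variable wins, then the ini file, then the default.
        value = os.environ.get(env_var)
        if value is None and p is not None and p.has_option(section, key):
            value = p.get(section, key)
        if value is None:
            return default
        return str(value).strip().lower() in ('1', 'true', 'yes', 'on')

    # Mirrors the new constant: False unless ansible.cfg or the environment enables it.
    # DEFAULT_FORCE_HANDLERS = lookup_bool(p, 'defaults', 'force_handlers',
    #                                      'ANSIBLE_FORCE_HANDLERS', False)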
@@ -375,17 +375,17 @@ class PlayBook(object):
 
     # *****************************************************
 
-    def _trim_unavailable_hosts(self, hostlist=[]):
+    def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
         ''' returns a list of hosts that haven't failed and aren't dark '''
 
-        return [ h for h in hostlist if (h not in self.stats.failures) and (h not in self.stats.dark)]
+        return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]
 
     # *****************************************************
 
-    def _run_task_internal(self, task):
+    def _run_task_internal(self, task, include_failed=False):
        ''' run a particular module step in a playbook '''
 
-        hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts))
+        hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
         self.inventory.restrict_to(hosts)
 
         runner = ansible.runner.Runner(
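The keep_failed flag added to _trim_unavailable_hosts lets a handler run keep hosts that already failed a task while still dropping unreachable ("dark") hosts. A small standalone sketch of that filter, with a made-up stats object and host names (assumptions for illustration, not part of the diff):

    class Stats(object):
        # stand-in for the runner stats object referenced as self.stats
        def __init__(self, failures, dark):
            self.failures = failures   # hosts with at least one failed task
            self.dark = dark           # hosts that were unreachable

    def trim_unavailable_hosts(stats, hostlist, keep_failed=False):
        # same comprehension as the patched method, minus the class plumbing
        return [h for h in hostlist
                if (keep_failed or h not in stats.failures) and (h not in stats.dark)]

    stats = Stats(failures={'web1': 1}, dark={'db1': 1})
    hosts = ['web1', 'web2', 'db1']
    print(trim_unavailable_hosts(stats, hosts))                    # ['web2']
    print(trim_unavailable_hosts(stats, hosts, keep_failed=True))  # ['web1', 'web2']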
@@ -493,7 +493,8 @@ class PlayBook(object):
             task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
 
         # load up an appropriate ansible runner to run the task in parallel
-        results = self._run_task_internal(task)
+        include_failed = is_handler and play.force_handlers
+        results = self._run_task_internal(task, include_failed=include_failed)
 
         # if no hosts are matched, carry on
         hosts_remaining = True
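Only handlers pass include_failed=True, and only when the play has force_handlers enabled; ordinary tasks keep the previous behaviour. A quick sketch of the flag combinations (the names follow the diff, the loop is purely illustrative):

    for is_handler in (False, True):
        for force_handlers in (False, True):
            include_failed = is_handler and force_handlers
            print("%s %s -> %s" % (is_handler, force_handlers, include_failed))
    # Only a handler run under force_handlers keeps failed hosts in scope:
    # False False -> False, False True -> False, True False -> False, True True -> True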
@@ -811,7 +812,7 @@ class PlayBook(object):
 
                 # if no hosts remain, drop out
                 if not host_list:
-                    if self.force_handlers:
+                    if play.force_handlers:
                         task_errors = True
                         break
                     else:
@@ -821,7 +822,7 @@ class PlayBook(object):
         # lift restrictions after each play finishes
         self.inventory.lift_also_restriction()
 
-        if task_errors and not self.force_handlers:
+        if task_errors and not play.force_handlers:
             # if there were failed tasks and handler execution
             # is not forced, quit the play with an error
             return False
@@ -856,7 +857,7 @@ class PlayBook(object):
                 play.max_fail_pct = 0
             if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
                 host_list = None
-            if not host_list and not self.force_handlers:
+            if not host_list and not play.force_handlers:
                 self.callbacks.on_no_hosts_remaining()
                 return False
 
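The three PlayBook hunks above read force_handlers from the current play instead of the playbook object, so the decision to keep running (and to still fire handlers) is made per play. A minimal sketch of the patched check, using a hypothetical FakePlay class for illustration:

    class FakePlay(object):
        def __init__(self, force_handlers):
            self.force_handlers = force_handlers

    def should_abort(task_errors, play):
        # mirrors the patched condition: only bail out of the play early
        # when handler execution is not being forced for this play
        return task_errors and not play.force_handlers

    print(should_abort(True, FakePlay(force_handlers=False)))  # True  -> play stops
    print(should_abort(True, FakePlay(force_handlers=True)))   # False -> handlers still run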
@@ -34,9 +34,10 @@ class Play(object):
 
     _pb_common = [
         'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
-        'become_method', 'become_user', 'environment', 'gather_facts', 'handlers', 'hosts',
-        'name', 'no_log', 'remote_user', 'roles', 'serial', 'su', 'su_user', 'sudo',
-        'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt', 'vault_password',
+        'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts',
+        'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su',
+        'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt',
+        'vault_password',
     ]
 
     __slots__ = _pb_common + [
@@ -153,6 +154,7 @@ class Play(object):
         self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
         self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
         self.no_log = utils.boolean(ds.get('no_log', 'false'))
+        self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers))
 
         # Fail out if user specifies conflicting privelege escalations
         if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
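The Play hunks add force_handlers to the recognised play keywords and resolve it with ds.get('force_handlers', self.playbook.force_handlers): a value set on the play itself wins, otherwise the playbook-level value (seeded from --force-handlers, ansible.cfg, or ANSIBLE_FORCE_HANDLERS via the new constant) is inherited. A minimal sketch of that precedence, with a simplified stand-in for utils.boolean and a hypothetical play dict:

    def boolean(value):
        # simplified stand-in for utils.boolean()
        return str(value).strip().lower() in ('1', 'true', 'yes', 'on')

    def resolve_force_handlers(play_ds, playbook_default):
        # the play-level key wins; otherwise fall back to the playbook/CLI/config value
        return boolean(play_ds.get('force_handlers', playbook_default))

    print(resolve_force_handlers({'hosts': 'all'}, playbook_default=True))
    # True (inherited from the playbook level)
    print(resolve_force_handlers({'hosts': 'all', 'force_handlers': False}, playbook_default=True))
    # False (the play overrides the playbook default)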