Creating playbook executor and dependent classes

James Cammarata 2014-11-14 16:14:08 -06:00
commit 62d79568be
158 changed files with 22486 additions and 2353 deletions

1  v2/samples/README.md  Normal file

@@ -0,0 +1 @@
This is a small set of samples used for testing the v2 code.

2540  v2/samples/inv_lg  Normal file

File diff suppressed because it is too large.

1270  v2/samples/inv_md  Normal file

File diff suppressed because it is too large.

254  v2/samples/inv_sm  Normal file

@@ -0,0 +1,254 @@
127.0.0.1
127.0.0.2
127.0.0.3
127.0.0.4
127.0.0.5
127.0.0.6
127.0.0.7
127.0.0.8
127.0.0.9
127.0.0.10
127.0.0.11
127.0.0.12
127.0.0.13
127.0.0.14
127.0.0.15
127.0.0.16
127.0.0.17
127.0.0.18
127.0.0.19
127.0.0.20
127.0.0.21
127.0.0.22
127.0.0.23
127.0.0.24
127.0.0.25
127.0.0.26
127.0.0.27
127.0.0.28
127.0.0.29
127.0.0.30
127.0.0.31
127.0.0.32
127.0.0.33
127.0.0.34
127.0.0.35
127.0.0.36
127.0.0.37
127.0.0.38
127.0.0.39
127.0.0.40
127.0.0.41
127.0.0.42
127.0.0.43
127.0.0.44
127.0.0.45
127.0.0.46
127.0.0.47
127.0.0.48
127.0.0.49
127.0.0.50
127.0.0.51
127.0.0.52
127.0.0.53
127.0.0.54
127.0.0.55
127.0.0.56
127.0.0.57
127.0.0.58
127.0.0.59
127.0.0.60
127.0.0.61
127.0.0.62
127.0.0.63
127.0.0.64
127.0.0.65
127.0.0.66
127.0.0.67
127.0.0.68
127.0.0.69
127.0.0.70
127.0.0.71
127.0.0.72
127.0.0.73
127.0.0.74
127.0.0.75
127.0.0.76
127.0.0.77
127.0.0.78
127.0.0.79
127.0.0.80
127.0.0.81
127.0.0.82
127.0.0.83
127.0.0.84
127.0.0.85
127.0.0.86
127.0.0.87
127.0.0.88
127.0.0.89
127.0.0.90
127.0.0.91
127.0.0.92
127.0.0.93
127.0.0.94
127.0.0.95
127.0.0.96
127.0.0.97
127.0.0.98
127.0.0.99
127.0.0.100
127.0.0.101
127.0.0.102
127.0.0.103
127.0.0.104
127.0.0.105
127.0.0.106
127.0.0.107
127.0.0.108
127.0.0.109
127.0.0.110
127.0.0.111
127.0.0.112
127.0.0.113
127.0.0.114
127.0.0.115
127.0.0.116
127.0.0.117
127.0.0.118
127.0.0.119
127.0.0.120
127.0.0.121
127.0.0.122
127.0.0.123
127.0.0.124
127.0.0.125
127.0.0.126
127.0.0.127
127.0.0.128
127.0.0.129
127.0.0.130
127.0.0.131
127.0.0.132
127.0.0.133
127.0.0.134
127.0.0.135
127.0.0.136
127.0.0.137
127.0.0.138
127.0.0.139
127.0.0.140
127.0.0.141
127.0.0.142
127.0.0.143
127.0.0.144
127.0.0.145
127.0.0.146
127.0.0.147
127.0.0.148
127.0.0.149
127.0.0.150
127.0.0.151
127.0.0.152
127.0.0.153
127.0.0.154
127.0.0.155
127.0.0.156
127.0.0.157
127.0.0.158
127.0.0.159
127.0.0.160
127.0.0.161
127.0.0.162
127.0.0.163
127.0.0.164
127.0.0.165
127.0.0.166
127.0.0.167
127.0.0.168
127.0.0.169
127.0.0.170
127.0.0.171
127.0.0.172
127.0.0.173
127.0.0.174
127.0.0.175
127.0.0.176
127.0.0.177
127.0.0.178
127.0.0.179
127.0.0.180
127.0.0.181
127.0.0.182
127.0.0.183
127.0.0.184
127.0.0.185
127.0.0.186
127.0.0.187
127.0.0.188
127.0.0.189
127.0.0.190
127.0.0.191
127.0.0.192
127.0.0.193
127.0.0.194
127.0.0.195
127.0.0.196
127.0.0.197
127.0.0.198
127.0.0.199
127.0.0.200
127.0.0.201
127.0.0.202
127.0.0.203
127.0.0.204
127.0.0.205
127.0.0.206
127.0.0.207
127.0.0.208
127.0.0.209
127.0.0.210
127.0.0.211
127.0.0.212
127.0.0.213
127.0.0.214
127.0.0.215
127.0.0.216
127.0.0.217
127.0.0.218
127.0.0.219
127.0.0.220
127.0.0.221
127.0.0.222
127.0.0.223
127.0.0.224
127.0.0.225
127.0.0.226
127.0.0.227
127.0.0.228
127.0.0.229
127.0.0.230
127.0.0.231
127.0.0.232
127.0.0.233
127.0.0.234
127.0.0.235
127.0.0.236
127.0.0.237
127.0.0.238
127.0.0.239
127.0.0.240
127.0.0.241
127.0.0.242
127.0.0.243
127.0.0.244
127.0.0.245
127.0.0.246
127.0.0.247
127.0.0.248
127.0.0.249
127.0.0.250
127.0.0.251
127.0.0.252
127.0.0.253
127.0.0.254
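
The three inventory samples (inv_sm, inv_md, inv_lg) are plain newline-separated host lists of increasing size, used to stress the executor with many hosts. A throwaway generator along these lines reproduces the small one; the script and its write_inventory helper are illustrative only and not part of the commit:

#!/usr/bin/env python
# Hypothetical helper (not part of this commit): writes a flat inventory of
# loopback addresses, one per line, in the same shape as inv_sm above.
def write_inventory(path, count):
    with open(path, "w") as f:
        for i in range(1, count + 1):
            f.write("127.0.0.%d\n" % i)

if __name__ == "__main__":
    write_inventory("inv_sm", 254)   # inv_sm ends at 127.0.0.254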

160  v2/samples/multi.py  Normal file

@@ -0,0 +1,160 @@
#!/usr/bin/env python

import time
import Queue
import traceback

from multiprocessing import Process, Manager, Pipe, RLock

from ansible.playbook.play import Play
from ansible.playbook.task import Task
from ansible.utils.debug import debug

NUM_WORKERS = 50
NUM_HOSTS   = 2500
NUM_TASKS   = 1

class Foo:
    def __init__(self, i, j):
        self._foo = "FOO_%05d_%05d" % (i, j)

    def __repr__(self):
        return self._foo

    def __getstate__(self):
        debug("pickling %s" % self._foo)
        return dict(foo=self._foo)

    def __setstate__(self, data):
        debug("unpickling...")
        self._foo = data.get('foo', "BAD PICKLE!")
        debug("unpickled %s" % self._foo)

def results(pipe, workers):
    cur_worker = 0

    def _read_worker_result(cur_worker):
        result = None
        starting_point = cur_worker
        while True:
            (worker_prc, main_pipe, res_pipe) = workers[cur_worker]
            cur_worker += 1
            if cur_worker >= len(workers):
                cur_worker = 0

            if res_pipe[1].poll(0.01):
                debug("worker %d has data to read" % cur_worker)
                result = res_pipe[1].recv()
                debug("got a result from worker %d: %s" % (cur_worker, result))
                break

            if cur_worker == starting_point:
                break

        return (result, cur_worker)

    while True:
        result = None
        try:
            (result, cur_worker) = _read_worker_result(cur_worker)
            if result is None:
                time.sleep(0.01)
                continue

            pipe.send(result)
        except (IOError, EOFError, KeyboardInterrupt), e:
            debug("got a breaking error: %s" % e)
            break
        except Exception, e:
            debug("EXCEPTION DURING RESULTS PROCESSING: %s" % e)
            traceback.print_exc()
            break

def worker(main_pipe, res_pipe):
    while True:
        foo = None
        try:
            if main_pipe.poll(0.01):
                foo = main_pipe.recv()
                time.sleep(0.07)
                res_pipe.send(foo)
            else:
                time.sleep(0.01)
        except (IOError, EOFError, KeyboardInterrupt), e:
            debug("got a breaking error: %s" % e)
            break
        except Exception, e:
            debug("EXCEPTION DURING WORKER PROCESSING: %s" % e)
            traceback.print_exc()
            break

workers = []
for i in range(NUM_WORKERS):
    (main_p1, main_p2) = Pipe()
    (res_p1, res_p2) = Pipe()
    worker_p = Process(target=worker, args=(main_p2, res_p1))
    worker_p.start()
    workers.append((worker_p, (main_p1, main_p2), (res_p1, res_p2)))

in_p, out_p = Pipe()
res_p = Process(target=results, args=(in_p, workers))
res_p.start()

def send_data(obj):
    global cur_worker
    global workers
    global pending_results

    (w_proc, main_pipe, res_pipe) = workers[cur_worker]
    cur_worker += 1
    if cur_worker >= len(workers):
        cur_worker = 0

    pending_results += 1
    main_pipe[0].send(obj)

def _process_pending_results():
    global out_p
    global pending_results

    try:
        #p_lock.acquire()
        while out_p.poll(0.01):
            result = out_p.recv()
            debug("got final result: %s" % (result,))
            pending_results -= 1
    finally:
        #p_lock.release()
        pass

def _wait_on_pending_results():
    global pending_results
    while pending_results > 0:
        debug("waiting for pending results (%d left)" % pending_results)
        _process_pending_results()
        time.sleep(0.01)

debug("starting")

cur_worker      = 0
pending_results = 0

sample_play = Play()

for i in range(NUM_TASKS):
    for j in range(NUM_HOSTS):
        debug("queuing %d, %d" % (i, j))
        send_data(Task().load(dict(name="task %d %d" % (i,j), ping=""), sample_play))
        debug("done queuing %d, %d" % (i, j))
        _process_pending_results()

debug("waiting for the results to drain...")

_wait_on_pending_results()

in_p.close()
out_p.close()
res_p.terminate()

for (w_p, main_pipe, res_pipe) in workers:
    res_pipe[1].close()
    res_pipe[0].close()
    main_pipe[1].close()
    main_pipe[0].close()
    w_p.terminate()

debug("done")

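multi.py above wires up a pool of worker processes with multiprocessing.Pipe: the parent hands objects out round-robin over per-worker task pipes, a dedicated results process polls every worker's result pipe and forwards whatever arrives back to the parent, and the parent counts pending results until everything drains. The following is a minimal, modernized sketch of that Pipe-based round-robin pattern (Python 3 syntax, no Ansible objects, sentinel shutdown instead of terminate()); it is illustrative only, not the committed code:

#!/usr/bin/env python3
# Minimal sketch of the Pipe-based round-robin pool exercised by multi.py.
# Assumptions: Python 3, plain string payloads, no Ansible objects.
import time
from multiprocessing import Pipe, Process

def worker(task_conn, result_conn):
    # Echo each task back after a simulated unit of work.
    while True:
        task = task_conn.recv()
        if task is None:              # sentinel: shut down cleanly
            break
        time.sleep(0.01)
        result_conn.send("done: %s" % task)

if __name__ == "__main__":
    workers = []
    for _ in range(4):
        t_parent, t_child = Pipe()
        r_parent, r_child = Pipe()
        p = Process(target=worker, args=(t_child, r_child))
        p.start()
        workers.append((p, t_parent, r_parent))

    # Round-robin dispatch, then drain results from whichever pipe has data.
    pending = 0
    for i in range(20):
        _, t_parent, _ = workers[i % len(workers)]
        t_parent.send("task %d" % i)
        pending += 1

    while pending:
        for _, _, r_parent in workers:
            if r_parent.poll(0.01):
                print(r_parent.recv())
                pending -= 1

    for p, t_parent, _ in workers:
        t_parent.send(None)           # ask each worker to exit
        p.join()
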
175  v2/samples/multi_queues.py  Normal file

@@ -0,0 +1,175 @@
#!/usr/bin/env python

import sys
import time
import Queue
import traceback
import multiprocessing

from ansible.inventory import Inventory
from ansible.inventory.host import Host
from ansible.playbook.play import Play
from ansible.playbook.task import Task
from ansible.executor.connection_info import ConnectionInformation
from ansible.executor.task_executor import TaskExecutor
from ansible.executor.task_result import TaskResult
from ansible.parsing import DataLoader
from ansible.vars import VariableManager
from ansible.utils.debug import debug

NUM_WORKERS = 20
NUM_HOSTS   = 1778
NUM_TASKS   = 1

def results(final_q, workers):
    cur_worker = 0

    def _read_worker_result(cur_worker):
        result = None
        starting_point = cur_worker
        while True:
            (worker_prc, main_q, res_q) = workers[cur_worker]
            cur_worker += 1
            if cur_worker >= len(workers):
                cur_worker = 0

            try:
                if not res_q.empty():
                    debug("worker %d has data to read" % cur_worker)
                    result = res_q.get()
                    debug("got a result from worker %d: %s" % (cur_worker, result))
                    break
            except:
                pass

            if cur_worker == starting_point:
                break

        return (result, cur_worker)

    while True:
        result = None
        try:
            (result, cur_worker) = _read_worker_result(cur_worker)
            if result is None:
                time.sleep(0.01)
                continue

            final_q.put(result, block=False)
        except (IOError, EOFError, KeyboardInterrupt), e:
            debug("got a breaking error: %s" % e)
            break
        except Exception, e:
            debug("EXCEPTION DURING RESULTS PROCESSING: %s" % e)
            traceback.print_exc()
            break

def worker(main_q, res_q, loader):
    while True:
        task = None
        try:
            if not main_q.empty():
                (host, task, task_vars, conn_info) = main_q.get(block=False)
                executor_result = TaskExecutor(host, task, task_vars, conn_info, loader).run()
                debug("executor result: %s" % executor_result)
                task_result = TaskResult(host, task, executor_result)
                res_q.put(task_result)
            else:
                time.sleep(0.01)
        except Queue.Empty:
            pass
        except (IOError, EOFError, KeyboardInterrupt), e:
            debug("got a breaking error: %s" % e)
            break
        except Exception, e:
            debug("EXCEPTION DURING WORKER PROCESSING: %s" % e)
            traceback.print_exc()
            break

loader = DataLoader()

workers = []
for i in range(NUM_WORKERS):
    main_q = multiprocessing.Queue()
    res_q  = multiprocessing.Queue()
    worker_p = multiprocessing.Process(target=worker, args=(main_q, res_q, loader))
    worker_p.start()
    workers.append((worker_p, main_q, res_q))

res_q = multiprocessing.Queue()
res_p = multiprocessing.Process(target=results, args=(res_q, workers))
res_p.start()

def send_data(obj):
    global cur_worker
    global workers
    global pending_results

    (w_proc, main_q, wrkr_q) = workers[cur_worker]
    cur_worker += 1
    if cur_worker >= len(workers):
        cur_worker = 0

    pending_results += 1
    main_q.put(obj, block=False)

def _process_pending_results():
    global res_q
    global pending_results

    while not res_q.empty():
        try:
            result = res_q.get(block=False)
            debug("got final result: %s" % (result,))
            pending_results -= 1
        except Queue.Empty:
            pass

def _wait_on_pending_results():
    global pending_results
    while pending_results > 0:
        debug("waiting for pending results (%d left)" % pending_results)
        _process_pending_results()
        time.sleep(0.01)

debug("starting")

cur_worker      = 0
pending_results = 0

var_manager = VariableManager()

debug("loading inventory")
inventory = Inventory(host_list='/tmp/med_inventory', loader=loader, variable_manager=var_manager)
hosts = inventory.get_hosts()[:]
debug("done loading inventory")

ci = ConnectionInformation()
ci.connection = 'local'

for i in range(NUM_TASKS):
    #for j in range(NUM_HOSTS):
    for h in hosts:
        debug("queuing %s %d" % (h, i))
        #h = Host(name="host%06d" % j)
        t = Task().load(dict(name="task %d" % (i,), debug="msg='hello from %s, %d'" % (h,i)))
        #t = Task().load(dict(name="task %d" % (i,), ping=""))
        #task_vars = var_manager.get_vars(loader=loader, host=h, task=t)
        task_vars = dict()
        new_t = t.copy()
        new_t.post_validate(task_vars)
        send_data((h, t, task_vars, ci))
        debug("done queuing %s %d" % (h, i))
        _process_pending_results()

debug("waiting for the results to drain...")

_wait_on_pending_results()

res_q.close()
res_p.terminate()

for (w_p, main_q, wrkr_q) in workers:
    main_q.close()
    wrkr_q.close()
    w_p.terminate()

debug("done")

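multi_queues.py above swaps the raw pipes for per-worker multiprocessing.Queue pairs and pushes real TaskExecutor work through them, tearing the workers down with terminate(). Below is a minimal sketch of the same queue-based fan-out, simplified to a single shared task queue and a sentinel-based shutdown instead of terminate() (illustrative only, no Ansible objects):

#!/usr/bin/env python3
# Sketch of the Queue-based variant from multi_queues.py, with two simplifying
# tweaks: one shared task queue instead of one queue per worker, and a None
# sentinel per worker instead of terminate(). Illustrative only.
import multiprocessing

def worker(task_q, result_q):
    while True:
        item = task_q.get()            # blocks until a task (or sentinel) arrives
        if item is None:
            break
        host, task = item
        result_q.put((host, "ok: %s" % task))

if __name__ == "__main__":
    task_q = multiprocessing.Queue()
    result_q = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=worker, args=(task_q, result_q))
             for _ in range(4)]
    for p in procs:
        p.start()

    jobs = [("127.0.0.%d" % i, "ping") for i in range(1, 21)]
    for job in jobs:
        task_q.put(job)
    for _ in procs:                    # one sentinel per worker
        task_q.put(None)

    for _ in jobs:                     # collect exactly as many results as jobs
        print(result_q.get())
    for p in procs:
        p.join()

Sentinels let each worker finish its in-flight task and exit on its own, which avoids the abrupt teardown that terminate() forces in the sample scripts.
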
@@ -0,0 +1 @@
- debug: msg="here we are in the role, foo={{foo}}"

5  v2/samples/src  Normal file

@@ -0,0 +1,5 @@
num_retries: 2
frag 1
frag 2
frag 3
vars_file_var: "this is in a vars file"

@@ -0,0 +1,4 @@
- hosts: all
  gather_facts: no
  tasks:
    - debug: msg="hi"

@@ -0,0 +1,5 @@
- hosts: all
  gather_facts: no
  tasks:
    #- debug: msg="hi"
    - ping:

@@ -0,0 +1,7 @@
- hosts: localhost
  tasks:
    - debug: msg="this is play 1"

- hosts: localhost
  tasks:
    - debug: msg="this is play 2, facts should not have gathered"

70  v2/samples/test_pb.yml  Normal file

@@ -0,0 +1,70 @@
# will use linear strategy by default
- hosts:
  - "{{hosts|default('all')}}"
  #- ubuntu1404
  #- awxlocal
  connection: ssh
  #gather_facts: false
  #strategy: free
  #serial: 3
  vars:
    play_var: foo
    test_dict:
      a: 1
      b: 2
  vars_files:
    - testing/vars.yml
  tasks:
  - block:
    - debug: var=ansible_nodename
    when: ansible_nodename == "ubuntu1404"

  - block:
    - debug: msg="in block for {{inventory_hostname}} ({{ansible_nodename}}), group_var is {{group_var}}, host var is {{host_var}}"
      notify: foo
    - debug: msg="test dictionary is {{test_dict}}"
      when: asdf is defined
    - command: hostname
      register: hostname_result
    - debug: msg="registered result is {{hostname_result.stdout}}"
    - command: whoami
      sudo: true
      sudo_user: testing
    - assemble: src=./testing/ dest=/tmp/output.txt remote_src=no
    - copy: content="hello world\n" dest=/tmp/copy_content.out mode=600
    - command: /bin/false
      retries: "{{num_retries|default(5)}}"
      delay: 1
    - debug: msg="you shouldn't see me"
    rescue:
    - debug: msg="this is the rescue"
    - command: /bin/false
    - debug: msg="you should not see this rescue message"
    always:
    - debug: msg="this is the always block, it should always be seen"
    - command: /bin/false
    - debug: msg="you should not see this always message"

  #- debug: msg="linear task 01"
  #- debug: msg="linear task 02"
  #- debug: msg="linear task 03"
  #  with_items:
  #  - a
  #  - b
  #  - c

  handlers:
  - name: foo
    debug: msg="this is the foo handler"
  - name: bar
    debug: msg="this is the bar handler, you should not see this"

#- hosts: all
#  connection: local
#  strategy: free
#  tasks:
#  - ping:
#  - command: /bin/false
#  - debug: msg="free task 01"
#  - debug: msg="free task 02"
#  - debug: msg="free task 03"

8  v2/samples/test_role.yml  Normal file

@@ -0,0 +1,8 @@
- hosts: ubuntu1404
  gather_facts: no
  vars:
    foo: "BAD!!"
  roles:
    - { role: test_role, foo: bar }
  tasks:
    - debug: msg="done"

@@ -0,0 +1 @@
num_retries: 2

1  v2/samples/testing/frag1  Normal file

@@ -0,0 +1 @@
frag 1

1  v2/samples/testing/frag2  Normal file

@@ -0,0 +1 @@
frag 2

1  v2/samples/testing/frag3  Normal file

@@ -0,0 +1 @@
frag 3

@@ -0,0 +1 @@
vars_file_var: "this is in a vars file"